#!/bin/bash
# Install script for Anaconda environments on macOS and linux.
# This script is not supposed to be called directly, but should be run by:
#
# $ cd <path to ideep, e.g. ~/ideep>
# $ conda build conda
#
#
# If you're debugging this, it may be useful to use the env that conda build is
# using:
# $ cd <anaconda_root>/conda-bld/ideep_<timestamp>
# $ source activate _h_env_... # some long path with lots of placeholders
#
# Also, failed builds will accumulate those ideep_<timestamp> directories. You
# can remove them after a successful build with
# $ conda build purge
#
git submodule update --init
mkdir build
cd build
cmake -DCMAKE_INSTALL_PREFIX=$HOME/.local ..
cd ../python
python setup.py install
|
#!/bin/bash
curl -sc /tmp/cookie "https://drive.google.com/uc?export=download&id=1VRtI6_5gQZINHm1YsNrnpipscQQm0niC" > /dev/null
CODE="$(awk '/_warning_/ {print $NF}' /tmp/cookie)"
curl -Lb /tmp/cookie "https://drive.google.com/uc?export=download&confirm=${CODE}&id=1VRtI6_5gQZINHm1YsNrnpipscQQm0niC" -o resources.tar.gz
tar -zxvf resources.tar.gz
rm resources.tar.gz
echo Download finished.
|
#!/bin/bash
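# NOTE: order matters below. A pattern that is a prefix of a longer pattern
# (e.g. HILT_ANDROID vs HILT_ANDROID_TESTING) must come after the longer one,
# otherwise the shorter substitution fires first and mangles the longer token.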
find -iname build.gradle | xargs sed -i "s/AGP_LATEST/libs.androidGradlePlugin/"
find -iname build.gradle | xargs sed -i "s/LINT_CORE/libs.androidLint/"
find -iname build.gradle | xargs sed -i "s/LINT_API_LATEST/libs.androidLintApi/"
find -iname build.gradle | xargs sed -i "s/LINT_TESTS/libs.androidLintTests/"
find -iname build.gradle | xargs sed -i "s/AUTO_COMMON/libs.autoCommon/"
find -iname build.gradle | xargs sed -i "s/AUTO_SERVICE_ANNOTATIONS/libs.autoServiceAnnotations/"
find -iname build.gradle | xargs sed -i "s/AUTO_SERVICE_PROCESSOR/libs.autoService/"
find -iname build.gradle | xargs sed -i "s/AUTO_VALUE_ANNOTATIONS/libs.autoValueAnnotations/"
find -iname build.gradle | xargs sed -i "s/AUTO_VALUE_PARCEL/libs.autoValueParcel/"
find -iname build.gradle | xargs sed -i "s/AUTO_VALUE/libs.autoValue/"
find -iname build.gradle | xargs sed -i "s/ANTLR/libs.antlr4/"
find -iname build.gradle | xargs sed -i "s/APACHE_COMMONS_CODEC/libs.apacheCommonsCodec/"
find -iname build.gradle | xargs sed -i "s/ASSERTJ/libs.assertj/"
find -iname build.gradle | xargs sed -i "s/CHECKER_FRAMEWORK/libs.checkerframework/"
find -iname build.gradle | xargs sed -i "s/DAGGER/libs.dagger/"
find -iname build.gradle | xargs sed -i "s/DEXMAKER_MOCKITO/libs.dexmakerMockito/"
find -iname build.gradle | xargs sed -i "s/DEXMAKER_MOCKITO_INLINE/libs.dexmakerMockitoInline/"
find -iname build.gradle | xargs sed -i "s/ESPRESSO_CONTRIB/libs.espressoContrib/"
find -iname build.gradle | xargs sed -i "s/ESPRESSO_CORE/libs.espressoCore/"
find -iname build.gradle | xargs sed -i "s/ESPRESSO_IDLING_NET/libs.espressoIdlingNet/"
find -iname build.gradle | xargs sed -i "s/ESPRESSO_IDLING_RESOURCE/libs.espressoIdlingResource/"
find -iname build.gradle | xargs sed -i "s/ESPRESSO_INTENTS/libs.espressoIntents/"
find -iname build.gradle | xargs sed -i "s/ESPRESSO_WEB/libs.espressoWeb/"
find -iname build.gradle | xargs sed -i "s/FINDBUGS/libs.findbugs/"
find -iname build.gradle | xargs sed -i "s/FIREBASE_APPINDEXING/libs.firebaseAppindexing/"
find -iname build.gradle | xargs sed -i "s/HILT_ANDROID/libs.hiltAndroid/"
find -iname build.gradle | xargs sed -i "s/HILT_ANDROID_TESTING/libs.hiltAndroidTesting/"
find -iname build.gradle | xargs sed -i "s/HILT_ANDROID_GRADLE_PLUGIN/libs.hiltAndroidGradlePlugin/"
find -iname build.gradle | xargs sed -i "s/HILT_COMPILER/libs.hiltCompiler/"
find -iname build.gradle | xargs sed -i "s/HILT_CORE/libs.hiltCore/"
find -iname build.gradle | xargs sed -i "s/INTELLIJ_ANNOTATIONS/libs.intellijAnnotations/"
find -iname build.gradle | xargs sed -i "s/JAVAPOET/libs.javapoet/"
find -iname build.gradle | xargs sed -i "s/JSQLPARSER/libs.jsqlparser/"
find -iname build.gradle | xargs sed -i "s/JSR250/libs.jsr250/"
find -iname build.gradle | xargs sed -i "s/GCM_NETWORK_MANAGER/libs.gcmNetworkManager/"
find -iname build.gradle | xargs sed -i "s/GOOGLE_COMPILE_TESTING/libs.googleCompileTesting/"
find -iname build.gradle | xargs sed -i "s/GSON/libs.gson/"
find -iname build.gradle | xargs sed -i "s/GUAVA_ANDROID/libs.guavaAndroid/"
find -iname build.gradle | xargs sed -i "s/GUAVA_LISTENABLE_FUTURE/libs.guavaListenableFuture/"
find -iname build.gradle | xargs sed -i "s/GRADLE_INCAP_HELPER_PROCESSOR/libs.gradleIncapHelperProcessor/"
find -iname build.gradle | xargs sed -i "s/GRADLE_INCAP_HELPER/libs.gradleIncapHelper/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_ANNOTATION_PROCESSING_EMBEDDABLE/libs.kotlinAnnotationProcessingEmbeddable/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_COMPILER_EMBEDDABLE/libs.kotlinCompilerEmbeddable/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_COMPILE_TESTING_KSP/libs.kotlinCompileTestingKsp/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_COMPILE_TESTING/libs.kotlinCompileTesting/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_COROUTINES_ANDROID/libs.kotlinCoroutinesAndroid/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_COROUTINES_SWING/libs.kotlinCoroutinesSwing/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_COROUTINES_CORE/libs.kotlinCoroutinesCore/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_COROUTINES_GUAVA/libs.kotlinCoroutinesGuava/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_COROUTINES_TEST/libs.kotlinCoroutinesTest/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_COROUTINES_RX2/libs.kotlinCoroutinesRx2/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_COROUTINES_RX3/libs.kotlinCoroutinesRx3/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_COMPILER_DAEMON_EMBEDDABLE/libs.kotlinDaemonEmbeddable/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_METADATA_JVM/libs.kotlinMetadataJvm/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_STDLIB_COMMON/libs.kotlinStdlibCommon/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_STDLIB_JDK8/libs.kotlinStdlibJdk8/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_STDLIB_JS/libs.kotlinStdlibJs/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_STDLIB/libs.kotlinStdlib/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_TEST_ANNOTATIONS_COMMON/libs.kotlinTestAnnotationsCommon/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_TEST_COMMON/libs.kotlinTestCommon/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_TEST_JUNIT/libs.kotlinTestJunit/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_TEST_JS/libs.kotlinTestJs/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_TEST/libs.kotlinTest/"
find -iname build.gradle | xargs sed -i "s/KOTLIN_REFLECT/libs.kotlinReflect/"
find -iname build.gradle | xargs sed -i "s/KOTLINPOET_METADATA/libs.kotlinPoetMetadata/"
find -iname build.gradle | xargs sed -i "s/KOTLINPOET_METADATA_SPECS/libs.kotlinPoetMetadataSpecs/"
find -iname build.gradle | xargs sed -i "s/KOTLINPOET_CLASSINSPECTOR_ELEMENTS/libs.kotlinPoetClassinspector/"
find -iname build.gradle | xargs sed -i "s/KOTLINPOET/libs.kotlinPoet/"
find -iname build.gradle | xargs sed -i "s/LEAKCANARY_INSTRUMENTATION/libs.leakcanaryInstrumentation/"
find -iname build.gradle | xargs sed -i "s/LEAKCANARY/libs.leakcanary/"
find -iname build.gradle | xargs sed -i "s/MATERIAL/libs.material/"
find -iname build.gradle | xargs sed -i "s/MOCKITO_CORE/libs.mockitoCore/"
find -iname build.gradle | xargs sed -i "s/MOCKITO_ANDROID/libs.mockitoAndroid/"
find -iname build.gradle | xargs sed -i "s/MOCKITO_KOTLIN/libs.mockitoKotlin/"
find -iname build.gradle | xargs sed -i "s/MULTIDEX/libs.multidex/"
find -iname build.gradle | xargs sed -i "s/NULLAWAY/libs.nullaway/"
find -iname build.gradle | xargs sed -i "s/OKHTTP_MOCKWEBSERVER/libs.okhttpMockwebserver/"
find -iname build.gradle | xargs sed -i "s/PLAY_CORE/libs.playCore/"
find -iname build.gradle | xargs sed -i "s/PLAY_SERVICES_BASE/libs.playServicesBase/"
find -iname build.gradle | xargs sed -i "s/PROTOBUF_COMPILER/libs.protobufCompiler/"
find -iname build.gradle | xargs sed -i "s/PROTOBUF_LITE/libs.protobufLite/"
find -iname build.gradle | xargs sed -i "s/PROTOBUF/libs.protobuf/"
find -iname build.gradle | xargs sed -i "s/REACTIVE_STREAMS/libs.reactiveStreams/"
find -iname build.gradle | xargs sed -i "s/RETROFIT/libs.retrofit/"
find -iname build.gradle | xargs sed -i "s/ROBOLECTRIC/libs.robolectric/"
find -iname build.gradle | xargs sed -i "s/RX_JAVA3/libs.rxjava3/"
find -iname build.gradle | xargs sed -i "s/RX_JAVA/libs.rxjava2/"
find -iname build.gradle | xargs sed -i "s/SQLDELIGHT_ANDROID/libs.sqldelightAndroid/"
find -iname build.gradle | xargs sed -i "s/SQLDELIGHT_COROUTINES_EXT/libs.sqldelightCoroutinesExt/"
find -iname build.gradle | xargs sed -i "s/ANDROIDX_TEST_CORE/libs.testCore/"
find -iname build.gradle | xargs sed -i "s/ANDROIDX_TEST_EXT_JUNIT/libs.testExtJunit/"
find -iname build.gradle | xargs sed -i "s/ANDROIDX_TEST_EXT_KTX/libs.testExtJunitKtx/"
find -iname build.gradle | xargs sed -i "s/ANDROIDX_TEST_EXT_TRUTH/libs.testExtTruth/"
find -iname build.gradle | xargs sed -i "s/ANDROIDX_TEST_MONITOR/libs.testMonitor/"
find -iname build.gradle | xargs sed -i "s/ANDROIDX_TEST_RULES/libs.testRules/"
find -iname build.gradle | xargs sed -i "s/ANDROIDX_TEST_RUNNER/libs.testRunner/"
find -iname build.gradle | xargs sed -i "s/ANDROIDX_TEST_UIAUTOMATOR/libs.testUiautomator/"
find -iname build.gradle | xargs sed -i "s/TRUTH/libs.truth/"
find -iname build.gradle | xargs sed -i "s/VIEW_BINDING/libs.viewBinding/"
find -iname build.gradle | xargs sed -i "s/WIRE_RUNTIME/libs.wireRuntime/"
find -iname build.gradle | xargs sed -i "s/XERIAL/libs.xerial/"
find -iname build.gradle | xargs sed -i "s/XPP3/libs.xpp3/"
find -iname build.gradle | xargs sed -i "s/XMLPULL/libs.xmlpull/"
find -iname build.gradle | xargs sed -i "s/JUNIT/libs.junit/"
find -iname build.gradle | xargs sed -i "s/GUAVA/libs.guava/"
|
#!/bin/bash
# Name of Schema
SCHEMA_NAME=ILA
# Name of default workspace that applications are associated with
APEX_WORKSPACE=ILA
# Comma delimited list of APEX Applications to export. Ex: 100,200
APEX_APP_IDS=123
# File extensions
# Will be used throughout the scripts to generate lists of packages, views, etc from the filesystem
EXT_PACKAGE_SPEC=pks
EXT_PACKAGE_BODY=pkb
EXT_VIEW=sql
# File Mappings
# This will be used in VSCode to allow for quick generation of a given file based on template data
# Format:
# <name>:<template_file prefix (no extension)>:<file extensions (; delimited)>:<destination directory>
#
# Definitions:
# - name: Name that will be mapped to VSCode task
# - template file: Template file prefix to use (no extension)
# - file extensions: ";" delimited list of file extensions to reference each template file
# - destination directory: where to store the new file
OBJECT_FILE_TEMPLATE_MAP=""
OBJECT_FILE_TEMPLATE_MAP="$OBJECT_FILE_TEMPLATE_MAP,package:templates/template_pkg:$EXT_PACKAGE_SPEC;$EXT_PACKAGE_BODY:packages"
OBJECT_FILE_TEMPLATE_MAP="$OBJECT_FILE_TEMPLATE_MAP,view:templates/template_view:$EXT_VIEW:views"
OBJECT_FILE_TEMPLATE_MAP="$OBJECT_FILE_TEMPLATE_MAP,data_array:templates/template_data_array:sql:data"
OBJECT_FILE_TEMPLATE_MAP="$OBJECT_FILE_TEMPLATE_MAP,data_json:templates/template_data_json:sql:data" |
#!/bin/bash
if [ "$(id -u)" != "0" ]; then
echo "This script must be run as root" 1>&2
exit 1
fi
rm -rf /usr/share/4gindicator
rm -f /usr/share/applications/4gindicator.desktop
rm -f /etc/xdg/autostart/4gindicator.desktop
rm -f /usr/local/bin/4gindicator
|
#!/bin/bash
GLE_DIR=$1
if [ "$#" -ne 1 ]; then
echo "Illegal number of parameters: need GLE src path"
exit 1
fi
# how to add a new version:
# make sure GLE repo is available.
# 1. Add 2 lines in this file, section 2.
# 2. Change pom.xml with the new version
#######################################
# 1. create Driver.java begin section #
#######################################
rm -rf com/tigergraph/*
mkdir -p com/tigergraph/client
cat <<EOT >> com/tigergraph/client/Driver.java
package com.tigergraph.client;
import java.io.OutputStream;
public class Driver {
public static void main(String[] args) {
String Gsql_Client_Version= System.getenv("GSQL_CLIENT_VERSION");
if (Gsql_Client_Version == null) {
Gsql_Client_Version = System.getProperty("GSQL_CLIENT_VERSION");
}
if (Gsql_Client_Version == null) {
Gsql_Client_Version = "";
}
// do two loops: 1st to try the given Gsql_Client_Version;
// 2nd is to try each one except the given Gsql_Client_Version
String Supported_Versions="";
for (int i = 1; i <= 2; i++) {
if ( i==2) {
System.out.println( "Supported Versions ( " + Supported_Versions +")");
System.out.println( "You may use 'GSQL_CLIENT_VERSION=v? java ...' or \n 'java -DGSQL_CLIENT_VERSION=v? ...' to specify the version");
}
EOT
########################################################################
# 2. add each version. Make sure we can CD to the first GLE directory. #
########################################################################
cd $GLE_DIR; git pull && git checkout tg_2.6.0_dev; cd -
# client path branch/tag version_string is_tag?
./new_gsql_version.sh $GLE_DIR tg_2.6.0_dev v2_6_0
./new_gsql_version.sh $GLE_DIR tg_2.5.2_dev v2_5_2
./new_gsql_version.sh $GLE_DIR tg_2.5.0_dev v2_5_0
./new_gsql_version.sh $GLE_DIR tg_2.4.1_dev v2_4_1
./new_gsql_version.sh $GLE_DIR tg_2.4.0_dev v2_4_0
./new_gsql_version.sh $GLE_DIR tg_2.3.2_dev v2_3_2
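# Example of adding a new version (illustrative only; branch and version strings are hypothetical):
#   cd $GLE_DIR; git pull && git checkout tg_2.7.0_dev; cd -
#   ./new_gsql_version.sh $GLE_DIR tg_2.7.0_dev v2_7_0
# and update pom.xml with the new version as noted above.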
######################################
# 3. finish Driver.java end section  #
######################################
cat <<EOT >> com/tigergraph/client/Driver.java
}
} // end main
}
EOT
# Continue with " mvn package "
|
#!/bin/sh
CUR_PATH=`pwd`
cd third_party
sh -x build.sh && \
cd $CUR_PATH && \
mkdir -p build && \
mkdir -p output && \
cd build && \
cmake -DCMAKE_INSTALL_PREFIX=$CUR_PATH/output .. && \
make -j 4 && \
make install && \
exit 0
exit 1
|
#!/bin/bash
# shellcheck disable=SC2059,SC2230
# makes windows work with bazel's prefix for targets
export MSYS2_ARG_CONV_EXCL="//"
fmt="\nBuilding with %s\n"
if [ "$TRAVIS_OS_NAME" = "windows" ]; then
CCWARN=('--copt=-Wall')
else
CCWARN=('--copt=-Wall' '--copt=-Wextra' '--copt=-Werror')
fi
set -e
printf "$fmt" "Uncompressed tables and 128-bit types allowed"
bazel build "${CCWARN[@]}" //doubleback/...
printf "$fmt" "64-bit only, 128-bit types not allowed"
bazel build "${CCWARN[@]}" --copt=-DRYU_ONLY_64_BIT_OPS //doubleback/...
printf "$fmt" "Compressed tables"
bazel build "${CCWARN[@]}" --copt=-DRYU_OPTIMIZE_SIZE //doubleback/...
printf "$fmt" "Compressed tables, 64-bit only, 128-bit types not allowed"
bazel build "${CCWARN[@]}" --copt=-DRYU_OPTIMIZE_SIZE --copt=-DRYU_ONLY_64_BIT_OPS //doubleback/...
printf "$fmt" "64-bit only (no 128-bit) and optimize for 32-bit platform"
bazel build "${CCWARN[@]}" --copt=-DRYU_ONLY_64_BIT_OPS --copt=-DRYU_32_BIT_PLATFORM //doubleback/...
if [ "$TRAVIS_OS_NAME" = "windows" ]; then
printf "Skipping static analysis on windows.\n"
exit 0
fi
if [ "$TRAVIS_OS_NAME" = "osx" ]; then
# note: if you want to enable osx, add llvm to the homebrew package list in .travis.yml
# and add /usr/local/opt/llvm/bin to PATH.
printf "Skipping static analysis on osx.\n"
exit 0
fi
CC=gcc
CCOPTS=('-Wall' '-Wextra' '-Werror' '-std=c99')
CHECK="scan-build"
if [ -n "$(which $CHECK)" ]; then
"$CHECK" "$CC" "${CCOPTS[@]}" -I . -c doubleback/dfmt.c -o dfmt.o
"$CHECK" "$CC" "${CCOPTS[@]}" -I . -c doubleback/dparse.c -o dparse.o
rm dfmt.o dparse.o
else
printf "Skipping static analysis, %s not installed.\n" "$CHECK"
fi
printf "Finished build.\n"
|
set -e
### Part of automated testing: only required when this script is called via vm run-command invoke in order to gather the parameters ###
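# Each argument is expected as KEY:VALUE (e.g. RESOURCE_GROUP:my-rg, an illustrative value)
# and is exported as an environment variable by the loop below.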
for args in "$@"
do
keyname=$(echo $args | cut -d ':' -f 1)
result=$(echo $args | cut -d ':' -f 2)
export $keyname=$result
done
# login using the user assigned identity.
az login --identity -u /subscriptions/$SUBSCRIPTION/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.ManagedIdentity/userAssignedIdentities/$IDENTITY_NAME
az account set --subscription $SUBSCRIPTION
az configure --defaults group=$RESOURCE_GROUP workspace=$WORKSPACE location=$LOCATION
# <build_image>
# Navigate to the samples
cd /home/samples/azureml-examples/cli/$ENV_DIR_PATH
# login to acr. Optionally, to avoid using sudo, complete the docker post install steps: https://docs.docker.com/engine/install/linux-postinstall/
sudo az acr login -n $ACR_NAME
# Build the docker image with the sample docker file
sudo docker build -t $ACR_NAME.azurecr.io/repo/$IMAGE_NAME:v1 .
# push the image to the ACR
sudo docker push $ACR_NAME.azurecr.io/repo/$IMAGE_NAME:v1
# check if the image exists in acr
az acr repository show -n $ACR_NAME --repository repo/$IMAGE_NAME
# </build_image> |
#!/bin/bash
sudo mkdir /var/www/proak-dota2
sudo chmod -R 755 /var/www/proak-dota2
cd /var/www/proak-dota2
sudo npm install
sudo service dota2 restart
|
#!/bin/bash
# linux-install-scripts
#
# Copyright 2020-2021 Mark Stephen Sobkow [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
sudo apt-get install git aptitude ssh zip unzip bzip2 gzip unrar lzip lrzip p7zip-rar rar dpkg apt libc-bin bash findutils gnupg2 gnupg-agent vim-common manpages texlive-binaries docbook-xsl-doc-html docbook-xsl-doc-pdf docbook-xsl-doc-text docbook-xsl-saxon aspell-doc spellutils exfat-utils f2fs-tools npm setserial ir-keytable cifs-utils tcl-tclreadline psutils sysstat memstat samba smbclient ssh-askpass csh dnsutils
|
#!/usr/bin/env bash
set -e
PKGS=$(go list ./... | grep -v '/vendor')
echo "mode: atomic" > coverage.txt
for pkg in ${PKGS[@]}; do
go test -v -timeout 30m -race -coverprofile=profile.out -covermode=atomic "$pkg"
if [ -f profile.out ]; then
tail -n +2 profile.out >> coverage.txt;
rm profile.out
fi
done
|
if [ $UID -ne 0 ]; then
sudo ../tomcat/bin/startup.sh
exit
fi
exec ../tomcat/bin/startup.sh
|
#!/bin/sh
#
# This file is part of LXE project. See LICENSE file for licensing information.
(
PKG=protobuf
PKG_VERSION=${PROTOBUF_VER}
PKG_SUBDIR=${PKG}-${PKG_VERSION}
PKG_FILE=${PKG_SUBDIR}.tar.gz
PKG_URL="https://github.com/google/protobuf/releases/download/v${PKG_VERSION}/protobuf-cpp-${PKG_VERSION}.tar.gz"
PKG_DEPS="gcc zlib"
[ ! -z "${GCC_EXTRA_VER}" ] && PKG_DEPS="${PKG_DEPS} gcc-extra"
CheckPkgVersion
CheckSourcesAndDependencies
if IsBuildRequired
then
PrintSystemInfo
BeginOfPkgBuild
UnpackSources
cd "${PKG_SRC_DIR}"
cp -aT "${PKG_SUBDIR}" "${PKG_SUBDIR}-native-build"
# Native build
PKG_SUBDIR=${PKG}-${PKG_VERSION}-native-build
PrepareBuild
SetBuildFlags
SetSystemPath
UnsetCrossToolchainVariables
ConfigurePkg \
--prefix="${PREFIX}" \
--enable-static \
--disable-shared
BuildPkg -j ${JOBS}
cp -a "${BUILD_DIR}/${PKG_SUBDIR}/src/protoc" "${PREFIX}/bin/"
CleanPkgBuildDir
CleanPkgSrcDir
# End of native build
PKG_SUBDIR=${PKG}-${PKG_VERSION}
PrepareBuild
SetBuildFlags "${GCC_EXTRA_VER}"
UpdateGCCSymlinks "${GCC_EXTRA_VER}"
SetCrossToolchainVariables "${GCC_EXTRA_VER}"
SetCrossToolchainPath
if IsVer1GreaterOrEqualToVer2 "${LIBTOOL_VER}" "2.4.2"
then
cd "${PKG_SRC_DIR}/${PKG_SUBDIR}"
./autogen.sh &>> "${LOG_DIR}/${PKG_SUBDIR}/configure.log"
CheckFail "${LOG_DIR}/${PKG_SUBDIR}/configure.log"
fi
ConfigureAutotoolsProject \
--with-zlib
BuildPkg -j ${JOBS}
InstallPkg install
CleanPkgBuildDir
CleanPkgSrcDir
UpdateGCCSymlinks
fi
)
|
#!/bin/bash -eu
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Run the Closure compiler on GameBuilder Javascript library to produce a final
# compiled version.
readonly GAMEBUILDER="${GAMEBUILDER:-$(pwd)}"
readonly CLOSURE_LIBRARY="${GAMEBUILDER}/third_party/closure-library"
readonly CLOSURE_BUILDER="${CLOSURE_LIBRARY}/closure/bin/build/closurebuilder.py"
readonly CLOSURE_COMPILER="${GAMEBUILDER}/third_party/closure-compiler"
readonly CLOSURE_COMPILER_JAR="${CLOSURE_COMPILER}/compiler.jar"
# Externally-passed in flags via environment.
readonly COMPILER_FLAGS="${COMPILER_FLAGS:-}"
err_exit() {
echo -e "$(basename $0): $@" >&2
exit 1
}
notfound_err_exit() {
err_exit "$1 not found; run\ntools/setup_workspace.sh"
}
if [ ! -d ${CLOSURE_LIBRARY} ]; then
notfound_err_exit "directory ${CLOSURE_LIBRARY}"
elif [ ! -x "${CLOSURE_BUILDER}" ]; then
notfound_err_exit "${CLOSURE_BUILDER}"
elif [ ! -d "${CLOSURE_COMPILER}" ]; then
notfound_err_exit "directory ${CLOSURE_COMPILER}"
elif [ ! -f "${CLOSURE_COMPILER_JAR}" ]; then
notfound_err_exit "Closure compiler jar ${CLOSURE_COMPILER_JAR}"
fi
jscomp_error_flags() {
readonly ERROR_CLASSES="\
accessControls ambiguousFunctionDecl checkRegExp checkTypes checkVars \
constantProperty deprecated es5Strict externsValidation fileoverviewTags \
globalThis internetExplorerChecks invalidCasts missingProperties \
nonStandardJsDocs strictModuleDepCheck typeInvalidation undefinedVars \
unknownDefines uselessCode visibility"
for class in $ERROR_CLASSES; do
echo "-f" "--jscomp_error=$class"
done
}
readonly COMPILER_DBG="\
-o script \
-f --compilation_level=WHITESPACE_ONLY \
-f --debug \
-f --formatting=PRETTY_PRINT \
"
readonly COMPILER_OPT1="\
-o compiled \
-f --compilation_level=SIMPLE_OPTIMIZATIONS \
-f --formatting=PRETTY_PRINT \
"
readonly COMPILER_OPT2="\
-o compiled \
-f --compilation_level=ADVANCED_OPTIMIZATIONS \
"
COMPILER_DBG_OR_OPT="${COMPILER_OPT2}"
while getopts 'dO:' OPTION "${COMPILER_FLAGS}"; do
case $OPTION in
d) COMPILER_DBG_OR_OPT="${COMPILER_DBG}"
;;
O) if [ ${OPTARG} -eq 1 ]; then
COMPILER_DBG_OR_OPT="${COMPILER_OPT1}"
elif [ ${OPTARG} -eq 2 ]; then
COMPILER_DBG_OR_OPT="${COMPILER_OPT2}"
else
err_exit "invalid flag value -O ${OPTARG} (allowed: 1, 2)"
fi
;;
*) exit 1
;;
esac
done
${CLOSURE_BUILDER} \
--compiler_jar=${CLOSURE_COMPILER_JAR} \
--root=${CLOSURE_LIBRARY} \
--root=${GAMEBUILDER}/src/js \
"$@" \
${COMPILER_DBG_OR_OPT} \
-f "--generate_exports" \
-f "--warning_level=VERBOSE" \
$(jscomp_error_flags)
|
#!/usr/bin/env bash
test "the compilation of shared files built with different flags" \
"cp ../assets/shared_file.f.mk Config.mk" \
"cp ../assets/say_hello.f hello1.f" \
"cp ../assets/say_hello.f hello2.f" \
"cp ../assets/say_hello.f hello3.f" \
"cp ../assets/greetings.f ." \
"$MAKE standard" \
"$MAKE | grep greetings.o | wc -l" \
should_output "3"
test "the recompilation of a modified program that uses shared files" \
"cp ../assets/shared_file.f.mk Config.mk" \
"cp ../assets/say_hello.f hello1.f" \
"cp ../assets/say_hello.f hello2.f" \
"cp ../assets/say_hello.f hello3.f" \
"cp ../assets/greetings.f ." \
"$MAKE standard" \
"$MAKE" \
"touch src/hello1.f" \
"$MAKE | grep \"is up to date\" | wc -l >> results.txt" \
"touch src/hello2.f" \
"$MAKE | grep \"is up to date\" | wc -l >> results.txt" \
"touch src/hello3.f" \
"$MAKE | grep \"is up to date\" | wc -l >> results.txt" \
"cat results.txt" \
should_output "2\n2\n2"
test "the recompilation of a modified shared file" \
"cp ../assets/shared_file.f.mk Config.mk" \
"cp ../assets/say_hello.f hello1.f" \
"cp ../assets/say_hello.f hello2.f" \
"cp ../assets/say_hello.f hello3.f" \
"cp ../assets/greetings.f ." \
"$MAKE standard" \
"$MAKE" \
"touch src/greetings.f" \
"$MAKE | grep \"greetings.o\" | wc -l" \
should_output "3"
test "that there is no unnecessary recompilations when using shared files" \
"cp ../assets/shared_file.f.mk Config.mk" \
"cp ../assets/say_hello.f hello1.f" \
"cp ../assets/say_hello.f hello2.f" \
"cp ../assets/say_hello.f hello3.f" \
"cp ../assets/greetings.f ." \
"$MAKE standard" \
"$MAKE" \
"$MAKE | grep \"is up to date\" | wc -l" \
should_output "3"
test "that there is a recompilation caused by the use of new flags when using shared files" \
"cp ../assets/shared_file.f.mk Config.mk" \
"cp ../assets/say_hello.f hello1.f" \
"cp ../assets/say_hello.f hello2.f" \
"cp ../assets/say_hello.f hello3.f" \
"cp ../assets/greetings.f ." \
"$MAKE standard" \
"$MAKE" \
"$MAKE CPPFLAGS=-Dbla | grep \"is up to date\" | wc -l >> results.txt" \
"$MAKE" \
"$MAKE CFLAGS=-Wall | grep \"is up to date\" | wc -l >> results.txt" \
"$MAKE" \
"$MAKE CXXFLAGS=-Wall | grep \"is up to date\" | wc -l >> results.txt" \
"$MAKE" \
"$MAKE FFLAGS=-Wall | grep \"is up to date\" | wc -l >> results.txt" \
"$MAKE" \
"cat results.txt" \
should_output "0\n3\n3\n0"
|
alias ls='exa'
alias lsa='exa -lah'
alias eagle_eye='exa --tree --level=2'
# alias eagle_eye_n='exa --tree --level=$1' # TODO: fix the "$1" parameter, it doesn't get recognized. Maybe this helps: https://github.com/koalaman/shellcheck/wiki/SC2142
alias eagle_eye_long='exa --tree --level=2 --long'
# alias eagle_eye_long_n='exa --tree --level=$1 --long' TODO: fix the same error as 'eagle_eye_n'
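# A possible fix for the TODOs above (sketch): aliases cannot take positional
# parameters (shellcheck SC2142), so functions can be used instead, e.g.:
# eagle_eye_n() { exa --tree --level="$1"; }
# eagle_eye_long_n() { exa --tree --level="$1" --long; }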
|
# Autocompletion for kops (Kubernetes Operations),
# the command line interface to get a production grade
# Kubernetes cluster up and running
# Author: https://github.com/nmrony
if [ $commands[kops] ]; then
source <(kops completion zsh)
fi
|
#wget https://s3.amazonaws.com/nyc-tlc/trip+data/yellow_tripdata_2016-06.csv
echo "Get the RevoScaleR .xdf file in your browser: http://www.mediafire.com/file/q12fkyko059uies/yellow_tripdata_2016-06.xdf/file"
|
#!/usr/bin/env bash
ROOT=$(cd $(dirname "${BASH_SOURCE[0]}") && cd "$(git rev-parse --show-toplevel)" && pwd)
source "${ROOT}/build-support/common.sh"
function usage() {
echo "Checks import sort order for python files, optionally fixing incorrect"
echo "sorts."
echo
echo "Usage: $0 (-h|-f)"
echo " -h print out this help message"
echo " -f instead of erroring on files with bad import sort order, fix"
echo " those files"
if (( $# > 0 )); then
die "$@"
else
exit 0
fi
}
isort_args=(
--check-only
)
while getopts "hf" opt
do
case ${opt} in
h) usage ;;
f) isort_args=() ;;
*) usage "Invalid option: -${OPTARG}" ;;
esac
done
REQUIREMENTS=(
"isort==3.9.5"
)
VENV_DIR="${ROOT}/build-support/isort.venv"
function fingerprint_data() {
openssl md5 | cut -d' ' -f2
}
function activate_venv() {
source "${VENV_DIR}/bin/activate"
}
function create_venv() {
rm -rf "${VENV_DIR}"
"${ROOT}/build-support/virtualenv" "${VENV_DIR}"
}
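# activate_isort rebuilds the venv only when the requirements change: the
# fingerprint of REQUIREMENTS is recorded in a BOOTSTRAPPED.<fingerprint>
# marker file, and an existing marker means the venv can simply be activated.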
function activate_isort() {
for req in ${REQUIREMENTS[@]}
do
fingerprint="$(echo "${fingerprint}${req}" | fingerprint_data)"
done
bootstrapped_file="${VENV_DIR}/BOOTSTRAPPED.${fingerprint}"
if ! [ -f "${bootstrapped_file}" ]
then
create_venv || die "Failed to create venv."
activate_venv || die "Failed to activate venv."
for req in ${REQUIREMENTS[@]}
do
pip install --quiet ${req} || die "Failed to install requirements from ${req}."
done
touch "${bootsrapped_file}"
else
activate_venv || die "Failed to activate venv."
fi
}
activate_isort
isort ${isort_args[@]} --recursive src tests pants-plugins examples
|
#!/bin/bash
CYAN="\e[0;36m"
CLEAR="\e[0m"
SECTION_START="\e[0Ksection_start:the_time:section_id\r\e[0K${CYAN}section_header${CLEAR}"
SECTION_END="\e[0Ksection_end:the_time:section_id\r\e[0K"
section_start() {
local section_header section_id start
start="$SECTION_START"
if [[ "$#" -eq 1 ]]; then
section_header="$1"
section_id="$(echo "$1" | tr -c '[:alnum:]\n\r' '_')"
elif [[ "$#" -eq 2 ]]; then
if [[ "$2" =~ -{0,2}collapsed ]]; then
start="${start/section_id/section_id[collapsed=true]}"
section_header="$1"
section_id="$(echo "$1" | tr -c '[:alnum:]\n\r' '_')"
else
section_header="$2"
section_id="$1"
fi
elif [[ "$#" -eq 3 && "$3" =~ /^-{0,2}collapsed$/ ]]; then
start="${start/section_id/section_id[collapsed=true]}"
section_header="$2"
section_id="$1"
else
echo "section_start should be called with 1-3 args but it was called with $#"
echo "acceptable usages:"
echo " 1. section_start \"<section-start-id>\" \"<section-header>\""
echo " 2. section_start \"<section-header>\""
echo " 3. section_start \"<section-start-id>\" \"<section-header>\" --collapse"
echo " 4. section_start \"<section-header>\" --collapse"
echo "where <section-start-id> is only alphanumeric characters and underscore and"
echo "--collapse indicates that you would like those log steps to be collapsed in the job log output by default"
exit 9
fi
start_time=$(date +%s)
start="$(echo "$start" | sed -e "s/the_time/$start_time/" -e "s/section_id/$section_id/" -e "s/section_header/$section_header/")"
echo -e "$start"
date +"[%Y-%m-%dT%H:%M:%S.%3N] section start"
}
section_end() {
local section_id end
date +"[%Y-%m-%dT%H:%M:%S.%3N] section end"
end="$SECTION_END"
if [[ "$#" -eq 1 ]]; then
section_id="$(echo "$1" | tr -c '[:alnum:]\n\r' '_')"
else
echo "section_end should be called with 1 arg but it was called with $#"
echo "acceptable usage:"
echo " 1. section_end \"<section-start-id>\""
echo " 2. section_start \"<section-header>\""
echo "where <section-start-id> or <section-header> is that of the section this marks the end of"
exit 9
fi
end_time=$(date +%s)
end="$(echo "$end" | sed -e "s/the_time/$end_time/" -e "s/section_id/$section_id/")"
echo -e "$end"
}
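# Example usage (section id and header are illustrative):
#   section_start "build_job" "Building the project" --collapsed
#   ... build steps ...
#   section_end "build_job"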
|
#!/bin/bash
SPARK_VERSION='spark-2.1.1/spark-2.1.1-bin-hadoop2.7.tgz'
# 'eth0:1' is the private IP network interface on Linode.
# Change this if deploying on a different machine or cloud.
BIND_TO_NETWORK_INTERFACE='eth0:1'
# Prepare the system to run this script.
init() {
apt-get -y update
apt-get -y install tmux jq curl wget tar bc
mkdir -p /root/spark
mkdir -p /root/spark/data
mkdir -p /root/spark/data/historydata
mkdir -p /root/spark/data/targetdata
mkdir -p /root/spark/data/spark-events
mkdir -p /root/spark/data/spark-csv
}
install_master() {
install_master_node_prerequisites
install_recommender_app
install_spark "/root/spark/stockspark"
# Since the master script will require non-interactive ssh access to slaves when a job is started,
# we'll create a private key here.
if [ ! -f /root/.ssh/id_rsa ]; then
ssh-keygen -t rsa -b 4096 -N "" -f /root/.ssh/id_rsa
fi
}
install_master_node_prerequisites() {
# Install sbt repo
echo "deb https://dl.bintray.com/sbt/debian /" | tee /etc/apt/sources.list.d/sbt.list
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2EE0EA64E40A89B84B2DF73499E82A75642AC823
apt-get -y update
apt-get -y install openjdk-8-jre-headless dstat python3 python3-pip git
# Create Python environment for recommender app.
pip3 install google-api-python-client beautifulsoup4 feedparser PyYAML requests
apt-get -y install sbt
}
# $1 -> Target installation directory where Spark will be installed
install_spark() {
if [ -z "$1" ]; then
echo "Error: Missing target directory"
return 1
fi
local target_dir="$1"
mkdir -p "$target_dir"
# Get the Apache mirror path.
cd "$target_dir"
local mirror_info=$(curl "https://www.apache.org/dyn/closer.lua/spark/$SPARK_VERSION?as_json=1")
local spark_url="$(echo "$mirror_info" | jq --raw-output '.preferred')"
local spark_path="$(echo "$mirror_info" | jq --raw-output '.path_info')"
spark_url="$spark_url$spark_path"
echo "Downloading: $spark_url"
wget -O spark.tgz "$spark_url"
tar -xzv -f spark.tgz
local archive_root_dir="$(tar -tzf spark.tgz|head -1|sed 's|/.*||')"
local installed_dir="$(echo "$target_dir/$archive_root_dir"|tr -s '/')"
cp "/root/spark/recommender/deploy/spark-defaults.conf" "$archive_root_dir/conf/"
cp "/root/spark/recommender/deploy/metrics.properties" "$archive_root_dir/conf/"
configure_spark_memory "$installed_dir"
echo "Spark installed in: $installed_dir"
}
# $1 -> Spark installation directory.
configure_spark_memory() {
# For cluster mode, the settings will go into conf/spark-defaults.conf and
# conf/spark-env.sh.
# In cluster mode, there are 4 processes running on master node:
# 1) The Master daemon
# 2) The Worker daemon
# 3) The Executor process
# 4) The Driver process
#
# - use SPARK_DAEMON_MEMORY to set Xmx for master daemon
# - the same SPARK_DAEMON_MEMORY sets Xmx for worker daemon.
# - use SPARK_WORKER_MEMORY to set maximum memory across all executors. In our case, there's just 1 executor.
# - use SPARK_EXECUTOR_MEMORY or "spark.executor.memory" to set Xmx for executor process.
# - use "--driver-memory" or "spark.driver.memory" to set Xmx for driver process.
#
# Master and Worker daemons are only for job management, resource allocation, etc. So they don't need high Xmx.
# Executor does all the computation tasks; it should have high Xmx.
# But specifically for our LDA app, there is a resource-heavy collect in the driver process. So driver process
# too should have high Xmx.
# The split will be
# 1GB for Master Daemon,
# 1GB for Worker daemon,
# 8GB for other OS processes, NFS and caches,
# (RAM-10)/2 each for executor
# (RAM-10)/2 for driver process.
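# Example (illustrative): on a 32GB node system_ram_mb is ~32768, so after
# reserving 8192 (OS) + 1024 (master) + 1024 (worker) the remaining 22528MB
# is split as 11264MB for the executor and 11264MB for the driver.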
local spark_dir="$1"
local system_ram_mb=$(grep MemTotal /proc/meminfo | awk '{print $2}' | xargs -I {} echo "{}/1024" | bc)
local other_mem_mb=8192
local master_mem_mb=1024
local worker_mem_mb=1024
local remaining_mem_mb=$(($system_ram_mb - $other_mem_mb - $master_mem_mb - $worker_mem_mb))
local executor_mem_mb=$(echo "scale=0;$remaining_mem_mb / 2" | bc)
local driver_mem_mb=$(echo "scale=0;$remaining_mem_mb / 2" | bc)
local env_file="$spark_dir/conf/spark-env.sh"
cp "$spark_dir/conf/spark-env.sh.template" "$env_file"
echo "export SPARK_DAEMON_MEMORY=$master_mem_mb"M >> "$env_file"
echo "export SPARK_WORKER_MEMORY=$executor_mem_mb"M >> "$env_file"
echo "export SPARK_EXECUTOR_MEMORY=$executor_mem_mb"M >> "$env_file"
echo "export SPARK_DRIVER_MEMORY=$driver_mem_mb"M >> "$env_file"
}
install_recommender_app() {
git clone https://github.com/pathbreak/content-recommender-spark-lda /root/spark/recommender
chmod +x /root/spark/recommender/app/recommender_app.py
sed -i 's|^HISTORY_DIR.*$|HISTORY_DIR: /root/spark/data/historydata|' /root/spark/recommender/app/conf/conf.yml
sed -i 's|^TARGET_DIR.*$|TARGET_DIR: /root/spark/data/targetdata|' /root/spark/recommender/app/conf/conf.yml
# Build the LDA spark driver JAR.
cd /root/spark/recommender/spark
sbt compile
sbt assembly
cp target/scala-2.11/lda-prototype.jar /root/spark/
}
# Runs the LDA job in local (i.e., non-cluster) mode on the master itself.
# $1 -> The directory where a spark installation exists to use for running this spark job.
# $2 -> Training data directory (under /root/spark/data/historydata/)
# $3 -> Targets data directory (under /root/spark/data/targetdata)
# $4 -> Number of topics (k)
# $5 -> Number of iterations
# $6 -> Algorithm to use. "online"|"em"
# $7 -> Path of a custom stop word list file
run_lda_local() {
local spark_dir="$1"
if [ ! -f "$spark_dir/bin/spark-submit" ]; then
echo "Error: $spark_dir does not seem to be a Spark installation."
return 1
fi
# Runs the LDA spark app in local execution mode on the master node.
# The important settings are:
# --driver-memory MEM : Sets maximum heap space -Xmx to MEM
# --conf spark.driver.maxResultSize=SIZE: Some of the results like collect/take result in massive
# results that exceed the default 1G size.
local system_ram_mb=$(grep MemTotal /proc/meminfo | awk '{print $2}' | xargs -I {} echo "{}/1024" | bc)
# Set driver max heap space to 70% of system_ram_mb. For bc to give integer results,
# the operation has to be a division.
local driver_max_heap_mb=$(echo "scale=0;$system_ram_mb * 7/10" | bc)
local max_result_size_mb=$(echo "scale=0;$driver_max_heap_mb * 1/2" | bc)
local run_dir="/root/spark/data/master-$(date +%Y-%m-%d-%H-%M-%S)"
start_system_metrics "$run_dir"
"$spark_dir/bin/spark-submit" --driver-memory "$driver_max_heap_mb"M \
--conf spark.driver.maxResultSize="$max_result_size_mb"M \
/root/spark/lda-prototype.jar \
"$2" "$3" "$4" "$5" "$6" 2>&1 | tee -a "$run_dir/stdlogs"
# Wait for sometime before stopping metrics collection, because memory and disk
# cleanup take some time.
sleep 15
stop_system_metrics
}
# Starts the Spark master and a slave daemon on this machine's private IP address.
# $1 -> The directory where a spark installation exists.
start_cluster() {
local spark_dir="$1"
if [ ! -f "$spark_dir/sbin/start-master.sh" ]; then
echo "Error: $spark_dir does not seem to be a Spark installation."
return 1
fi
# Master daemon uses SPARK_LOCAL_IP only for port 8080 (WebUI),
# and --host for ports 6066 (REST endpoint) and 7077 (service)
local private_ip=$(ip addr | grep "$BIND_TO_NETWORK_INTERFACE"$ | awk '{print $2}'|tr '/' ' ' | awk '{print $1}')
SPARK_LOCAL_IP=$private_ip SPARK_PUBLIC_DNS=$private_ip \
"$spark_dir/sbin/start-master.sh" \
"--host $private_ip"
sleep 10
SPARK_LOCAL_IP=$private_ip SPARK_PUBLIC_DNS=$private_ip \
"$spark_dir/sbin/start-slave.sh" \
"--host $private_ip" "spark://$private_ip:7077"
}
# Stops the Spark master and slave daemons on this machine.
# $1 -> The directory where a spark installation exists.
stop_cluster() {
local spark_dir="$1"
if [ ! -f "$spark_dir/sbin/stop-master.sh" ]; then
echo "Error: $spark_dir does not seem to be a Spark installation."
return 1
fi
"$spark_dir/sbin/stop-slave.sh"
"$spark_dir/sbin/stop-master.sh"
}
# Runs the LDA job in cluster mode executing tasks across all worker nodes in the cluster.
# $1 -> The directory where a spark installation exists to use for running this spark job.
# $2 -> Training data directory (under /root/spark/data/historydata/)
# $3 -> Targets data directory (under /root/spark/data/targetdata)
# $4 -> Number of topics (k)
# $5 -> Number of iterations
# $6 -> Algorithm to use. "online"|"em"
# $7 -> Path of a custom stop word list file
run_lda_cluster() {
local spark_dir="$1"
if [ ! -f "$spark_dir/bin/spark-submit" ]; then
echo "Error: $spark_dir does not seem to be a Spark installation."
return 1
fi
# Runs the LDA spark app in cluster execution mode on the master node.
# In cluster mode, all the memory settings are set via conf/spark-env.sh and conf/spark-defaults.conf
# Nothing needs to be set here.
local run_time=$(date +%Y-%m-%d-%H-%M-%S)
local run_dir="/root/spark/data/master-$run_time"
start_system_metrics "$run_dir"
start_system_metrics_on_slaves "$run_time"
local private_ip=$(ip addr | grep "$BIND_TO_NETWORK_INTERFACE"$ | awk '{print $2}'|tr '/' ' ' | awk '{print $1}')
"$spark_dir/bin/spark-submit" --master "spark://$private_ip:7077" \
/root/spark/lda-prototype.jar \
"$2" "$3" "$4" "$5" "$6" 2>&1 | tee -a "$run_dir/stdlogs"
# Wait for sometime before stopping metrics collection, because memory and disk
# cleanup take some time.
sleep 15
stop_system_metrics
stop_system_metrics_on_slaves
}
# Start system CPU and memory usage collection using dstat.
# $1 -> Output metrics to this directory
start_system_metrics() {
local report_dir="$1"
if [ -f "/root/.dstat_pid" ]; then
echo "Error: Reporting is already started. Stop it first using stop-metrics or kill dstat process and delete /root/.dstat_pid"
return 1
fi
# Since dstat appends a bunch of headers and newlines on every call by default, the CSV file becomes
# difficult to process. So prevent user from collecting to an existing file.
if [ -d "$report_dir" ]; then
echo "Error: Report directory already exists. Provide a different directory."
return 1
fi
mkdir -p "$report_dir"
# Find number of processors.
local num_cpus=$(cat /proc/cpuinfo | grep '^processor' | wc -l)
local cpu_ids="$(seq -s ',' 0 $((num_cpus - 1)))"
# dstat output columns are:
#--epoch--- -------cpu0-usage--------------cpu1-usage--------------cpu2-usage--------------cpu3-usage------- ------memory-usage-----
# epoch |usr sys idl wai hiq siq:usr sys idl wai hiq siq:usr sys idl wai hiq siq:usr sys idl wai hiq siq| used buff cach free
nohup dstat -T -c -C "$cpu_ids" -m --noheaders --output "$report_dir/dstat.csv" > /dev/null 2>&1 &
local dstat_pid=$!
echo "$dstat_pid" > "/root/.dstat_pid"
# Collect disk free metrics. This is because Spark consumes 10s of GBs of /tmp for shuffle operations.
nohup ./master.sh collect-df "$report_dir/df.csv" 5 > /dev/null 2>&1 &
local df_pid=$!
echo "$df_pid" > "/root/.df_pid"
echo "Started CPU, RAM, disk space collection to $report_dir"
return 0
}
stop_system_metrics() {
if [ -f "/root/.dstat_pid" ]; then
kill -9 "$(cat /root/.dstat_pid)"
if [ $? -eq 0 ]; then
echo "Stopped dstat metrics collection"
rm -f "/root/.dstat_pid"
else
echo "Unable to stop dstat metrics collection. Kill PID $(cat /root/.dstat_pid) manually."
fi
else
echo "Error: Does not look like dstat is running"
fi
if [ -f "/root/.df_pid" ]; then
kill -9 "$(cat /root/.df_pid)"
if [ $? -eq 0 ]; then
echo "Stopped df metrics collection"
rm -f "/root/.df_pid"
else
echo "Unable to stop df metrics collection. Kill PID $(cat /root/.df_pid) manually."
fi
else
echo "Error: Does not look like df is running"
fi
}
# $1 -> the run timestamp that master wants slaves to include in metrics directories.
start_system_metrics_on_slaves() {
while read slave_ip; do
echo "Starting metrics on $slave_ip"
local run_dir="/root/spark/data/slave-$slave_ip-$1"
ssh -i /root/.ssh/id_rsa "root@$slave_ip" /root/slave.sh start-metrics "$run_dir"
done < /root/slaves
}
stop_system_metrics_on_slaves() {
while read slave_ip; do
echo "Stopping metrics on $slave_ip"
ssh -i /root/.ssh/id_rsa "root@$slave_ip" /root/slave.sh stop-metrics
done < /root/slaves
}
# Periodically collects disk free stats for /dev/root
# $1 -> Report file
# $2 -> Interval between collections
collect_df() {
report_file=$1
interval=$2
while sleep "$interval"; do
echo "$(date +%s) $(df -h | grep /dev/root)" | awk '{printf "%s,%s,%s,%s\n",$1,$3,$4,$5}' >> "$report_file"
done
}
enable_nfs_sharing() {
apt-get -y install nfs-kernel-server
systemctl start nfs-kernel-server.service
}
disable_nfs_sharing() {
systemctl stop nfs-kernel-server.service
}
# Add a Spark slave as permitted NFS client. This is called by the slave itself
# when it's joining the cluster.
# $1 => The private IP address of client. Example: 192.168.11.239
add_slave() {
ssh-copy-id -i /root/.ssh/id_rsa "root@$1"
add_nfs_client "$1"
touch "/root/slaves"
grep "$1" "/root/slaves"
if [ $? -ne 0 ]; then
echo "$1" >> "/root/slaves"
fi
}
# Remove a Spark slave as permitted NFS client.
# $1 => The private IP address of client. Example: 192.168.11.239
remove_slave() {
remove_nfs_client "$1"
sed -i -r "/^$1$/ d" "/root/slaves"
}
# Add a Spark slave as permitted NFS client.
# $1 => The private IP address of client.
add_nfs_client() {
# /etc/exports allows the same directory to be repeated on multiple lines for different clients.
# This makes grepping and adding or replacing much easier compared to having all clients on a
# single line.
# The /17 subnet after slave's IP address is required.
local worker_ip="$1"
grep '/root/spark/data' /etc/exports | grep $worker_ip
if [ $? -ne 0 ]; then
echo "/root/spark/data $worker_ip/17(rw,sync,no_subtree_check,no_root_squash)" > /etc/exports
exportfs -a
fi
}
# Remove a Spark slave as permitted NFS client.
# $1 => The private IP address of client.
remove_nfs_client() {
# /etc/exports allows the same directory to be repeated on multiple lines for different clients.
# This makes grepping and adding or replacing much easier compared to having all clients on a
# single line.
# The /17 subnet after slave's IP address is required.
local worker_ip="$1"
grep "$worker_ip" /etc/exports
if [ $? -eq 0 ]; then
sed -i -r "\|/root/spark/data.+$worker_ip.*$| d" /etc/exports
exportfs -r -v
fi
}
# For Spark to be able to use native linear algebra libraries like OpenBLAS or ATLAS,
# it requires some additional JARs that are not packaged with it.
# This function installs them under SPARK_DIR/jars/
#
# $1 -> The Spark installation directory. It should have ./jars/ under it.
install_spark_native_stack() {
local spark_dir="$1"
if [ -z "$spark_dir" ]; then
echo "Error: Missing Spark installation directory"
return 1
fi
if [ ! -d "$spark_dir/jars" ]; then
echo "Error: $spark_dir does not seem to be a Spark installation"
return 1
fi
# To integrate with native stacks, we need these additional JARS under SPARK_DIR/jars/
# 1. com.github.fommil.netlib:native_system-java:1.1
# 2. com.github.fommil.netlib:netlib-native_system-linux-x86_64:1.1
# 3. com.github.fommil:jniloader:1.1
wget -P "$spark_dir/jars/" \
'http://repo1.maven.org/maven2/com/github/fommil/netlib/native_system-java/1.1/native_system-java-1.1.jar' \
'http://repo1.maven.org/maven2/com/github/fommil/netlib/netlib-native_system-linux-x86_64/1.1/netlib-native_system-linux-x86_64-1.1-natives.jar' \
'http://repo1.maven.org/maven2/com/github/fommil/jniloader/1.1/jniloader-1.1.jar'
}
case "$1" in
# Prepare the system to run this script.
init)
init
;;
install-master)
install_master
;;
install-prereqs)
install_master_node_prerequisites
;;
install-spark)
install_spark "$2"
;;
config-memory)
configure_spark_memory "$2"
;;
install-spark-native)
install_spark_native_stack "$2"
;;
run-local)
run_lda_local "${@:2}"
;;
start-cluster)
start_cluster "$2"
;;
stop-cluster)
stop_cluster "$2"
;;
add-slave)
add_slave "$2"
;;
remove-slave)
remove_slave "$2"
;;
run-cluster)
run_lda_cluster "${@:2}"
;;
start-metrics)
start_system_metrics "$2"
;;
stop-metrics)
stop_system_metrics
;;
collect-df)
collect_df "$2" "$3"
;;
enable-nfs)
enable_nfs_sharing
;;
disable-nfs)
disable_nfs_sharing
;;
*)
echo "Unknown command: $1"
;;
esac
|
#!/bin/bash
trap cleanup SIGINT
list_descendants ()
{
local pids=$(ps -o pid= -o ppid=)
local children
for i in $1
do
local tmp=$(echo "$pids" | awk -v i=$i '{if($2==i){print $1}}')
children=$children" "$tmp
done
for pid in $children
do
list_descendants "$pid"
done
echo "$children"
}
cleanup() {
echo "Killing all processes."
kill $(list_descendants $$) &> /dev/null
}
die() {
echo "$1" >&2
echo
cleanup
exit 1
}
REDIS_PORT=7777
WEB_ADDR=127.0.0.1
WEB_PORT=5000
LOG_LOCATION=$(pwd)
usage() {
echo "Usage: $0 [-r REDIS_PORT] [-a WEB_ADDRESS] [-p WEB_PORT] [-l LOG_PATH]" 1>&2;
echo "optional arguments: " 1>&2;
echo " -h show this help message and exit " 1>&2;
echo " -r REDIS_PORT " 1>&2;
echo " port to use for redis server (default: $REDIS_PORT) " 1>&2;
echo " -a WEB_ADDRESS " 1>&2;
echo " address to bind the web interface (default: $WEB_ADDR), but to run from other" 1>&2;
echo " computers over the network (under VPN) can be 0.0.0.0 *WARNING* " 1>&2;
echo " -p WEB_PORT " 1>&2;
echo " port used with web address (default: $WEB_PORT) " 1>&2;
echo " -l LOG_PATH " 1>&2;
echo " the directory path to write the logs (default: $LOG_LOCATION) " 1>&2;
echo " " 1>&2;
exit 1;
}
while getopts ":r:p:a:l:" o; do
case "${o}" in
r)
REDIS_PORT=${OPTARG}
;;
p)
WEB_PORT=${OPTARG}
;;
a)
WEB_ADDR=${OPTARG}
;;
l) LOG_LOCATION=${OPTARG}
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
if [ -z "${REDIS_PORT}" ] || [ -z "${WEB_PORT}" ] || [ -z "${WEB_ADDR}" ]; then
usage
fi
REDIS_LOG="${LOG_LOCATION}/redis.log"
INTERARTIC_LOG="${LOG_LOCATION}/interartic.log"
CELERY_LOG="${LOG_LOCATION}/celery.log"
REALPATH=$(dirname "$0")
readlink "$0" && REALPATH=$(dirname "$(readlink "$0")")
ARCH=$(uname -m)
OS=$(uname -s)
if [ "${OS}" != "Darwin" ];
then
echo "This binary package is for Darwin (MacOS). You O/S is ${OS}. Trying to launch anyway - anticipating a crash!"
fi
if [[ ${ARCH} != "x86_64" && ${ARCH} != "aarch64" && ${ARCH} != "arm64" ]];
then
echo "Unsupported architecture ${ARCH}. Trying to launch anyway - anticipating a crash!"
fi
export PYTHONNOUSERSITE=1
unset PYTHONHOME
unset PYTHONPATH
cd "${REALPATH}"
echo "Starting redis server on port $REDIS_PORT. Log location: $REDIS_LOG"
( bin/redis-server --port ${REDIS_PORT} &> "${REDIS_LOG}" || die "Launching redis server on port $REDIS_PORT failed. See $REDIS_LOG" ) &
sleep 1
echo "Starting interartic on $WEB_ADDR:$WEB_PORT. Log location: $INTERARTIC_LOG"
( bin/python3.7 main.py ${REDIS_PORT} -a ${WEB_ADDR} -p ${WEB_PORT} &> "${INTERARTIC_LOG}" || die "Launching interartic on $WEB_ADDR:$WEB_PORT failed. See $INTERARTIC_LOG") &
sleep 1
echo "Starting celery. Log location: $CELERY_LOG"
( export PATH="$(pwd)/artic_bin/bin:$(pwd)/scripts:${PATH}"; export HDF5_PLUGIN_PATH="$(pwd)/lib/ont-vbz-hdf-plugin"; bin/python3.7m bin/celery worker -A main.celery -b redis://localhost:${REDIS_PORT}/0 --result-backend redis://localhost:${REDIS_PORT}/0 --concurrency=1 --loglevel=info &> "${CELERY_LOG}" || die "Launching celery failed. See ${CELERY_LOG}" ) &
sleep 1
echo ""
echo "InterARTIC is now running on your machine :)"
echo "To launch InterARTIC web interface visit http://127.0.0.1:${WEB_PORT} on your browser"
echo "To keep your InterARTIC active this terminal must remain open."
echo "To terminate InterARTIC type CTRL-C or close the terminal."
wait
|
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
set -ex
cd $(dirname $0)
unzip -o "$EXTERNAL_GIT_ROOT/input_artifacts/csharp_nugets.zip" -d TestNugetFeed
./update_version.sh auto
cd DistribTest
# TODO(jtattermusch): make sure we don't pollute the global nuget cache with
# the nugets being tested.
dotnet restore
dotnet build
dotnet publish
# .NET 4.5 target after dotnet build
mono bin/Debug/net45/*-x64/DistribTest.exe
# .NET 4.5 target after dotnet publish
mono bin/Debug/net45/*-x64/publish/DistribTest.exe
# .NET Core target after dotnet build
dotnet exec bin/Debug/netcoreapp1.0/DistribTest.dll
# .NET Core target after dotnet publish
dotnet exec bin/Debug/netcoreapp1.0/publish/DistribTest.dll
|
#!/bin/bash
#bl2 file size 41K, bl21 file size 3K (file size does not equal runtime size)
#total 44K
#after the encrypt process, bl2 gains a 4K header and its 4K tail is cut off
#bl30 limit 41K
#bl301 limit 12K
#bl2 limit 41K
#bl21 limit 3K, but the encrypt tool needs a 48K bl2.bin, so fix it to 7168 bytes.
#$7:name flag
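# Positional arguments (inferred from the usage below):
#   $1: input blx binary      $2: temp zero-fill file      $3: padded blx output
#   $4: input blx01 binary    $5: padded blx01 output      $6: combined output
#   $7: name flag (bl30 or bl2)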
if [ "$7" = "bl30" ]; then
declare blx_bin_limit=40960
declare blx01_bin_limit=13312
elif [ "$7" = "bl2" ]; then
declare blx_bin_limit=57344
declare blx01_bin_limit=4096
else
echo "blx_fix name flag not supported!"
exit 1
fi
# blx_size: blx.bin size, zero_size: fill with zeros
declare -i blx_size=`du -b $1 | awk '{print int($1)}'`
declare -i zero_size=$blx_bin_limit-$blx_size
dd if=/dev/zero of=$2 bs=1 count=$zero_size
cat $1 $2 > $3
rm $2
declare -i blx01_size=`du -b $4 | awk '{print int($1)}'`
declare -i zero_size_01=$blx01_bin_limit-$blx01_size
dd if=/dev/zero of=$2 bs=1 count=$zero_size_01
cat $4 $2 > $5
cat $3 $5 > $6
rm $2
exit 0
|
#!/bin/bash -e
source "/etc/openshift/node.conf"
source ${CARTRIDGE_BASE_PATH}/abstract/info/lib/util
# Import Environment Variables
for f in ~/.env/*
do
. $f
done
cartridge_type="jenkins-1.4"
cartridge_dir=$OPENSHIFT_HOMEDIR/$cartridge_type
translate_env_vars
if ! [ $# -eq 1 ]
then
echo "Usage: \$0 [start|restart|graceful|graceful-stop|stop]"
exit 1
fi
validate_run_as_user
. app_ctl_pre.sh
isrunning() {
# Check for running app
pid=`pgrep -f ".*java.*-jar.*jenkins.war.*--httpListenAddress=${OPENSHIFT_INTERNAL_IP}.*" 2> /dev/null`
if [ -n "$pid" ]
then
return 0
fi
# not running
return 1
}
start_jenkins() {
src_user_hook pre_start_${cartridge_type}
set_app_state started
JENKINS_CMD="/etc/alternatives/jre/bin/java \
-Xmx168m \
-XX:MaxPermSize=100m \
-Dcom.sun.akuma.Daemon=daemonized \
-Djava.awt.headless=true"
if [ -f "${OPENSHIFT_REPO_DIR}/.openshift/markers/enable_debugging" ]; then
JENKINS_CMD="${JENKINS_CMD} -Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=${OPENSHIFT_JENKINS_IP}:7600,suspend=n"
fi
JENKINS_CMD="${JENKINS_CMD} -DJENKINS_HOME=$OPENSHIFT_DATA_DIR/ \
-Dhudson.slaves.NodeProvisioner.recurrencePeriod=500 \
-Dhudson.slaves.NodeProvisioner.initialDelay=100 \
-Dhudson.slaves.NodeProvisioner.MARGIN=100 \
-Dhudson.model.UpdateCenter.never=true \
-Dhudson.DNSMultiCast.disabled=true \
-jar /usr/lib/jenkins/jenkins.war \
--ajp13Port=-1 \
--controlPort=-1 \
--logfile=$OPENSHIFT_JENKINS_LOG_DIR/jenkins.log \
--daemon \
--httpPort=8080 \
--debug=5 \
--handlerCountMax=45 \
--handlerCountMaxIdle=20 \
--httpListenAddress=$OPENSHIFT_INTERNAL_IP"
$JENKINS_CMD &
echo $! > /dev/null
if [ $? -eq 0 ]; then
run_user_hook post_start_${cartridge_type}
fi
}
stop_nodes() {
result=`curl -s --insecure https://${JENKINS_USERNAME}:${JENKINS_PASSWORD}@${OPENSHIFT_GEAR_DNS}/computer/api/json`
nodes=`echo $result | awk -F"[,:]" '{for(i=1;i<=NF;i++){if($i~/displayName\042/){print $(i+1)} } }'`
OIFS="${IFS}"
NIFS=$'\n'
IFS="${NIFS}"
for LINE in ${nodes} ; do
node="${LINE%\"}"
node="${node#\"}"
IFS="${OIFS}"
result=`curl -s -X POST --insecure https://${JENKINS_USERNAME}:${JENKINS_PASSWORD}@${OPENSHIFT_GEAR_DNS}/computer/${node}/delete`
IFS="${NIFS}"
done
IFS="${OIFS}"
}
stop_jenkins() {
src_user_hook pre_stop_${cartridge_type}
set_app_state stopped
kill -TERM $pid > /dev/null 2>&1
wait_for_stop $pid
run_user_hook post_stop_${cartridge_type}
}
case "$1" in
start)
_state=`get_app_state`
if [ -f ${cartridge_dir}/run/stop_lock -o idle = "$_state" ]
then
echo "Application is explicitly stopped! Use 'rhc app start -a ${OPENSHIFT_APP_NAME}' to start back up." 1>&2
exit 0
else
if isrunning
then
echo "Application is already running!" 1>&2
exit 0
fi
start_jenkins
fi
;;
graceful-stop|stop)
if isrunning
then
stop_jenkins
else
echo "Application is already stopped!" 1>&2
exit 0
fi
;;
restart|graceful)
if isrunning
then
stop_jenkins
fi
start_jenkins
;;
reload)
# the plugin automatically does a reload prior to a build - so a no-op here
exit 0
;;
status)
if ! isrunning; then
echo "Application '${cartridge_type}' is either stopped or inaccessible"
exit 0
fi
print_user_running_processes `id -u`
exit 0
;;
esac
|
#!/bin/bash
#SBATCH --job-name=/data/unibas/boittier/test-neighbours
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --partition=short
#SBATCH --output=/data/unibas/boittier/test-neighbours_%A-%a.out
hostname
# Path to scripts and executables
cubefit=/home/unibas/boittier/fdcm_project/mdcm_bin/cubefit.x
fdcm=/home/unibas/boittier/fdcm_project/fdcm.x
ars=/home/unibas/boittier/fdcm_project/ARS.py
# Variables for the job
n_steps=0
n_charges=24
scan_name=SCAN_amide1.pdb-
suffix=.xyz.chk
cubes_dir=/data/unibas/boittier/fdcm/amide/scan-large
output_dir=/data/unibas/boittier/test-neighbours
frames=/home/unibas/boittier/fdcm_project/mdcms/amide/model1/frames.txt
initial_fit=/home/unibas/boittier/fdcm_project/mdcms/amide/model1/24_charges_refined.xyz
initial_fit_cube=/home/unibas/boittier/fdcm_project/mdcms/amide/model1/amide1.pdb.chk
prev_frame=30
start_frame=31
next_frame=32
acd=/home/unibas/boittier/fdcm_project/0_fit.xyz.acd
start=$start_frame
next=$next_frame
dir='frame_'$next
output_name=$output_dir/$dir/$dir'-'$start'-'$next'.xyz'
initial_fit=$output_dir/"frame_"$start/"frame_"$start'-'$prev_frame'-'$start'.xyz'
# Go to the output directory
mkdir -p $output_dir
cd $output_dir
mkdir -p $dir
cd $dir
# Do Initial Fit
# for initial fit
esp1=$cubes_dir/$scan_name$start$suffix'.p.cube'
dens1=$cubes_dir/$scan_name$start$suffix'.d.cube'
esp=$cubes_dir/$scan_name$next$suffix'.p.cube'
dens=$cubes_dir/$scan_name$next$suffix'.d.cube'
# adjust reference frame
python $ars -charges $initial_fit -pcube $dens1 -pcube2 $dens -frames $frames -output $output_name -acd $acd > $output_name.ARS.log
# do gradient descent fit
$fdcm -xyz $output_name.global -dens $dens -esp $esp -stepsize 0.2 -n_steps $n_steps -learning_rate 0.5 -output $output_name > $output_name.GD.log
# adjust reference frame
python $ars -charges $output_name -pcube $esp -pcube2 $esp -frames $frames -output $output_name -acd $acd > $output_name.ARS-2.log
# make a cube file for the fit
$cubefit -v -generate -esp $esp -dens $dens -xyz refined.xyz > $output_name.cubemaking.log
# do analysis
$cubefit -v -analysis -esp $esp -esp2 $n_charges'charges.cube' -dens $dens > $output_name.analysis.log
echo $PWD
sbatch /home/unibas/boittier/fdcm_project/job_files/test-neighbours/frame_32_33.sh
|
#!/bin/bash
declare -a nodes
function git_update_details {
remote=`git config --get remote.origin.url`
current_branch=`git symbolic-ref -q --short HEAD`
echo -e "The process will also perform the following UPGRADES:\n"
echo " 1. Upgrade to the most recent version of the installer: "
echo " * The update will be retrieved from: ${remote}"
echo " * It will be based on the most recent release from: ${current_branch}"
echo ""
echo " 2. May add additional files to your Assets area to allow for advanced configuration"
echo ""
echo " 3. Upgrade, if necessary, to the most recently vetted versions of: "
echo " * Shibboleth IdP"
echo -e " * Jetty\n\n"
}
the_install_base=/opt
working_dir=$the_install_base/shibboleth-idp-installer/repository
cd $working_dir || exit
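# get_nodes: collect every host listed under the [idp-servers] group in
# ansible_hosts into the nodes array; collection stops at the next [group] header.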
function get_nodes {
i=1
getting_nodes=false
for line in $(cat ansible_hosts)
do
if [ $line == "[idp-servers]" ]; then
getting_nodes=true
else
if [[ "$line" == [* ]]; then
getting_nodes=false
else
if ( $getting_nodes ); then
nodes[$i]=$line
i=$((i+1))
fi
fi
fi
done
}
function server_patch () {
patch=`grep "server_patch:" host_vars/$1`
if [[ $patch == "server_patch: \"false\"" ]]; then
echo " Node: $1 will NOT be patched"
else
echo " Node: $1 will be patched"
fi
}
get_nodes
echo ${nodes[*]}
upgrade=false
while getopts ":u" opt; do
case $opt in
u) upgrade=true;;
esac
done
echo -e "\n-----\n"
echo -e "This process will perform the following UPDATES: \n"
echo " 1. Update underlying operating system packages to ensure any security issues are addressed"
echo ""
for x in ${nodes[*]}
do
server_patch $x
done
echo ""
echo " 2. Apply any configuration changes made within the assets directory for: "
echo " * Shibboleth IdP"
echo " * Jetty"
echo " * Apache HTTPD"
echo ""
echo -e " 3. RESTART all dependant processes.\n"
if [[ $upgrade = true ]]
then
git_update_details
fi
echo "You MUST have a tested rollback plan in place before continuing."
echo -e "\n-----\n"
read -r -p "Are you sure you wish to continue with the process as detailed above? [y/N] " response
response=${response,,}
if [[ $response =~ ^(yes|y)$ ]]
then
if [[ $upgrade = true ]]
then
echo -e "\nAttempting to update the AAF Installer respositry...\n"
git pull
retval=$?
if [ $retval -ne 0 ]
then
echo -e "\n ----"
echo -e " An ERROR occurred attempting to upgrade the local AAF Installer respoitory"
echo -e " This must be resolved before your upgrade can proceed!\n"
echo -e " Details of the issue are shown above."
echo -e " ----"
echo -e "\nNo changes have been made. Exiting."
exit 1
else
ansible-playbook -i ansible_hosts update.yml --extra-var="install_base=$the_install_base"
fi
fi
ansible-playbook -i ansible_hosts site.yml --force-handlers --extra-var="install_base=$the_install_base"
else
echo "No changes made, exiting."
exit 0
fi
|
#!/usr/bin/env bash
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eo pipefail
# Constants
readonly GITHUB_REPOSITORY_NAME="grpc"
# GKE Cluster
readonly GKE_CLUSTER_NAME="interop-test-psm-lb-v1-us-central1-a"
readonly GKE_CLUSTER_ZONE="us-central1-a"
readonly SECONDARY_GKE_CLUSTER_NAME="interop-test-psm-lb-v1-us-west1-b"
readonly SECONDARY_GKE_CLUSTER_ZONE="us-west1-b"
## xDS test client Docker images
readonly CLIENT_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/cpp-client"
readonly FORCE_IMAGE_BUILD="${FORCE_IMAGE_BUILD:-0}"
readonly BUILD_APP_PATH="interop-testing/build/install/grpc-interop-testing"
#######################################
# Builds test app Docker images and pushes them to GCR
# Globals:
# BUILD_APP_PATH
# CLIENT_IMAGE_NAME: Test client Docker image name
# GIT_COMMIT: SHA-1 of git commit being built
# Arguments:
# None
# Outputs:
# Writes the output of `gcloud builds submit` to stdout, stderr
#######################################
build_test_app_docker_images() {
echo "Building C++ xDS interop test app Docker images"
docker build -f "${SRC_DIR}/tools/dockerfile/interoptest/grpc_interop_cxx_xds/Dockerfile.xds_client" -t "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" "${SRC_DIR}"
gcloud -q auth configure-docker
docker push "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}"
}
#######################################
# Builds test app and its docker images unless they already exist
# Globals:
# CLIENT_IMAGE_NAME: Test client Docker image name
# GIT_COMMIT: SHA-1 of git commit being built
# FORCE_IMAGE_BUILD
# Arguments:
# None
# Outputs:
# Writes the output to stdout, stderr
#######################################
build_docker_images_if_needed() {
# Check if images already exist
client_tags="$(gcloud_gcr_list_image_tags "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}")"
printf "Client image: %s:%s\n" "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}"
echo "${client_tags:-Client image not found}"
# Build if any of the images are missing, or FORCE_IMAGE_BUILD=1
if [[ "${FORCE_IMAGE_BUILD}" == "1" || -z "${client_tags}" ]]; then
build_test_app_docker_images
else
echo "Skipping C++ test app build"
fi
}
#######################################
# Executes the test case
# Globals:
# TEST_DRIVER_FLAGFILE: Relative path to test driver flagfile
# KUBE_CONTEXT: The name of kubectl context with GKE cluster access
# SECONDARY_KUBE_CONTEXT: The name of kubectl context with secondary GKE cluster access, if any
# TEST_XML_OUTPUT_DIR: Output directory for the test xUnit XML report
# CLIENT_IMAGE_NAME: Test client Docker image name
# GIT_COMMIT: SHA-1 of git commit being built
# Arguments:
# Test case name
# Outputs:
# Writes the output of test execution to stdout, stderr
# Test xUnit report to ${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml
#######################################
run_test() {
# Test driver usage:
# https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#basic-usage
local test_name="${1:?Usage: run_test test_name}"
python -m "tests.${test_name}" \
--flagfile="${TEST_DRIVER_FLAGFILE}" \
--kube_context="${KUBE_CONTEXT}" \
--secondary_kube_context="${SECONDARY_KUBE_CONTEXT}" \
--client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \
--xml_output_file="${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml"
}
#######################################
# Main function: provision software necessary to execute tests, and run them
# Globals:
# KOKORO_ARTIFACTS_DIR
# GITHUB_REPOSITORY_NAME
# SRC_DIR: Populated with absolute path to the source repo
# TEST_DRIVER_REPO_DIR: Populated with the path to the repo containing
# the test driver
# TEST_DRIVER_FULL_DIR: Populated with the path to the test driver source code
# TEST_DRIVER_FLAGFILE: Populated with relative path to test driver flagfile
# TEST_XML_OUTPUT_DIR: Populated with the path to test xUnit XML report
# GIT_ORIGIN_URL: Populated with the origin URL of git repo used for the build
# GIT_COMMIT: Populated with the SHA-1 of git commit being built
# GIT_COMMIT_SHORT: Populated with the short SHA-1 of git commit being built
# KUBE_CONTEXT: Populated with name of kubectl context with GKE cluster access
# SECONDARY_KUBE_CONTEXT: Populated with name of kubectl context with secondary GKE cluster access, if any
# Arguments:
# None
# Outputs:
# Writes the output of test execution to stdout, stderr
#######################################
main() {
local script_dir
script_dir="$(dirname "$0")"
# shellcheck source=tools/internal_ci/linux/grpc_xds_k8s_install_test_driver.sh
source "${script_dir}/grpc_xds_k8s_install_test_driver.sh"
set -x
if [[ -n "${KOKORO_ARTIFACTS_DIR}" ]]; then
kokoro_setup_test_driver "${GITHUB_REPOSITORY_NAME}"
else
local_setup_test_driver "${script_dir}"
fi
build_docker_images_if_needed
# Run tests
cd "${TEST_DRIVER_FULL_DIR}"
local failed_tests=0
test_suites=("change_backend_service_test" "failover_test" "remove_neg_test" "round_robin_test")
for test in "${test_suites[@]}"; do
run_test $test || (( failed_tests++ ))
done
echo "Failed test suites: ${failed_tests}"
if (( failed_tests > 0 )); then
exit 1
fi
}
main "$@"
|
#!/bin/bash
DATA_DIR="data/arxiv-abs"
OUT_DIR="outputs/reorder_exp/bart-large_arxiv"
mkdir -p ${OUT_DIR}
cp $0 ${OUT_DIR}
python -m source.encoder_decoder \
--train_file ${DATA_DIR}/train.jsonl \
--eval_data_file ${DATA_DIR}/dev.jsonl \
--out_dir $OUT_DIR \
--model_type facebook/bart-large \
--model_name_or_path facebook/bart-large \
--device 1 \
--do_train \
--save_total_limit 1 \
--num_train_epochs 1 \
--logging_steps 3000 \
--gradient_accumulation_steps 8 \
--train_batch_size 4 \
--eval_batch_size 8 \
--overwrite_out_dir \
--max_input_length 1024 \
--max_output_length 40 \
--task index_with_sep \
$@
#--overwrite_cache \
|
#!/bin/bash
/usr/bin/python3 "/lto-node/starter.py"
echo "Node is starting..."
${JAVA_HOME}/bin/java -Dlogback.stdout.level="${LTO_LOG_LEVEL}" "-Xmx${LTO_HEAP_SIZE}" -jar "/lto-node/lto-public-all.jar" $LTO_CONFIG_FILE
|
#!/bin/sh
#/ Usage: test.sh <pkg_ident>
#/
#/ Example: test.sh core/php/7.2.8/20181108151533
#/
set -euo pipefail
source "$(dirname "${0}")/../../bin/ci/test_helpers.sh"
if [[ -z "${1:-}" ]]; then
grep '^#/' < "${0}" | cut -c4-
exit 1
fi
TEST_PKG_IDENT="${1}"
export TEST_PKG_IDENT
hab pkg install core/bats --binlink
hab pkg install "${TEST_PKG_IDENT}"
ci_ensure_supervisor_running
ci_load_service "$TEST_PKG_IDENT"
# run the tests
bats "$(dirname "${0}")/test.bats"
# unload the service
hab svc unload "${TEST_PKG_IDENT}" || true
|
{
"name": "<CUSTOM_VARIABLE_NAME>",
"value": "<CUSTOM_VARIABLE_VALUE>",
"is_secret": false
}
|
#!/bin/bash
input=$1
output=$2
keyword=$3
if [[ ${input} == "" ]]; then exit; fi
if [[ ${output} == "" ]]; then exit; fi
dpmurl=root://${DPNS_HOST}:1094/${output}
dpmhost=${DPM_HOME}/${output}
dpns-mkdir -p ${dpmhost}
for file in `xrdfs eosams.cern.ch ls ${input}`
do
if [[ `echo ${file} | grep ${keyword}` == "" ]]; then continue; fi
filename=${file##*/}
filecheck=$(dpns-ls ${dpmhost}/${filename} 2>&1)
if [[ "${filecheck}" == *"No such file or directory"* ]] || [[ "${filecheck}" == *"invalid path"* ]]; then
srcfile=root://eosams.cern.ch/${file}
echo -e "==== xrdcp ${srcfile} ${dpmurl}/${filename}"
xrdcp ${srcfile} ${dpmurl}/${filename}
else
echo -e "==== ${filename} is exist."
fi
done
|
#!/bin/sh
PROJECT=mendex-doc
TMP=/tmp
PWDF=`pwd`
LATESTRELEASEDATE=`git tag | sort -r | head -n 1`
RELEASEDATE=`git tag --points-at HEAD | sort -r | head -n 1`
if [ -z "$RELEASEDATE" ]; then
RELEASEDATE="**not tagged**; later than $LATESTRELEASEDATE?"
fi
echo " * Create $PROJECT.zip ($RELEASEDATE)"
git archive --format=tar --prefix=$PROJECT/ HEAD | (cd $TMP && tar xf -)
rm $TMP/$PROJECT/.gitignore
rm $TMP/$PROJECT/create_archive.sh
rm -rf $TMP/$PROJECT/mendex.1*
perl -pi.bak -e "s/\\\$RELEASEDATE/$RELEASEDATE/g" $TMP/$PROJECT/README.md
rm -f $TMP/$PROJECT/README.md.bak
cd $TMP && zip -r $PWDF/$PROJECT.zip $PROJECT
rm -rf $TMP/$PROJECT
echo
echo " * Done: $PROJECT.zip ($RELEASEDATE)"
|
#!/usr/bin/env bash
dep ensure -v
|
#!/bin/bash
sqlite3 ./game.db .dump > game.sql
|
#!/bin/bash
# Simple bash script for generating the file structure for a new assignment.
prefix="assignments/$1"
# Create the directory structure:
mkdir $prefix
mkdir "$prefix/exemplars"
mkdir "$prefix/files"
# Create symbolic links to the `includes` and `graphics` folders:
# Following lines are required for MacOS ('brew install coreutils' for grealpath):
# relative=$(grealpath --relative-to="$prefix" templates/)
# ln -s "$relative/includes" "$prefix/includes"
# ln -s "$relative/graphics" "$prefix/graphics"
# Following lines are used for linux:
# ln -rs templates/includes "$prefix/includes"
# ln -rs templates/graphics "$prefix/graphics"
# Create a copy of the template.tex file with the assignment name:
cp templates/assignment_template.tex "$prefix/assignment_$1.tex"
|
#!/bin/bash
if [ "$#" -ne 2 ]
then
echo "usage: [input file] [output file]"
exit
fi
INFILE=$1
OUTFILE=$2
if test ! -w "$OUTFILE"
then
echo "$OUTFILE is not writable!" > error.log
fi
if [ -e "$INFILE" ]
then
cat "$INFILE" >> "$OUTFILE"
fi
# the script copies the content of the INFILE (1st input argument)
# and appends it to the OUTFILE (2nd input argument)
|
#!/usr/bin/env bash
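# Pangram check: lowercase the input, drop spaces, record every character whose
# ASCII code is in 97-122 (a-z) in an associative array, and report "true" when
# 26 distinct letters have been seen.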
main () {
declare -A chars
for char in $(echo "${1,,}" | tr -d " " | grep -o .); do
printf -v num "%d\n" "'$char" 2>/dev/null
if [[ "$num" -ge 97 ]] && [[ "$num" -le 122 ]]; then
chars["$char"]=0
fi
done
if [[ "${#chars[@]}" -eq 26 ]]; then
echo "true"
else
echo "false"
fi
}
main "$@"
|
#!/bin/sh
docker run -it -v $PWD:/home/jovyan/work --rm docker.io/jupyter/all-spark-notebook start.sh bash
|
#!/bin/bash
read -e -p $'\e[32mEnter work dir:\e[0m ' CODE_WORK_DIR
docker run -t -p 127.0.0.1:8443:8443 -v "$CODE_WORK_DIR:/root/project" codercom/code-server --allow-http --no-auth
|
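# Evaluate every saved RetinaNet checkpoint (iterations 2500 to 45000 in steps of
# 2500) from $DIR; the iteration number is zero-padded to 7 digits to match the
# checkpoint file names, and the test batch size scales with the number of GPUs.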
export DIR="./train_log/4_8_0"
export NGPUS=1
export BATCH=$(echo "$NGPUS*8"|bc)
for i in $(seq 1 18)
do
let ITER=i*2500
ITER=`printf "%07d" $ITER`
python -m torch.distributed.launch --nproc_per_node=$NGPUS ./tools/test_net.py \
--config-file "./configs/retinanet/retinanet_R-50-FPN_P5_1x.yaml" \
--ckpt $DIR"/model_"$ITER".pth" \
OUTPUT_DIR $DIR TEST.IMS_PER_BATCH $BATCH
done
|
if [ ! -z $BUILD_BUILDNUMBER ]; then
exit 0
else
exit 1
fi
|
#!/usr/bin/env bats
load '../lib/helper'
load '../bats/extensions/bats-support/load'
load '../bats/extensions/bats-assert/load'
load '../bats/extensions/bats-file/load'
@test "plugin-install: helm plugin install" {
HOME="$(mktemp -d)"
# Windows
# See: https://github.com/helm/helm/blob/b4f8312dbaf479e5f772cd17ae3768c7a7bb3832/pkg/helmpath/lazypath_windows.go#L22
# shellcheck disable=SC2034
APPDATA="${HOME}"
run helm plugin install "${GIT_ROOT}"
assert_output --regexp "$(printf "sops is already installed: sops .*\nInstalled plugin: secrets")"
assert_file_exist "${HOME}/.gitconfig"
}
@test "plugin-install: SKIP_SOPS_INSTALL=true helm plugin install" {
SKIP_SOPS_INSTALL=true
export SKIP_SOPS_INSTALL
HOME="$(mktemp -d)"
# Windows
# See: https://github.com/helm/helm/blob/b4f8312dbaf479e5f772cd17ae3768c7a7bb3832/pkg/helmpath/lazypath_windows.go#L22
# shellcheck disable=SC2034
APPDATA="${HOME}"
run helm plugin install "${GIT_ROOT}"
assert_output --regexp "$(printf "Skipping sops installation.\nInstalled plugin: secrets")"
assert_file_exist "${HOME}/.gitconfig"
}
@test "plugin-install: helm plugin list" {
run helm plugin list
assert_success
assert_output --partial 'secrets'
}
@test "plugin-install: helm secrets" {
run helm secrets
assert_failure
assert_output --partial 'Available Commands:'
}
@test "plugin-install: helm secrets --help" {
run helm secrets --help
assert_success
assert_output --partial 'Available Commands:'
}
|
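# Post sensor values from a CSV file to the Waziup API.
# Expected arguments (inferred from the calls below; names are illustrative):
#   $1 sensor id, $2 measurement id, $3 CSV file with rows of the form date,timestamp,value
# Example (hypothetical values): ./push_values.sh mySensor temperature readings.csv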
IFS=','
while IFS=, read -r date_rec timestamp val; do
echo "inserting $timestamp $val in $1 $2"
curl -X POST "http://api.waziup.io/api/v1/sensors/$1/measurements/$2/values" -H "accept: application/json" -H "Content-Type: application/json" -d "{ \"value\": $val, \"timestamp\": $timestamp}"
done < $3
|
#!/bin/bash
# Script to deploy a very simple web application.
# The web app has a customizable image and some text.
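# The following environment variables must be set before running this script
# (the values shown here are illustrative placeholders only):
#   PLACEHOLDER=placekitten.com  WIDTH=600  HEIGHT=400  PREFIX=alice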
cat << EOM > /var/www/html/index.html
<html>
<head><title>Meow!</title></head>
<body>
<div style="width:800px;margin: 0 auto">
<!-- BEGIN -->
<center><img src="http://${PLACEHOLDER}/${WIDTH}/${HEIGHT}"></img></center>
<center><h2>Meow World!</h2></center>
Welcome to ${PREFIX}'s app. No hairballs allowed!
<!-- END -->
</div>
</body>
</html>
EOM
echo "Script complete."
|
#!/bin/sh
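# Poll $ENDPOINT every $INTERVAL seconds, printing the date and the server's
# response headers each time. INTERVAL, TIMEOUT and ENDPOINT are expected to be
# set in the environment, e.g. (hypothetical script name and values):
#   INTERVAL=30 TIMEOUT=5 ENDPOINT=http://example.com/health ./poll.sh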
while true; do sleep $INTERVAL; date; wget -q -S --timeout $TIMEOUT $ENDPOINT -O /dev/null; done
|
#!/bin/sh
# you need to set the environment variable HABITAT_PROJECT_FOLDER
mkdir -p $HABITAT_PROJECT_FOLDER/assets/widgets/lab_interactive
cp -R tmp/lab-interactive/* $HABITAT_PROJECT_FOLDER/assets/widgets/lab_interactive
# next steps:
# need to setup the meta data files for the widget
# need to add a snippet to: /trunk/s9ml/.templates/pattern-snippets.tpls
# here is example snippet:
# <!-- [Widget Name] -->
# <script type="application/inkling+patternsnippet" id="[widget-id]" data-label="[widget label]" data-category="Widgets">
# <figure>
# <object type="text/html" data="../../assets/widgets/[widget folder]/index.html" class="widget [widget class]">
# </object>
# </figure>
# </script>
|
#!/bin/bash
rm -rf pof/
mkdir pof/
echo copying help patches...
cp -a ../help/* pof/
cp ../LICENSE.txt pof/
echo copying example patches...
mkdir pof/example/
cp -a ../example/pd/* pof/example/
echo copying pof external...
cp bin/pof.* pof/
patchelf --set-rpath '$ORIGIN/libs' pof/pof.*
mkdir pof/libs
echo copying libs...
cp -a bin/libs/* pof/libs
LIBS_TO_COPY="freeimage boost_filesystem boost_system openal gstreamer gstbase gstapp gstvideo \
sndfile GLEW openjpeg IlmImf IlmThread Half Iex raw gpg-error gcrypt datrie graphite2 \
protobuf-lite mirprotobuf mircommon jbig lzma gomp lcms2 jasper jpegxr Xdmcp Xau orc \
jxrglue pcre mirclient atspi wayland-client wayland-egl wayland-cursor Xfixes \
openjp2 jpeg png webp webpmux ssl crypto \
"
# gstadaptivedemux gstallocators gstbadbase libgstbadvideo gstaudio gstbadaudio gstcheck gstcodecparsers gstcontroller gstfft gstgl gstinsertbin gstmpegts gstnet gstpbutils gstphotography gstplayer gstriff gstrtp gstrtsp gsttag
for libtocopy in $LIBS_TO_COPY ; do
libfile=`ldd bin/pof.* | grep lib${libtocopy} | cut -d' ' -f 3`
if [ "x$libfile" != "x" ] ; then
#echo "$libtocopy : copying $libfile in libs"
cp $libfile pof/libs
fi
done
echo copying gst plugins...
mkdir pof/libs/gstplugins
GSTREAMER_VERSION=0.10
RET=$(ls pof/libs/libgstreamer-1.0* &> /dev/null; echo $?)
if [ "$RET" -eq "0" ]; then
GSTREAMER_VERSION=1.0
fi
GSTREAMER_PLUGINS=$(dirname `locate gstreamer-$GSTREAMER_VERSION/libgstavi.so`)
cp $GSTREAMER_PLUGINS/*.so* pof/libs/gstplugins
echo setting libs and plugins rpath...
for x in pof/libs/gstplugins/*.so* ; do patchelf --set-rpath '$ORIGIN/..' $x ;done
for x in pof/libs/*.so* ; do patchelf --set-rpath '$ORIGIN' $x ;done
echo copying OF install_dependencies scripts...
mkdir pof/scripts
cp -a ../../../scripts/linux/*/ pof/scripts
echo
POF_VERSION=`strings pof/pof.* | grep "Pof: version" | cut -s -d' ' -f 4`
rm *${POF_VERSION}*dek*
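# With 'test' as the first argument only build the deken package locally and keep
# the pof/ staging directory; otherwise upload the package and clean up.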
if [ x$1 == xtest ] ; then
deken package --objects ../pof.dek.txt --version ${POF_VERSION} pof
else
deken upload --objects ../pof.dek.txt --no-source-error --version ${POF_VERSION} pof
rm -rf pof/
fi
|
#!/bin/bash
# This script creates a json file containing openssl verification meta data
# which is used inside java unit tests via SecurityTesthelper.java
function usage(){
echo "usage: ciphertest <server:port> <server|pds>"
echo " - second parameter is for taget type, currently only 'server' (sechub-server) and"
echo " 'pds' are supported"
}
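# Example invocation (hypothetical host and port, assuming this script is named ciphertest.sh):
#   ./ciphertest.sh localhost:8443 server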
# check if open ssl is available at all
# if not the function will exit with exit code 3
function ensureOpenSSLInstalled(){
checkCommand="which openssl";
foundOpenSSLPath=$($checkCommand)
if [[ "$foundOpenSSLPath" = "" ]]; then
echo "Did not found a open SSL installation! So cannot check ciphers!"
exit 3
fi
}
# check and print openssl version
ensureOpenSSLInstalled
echo "Using installed $(openssl version)."
if [ -z "$1" ] ; then
echo "server is missing as first parameter!"
usage
exit 1
fi
if [ -z "$2" ] ; then
echo "type is missing as second parameter (server|pds)"
usage
exit 1
fi
cd ..
cd sechub-$2
source ./dev-base.sh
cd ..
cd sechub-integrationtest
# OpenSSL requires the port number.
DEV_CERT_PEM="$DEV_CERT_PATH/generated-dev-localhost-keystore.pem"
OUTPUT_FOLDER="./build/test-results/ciphertest/"
OUTPUT_FILE="$OUTPUT_FOLDER/sechub-$2.json"
rm -f $OUTPUT_FILE
mkdir -p $OUTPUT_FOLDER
echo DEV_CERT_FILE=$DEV_CERT_FILE
SERVER=$1
ciphers=$(openssl ciphers 'ALL:eNULL' | sed -e 's/:/ /g')
# convert existing pkcs12 file to a PEM file, so we can use it later to connect to localhost with self signed certificates....
openssl pkcs12 -in $DEV_CERT_FILE -out $DEV_CERT_PEM -clcerts -nokeys -passin pass:$PSEUDO_PWD
echo Obtaining cipher list by openssl
echo "{" > $OUTPUT_FILE
echo " \"cipherChecks\" : [" >> $OUTPUT_FILE
count=0
for cipher in ${ciphers[@]}
do
if [[ "$count" != "0" ]] ; then
echo "," >> $OUTPUT_FILE
fi
count=$((count+1))
echo -n " { \"cipher\" : \"$cipher\", \"verified\" :\"" >> $OUTPUT_FILE
# why -tls1_2? Only when using TLS 1.2 (or below) is the given cipher really used;
# otherwise the client will accept a TLS 1.3 fallback, which is then verified as true
# and the test is not meaningful.
#
# why -CAfile? We use the previously generated PEM file for the self-signed certificate;
# otherwise we always get unknown results because openssl does not
# trust self-signed certificates.
result=$(echo -n | openssl s_client -CAfile $DEV_CERT_PEM -tls1_2 -cipher "$cipher" -connect $SERVER 2>&1)
if [[ "$result" =~ ":error:" ]] ; then
error=$(echo -n $result | cut -d':' -f6)
echo -n "false\",\"error\" : \"$error\"" >> $OUTPUT_FILE
else
if echo $result | grep -q "Verify return code: 0 (ok)"; then
echo -n "true\"" >> $OUTPUT_FILE
else
echo -n "undefined\", \"error\" : \"$result\"" >> $OUTPUT_FILE
fi
fi
echo -n "}" >> $OUTPUT_FILE
done
echo "] }" >> $OUTPUT_FILE
echo "written to $OUTPUT_FILE"
|
#!/bin/bash
set -e
ABSOLUTE_SCRIPT=`readlink -m $0`
SCRIPT_DIR=`dirname ${ABSOLUTE_SCRIPT}`
source ${SCRIPT_DIR}/00_config.sh
if (( $# != 2 )); then
echo "USAGE: $0 <solve base parameters file> <inference options file>"
exit 1
fi
SOLVE_BASE_PARAMETERS_FILE=$(readlink -m $1)
INFERENCE_OPTIONS_FILE=$(readlink -m $2)
if [[ ! -f ${SOLVE_BASE_PARAMETERS_FILE} ]]; then
echo "ERROR: ${SOLVE_BASE_PARAMETERS_FILE} not found"
exit 1
fi
if [[ ! -f ${INFERENCE_OPTIONS_FILE} ]]; then
echo "ERROR: ${INFERENCE_OPTIONS_FILE} not found"
exit 1
fi
RUN_DIR=$(dirname ${SOLVE_BASE_PARAMETERS_FILE})
RUN_NAME=$(basename ${RUN_DIR})
LOGS_DIR="${RUN_DIR}/logs"
mkdir -p ${LOGS_DIR}
# use shell group to tee all output to log file
{
# ---------------------------
# run solve
JAVA_CLASS="org.janelia.render.client.zspacing.ZPositionCorrectionClient"
ARGS=$(cat ${SOLVE_BASE_PARAMETERS_FILE})
ARGS="${ARGS} --solveExisting"
ARGS="${ARGS} --optionsJson ${INFERENCE_OPTIONS_FILE}"
ARGS="${ARGS} --normalizedEdgeLayerCount 30"
${RENDER_CLIENT_SCRIPT} ${RENDER_CLIENT_HEAP} ${JAVA_CLASS} ${ARGS}
# ---------------------------
# merge cc data
CC_BATCHES_DIR="/nrs/flyem/render/z_corr/${RENDER_OWNER}/${RENDER_PROJECT}/${ALIGN_STACK}/${RUN_NAME}/cc_batches"
if [[ ! -d ${CC_BATCHES_DIR} ]]; then
echo "ERROR: ${CC_BATCHES_DIR} not found"
exit 1
fi
JAVA_CLASS="org.janelia.render.client.zspacing.CrossCorrelationDataMerger"
${RENDER_CLIENT_SCRIPT} ${RENDER_CLIENT_HEAP} ${JAVA_CLASS} ${CC_BATCHES_DIR}
} 2>&1 1>>${LOGS_DIR}/cc_solve.log
echo
grep Zcoords.txt ${LOGS_DIR}/cc_solve.log
echo
# ---------------------------
# generate plots
Z_CORR_SCRIPTS_DIR="/groups/flyem/data/trautmane/z_corr"
ARGS="${RENDER_OWNER} ${RENDER_PROJECT} ${ALIGN_STACK} ${RUN_NAME}"
${Z_CORR_SCRIPTS_DIR}/plot_cross_correlation.sh ${ARGS}
${Z_CORR_SCRIPTS_DIR}/plot_z_coords.sh ${ARGS}
|
#!/bin/sh
ROOT_SRC_DIR=/home/mjt5v/CLionProjects/belief-propagation
# cuda benchmark
cd "$ROOT_SRC_DIR/cmake-build-release/src/cuda_benchmark"
./cuda_edge_benchmark || true
./cuda_edge_streaming_benchmark || true
#./cuda_edge_openmpi_benchmark || true
|
#!/bin/bash
# RESTful Interface Tool Sample Script for HPE iLO Products #
# Copyright 2014, 2020 Hewlett Packard Enterprise Development LP #
# Description: This is a sample bash script to return the #
# current FIPs Mode #
# NOTE: You will need to replace the USER_LOGIN and PASSWORD #
# values with values that are appropriate for your #
# environment. #
# Firmware support information for this script: #
# iLO 5 - All versions #
runLocal(){
ilorest get TpmFips --selector=Bios. -u USER_LOGIN -p PASSWORD
ilorest logout
}
runRemote(){
ilorest get TpmFips --selector=Bios. --url=$1 --user $2 --password $3
ilorest logout
}
error(){
echo "Usage:"
echo "remote: Get_FIPS_Status.sh ^<iLO url^> ^<iLO username^> ^<iLO password^>"
echo "local: Get_FIPS_Status.sh"
}
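# Example remote invocation (hypothetical values):
#   ./Get_FIPS_Status.sh https://192.168.1.10 admin mypassword
# Run with no arguments to query the local iLO instead.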
if [ "$#" -eq "3" ]
then
runRemote "$1" "$2" "$3"
elif [ "$#" -eq "0" ]
then
runLocal
else
error
fi
|
#!/bin/bash
# Purpose: build and start the service
#
. /etc/profile
# 1. Check out the code
rm -rf /data/temp/ideploy
echo "Starting code checkout"
git clone https://github.com/oldDiverInGZ/ideploy.git /data/temp/ideploy/ > /dev/null
if [ $? -ne 0 ]; then
echo "Code checkout failed"
exit 1
fi
# 2. Build
cd /data/temp/ideploy/
echo "Starting build"
mvn -P=dev -Dmaven.test.skip=true -U clean install > compile.log 2>&1
if [ $? -ne 0 ]; then
echo "Build failed"
exit 1
fi
echo "Build finished"
# Move the jar
if [ -d "/data/project/ideploy/deployment-log/" ];then
rm -rf /data/project/ideploy/deployment-log/
fi
mkdir -p /data/project/ideploy/deployment-log/
cp /data/temp/ideploy/deployment-log/target/deployment-log.jar /data/project/ideploy/deployment-log
# 3. Restart
echo "Starting deployment..."
killTimes=3
# Try kill -15 up to 3 times; if the process is still alive, fall back to kill -9
echo "Stopping tomcat..."
pId=$(ps -ef | grep deployment-log.jar | grep -v grep | awk '{print $2}')
while [ $killTimes -ge 0 ]; do
ps ax | awk '{ print $1 }' | grep -e "^$pId$"
if [ $? -ne 0 ]; then
break
fi
kill -15 $pId >/dev/null 2>&1
if [ $killTimes -gt 0 ]; then
sleep 10
fi
# Force kill
ps ax | awk '{ print $1 }' | grep -e "^$pId$"
if [ $? -ne 0 ]; then
sleep 10
else
break
fi
if [ $killTimes -eq 0 ]; then
kill -9 $pId
fi
killTimes=`expr $killTimes - 1 `
done
export JAVA_OPTS="$JAVA_OPTS"
echo "开始启动...."
#启动
cd /data/project/ideploy/deployment-log/
nohup java $JAVA_OPTS -jar /data/project/ideploy/deployment-log/deployment-log.jar>/dev/null 2>deployment-log-err.log &
# Remove the temporary directory
rm -rf /data/temp/ideploy
echo "启动完成"
|
#!/bin/sh
#
# Copyright (c) 2018-2020, Christer Edwards <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
. /usr/local/share/bastille/colors.pre.sh
usage() {
echo -e "${COLOR_RED}Usage: bastille cmd TARGET command.${COLOR_RESET}"
exit 1
}
# Handle special-case commands first.
case "$1" in
help|-h|--help)
usage
;;
esac
if [ $# -lt 2 ]; then
usage
fi
TARGET="${1}"
shift
if [ "${TARGET}" = 'ALL' ]; then
JAILS=$(jls name)
fi
if [ "${TARGET}" != 'ALL' ]; then
JAILS=$(jls name | awk "/^${TARGET}$/")
fi
for _jail in ${JAILS}; do
echo -e "${COLOR_GREEN}[${_jail}]:${COLOR_RESET}"
jexec -l "${_jail}" "$@"
echo
done
|
#!/bin/bash
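# Align paired-end reads with BWA-MEM.
# Positional arguments (as used below):
#   $1 reference FASTA, $2 and $3 read FASTQ files, $4 sample name,
#   $5 directory containing the reference index files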
REF_FA=$1
READ_1=$2
READ_2=$3
SAMPLE_NAME=$4
REF_DIR=$5
# Base name of reference.fa file
TEMP_VAR=${REF_FA}
REF_NAME=${TEMP_VAR%.*}
cd "${REF_DIR}"
if [ "${REF_NAME}*" != "${REF_FA}" ]
then
# Create symbolic links to all reference index files
#
ln -s ${REF_NAME}* ${REF_DIR}
fi
/usr/local/apps/bioapps/bwa/bwa-0.7.16/bwa mem -t 12 -M -k 32 -I 300,30 -R "@RG\tID:lane1\tLB:${SAMPLE_NAME}\tPL:illumina\tPU:lane1\tSM:lane1\tCN:${SAMPLE_NAME}" $REF_FA $READ_1 $READ_2 > /projects/mgc/Project_1/ram/CromwellWDL_WorkFlow_Development/IdxCaptureOutputs_temp/${SAMPLE_NAME}.aligned.sam
find -type l -delete
|
#!/bin/bash
readonly CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-podman}
readonly CURRENT_SCRIPT=$(basename "$0")
readonly CMD="${CONTAINER_RUNTIME} run --entrypoint performance-profile-creator"
readonly IMG_EXISTS_CMD="${CONTAINER_RUNTIME} image exists"
readonly IMG_PULL_CMD="${CONTAINER_RUNTIME} image pull"
readonly MUST_GATHER_VOL="/must-gather"
PAO_IMG="quay.io/openshift-kni/performance-addon-operator:4.8-snapshot"
MG_TARBALL=""
DATA_DIR=""
usage() {
print "Wrapper usage:"
print " ${CURRENT_SCRIPT} [-h] [-p image][-t path] -- [performance-profile-creator flags]"
print ""
print "Options:"
print " -h help for ${CURRENT_SCRIPT}"
print " -p Performance Addon Operator image"
print " -t path to a must-gather tarball"
${IMG_EXISTS_CMD} "${PAO_IMG}" && ${CMD} "${PAO_IMG}" -h
}
function cleanup {
[ -d "${DATA_DIR}" ] && rm -rf "${DATA_DIR}"
}
trap cleanup EXIT
exit_error() {
print "error: $*"
usage
exit 1
}
print() {
echo "$*" >&2
}
check_requirements() {
${IMG_EXISTS_CMD} "${PAO_IMG}" || ${IMG_PULL_CMD} "${PAO_IMG}" || \
exit_error "Performance Addon Operator image not found"
[ -n "${MG_TARBALL}" ] || exit_error "Must-gather tarball file path is mandatory"
[ -f "${MG_TARBALL}" ] || exit_error "Must-gather tarball file not found"
DATA_DIR=$(mktemp -d -t "${CURRENT_SCRIPT}XXXX") || exit_error "Cannot create the data directory"
tar -zxf "${MG_TARBALL}" --directory "${DATA_DIR}" || exit_error "Cannot decompress the must-gather tarball"
return 0
}
main() {
while getopts ':hp:t:' OPT; do
case "${OPT}" in
h)
usage
exit 0
;;
p)
PAO_IMG="${OPTARG}"
;;
t)
MG_TARBALL="${OPTARG}"
;;
?)
exit_error "invalid argument: ${OPTARG}"
;;
esac
done
shift $((OPTIND - 1))
check_requirements || exit 1
${CMD} -v "${DATA_DIR}:${MUST_GATHER_VOL}:z" "${PAO_IMG}" "$@" -M "${MUST_GATHER_VOL}"
}
main "$@"
|
#!/bin/sh
export ORGANISM=human
export ORGANISM_SHORT=hs
export READ_LENGTH=100
export FILE_BASE=ensembl_rna_hs
export GENE_MODEL=ensembl
export RADIUS=100
export STRAND_SPECIFIC=FALSE
export STRAND_SPECIFIC_DIRECTION=none
export PAIRED_END=TRUE
export GENOME_FASTA_URL=ftp://ftp.ensembl.org/pub/release-76/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz
|
ovs-vsctl add-br br0
ovs-vsctl add-port br0 xvm1 -- set Interface xvm1 type=xiluiovs
ovs-vsctl show
|
#!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
assert_ok "$FLOW" type-at-pos printBinaryExpression.js 15 12 --strip-root
assert_ok "$FLOW" type-at-pos printBinaryExpression.js 17 15 --strip-root
|
#!/bin/bash
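# Repeatedly query the liveandready service from the client pod until the 50 test
# requests are answered by at least 5 distinct pods, printing the pod list and the
# endpoint addresses after each round. The target namespace is passed as $1.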
namespace=$1
kubectl -n ${namespace} cp how_many_pods.sh client:/how_many_pods.sh
servers=0
while [ $servers -lt 5 ]
do
echo "###########Web Requests#############"
kubectl -n ${namespace} exec -it client /how_many_pods.sh > ./result.txt
echo "50 requests divided on these pods:"
cat ./result.txt
servers=`cat ./result.txt | wc -l`
echo "-----------Pods---------------------"
kubectl -n ${namespace} get pods
echo "-----------Endpoints IPs------------"
kubectl -n ${namespace} describe ep liveandready | grep "Addresses:\|NotReadyAddresses:"
if [ $servers -lt 5 ]
then
sleep 10
fi
done
|
#!/bin/bash
DEPLOY_TO="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../.."
NGINX_DIR="/usr/local/openresty/nginx"
USER=$(whoami)
chown -R $USER:$USER $NGINX_DIR
ln -sf $DEPLOY_TO/ $NGINX_DIR/count-von-count
mkdir -p $NGINX_DIR/conf/include
ln -sf $DEPLOY_TO/config/voncount.nginx.conf $NGINX_DIR/conf/include/voncount.conf
service redis-server start
$DEPLOY_TO/lib/scripts/reload.sh
$NGINX_DIR/sbin/nginx
if ps aux | grep nginx | grep master > /dev/null ; then
echo ">>> nginx is running"
else
echo "ERROR: nginx is not running"
fi
if ps aux | grep redis-server | grep -v 'grep' > /dev/null ; then
echo ">>> redis-server is running"
else
echo "ERROR: redis-server is not running"
fi
|
# cleanup last version
rm -rf __deployme
mkdir __deployme
# build
# sh scripts/build.sh
# minify js
uglifyjs bundle.js -o __deployme/bundle.js
# or copy bundle
# cp bundle.js __deployme/bundle.js
# minify css
cssshrink bundle.css > __deployme/bundle.css
# copy html and images
cp index.html __deployme/index.html
cp -r images/ __deployme/images/
# done
date; echo;
|
#!/usr/bin/env bash
# This script identifies the unit test modules that do not correspond
# directly with a module in the code tree. See TESTING.rst for the
# intended structure.
neutron_path=$(cd "$(dirname "$0")/.." && pwd)
base_test_path=networking_sfc/tests/unit
test_path=$neutron_path/$base_test_path
test_files=$(find ${test_path} -iname 'test_*.py')
ignore_regexes=(
"^plugins.*$"
"^db/test_migrations.py$"
)
error_count=0
ignore_count=0
total_count=0
for test_file in ${test_files[@]}; do
relative_path=${test_file#$test_path/}
expected_path=$(dirname $neutron_path/networking_sfc/$relative_path)
test_filename=$(basename "$test_file")
expected_filename=${test_filename#test_}
# Module filename (e.g. foo/bar.py -> foo/test_bar.py)
filename=$expected_path/$expected_filename
# Package dir (e.g. foo/ -> test_foo.py)
package_dir=${filename%.py}
if [ ! -f "$filename" ] && [ ! -d "$package_dir" ]; then
for ignore_regex in ${ignore_regexes[@]}; do
if [[ "$relative_path" =~ $ignore_regex ]]; then
((ignore_count++))
continue 2
fi
done
echo "Unexpected test file: $base_test_path/$relative_path"
((error_count++))
fi
((total_count++))
done
if [ "$ignore_count" -ne 0 ]; then
echo "$ignore_count unmatched test modules were ignored"
fi
if [ "$error_count" -eq 0 ]; then
echo 'Success! All test modules match targets in the code tree.'
exit 0
else
echo "Failure! $error_count of $total_count test modules do not match targets in the code tree."
exit 1
fi
|
#!/bin/sh
set -e
set -u
set -o pipefail
function on_error {
echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
# If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
# frameworks to, so exit 0 (signalling the script phase was successful).
exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
BCSYMBOLMAP_DIR="BCSymbolMaps"
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
if [ -d "${source}/${BCSYMBOLMAP_DIR}" ]; then
# Locate and install any .bcsymbolmaps if present, and remove them from the .framework before the framework is copied
find "${source}/${BCSYMBOLMAP_DIR}" -name "*.bcsymbolmap"|while read f; do
echo "Installing $f"
install_bcsymbolmap "$f" "$destination"
rm "$f"
done
rmdir "${source}/${BCSYMBOLMAP_DIR}"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
elif [ -L "${binary}" ]; then
echo "Destination binary is symlinked..."
dirname="$(dirname "${binary}")"
binary="${dirname}/$(readlink "${binary}")"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
install_dsym() {
local source="$1"
warn_missing_arch=${2:-true}
if [ -r "$source" ]; then
# Copy the dSYM into the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .dSYM "$source")"
binary_name="$(ls "$source/Contents/Resources/DWARF")"
binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}"
# Strip invalid architectures from the dSYM.
if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
strip_invalid_archs "$binary" "$warn_missing_arch"
fi
if [[ $STRIP_BINARY_RETVAL == 0 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM"
fi
fi
}
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
warn_missing_arch=${2:-true}
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
if [[ "$warn_missing_arch" == "true" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
fi
STRIP_BINARY_RETVAL=1
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary"
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=0
}
# Copies the bcsymbolmap files of a vendored framework
install_bcsymbolmap() {
local bcsymbolmap_path="$1"
local destination="${BUILT_PRODUCTS_DIR}"
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/test-app/test_app.framework"
install_framework "${BUILT_PRODUCTS_DIR}/test-extension/test_extension.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/test-app/test_app.framework"
install_framework "${BUILT_PRODUCTS_DIR}/test-extension/test_extension.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
#!/bin/bash
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
docker_image=$1
data_dir=$2
function show_help() {
echo "Usage: docker_start.sh docker_image data_dir"
}
function param_check() {
if [ -z "${docker_image}" ]; then
echo "please input docker_image"
show_help
exit 1
fi
if [ -z "${data_dir}" ]; then
echo "please input data_dir"
show_help
exit 1
fi
}
param_check
docker run -it \
--device=/dev/davinci0 \
--device=/dev/davinci_manager \
--device=/dev/devmm_svm \
--device=/dev/hisi_hdc \
-v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
-v ${data_dir}:${data_dir} \
${docker_image} \
/bin/bash
|
#!/bin/bash
#
# BSD LICENSE
#
# Copyright(c) 2017-2018 Huawei Technologies Co., Ltd. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Huawei Technologies Co., Ltd nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
if [ -z "$FPGA_TOOL_DIR" ];
then
echo "FPGA_TOOL INSTALL ERROR: FPGA_TOOL_DIR environment variable is not set. Please 'source fpga_tool_setup.sh' from the fpga directory first."
exit 1
fi
if [ $EUID != 0 ];
then
echo ""
echo "Root privileges are required to install. You may be asked for your password..."
sudo -E "$0" "$@"
exit $?
else
echo "FPGA_TOOL INSTALL MESSAGE: Executing as root..."
fi
BASE_PATH=/usr/local
echo $PATH | grep "$BASE_PATH" &> /dev/null
ret=$?
if [ $ret -ne "0" ];
then
BASE_PATH=/usr
fi
FPGA_TOOL_DIST_DIR=$FPGA_TOOL_DIR/dist
FPGA_TOOL_DST_DIR=$BASE_PATH/bin
if [ ! -d "$FPGA_TOOL_DST_DIR" ];
then
mkdir -p $FPGA_TOOL_DST_DIR
fi
#Copy libfpgamgmt.so to /usr/lib64
cp -f $FPGA_TOOL_DIST_DIR/libfpgamgmt.so /usr/lib64
RET=$?
if [ $RET -ne 0 ];
then
echo "FPGA_TOOL INSTALL ERROR: Copy libfpgamgmt.so to /usr/lib64 failed."
exit 1
fi
echo "FPGA_TOOL INSTALL MESSAGE: Copy libfpgamgmt.so to /usr/lib64 success "
#Set libfpgamgmt.so privilege
chmod 600 /usr/lib64/libfpgamgmt.so
RET=$?
if [ $RET -ne 0 ];
then
echo "FPGA_TOOL INSTALL ERROR: Set the privilege of /usr/lib64/libfpgamgmt.so failed."
exit 1
fi
echo "FPGA_TOOL INSTALL MESSAGE: Set the privilege of /usr/lib64/libfpgamgmt.so success"
#Copy fpga tool to /usr/local/bin or /usr/bin
cp -f $FPGA_TOOL_DIST_DIR/FpgaCmdEntry $FPGA_TOOL_DST_DIR
RET=$?
if [ $RET -ne 0 ];
then
echo "FPGA_TOOL INSTALL ERROR:Copy FpgaCmdEntry to $FPGA_TOOL_DST_DIR failed."
exit 1
fi
echo "FPGA_TOOL INSTALL MESSAGE: Copy FpgaCmdEntry to $FPGA_TOOL_DST_DIR success "
#Set fpga tool privilege
chmod 700 $FPGA_TOOL_DST_DIR/FpgaCmdEntry
RET=$?
if [ $RET -ne 0 ];
then
echo "FPGA_TOOL INSTALL ERROR:Set the privilege of FpgaCmdEntry failed."
exit 1
fi
echo "FPGA_TOOL INSTALL MESSAGE: Set the privilege of $FPGA_TOOL_DST_DIR/FpgaCmdEntry success"
|
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "Pods-ConnectSwift_Tests/Alamofire.framework"
install_framework "Pods-ConnectSwift_Tests/ConnectSwift.framework"
install_framework "Pods-ConnectSwift_Tests/SwiftyJSON.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "Pods-ConnectSwift_Tests/Alamofire.framework"
install_framework "Pods-ConnectSwift_Tests/ConnectSwift.framework"
install_framework "Pods-ConnectSwift_Tests/SwiftyJSON.framework"
fi
|
#!/bin/bash
# Run_AMP4_Dhrystone.sh
# Check Environment
if [ -z ${IMPERAS_HOME} ]; then
echo "IMPERAS_HOME not set. Please check environment setup."
exit
fi
${IMPERAS_ISS} --verbose --output imperas.log \
--program ../../../Applications/dhrystone/dhrystone.ARM9E-O0-g.elf \
--processorvendor arm.ovpworld.org --processorname arm --variant ARM1026EJ-S \
--numprocessors 4 \
--parameter compatibility=nopSVC --parameter UAL=1 --parameter endian=little \
"$@" \
-argv 300000
|
#!/bin/bash
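# Watchdog: wait until the process-group leader (whose PID equals this script's
# PGID) has exited, then terminate the remaining processes in the group, first
# with the default SIGTERM and then with SIGKILL.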
trap "" SIGHUP
trap kill_and_exit EXIT
PGID=$(echo $(ps -p $$ o pgid h))
KILL=false
kill_and_exit()
{
local ret=$?
echo Caught EXIT
do_kill -9
exit $?
}
print_ps()
{
ps -eO pgid | grep -E 'PGID|'" $PGID"
}
do_kill()
{
print_ps
PIDS=
for p in $(ps -e h o pid,pgid | grep -E ' +'${PGID}'$' | awk '{print $1}' | grep -v $$); do
if [ -e /proc/$p ]; then
PIDS="$PIDS $p"
fi
done
if [ -n "$PIDS" ]; then
print_ps
echo PID $PGID has died doing kill "$@" $PIDS
kill "$@" $PIDS
fi
}
main()
{
while sleep 2; do
if [ -e /proc/${PGID} ]; then
#print_ps
continue
fi
if [ "$KILL" = "false" ]; then
do_kill
KILL=true
else
do_kill -9
break
fi
done
}
main
|
source /tmp/lib.sh
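# Verify that sshd no longer offers SHA-1 based Diffie-Hellman key exchange
# algorithms; exit with $FAIL (defined in /tmp/lib.sh) if any is still advertised,
# otherwise exit with $PASS.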
if [ -n "$(sshd -T | grep -i kexalgorithms | grep -i diffie-hellman-group-exchange-sha1)" ]; then exit $FAIL; fi
if [ -n "$(sshd -T | grep -i kexalgorithms | grep -i diffie-hellman-group1-sha1)" ]; then exit $FAIL; fi
if [ -n "$(sshd -T | grep -i kexalgorithms | grep -i diffie-hellman-group14-sha1)" ]; then exit $FAIL; fi
exit $PASS
|
#!/bin/bash
#SBATCH -J Act_minsin_1
#SBATCH [email protected]
#SBATCH --mail-type=FAIL
#SBATCH -e /work/scratch/se55gyhe/log/output.err.%j
#SBATCH -o /work/scratch/se55gyhe/log/output.out.%j
#SBATCH -n 1 # Number of cores
#SBATCH --mem-per-cpu=6000
#SBATCH -t 23:59:00 # Hours, minutes and seconds, or '#SBATCH -t 10' -only mins
#module load intel python/3.5
python3 /home/se55gyhe/Act_func/sequence_tagging/arg_min/PE-my.py minsin 431 sgd 1 0.5099879732360412 0.01245660089173843 orth 0.3
|
#!/usr/bin/env bash
## datasets
carla_kitti_dataset_moduletest=../datasets/carla_kitti/carla_kitti_sr_lowquality_moduletest
carla_kitti_dataset_overfit=../datasets/carla_kitti/carla_kitti_sr_lowquality_overfit
carla_kitti_dataset=../datasets/carla_kitti/carla_kitti_sr_lowquality/
sceneflow_dataset=../datasets/sceneflow/
kitti2015_dataset=../datasets/kitti/data_scene_flow/training/
kitti2015_testset=../datasets/kitti/data_scene_flow/testing/
kitti2015_sr_dataset=../datasets/kitti/data_scene_flow_sr/training/
kitti2015_dense_dataset=../datasets/kitti/data_scene_flow_dense/training/
kitti2012_dataset=../datasets/kitti/data_stereo_flow/training/
## dir setting
pretrained_dir=logs/pretrained
submission_dir=logs/submission
## pretrained models
pretrained_PSMNet_sceneflow=${pretrained_dir}/PSMNet_pretrained_sceneflow/PSMNet_pretrained_sceneflow.tar
pretrained_PSMNet_kitti2012=${pretrained_dir}/PSMNet_pretrained_model_KITTI2012/PSMNet_pretrained_model_KITTI2012.tar
pretrained_PSMNet_kitti2015=${pretrained_dir}/PSMNet_pretrained_model_KITTI2015/PSMNet_pretrained_model_KITTI2015.tar
pretrained_EDSR_DIV2K=${pretrained_dir}/EDSR_pretrained_DIV2K/EDSR_baseline_x2.pt
## GPU settings
export CUDA_VISIBLE_DEVICES=0,1,2,3
nGPUs=$(( (${#CUDA_VISIBLE_DEVICES} + 1) / 2 ))
pretrained_SR_kitti=${submission_dir}/pretrain_SR_kitti/SR_train/190318163338_SR_loadScale_1_0.5_trainCrop_64_512_batchSize_64_lossWeights_1_kitti2015
pretrained_SRStereo_kitti=${submission_dir}/SRStereo_finetune_kitti/Stereo_train/190319080705_SRStereo_loadScale_1.0_trainCrop_64_512_batchSize_12_lossWeights_-1.0_0.0_1.0_kitti2015
finetuned_SRStereo_kitti=${submission_dir}/SRStereo_finetune_kitti/Stereo_train/190320013526_SRStereo_loadScale_1.0_trainCrop_64_512_batchSize_12_lossWeights_0.5_0.0_0.5_kitti2015
finetuned_SRdispStereoRefine_carla=${submission_dir}/SRdispStereoRefine_SRStereo_compare_carla/Stereo_train/190313215524_SRdispStereoRefine_loadScale_1.0_0.5_trainCrop_128_1024_batchSize_12_lossWeights_0.5_0.4_0.1_carla_kitti
pretrained_SRdisp_kitti=${submission_dir}/pretrain_SRdisp_kitti/SR_train/
## prepare: pretrain_SR_kitti (DONE: 190318163338)
## finetune SR on kitti2015
#PYTHONPATH=./ python train/SR_train.py --model SR --outputFolder submission/pretrain_SR_kitti --datapath $kitti2015_dataset --dataset kitti2015 --trainCrop 64 512 --epochs 6000 --save_every 300 --log_every 50 --batchsize_train 64 --lr 0.0001 --loadmodel $pretrained_EDSR_DIV2K --half --subtype subFinal
## SRStereo_finetune_kitti (DOING)
## finetune SRStereo initialized with PSMNet pretrained with KITTI and SR finetuned with KITTI without updating SR (DONE: 190319080705)
#PYTHONPATH=./ python train/Stereo_train.py --model SRStereo --dispscale 2 --outputFolder submission/SRStereo_finetune_kitti --datapath $kitti2015_dataset --dataset kitti2015 --load_scale 1 --trainCrop 64 512 --epochs 1200 --save_every 300 --log_every 50 --batchsize_train 12 --lr 0.001 300 0.0005 450 0.0002 600 0.0001 --lossWeights -1 0 1 --loadmodel $pretrained_SR_kitti $pretrained_PSMNet_kitti2015 --half --subtype subFinal
## finetune SRStereo initialized with prefinetuned SRStereo with updating SR (DONE: 190320013526)
#PYTHONPATH=./ python train/Stereo_train.py --model SRStereo --dispscale 2 --outputFolder submission/SRStereo_finetune_kitti --datapath $kitti2015_dataset --dataset kitti2015 --load_scale 1 --trainCrop 64 512 --epochs 300 --save_every 50 --log_every 50 --batchsize_train 12 --lr 0.0001 --lossWeights 0.5 0 0.5 --loadmodel $pretrained_SRStereo_kitti --half --subtype subFinal
## submission
#PYTHONPATH=./ python submission/Stereo_sub.py --model SRStereo --dispscale 2 --datapath $kitti2015_testset --dataset kitti2015 --loadmodel $finetuned_SRStereo_kitti --load_scale 1 --subtype subTest --half
## prepare: pretrain_SRdisp_kitti (TODO)
## generate GTs of SR and dense disparity map with finetuned SRStereo
#PYTHONPATH=./ python submission/SR_sub.py --datapath $kitti2015_dataset --dataset kitti2015 --loadmodel $finetuned_SRStereo_kitti --load_scale 2 1 --subtype subTrainEval --half
#PYTHONPATH=./ python submission/Stereo_sub.py --model SRStereo --dispscale 2 --datapath $kitti2015_dataset --dataset kitti2015 --loadmodel $finetuned_SRStereo_kitti --load_scale 1 --subtype subTrainEval --half
## finetune SRdisp on kitti2015_dense: compare different initialization checkpoints (SERVER 135)
#PYTHONPATH=./ python train/SR_train.py --model SRdisp --outputFolder submission/pretrain_SRdisp_kitti --datapath $kitti2015_dense_dataset --dataset kitti2015_dense --trainCrop 64 2040 --epochs 1500 --save_every 300 --log_every 50 --batchsize_train 16 --lr 0.0005 300 0.0002 500 0.0001 700 0.00005 900 0.00002 1100 0.00001 --loadmodel $finetuned_SRdispStereoRefine_carla --half --subtype subFinal
#
#
## finetune SRdispStereoRefine with updating SRdisp
#PYTHONPATH=./ python train/Stereo_train.py --model SRdispStereoRefine --dispscale 2 --outputFolder submission/SRdispStereoRefine_finetune_kitti --datapath $kitti2015_dataset --dataset kitti2015 --load_scale 1 --trainCrop 64 512 --epochs 300 --save_every 50 --log_every 50 --itRefine 2 --batchsize_train 12 --lr 0.0001 --lossWeights 0.5 0 0.5 --loadmodel $pretrained_SRdisp_kitti $finetuned_SRStereo_kitti --half --subtype subFinal
|
apt-get -y install aptitude git
aptitude update
aptitude install -y libmicrohttpd-dev libjansson-dev libnice-dev libssl-dev libsofia-sip-ua-dev libglib2.0-dev libopus-dev libogg-dev libcurl4-openssl-dev pkg-config gengetopt libtool automake apache2
cd /usr/local/src
wget https://github.com/cisco/libsrtp/archive/v2.0.0.tar.gz
tar xfv v2.0.0.tar.gz
cd libsrtp-2.0.0
./configure --prefix=/usr --enable-openssl
make shared_library && make install
cd ..
git clone https://github.com/meetecho/janus-gateway.git
cd janus-gateway
sh autogen.sh
./configure --prefix=/opt/janus
make
make install
make configs
cp -a /usr/local/src/janus-gateway/html/. /var/www/html/
|
#!/bin/bash
set -eu
# Delete Kube-OVN components
kubectl delete -f https://raw.githubusercontent.com/alauda/kube-ovn/master/yamls/kube-ovn.yaml --ignore-not-found=true
kubectl delete -f https://raw.githubusercontent.com/alauda/kube-ovn/master/yamls/ovn.yaml --ignore-not-found=true
kubectl delete -f https://raw.githubusercontent.com/alauda/kube-ovn/master/yamls/crd.yaml --ignore-not-found=true
# Remove annotations in all pods of all namespaces
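# 'kubectl get ns -o name' prints 'namespace/<name>'; cut -c 11- strips the 10-character 'namespace/' prefix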
for ns in $(kubectl get ns -o name |cut -c 11-); do
echo "annotating pods in ns:$ns"
kubectl annotate pod --all ovn.kubernetes.io/cidr- -n "$ns"
kubectl annotate pod --all ovn.kubernetes.io/gateway- -n "$ns"
kubectl annotate pod --all ovn.kubernetes.io/ip_address- -n "$ns"
kubectl annotate pod --all ovn.kubernetes.io/logical_switch- -n "$ns"
kubectl annotate pod --all ovn.kubernetes.io/mac_address- -n "$ns"
kubectl annotate pod --all ovn.kubernetes.io/port_name- -n "$ns"
done
# Remove annotations in namespaces and nodes
kubectl annotate no --all ovn.kubernetes.io/cidr-
kubectl annotate no --all ovn.kubernetes.io/gateway-
kubectl annotate no --all ovn.kubernetes.io/ip_address-
kubectl annotate no --all ovn.kubernetes.io/logical_switch-
kubectl annotate no --all ovn.kubernetes.io/mac_address-
kubectl annotate no --all ovn.kubernetes.io/port_name-
kubectl annotate ns --all ovn.kubernetes.io/cidr-
kubectl annotate ns --all ovn.kubernetes.io/exclude_ips-
kubectl annotate ns --all ovn.kubernetes.io/gateway-
kubectl annotate ns --all ovn.kubernetes.io/logical_switch-
kubectl annotate ns --all ovn.kubernetes.io/private-
kubectl annotate ns --all ovn.kubernetes.io/allow-
|
#!/bin/bash
sort -r -n -k 5.1,5.5 emp2 #sort numerically by field 5 character 1 to field 5 character 5, in reverse order
|
#!/bin/bash
# ==============================================================================
# Copyright 2019 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
export LANG=en_US.UTF-8
export LC_ALL=en_US.UTF-8
export LC_CTYPE=en_US.UTF-8
if [ ! -d log ]; then
mkdir log
else
rm -r log/*
fi
if [ ! -d output ]; then
mkdir output
else
rm -r output/*
fi
export FLAGS_cudnn_deterministic=true
export FLAGS_cpu_deterministic=true
PWD_DIR=`pwd`
DATA=../data/
BERT_DIR=cased_L-24_H-1024_A-16
CPT_EMBEDDING_PATH=../retrieve_concepts/KB_embeddings/nell_concept2vec.txt
python3 src/run_record.py \
--batch_size 6 \
--do_train true \
--do_predict true \
--do_lower_case false \
--init_pretraining_params $BERT_DIR/params \
--train_file $DATA/ReCoRD/train.json \
--predict_file $DATA/ReCoRD/dev.json \
--vocab_path $BERT_DIR/vocab.txt \
--bert_config_path $BERT_DIR/bert_config.json \
--freeze false \
--save_steps 4000 \
--weight_decay 0.01 \
--warmup_proportion 0.1 \
--learning_rate 3e-5 \
--epoch 4 \
--max_seq_len 384 \
--doc_stride 128 \
--concept_embedding_path $CPT_EMBEDDING_PATH \
--use_nell true \
--random_seed 45 \
--checkpoints output/ 1>$PWD_DIR/log/train.log 2>&1
|
#!/usr/bin/env bash
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2020-05-29 12:35:16 +0100 (Fri, 29 May 2020)
#
# https://github.com/HariSekhon/DevOps-Python-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/HariSekhon
#
set -euo pipefail
[ -n "${DEBUG:-}" ] && set -x
srcdir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck disable=SC1090
. "$srcdir/lib.sh"
# statements should be named in format: Barclays_Statement_YYYY-MM-DD.csv
export STATEMENT_GLOB="Barclays_Statement_[[:digit:]][[:digit:]][[:digit:]][[:digit:]]-[[:digit:]][[:digit:]]-[[:digit:]][[:digit:]].csv"
# Barclays CSV statements often have whitespace starting fields instead of blank or 'null'
# unfortunately this resets all the original CSV timestamps each run so it's best to do only where needed
# UPDATE: no longer necessary, converter just ignores these blank fields now in the validation
#for statement in $STATEMENT_GLOB; do
#perl -pi -e 's/^\s+,/,/' "$statement"
#done
generate_crunch_statements --reverse-order
|
#!/bin/bash
# Activate environment
source /home/nbuser/anaconda3_420/bin/activate
# Install packages
conda update -c conda-forge conda conda-build
conda install -y -c conda-forge folium=0.9.1 jinja2=2.10* xlrd networkx=2.3* missingno=0.4* bokeh pandas=0.25*
pip install --upgrade pip
pip install pyviz plotly==4.0.0
source /home/nbuser/anaconda3_420/bin/deactivate
|
#!/bin/bash
# Default values for testing files
relative_path_test_directory=$'test/auto/'
input_file_suffix=$'-input.bc'
output_file_suffix=$'-output.txt'
# This will output whether each test file passed or failed
diffFiles() {
if diff <(java -cp target/SimpleBC-1.0-jar-with-dependencies.jar com.codecaptured.SimpleBC.SimpleBC $1) $2 > /dev/null
then
echo "Pass: " $1
else
echo "Fail: " $1
fi
}
# Go through all the files in the test directory that match the input suffix
for file in $relative_path_test_directory*$input_file_suffix
do
# Compare the input files output with the output file
diffFiles $file ${file%$input_file_suffix}$output_file_suffix
done
|
# This file downloads the IWSLT15 English-Vietnamese dataset.
NMT_ROOT=$(cd $(dirname $0) && pwd)/..
cd $NMT_ROOT/dataset; wget https://www.cs.toronto.edu/~bojian/Downloads/iwslt15_en-vi.tar.gz; \
tar xvzf iwslt15_en-vi.tar.gz; rm -f iwslt15_en-vi.tar.gz
|
kubectl delete pod $POD_NAME &
|
#!/usr/bin/env bash
#
# Customize an existing OS image using systemd-nspawn (from systemd-container package).
#
# For other architecture support (e.g. arm) make sure binfmt-support and qemu-user-static
# packages are installed, e.g.
#
# apt install systemd-container qemu binfmt-support qemu-user-static
#
# This script relies on tools available in the bc, coreutils, rsync, and util-linux packages, e.g.
#
# apt install bc rsync util-linux
declare -A FS_OPTS
FS_OPTS[btrfs]="-q -m dup"
FS_OPTS[ext4]="-q -m 2 -O ^64bit,^metadata_csum"
FS_OPTS[vfat]=""
declare -A MNT_OPTS
MNT_OPTS[btrfs]="defaults,noatime,nodiratime,commit=600,compress-force=zstd"
MNT_OPTS[ext4]="defaults,commit=600"
MNT_OPTS[vfat]="defaults"
declare -A DEST_MNT_OPTS
DEST_MNT_OPTS[btrfs]="defaults,noatime,nodiratime,compress=zstd"
DEST_MNT_OPTS[ext4]="defaults,noatime"
DEST_MNT_OPTS[vfat]="defaults"
SRC_BOOT_LABEL="SOLARBOOT"
SRC_BOOT_PARTNUM=""
SRC_ROOT_LABEL="SOLARNODE"
SRC_ROOT_PARTNUM=""
DEST_ROOT_FSTYPE=""
BOOT_DEV_LABEL="${BOOT_DEV_LABEL:-SOLARBOOT}"
BOOT_DEV_MOUNT="${BOOT_DEV_MOUNT:-/boot}"
ROOT_DEV_LABEL="${ROOT_DEV_LABEL:-SOLARNODE}"
CLEAN_IMAGE=""
COMPRESS_DEST_IMAGE=""
COMPRESS_DEST_OPTS="-8 -T 0"
DEST_PATH=""
EXPAND_SOLARNODE_FS=""
INTERACTIVE_MODE=""
KEEP_SSH=""
SCRIPT_ARGS=""
SHRINK_SOLARNODE_FS=""
SRC_IMG=""
VERBOSE=""
ERR=""
do_help () {
cat 1>&2 <<EOF
Usage: $0 <arguments> src script [bind-mounts]
-a <args> - extra arguments to pass to the script
-c - clean out log files, temp files, SSH host keys from final image
-E <size MB> - shrink the SOLARNODE partition by this amount, in MB
-e <size MB> - expand the SOLARNODE partition by this amount, in MB
-i - interactive mode; run without script
-N <boot part #> - the source image boot partition number, instead of using label
-n <root part #> - the source image root partition number, instead of using label
-P <boot label> - the source image boot partition label; defaults to SOLARBOOT
-p <root label> - the source image root partition label; defaults to SOLARNODE
-M <boot mount> - the boot partition mount directory; defaults to /boot
-o <out name> - the output name for the final image
-r <fstype> - force a specific root filesystem type in the destination image
-S - if -c set, keep SSH host keys
-v - increase verbosity of tasks
-Z <options> - xz options to use on final image; defaults to '-8 -T 0'
-z - compress final image with xz
The bind-mounts argument must adhere to the systemd-nspawn --bind-ro syntax,
that is something like 'src:mount'. Multiple mounts should be separated by
commas. These mounts will then be available to the customization script.
Example that mounts /home/me as /var/tmp/me in the chroot:
./customize.sh solarnodeos-20200820.img my-cust.sh /home/me:/var/tmp/me
To expand the root filesystem by 500 MB:
./customize.sh -e 500 solarnodeos-20200820.img my-cust.sh
To interactively customize the image (my-cust.sh is not run, but copied into image):
./customize.sh -i solarnodeos-20200820.img my-cust.sh
EOF
}
while getopts ":a:cE:e:io:M:N:n:P:p:r:SvZ:z" opt; do
case $opt in
a) SCRIPT_ARGS="${OPTARG}";;
c) CLEAN_IMAGE="TRUE";;
E) SHRINK_SOLARNODE_FS="${OPTARG}";;
e) EXPAND_SOLARNODE_FS="${OPTARG}";;
i) INTERACTIVE_MODE="TRUE";;
o) DEST_PATH="${OPTARG}";;
M) BOOT_DEV_MOUNT="${OPTARG}";;
N) SRC_BOOT_PARTNUM="${OPTARG}";;
n) SRC_ROOT_PARTNUM="${OPTARG}";;
P) SRC_BOOT_LABEL="${OPTARG}";;
p) SRC_ROOT_LABEL="${OPTARG}";;
r) DEST_ROOT_FSTYPE="${OPTARG}";;
S) KEEP_SSH="TRUE";;
v) VERBOSE="TRUE";;
Z) COMPRESS_DEST_OPTS="${OPTARG}";;
z) COMPRESS_DEST_IMAGE="TRUE";;
*)
echo "Unknown argument ${OPTARG}"
do_help
exit 1
esac
done
shift $(($OPTIND - 1))
if [ $(id -u) -ne 0 ]; then
echo "This script must be run as root."
exit 1
fi
if ! command -v bc >/dev/null; then
echo 'Error: bc is not available. Perhaps `apt install bc`?'
exit 1
fi
if ! command -v sfdisk >/dev/null; then
echo 'Error: sfdisk is not available. Perhaps `apt install util-linux`?'
exit 1
fi
IMG="$1"
if [ -z "$IMG" ]; then
echo 'Must specify source image as argument.'
exit 1
fi
if [ ! -e "$IMG" ]; then
echo "Error: source image '$IMG' not available."
exit 1
fi
SCRIPT="$2"
if [ -z "$SCRIPT" ]; then
echo 'Must specify script as argument.'
exit 1
fi
if [ ! -e "$SCRIPT" ]; then
echo "Error: script '$SCRIPT' not available."
exit 1
fi
BIND_MOUNTS="$3"
FSTYPE_SOLARNODE=""
FSTYPE_SOLARBOOT=""
LOOPDEV=""
SOLARBOOT_PART=""
SOLARNODE_PART=""
SRC_IMG=$(mktemp -t img-XXXXX)
SRC_MOUNT=$(mktemp -d -t sn-XXXXX)
SCRIPT_DIR=""
copy_src_img () {
if [ -n "$VERBOSE" ]; then
echo "Creating source image copy $SRC_IMG"
fi
if [ "${IMG##*.}" = "xz" ]; then
if ! xzcat ${VERBOSE//TRUE/-v} "$IMG" >"$SRC_IMG"; then
echo "Error extracting $IMG to $SRC_IMG"
exit 1
fi
elif ! cp ${VERBOSE//TRUE/-v} "$IMG" "$SRC_IMG"; then
echo "Error: unable to copy $IMG to $SRC_IMG"
exit 1
fi
if [ -n "$EXPAND_SOLARNODE_FS" ]; then
if ! truncate -s +${EXPAND_SOLARNODE_FS}M "$SRC_IMG"; then
echo "Error: unable to expand $SRC_IMG by ${EXPAND_SOLARNODE_FS}MB."
elif [ -n "$VERBOSE" ]; then
echo "Expanded $SRC_IMG by ${EXPAND_SOLARNODE_FS}MB."
fi
fi
}
clean_src_img () {
rm -f "$SRC_IMG"
if [ -n "$VERBOSE" ]; then
echo "Deleted $SRC_IMG"
fi
}
setup_src_loopdev () {
LOOPDEV=$(losetup -P -f --show $SRC_IMG)
if [ -z "$LOOPDEV" ]; then
echo "Error: loop device not discovered for image $SRC_IMG"
exit 1
elif [ -n "$VERBOSE" ]; then
echo "Created loop device $LOOPDEV for source image."
fi
# the system seems to need a brief pause before partition labels become visible to lsblk
sleep 1
if [ -n "$SRC_BOOT_PARTNUM" ]; then
SOLARBOOT_PART=$(lsblk -npo kname $LOOPDEV |tail +$((1+$SRC_BOOT_PARTNUM)) |head -n 1)
else
SOLARBOOT_PART=$(lsblk -npo kname,label $LOOPDEV |grep -i $SRC_BOOT_LABEL |cut -d' ' -f 1)
fi
if [ -z "$SOLARBOOT_PART" ]; then
echo "Error: $SRC_BOOT_LABEL partition not discovered"
exit 1
elif [ -n "$VERBOSE" ]; then
echo "Discovered source $SRC_BOOT_LABEL partition ${SOLARBOOT_PART}."
fi
if [ -n "$SRC_BOOT_PARTNUM" ]; then
SOLARNODE_PART=$(lsblk -npo kname $LOOPDEV |tail +$((1+$SRC_ROOT_PARTNUM)) |head -n 1)
else
SOLARNODE_PART=$(lsblk -npo kname,label $LOOPDEV |grep -i $SRC_ROOT_LABEL |cut -d' ' -f 1)
fi
if [ -z "$SOLARNODE_PART" ]; then
echo "Error: $SRC_ROOT_LABEL partition not discovered"
exit 1
elif [ -n "$VERBOSE" ]; then
echo "Discovered source $SRC_ROOT_LABEL partition ${SOLARNODE_PART}."
fi
if [ -n "$EXPAND_SOLARNODE_FS" ]; then
local part_num=$(sfdisk -ql "$LOOPDEV" -o Device |tail +2 |awk '{print NR,$0}' |grep "$SOLARNODE_PART" |cut -d' ' -f1)
if [ -n "$VERBOSE" ]; then
echo "Expanding partition $part_num on ${LOOPDEV} by $EXPAND_SOLARNODE_FS MB."
fi
echo ",+${EXPAND_SOLARNODE_FS}M" |sfdisk ${LOOPDEV} -N${part_num} --no-reread -q
partx -u ${LOOPDEV}
fi
if ! mount "$SOLARNODE_PART" "$SRC_MOUNT"; then
echo "Error: unable to mount $SOLARNODE_PART on $SRC_MOUNT"
exit 1
elif [ -n "$VERBOSE" ]; then
echo "Mounted source $SRC_ROOT_LABEL filesystem on $SRC_MOUNT."
fi
FSTYPE_SOLARNODE=$(findmnt -f -n -o FSTYPE "$SOLARNODE_PART")
if [ -z "$FSTYPE_SOLARNODE" ]; then
echo "Error: $SRC_ROOT_LABEL filesystem type not discovered."
elif [ -n "$VERBOSE" ]; then
echo "Discovered source $SRC_ROOT_LABEL filesystem type $FSTYPE_SOLARNODE."
fi
if [ -n "$EXPAND_SOLARNODE_FS" ]; then
case $FSTYPE_SOLARNODE in
btrfs) btrfs filesystem resize max "$SRC_MOUNT";;
ext4) resize2fs "$SOLARNODE_PART";;
*) echo "Filesystem expansion for type $FSTYPE_SOLARNODE not supported.";;
esac
fi
if [ ! -d "$SRC_MOUNT$BOOT_DEV_MOUNT" ]; then
if ! mkdir -p "$SRC_MOUNT$BOOT_DEV_MOUNT"; then
echo "Error: unable to create $SRC_MOUNT$BOOT_DEV_MOUNT directory to mount $SOLARBOOT_PART."
exit 1
fi
fi
if ! mount "$SOLARBOOT_PART" "$SRC_MOUNT$BOOT_DEV_MOUNT"; then
echo "Error: unable to mount $SOLARBOOT_PART on $SRC_MOUNT$BOOT_DEV_MOUNT."
exit 1
elif [ -n "$VERBOSE" ]; then
echo "Mounted source $SRC_BOOT_LABEL filesystem on $SRC_MOUNT$BOOT_DEV_MOUNT."
fi
FSTYPE_SOLARBOOT=$(findmnt -f -n -o FSTYPE "$SOLARBOOT_PART")
if [ -z "$FSTYPE_SOLARBOOT" ]; then
echo "Error: $SRC_BOOT_LABEL filesystem type not discovered."
elif [ -n "$VERBOSE" ]; then
echo "Discovered source $SRC_BOOT_LABEL filesystem type $FSTYPE_SOLARBOOT."
fi
}
close_src_loopdev () {
if [ -n "$VERBOSE" ]; then
echo "Unmounting source $SRC_BOOT_LABEL filesystem $SRC_MOUNT$BOOT_DEV_MOUNT."
fi
umount "$SRC_MOUNT$BOOT_DEV_MOUNT"
if [ -n "$VERBOSE" ]; then
echo "Unmounting source $SRC_ROOT_LABEL filesystem $SRC_MOUNT."
fi
umount "$SRC_MOUNT"
if [ -n "$VERBOSE" ]; then
echo "Closing source image loop device $LOOPDEV."
fi
losetup -d "$LOOPDEV"
rmdir "$SRC_MOUNT"
}
disable_ld_preload () {
if [ -e "$SRC_MOUNT/etc/ld.so.preload" ]; then
echo -n "Disabling preload shared libs from $SRC_MOUNT/etc/ld.so.preload... "
sed -i 's/^/#/' "$SRC_MOUNT/etc/ld.so.preload"
echo 'OK'
fi
}
enable_ld_preload () {
if [ -e "$SRC_MOUNT/etc/ld.so.preload" ]; then
echo -n "Enabling preload shared libs in $SRC_MOUNT/etc/ld.so.preload... "
sed -i 's/^#//' "$SRC_MOUNT/etc/ld.so.preload"
echo 'OK'
fi
}
setup_mounts () {
# be sure to work with UUID= and PARTUUID= and LABEL= forms; also, work with /boot and /boot/firmware
if grep 'UUID=[^ ]* */boot' $SRC_MOUNT/etc/fstab >/dev/null 2>&1; then
echo -n "Changing /boot mount in $SRC_MOUNT/etc/fstab to use label $BOOT_DEV_LABEL... "
sed -i 's/^.*UUID=[^ ]* *\/boot/LABEL='"$BOOT_DEV_LABEL"' \/boot/' $SRC_MOUNT/etc/fstab \
&& echo "OK" || echo "ERROR"
elif grep 'LABEL=[^ ]* */boot' $SRC_MOUNT/etc/fstab >/dev/null 2>&1; then
if ! grep 'LABEL='"$BOOT_DEV_LABEL" $SRC_MOUNT/etc/fstab >/dev/null 2>&1; then
echo -n "Changing /boot mount in $SRC_MOUNT/etc/fstab to use label $BOOT_DEV_LABEL... "
sed -i 's/^.*LABEL=[^ ]* *\/boot/LABEL='"$BOOT_DEV_LABEL"' \/boot/' $SRC_MOUNT/etc/fstab \
&& echo "OK" || echo "ERROR"
fi
fi
if grep 'UUID=[^ ]* */ ' $SRC_MOUNT/etc/fstab >/dev/null 2>&1; then
echo -n "Changing / mount in $SRC_MOUNT/etc/fstab to use label $ROOT_DEV_LABEL... "
sed -i 's/^.*UUID=[^ ]* *\/ /LABEL='"$ROOT_DEV_LABEL"' \/ /' $SRC_MOUNT/etc/fstab \
&& echo "OK" || echo "ERROR"
elif grep 'LABEL=[^ ]* */ ' $SRC_MOUNT/etc/fstab >/dev/null 2>&1; then
if ! grep 'LABEL='"$ROOT_DEV_LABEL" $SRC_MOUNT/etc/fstab >/dev/null 2>&1; then
echo -n "Changing / mount in $SRC_MOUNT/etc/fstab to use label $ROOT_DEV_LABEL... "
sed -i 's/^.*LABEL=[^ ]* *\/ /LABEL='"$ROOT_DEV_LABEL"' \/ /' $SRC_MOUNT/etc/fstab \
&& echo "OK" || echo "ERROR"
fi
fi
if ! grep '^tmpfs /run ' $SRC_MOUNT/etc/fstab >/dev/null 2>&1; then
echo -n "Adding /run mount in $SRC_MOUNT/etc/fstab with explicit size... "
echo 'tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=50%,mode=755 0 0' >>$SRC_MOUNT/etc/fstab \
&& echo "OK" || echo "ERROR"
fi
}
setup_chroot () {
disable_ld_preload
setup_mounts
if [ -L "$SRC_MOUNT/etc/resolv.conf" -o -e "$SRC_MOUNT/etc/resolv.conf" ]; then
if ! mv "$SRC_MOUNT/etc/resolv.conf" "$SRC_MOUNT/etc/resolv.conf.sn-cust-bak"; then
echo "Error: unable to rename $SRC_MOUNT/etc/resolv.conf."
exit 1
fi
fi
echo 'nameserver 1.1.1.1' >"$SRC_MOUNT/etc/resolv.conf"
SCRIPT_DIR=$(mktemp -d -t sn-XXXXX -p "$SRC_MOUNT/var/tmp")
if [ -n "$VERBOSE" ]; then
echo "Created script directory $SCRIPT_DIR."
fi
if ! cp -a ${VERBOSE//TRUE/-v} "$SCRIPT" "$SCRIPT_DIR/customize"; then
echo "Error: unable to copy $SCRIPT to $SCRIPT_DIR"
exit 1
fi
chmod ugo+x "$SCRIPT_DIR/customize"
}
clean_chroot_fluff () {
if [ -n "$VERBOSE" ]; then
echo "Finding archive logs to delete..."
find "$SRC_MOUNT/var/log" -type f \( -name '*.gz' -o -name '*.1' \) -print
fi
find "$SRC_MOUNT/var/log" -type f \( -name '*.gz' -o -name '*.1' \) -delete
if [ -n "$VERBOSE" ]; then
echo "Finding archive logs to truncate..."
find "$SRC_MOUNT/var/log" -type f -size +0c -print
fi
find "$SRC_MOUNT/var/log" -type f -size +0c -exec sh -c '> {}' \;
if [ -n "$VERBOSE" ]; then
echo "Finding apt cache files to delete..."
find "$SRC_MOUNT/var/cache/apt" -type f -name '*.bin' -print
fi
find "$SRC_MOUNT/var/cache/apt" -type f -name '*.bin' -delete
if [ -e "$SRC_MOUNT/var/tmp" ]; then
if [ -n "$VERBOSE" ]; then
echo "Deleting temporary files from /var/tmp..."
find "$SRC_MOUNT/var/tmp" -type f -print
fi
find "$SRC_MOUNT/var/tmp" -type f -delete
fi
if [ -n "$VERBOSE" ]; then
echo "Finding localized man files to delete..."
find "$SRC_MOUNT/usr/share/man" -maxdepth 1 -type d \( -name '??' -o -name '??_*' -o -name '??.*' \) -print
fi
find "$SRC_MOUNT/usr/share/man" -maxdepth 1 -type d \( -name '??' -o -name '??_*' -o -name '??.*' \) \
-exec rm -rf {} \;
if [ -s "$SRC_MOUNT/etc/machine-id" ]; then
if [ -n "$VERBOSE" ]; then
echo "Truncating /etc/machine-id"
fi
sh -c ">$SRC_MOUNT/etc/machine-id"
fi
if [ -e "$SRC_MOUNT/var/lib/dbus/machine-id" ]; then
if [ -n "$VERBOSE" ]; then
echo "Deleting /var/lib/dbus/machine-id"
fi
rm -f "$SRC_MOUNT/var/lib/dbus/machine-id"
fi
if [ -n "$KEEP_SSH" ]; then
if [ -n "$VERBOSE" ]; then
echo "Preserving SSH host keys."
fi
else
if [ -n "$VERBOSE" ]; then
echo "Deleting SSH host keys..."
find "$SRC_MOUNT/etc/ssh" -type f -name 'ssh_host_*' -print
fi
find "$SRC_MOUNT/etc/ssh" -type f -name 'ssh_host_*' -delete
fi
}
clean_chroot () {
if [ -L "$SRC_MOUNT/etc/resolv.conf.sn-cust-bak" ]; then
if [ -n "$VERBOSE" ]; then
echo "Restoring original $SRC_MOUNT/etc/resolv.conf"
fi
rm -f "$SRC_MOUNT/etc/resolv.conf"
mv "$SRC_MOUNT/etc/resolv.conf.sn-cust-bak" "$SRC_MOUNT/etc/resolv.conf"
fi
if [ -d "$SCRIPT_DIR" ]; then
rm -rf ${VERBOSE//TRUE/-v} "$SCRIPT_DIR"
fi
enable_ld_preload
if [ -n "$CLEAN_IMAGE" ]; then
clean_chroot_fluff
fi
}
execute_chroot () {
local binds="$1"
if [ -n "$binds" ]; then
if [ -n "$VERBOSE" ]; then
echo "Binding container dir $binds"
fi
binds="--bind=$binds"
fi
if [ -n "$INTERACTIVE_MODE" ]; then
if ! systemd-nspawn -M solarnode-cust -D "$SRC_MOUNT" \
--chdir=${SCRIPT_DIR##${SRC_MOUNT}} \
${binds}; then
ERR="Error running setup script in container."
echo "!!!"
echo "!!! Error with interactive setup in container!"
echo "!!!"
fi
elif ! systemd-nspawn -M solarnode-cust -D "$SRC_MOUNT" \
--chdir=${SCRIPT_DIR##${SRC_MOUNT}} \
${binds} \
./customize \
${VERBOSE//TRUE/-v} \
${SCRIPT_ARGS}; then
ERR="Error running setup script in container."
echo "!!!"
echo "!!! Error running setup script in container!"
echo "!!!"
fi
}
copy_bootloader () {
local dev="$1"
# note: following assumes MBR, with first 440 bytes the boot loader
local start_len="440"
local bl_offset="1"
local bl_len=$(echo "$(sfdisk -ql $dev -o Start |tail +2 |head -1) - $bl_offset" |bc)
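# copy the 440-byte MBR boot code first, then the sectors between the MBR and the first partition (the post-MBR gap, which may hold further boot loader stages)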
if ! dd status=none if=$SRC_IMG of=$dev bs=$start_len count=1; then
echo "Error: problem copying MBR bootloader from $SRC_IMG to $dev."
elif [ -n "$VERBOSE" ]; then
echo "Copied $start_len bootloader bytes from $SRC_IMG to $dev."
fi
if ! dd status=none if=$SRC_IMG of=$dev bs=512 skip=$bl_offset seek=$bl_offset count=$bl_len; then
echo "Error: problem copying bootloader from $SRC_IMG to $dev."
elif [ -n "$VERBOSE" ]; then
echo "Copied ${bl_len} sectors starting from $bl_offset for bootloader from $SRC_IMG to $dev."
fi
}
LAST_PARTUUID=""
copy_part () {
local part="$1"
local fstype="$2"
local label="$3"
local src="$4"
if [ -n "$VERBOSE" ]; then
echo "Creating $part $fstype filesystem with options ${FS_OPTS[$fstype]}."
fi
if ! mkfs.$fstype ${FS_OPTS[$fstype]} "$part"; then
echo "Error: failed to create $part $fstype filesystem."
exit 1
fi
local tmp_mount=$(mktemp -d -t sn-XXXXX)
if [ -n "$VERBOSE" ]; then
echo "Mounting $part on $tmp_mount with options ${MNT_OPTS[$fstype]}."
fi
if ! mount -o ${MNT_OPTS[$fstype]} "$part" "$tmp_mount"; then
echo "Error: failed to mount $part on $tmp_mount."
exit 1
fi
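# record this partition's PARTUUID; copy_img later passes the root partition's value to setup_boot_cmdline for the kernel root= argument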
LAST_PARTUUID=$(blkid -o export "$part" |grep PARTUUID |cut -d= -f2)
if [ -n "$VERBOSE" ]; then
echo "$part PARTUUID = $LAST_PARTUUID"
echo "Labling $part as $label"
fi
case $fstype in
btrfs) btrfs filesystem label "$tmp_mount" "$label";;
ext*) e2label "$part" "$label";;
vfat) fatlabel "$part" "$label";;
esac
if [ -n "$VERBOSE" ]; then
echo "Copying files from $src to $tmp_mount..."
fi
rsync -aHWXhx ${VERBOSE//TRUE/--info=progress2,stats1} "$src"/ "$tmp_mount"/
umount "$tmp_mount"
rmdir "$tmp_mount"
}
setup_boot_cmdline () {
local part="$1"
local fstype="$2"
local rootpartuuid="$3"
local tmp_mount=$(mktemp -d -t sn-XXXXX)
if [ -n "$VERBOSE" ]; then
echo "Mounting $part on $tmp_mount with options ${MNT_OPTS[$fstype]}."
fi
if ! mount -o ${MNT_OPTS[$fstype]} "$part" "$tmp_mount"; then
echo "Error: failed to mount $part on $tmp_mount."
exit 1
fi
if [ -e "$tmp_mount/cmdline.txt" ]; then
if grep ' root=' "$tmp_mount/cmdline.txt" >/dev/null 2>&1; then
echo -n "Changing root to PARTUUID=$rootpartuuid in $tmp_mount/cmdline.txt... "
sed -i 's/root=[^ ]*/root=PARTUUID='"$rootpartuuid"'/' $tmp_mount/cmdline.txt \
&& echo "OK" || echo "ERROR"
fi
if [ -n "$DEST_ROOT_FSTYPE" ]; then
if grep ' rootfstype=' "$tmp_mount/cmdline.txt" >/dev/null 2>&1; then
echo -n "Changing rootfstype to $DEST_ROOT_FSTYPE in $tmp_mount/cmdline.txt... "
sed -i 's/rootfstype=[^ ]*/rootfstype='"$DEST_ROOT_FSTYPE"'/' $tmp_mount/cmdline.txt \
&& echo "OK" || echo "ERROR"
fi
fi
if grep ' init=' "$tmp_mount/cmdline.txt" >/dev/null 2>&1; then
echo -n "Removing init from $tmp_mount/cmdline.txt... "
sed -i 's/ init=[^ ]*//' $tmp_mount/cmdline.txt \
&& echo "OK" || echo "ERROR"
fi
if ! grep ' fsck.repair=' "$tmp_mount/cmdline.txt" >/dev/null 2>&1; then
echo -n "Adding fsck.repair=yes to $tmp_mount/cmdline.txt... "
sed -i '1s/$/ fsck.repair=yes/' $tmp_mount/cmdline.txt \
&& echo "OK" || echo "ERROR"
fi
fi
umount "$tmp_mount"
rmdir "$tmp_mount"
}
copy_img () {
local size=$(wc -c <"$SRC_IMG")
local size_mb=$(echo "$size / 1024 / 1024" |bc)
local size_sector=""
local size_sector_in=""
if [ -n "$SHRINK_SOLARNODE_FS" ]; then
size_mb=$(echo "$size_mb - $SHRINK_SOLARNODE_FS" |bc)
if [ -n "$VERBOSE" ]; then
echo "Shrinking output image by $SHRINK_SOLARNODE_FS MB."
fi
local part_num=$(sfdisk -ql "$LOOPDEV" -o Device |tail +2 |awk '{print NR,$0}' \
|grep "${LOOPDEV}${SOLARNODE_PART##$LOOPDEV}" |cut -d' ' -f1)
size_sector_in=$(sfdisk -ql "$LOOPDEV" -o Sectors |tail +$((1 + $part_num)) |head -1)
size_sector=$(echo "$size_sector_in - $SHRINK_SOLARNODE_FS * 1024 * 1024 / 512" |bc)
fi
local out_img=$(mktemp -t img-XXXXX)
if [ -n "$VERBOSE" ]; then
echo "Creating ${size_mb}MB output image $out_img."
fi
if ! dd if=/dev/zero of="$out_img" bs=1M count=$size_mb status=none; then
echo "Error creating ${size_mb}MB output image $out_img."
exit 1
fi
chmod 644 "$out_img"
local out_loopdev=$(losetup -P -f --show $out_img)
if [ -n "$VERBOSE" ]; then
echo "Opened output image loop device $out_loopdev."
fi
if [ -n "$size_sector" ]; then
if ! sfdisk -q -d "$LOOPDEV" |sed -e "s/size=.*$size_sector_in/size=$size_sector/" |sfdisk -q "$out_loopdev"; then
echo "Error copying partition table from $LOOPDEV to $outdev, shrunk from $size_sector_in to $size_sector sectors."
exit 1
fi
elif ! sfdisk -q -d "$LOOPDEV" |sfdisk -q "$out_loopdev"; then
echo "Error copying partition table from $LOOPDEV to $outdev."
exit 1
fi
copy_bootloader "$out_loopdev"
copy_part "${out_loopdev}${SOLARBOOT_PART##$LOOPDEV}" "$FSTYPE_SOLARBOOT" "SOLARBOOT" "$SRC_MOUNT$BOOT_DEV_MOUNT"
copy_part "${out_loopdev}${SOLARNODE_PART##$LOOPDEV}" "${DEST_ROOT_FSTYPE:-${FSTYPE_SOLARNODE}}" "SOLARNODE" "$SRC_MOUNT"
setup_boot_cmdline "$out_loopdev${SOLARBOOT_PART##$LOOPDEV}" "$FSTYPE_SOLARBOOT" "$LAST_PARTUUID"
if [ -n "$VERBOSE" ]; then
echo "Closing output image loop device $out_loopdev."
fi
losetup -d "$out_loopdev"
close_src_loopdev
if [ -n "$VERBOSE" ]; then
echo "Customized image complete: $out_img"
fi
if [ -n "$DEST_PATH" ]; then
mv "$out_img" "$DEST_PATH"
out_img="$DEST_PATH"
fi
out_path=$(dirname $(readlink -f "$out_img"))
out_name=$(basename "${out_img%%.*}")
# cd into out_path so checksums don't contain paths
pushd "$out_path"
if [ -n "$VERBOSE" ]; then
echo "Checksumming image as ${out_path}/${out_name}.img.sha256..."
fi
sha256sum $(basename $out_img) >"${out_name}.img.sha256"
if [ -n "$COMPRESS_DEST_IMAGE" ]; then
if [ -n "$VERBOSE" ]; then
echo "Compressing image as ${out_path}/${out_name}.img.xz..."
fi
xz -cv ${COMPRESS_DEST_OPTS} "$out_img" >"${out_name}.img.xz"
if [ -n "$VERBOSE" ]; then
echo "Checksumming compressed image as ${out_name}.img.xz.sha256..."
fi
sha256sum "${out_name}.img.xz" >"${out_name}.img.xz.sha256"
fi
popd
}
copy_src_img
setup_src_loopdev
setup_chroot
execute_chroot "$BIND_MOUNTS"
clean_chroot
if [ -z "$ERR" ]; then
copy_img
else
close_src_loopdev
fi
clean_src_img
if [ -z "$ERR" -a -n "$DEST_PATH" ]; then
echo "Customized image saved to $DEST_PATH"
fi
|
#!/usr/bin/env -S bash --norc --noprofile
#shellcheck shell=bash disable=SC1008,SC2096
#shellcheck disable=SC2155,SC2034,SC2154
#
# Distro neutral upgrade script michaeltd 171124
# From https://en.wikipedia.org/wiki/Package_manager
# Unofficial Bash Strict Mode
set -euo pipefail
IFS=$'\t\n'
#link free (S)cript: (D)ir(N)ame, (B)ase(N)ame.
readonly sdn="$(dirname "$(realpath "${BASH_SOURCE[0]}")")" \
sbn="$(basename "$(realpath "${BASH_SOURCE[0]}")")"
main() {
# For this to work package manager arrays must be in following format...
# | #1 package manager executable | #2 repo update switch | #3 distro upgrade switch(es)| #4 ...
# PS: By ignoring dpkg and rpm we are avoiding issues with systems where alien has been installed.
local -ra apt_get=( "apt-get" "update" "--assume-yes" "--simulate" "dist-upgrade" ) \
yum=( "yum" "check-update" "update" ) \
zypper=( "zypper" "refresh" "update" "--no-confirm" "--auto-agree-with-licenses" ) \
pacman=( "pacman" "-Sy" "-Syu" ) \
emerge=( "emerge" "--sync" "--pretend" "--nospinner" "--update" "--deep" "--newuse" "@world" ) \
pkg=( "pkg" "update" "upgrade" "--quiet" "--no-repo-update" "--yes" )
local -ra pms=( apt_get[@] yum[@] zypper[@] pacman[@] emerge[@] pkg[@] )
local -r notfound="404"
local pmidx="${notfound}"
# Which is the first available pm in this system?
for x in "${!pms[@]}"; do
if type -P "${!pms[x]:0:1}" &> /dev/null; then
local -r pmidx="${x}"
break # break on first match.
fi
done
if [[ "${pmidx}" == "${notfound}" || "${EUID}" != "0" ]]; then
printf " Error: required access privilages not met,\n or package manager not found. \n For this to work you need root account privilages \n and a %s, %s, %s, %s, %s or %s based distro.\n Quithing.\n" "${!pms[0]:0:1}" "${!pms[1]:0:1}" "${!pms[2]:0:1}" "${!pms[3]:0:1}" "${!pms[4]:0:1}" "${!pms[5]:0:1}" >&2
return 1
else
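# e.g. with apt_get selected this expands to: apt-get update && apt-get --assume-yes --simulate dist-upgrade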
"${!pms[pmidx]:0:1}" "${!pms[pmidx]:1:1}" && "${!pms[pmidx]:0:1}" "${!pms[pmidx]:2}"
fi
}
[[ "${BASH_SOURCE[0]}" == "${0}" ]] && main "${@}"
|
#!/usr/local/bin/bash
if [ -f ~/.previousexternaladdr ]; then
rm -rf ~/.previousexternaladdr
fi
if [ -f ~/.previousinternaladdr ]; then
rm -rf ~/.previousinternaladdr
fi
if [ -f ~/.externaladdr ]; then
rm -rf ~/.externaladdr
fi
if [ -f ~/.internaladdr ]; then
rm -rf ~/.internaladdr
fi
|
#!/bin/bash
#
# Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
mkdir -p "$(dirname $0)/../.github" "$(dirname $0)/../.github/ISSUE_TEMPLATE"
for file in `find "$(dirname $0)"/../vendor/github.com/gardener/gardener/.github -name '*.md'`; do
cat "$file" |\
sed 's/operating Gardener/working with this Gardener extension/g' |\
sed 's/to the Gardener project/for this extension/g' |\
sed 's/to Gardener/to this extension/g' |\
sed 's/- Gardener version:/- Gardener version (if relevant):\n- Extension version:/g' |\
sed 's/\/kind [a-zA-Z]*/&\n\/platform vsphere/g' \
> "$(dirname $0)/../.github/${file#*.github/}"
done
|
#!/usr/bin/env bash
set -ex
docker exec -it influxdb influx setup
|
# -*- sh -*- (Bash only)
#
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The template is expanded at build time using tables of commands/options
# derived from the bazel executable built in the same client; the expansion is
# written to bazel-complete.bash.
#
# Don't use this script directly. Generate the final script with
# bazel build //scripts:bash_completion instead.
# This script expects a header to be prepended to it that defines the following
# nullary functions:
#
# _bazel_completion_use_query - Has a successful exit code if
# BAZEL_COMPLETION_USE_QUERY is "true".
#
# _bazel_completion_allow_tests_for_run - Has a successful exit code if
# BAZEL_COMPLETION_ALLOW_TESTS_FOR_RUN is "true".
# The package path used by the completion routines. Unfortunately
# this isn't necessarily the same as the actual package path used by
# Bazel, but that's ok. (It's impossible for us to reliably know what
# the relevant package-path is, so this is just a good guess. Users can
# override it if they want.)
: ${BAZEL_COMPLETION_PACKAGE_PATH:=%workspace%}
# Some commands might interfere with the important ones, so don't complete them
: ${BAZEL_IGNORED_COMMAND_REGEX:="__none__"}
# bazel & ibazel commands
: ${BAZEL:=bazel}
: ${IBAZEL:=ibazel}
# Pattern to match for looking for a target
# BAZEL_BUILD_MATCH_PATTERN__* give the pattern for label-*
# when looking in the build file.
# BAZEL_QUERY_MATCH_PATTERN__* give the pattern for label-*
# when using 'bazel query'.
# _RUNTEST is a special case for _bazel_completion_allow_tests_for_run.
: ${BAZEL_BUILD_MATCH_PATTERN__test:='(.*_test|test_suite)'}
: ${BAZEL_QUERY_MATCH_PATTERN__test:='(test|test_suite)'}
: ${BAZEL_BUILD_MATCH_PATTERN__bin:='.*_binary'}
: ${BAZEL_QUERY_MATCH_PATTERN__bin:='(binary)'}
: ${BAZEL_BUILD_MATCH_PATTERN_RUNTEST__bin:='(.*_(binary|test)|test_suite)'}
: ${BAZEL_QUERY_MATCH_PATTERN_RUNTEST__bin:='(binary|test)'}
: ${BAZEL_BUILD_MATCH_PATTERN__:='.*'}
: ${BAZEL_QUERY_MATCH_PATTERN__:=''}
# Usage: _bazel__get_rule_match_pattern <command>
# Determine what kind of rules to match, based on command.
_bazel__get_rule_match_pattern() {
local var_name pattern
if _bazel_completion_use_query; then
var_name="BAZEL_QUERY_MATCH_PATTERN"
else
var_name="BAZEL_BUILD_MATCH_PATTERN"
fi
if [[ "$1" =~ ^label-?([a-z]*)$ ]]; then
pattern=${BASH_REMATCH[1]:-}
if _bazel_completion_allow_tests_for_run; then
eval "echo \"\${${var_name}_RUNTEST__${pattern}:-\$${var_name}__${pattern}}\""
else
eval "echo \"\$${var_name}__${pattern}\""
fi
fi
}
# Compute workspace directory. Search for the innermost
# enclosing directory with a WORKSPACE file.
_bazel__get_workspace_path() {
local workspace=$PWD
while true; do
if [ -f "${workspace}/WORKSPACE" ]; then
break
elif [ -z "$workspace" -o "$workspace" = "/" ]; then
workspace=$PWD
break;
fi
workspace=${workspace%/*}
done
echo $workspace
}
# Find the current piece of the line to complete, but only do word breaks at
# certain characters. In particular, ignore these: "':=
# This method also takes into account the current cursor position.
#
# Works with both bash 3 and 4! Bash 3 and 4 perform different word breaks when
# computing the COMP_WORDS array. We need this here because Bazel options are of
# the form --a=b, and labels of the form //some/label:target.
_bazel__get_cword() {
local cur=${COMP_LINE:0:$COMP_POINT}
# This expression finds the last word break character, as defined in the
# COMP_WORDBREAKS variable, but without '=' or ':', which is not preceded by
# a slash. Quote characters are also excluded.
local wordbreaks="$COMP_WORDBREAKS"
wordbreaks="${wordbreaks//\'/}"
wordbreaks="${wordbreaks//\"/}"
wordbreaks="${wordbreaks//:/}"
wordbreaks="${wordbreaks//=/}"
local word_start=$(expr "$cur" : '.*[^\]['"${wordbreaks}"']')
echo "${cur:$word_start}"
}
# Usage: _bazel__package_path <workspace> <displacement>
#
# Prints a list of package-path root directories, displaced using the
# current displacement from the workspace. All elements have a
# trailing slash.
_bazel__package_path() {
local workspace=$1 displacement=$2 root
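# substitute %workspace% with the real workspace root, then split the package path on ':' by temporarily setting IFS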
IFS=:
for root in ${BAZEL_COMPLETION_PACKAGE_PATH//\%workspace\%/$workspace}; do
unset IFS
echo "$root/$displacement"
done
}
# Usage: _bazel__options_for <command>
#
# Prints the set of options for a given Bazel command, e.g. "build".
_bazel__options_for() {
local options
if [[ "${BAZEL_COMMAND_LIST}" =~ ^(.* )?$1( .*)?$ ]]; then
# assumes option names only use ASCII characters
local option_name=$(echo $1 | tr a-z A-Z | tr "-" "_")
eval "echo \${BAZEL_COMMAND_${option_name}_FLAGS}" | tr " " "\n"
fi
}
# Usage: _bazel__expansion_for <command>
#
# Prints the completion pattern for a given Bazel command, e.g. "build".
_bazel__expansion_for() {
local options
if [[ "${BAZEL_COMMAND_LIST}" =~ ^(.* )?$1( .*)?$ ]]; then
# assumes option names only use ASCII characters
local option_name=$(echo $1 | tr a-z A-Z | tr "-" "_")
eval "echo \${BAZEL_COMMAND_${option_name}_ARGUMENT}"
fi
}
# Usage: _bazel__matching_targets <kind> <prefix>
#
# Prints target names of kind <kind> and starting with <prefix> in the BUILD
# file given as standard input. <kind> is a basic regex (BRE) used to match the
# bazel rule kind and <prefix> is the prefix of the target name.
_bazel__matching_targets() {
local kind_pattern="$1"
local target_prefix="$2"
# The following commands do respectively:
# Remove BUILD file comments
# Replace \n by spaces to have the BUILD file in a single line
# Extract all rule types and target names
# Grep the kind pattern and the target prefix
# Returns the target name
sed 's/#.*$//' \
| tr "\n" " " \
| sed 's/\([a-zA-Z0-9_]*\) *(\([^)]* \)\{0,1\}name *= *['\''"]\([a-zA-Z0-9_/.+=,@~-]*\)['\''"][^)]*)/\
type:\1 name:\3\
/g' \
| "grep" -E "^type:$kind_pattern name:$target_prefix" \
| cut -d ':' -f 3
}
# Usage: _bazel__is_true <string>
#
# Returns true or false based on the input string. The following are
# valid true values (the rest are false): "1", "true".
_bazel__is_true() {
local str="$1"
[[ "$str" == "1" || "$str" == "true" ]]
}
# Usage: _bazel__expand_rules_in_package <workspace> <displacement>
# <current> <label-type>
#
# Expands rules in specified packages, exploring all roots of
# $BAZEL_COMPLETION_PACKAGE_PATH, not just $(pwd). Only rules
# appropriate to the command are printed. Sets $COMPREPLY array to
# result.
#
# If _bazel_completion_use_query has a successful exit code, 'bazel query' is
# used instead, with the actual Bazel package path;
# $BAZEL_COMPLETION_PACKAGE_PATH is ignored in this case, since the actual Bazel
# value is likely to be more accurate.
_bazel__expand_rules_in_package() {
local workspace=$1 displacement=$2 current=$3 label_type=$4
local package_name=$(echo "$current" | cut -f1 -d:)
local rule_prefix=$(echo "$current" | cut -f2 -d:)
local root buildfile rule_pattern r result
result=
pattern=$(_bazel__get_rule_match_pattern "$label_type")
if _bazel_completion_use_query; then
package_name=$(echo "$package_name" | tr -d "'\"") # remove quotes
result=$(${BAZEL} --output_base=/tmp/${BAZEL}-completion-$USER query \
--keep_going --noshow_progress \
"kind('$pattern rule', '$package_name:*')" 2>/dev/null |
cut -f2 -d: | "grep" "^$rule_prefix")
else
for root in $(_bazel__package_path "$workspace" "$displacement"); do
buildfile="$root/$package_name/BUILD.bazel"
if [ ! -f "$buildfile" ]; then
buildfile="$root/$package_name/BUILD"
fi
if [ -f "$buildfile" ]; then
result=$(_bazel__matching_targets \
"$pattern" "$rule_prefix" <"$buildfile")
break
fi
done
fi
index=$(echo $result | wc -w)
if [ -n "$result" ]; then
echo "$result" | tr " " "\n" | sed 's|$| |'
fi
# Include ":all" wildcard if there was no unique match. (The zero
# case is tricky: we need to include "all" in that case since
# otherwise we won't expand "a" to "all" in the absence of rules
# starting with "a".)
if [ $index -ne 1 ] && expr all : "\\($rule_prefix\\)" >/dev/null; then
echo "all "
fi
}
# Usage: _bazel__expand_package_name <workspace> <displacement> <current-word>
# <label-type>
#
# Expands directories, but explores all roots of
# BAZEL_COMPLETION_PACKAGE_PATH, not just $(pwd). When a directory is
# a bazel package, the completion offers "pkg:" so you can expand
# inside the package.
# Sets $COMPREPLY array to result.
_bazel__expand_package_name() {
local workspace=$1 displacement=$2 current=$3 type=${4:-} root dir index
for root in $(_bazel__package_path "$workspace" "$displacement"); do
found=0
for dir in $(compgen -d $root$current); do
[ -L "$dir" ] && continue # skip symlinks (e.g. bazel-bin)
[[ "$dir" =~ ^(.*/)?\.[^/]*$ ]] && continue # skip dotted dir (e.g. .git)
found=1
echo "${dir#$root}/"
if [ -f $dir/BUILD.bazel -o -f $dir/BUILD ]; then
if [ "${type}" = "label-package" ]; then
echo "${dir#$root} "
else
echo "${dir#$root}:"
fi
fi
done
[ $found -gt 0 ] && break # Stop searching package path upon first match.
done
}
# Usage: _bazel__expand_target_pattern <workspace> <displacement>
# <word> <label-syntax>
#
# Expands "word" to match target patterns, using the current workspace
# and displacement from it. "command" is used to filter rules.
# Sets $COMPREPLY array to result.
_bazel__expand_target_pattern() {
local workspace=$1 displacement=$2 current=$3 label_syntax=$4
case "$current" in
//*:*) # Expand rule names within package, no displacement.
if [ "${label_syntax}" = "label-package" ]; then
compgen -S " " -W "BUILD" "$(echo current | cut -f ':' -d2)"
else
_bazel__expand_rules_in_package "$workspace" "" "$current" "$label_syntax"
fi
;;
*:*) # Expand rule names within package, displaced.
if [ "${label_syntax}" = "label-package" ]; then
compgen -S " " -W "BUILD" "$(echo current | cut -f ':' -d2)"
else
_bazel__expand_rules_in_package \
"$workspace" "$displacement" "$current" "$label_syntax"
fi
;;
//*) # Expand filenames using package-path, no displacement
_bazel__expand_package_name "$workspace" "" "$current" "$label_syntax"
;;
*) # Expand filenames using package-path, displaced.
if [ -n "$current" ]; then
_bazel__expand_package_name "$workspace" "$displacement" "$current" "$label_syntax"
fi
;;
esac
}
_bazel__get_command() {
for word in "${COMP_WORDS[@]:1:COMP_CWORD-1}"; do
if echo "$BAZEL_COMMAND_LIST" | "grep" -wsq -e "$word"; then
echo $word
break
fi
done
}
# Returns the displacement to the workspace given in $1
_bazel__get_displacement() {
if [[ "$PWD" =~ ^$1/.*$ ]]; then
echo ${PWD##$1/}/
fi
}
# Usage: _bazel__complete_pattern <workspace> <displacement> <current>
# <type>
#
# Expand a word according to a type. The currently supported types are:
# - {a,b,c}: an enum that can take value a, b or c
# - label: a label of any kind
# - label-bin: a label to a runnable rule (basically to a _binary rule)
# - label-test: a label to a test rule
# - info-key: an info key as listed by `bazel help info-keys`
# - command: the name of a command
# - path: a file path
# - combination of the previous types using | as separator
_bazel__complete_pattern() {
local workspace=$1 displacement=$2 current=$3 types=$4
for type in $(echo $types | tr "|" "\n"); do
case "$type" in
label*)
_bazel__expand_target_pattern "$workspace" "$displacement" \
"$current" "$type"
;;
info-key)
compgen -S " " -W "${BAZEL_INFO_KEYS}" -- "$current"
;;
"command")
local commands=$(echo "${BAZEL_COMMAND_LIST}" \
| tr " " "\n" | "grep" -v "^${BAZEL_IGNORED_COMMAND_REGEX}$")
compgen -S " " -W "${commands}" -- "$current"
;;
path)
compgen -f -- "$current"
;;
*)
compgen -S " " -W "$type" -- "$current"
;;
esac
done
}
# Usage: _bazel__expand_options <workspace> <displacement> <current-word>
# <options>
#
# Expands options, making sure that if current-word contains an equals sign,
# it is handled appropriately.
_bazel__expand_options() {
local workspace="$1" displacement="$2" cur="$3" options="$4"
if [[ $cur =~ = ]]; then
# also expands special labels
current=$(echo "$cur" | cut -f2 -d=)
_bazel__complete_pattern "$workspace" "$displacement" "$current" \
"$(compgen -W "$options" -- "$cur" | cut -f2 -d=)" \
| sort -u
else
compgen -W "$(echo "$options" | sed 's|=.*$|=|')" -- "$cur" \
| sed 's|\([^=]\)$|\1 |'
fi
}
_bazel__complete_stdout() {
local cur=$(_bazel__get_cword) word command displacement workspace
# Determine command: "" (startup-options) or one of $BAZEL_COMMAND_LIST.
command="$(_bazel__get_command)"
workspace="$(_bazel__get_workspace_path)"
displacement="$(_bazel__get_displacement ${workspace})"
case "$command" in
"") # Expand startup-options or commands
local commands=$(echo "${BAZEL_COMMAND_LIST}" \
| tr " " "\n" | "grep" -v "^${BAZEL_IGNORED_COMMAND_REGEX}$")
_bazel__expand_options "$workspace" "$displacement" "$cur" \
"${commands}\
${BAZEL_STARTUP_OPTIONS}"
;;
*)
case "$cur" in
-*) # Expand options:
_bazel__expand_options "$workspace" "$displacement" "$cur" \
"$(_bazel__options_for $command)"
;;
*) # Expand target pattern
expansion_pattern="$(_bazel__expansion_for $command)"
NON_QUOTE_REGEX="^[\"']"
if [[ $command = query && $cur =~ $NON_QUOTE_REGEX ]]; then
: # Ideally we would expand query expressions---it's not
# that hard, conceptually---but readline is just too
# damn complex when it comes to quotation. Instead,
# for query, we just expand target patterns, unless
# the first char is a quote.
elif [ -n "$expansion_pattern" ]; then
_bazel__complete_pattern \
"$workspace" "$displacement" "$cur" "$expansion_pattern"
fi
;;
esac
;;
esac
}
_bazel__to_compreply() {
local replies="$1"
COMPREPLY=()
# Trick to preserve whitespaces
while IFS="" read -r reply; do
COMPREPLY+=("${reply}")
done < <(echo "${replies}")
}
_bazel__complete() {
_bazel__to_compreply "$(_bazel__complete_stdout)"
}
# Some users have aliases such as bt="bazel test" or bb="bazel build", this
# completion function allows them to have auto-completion for these aliases.
_bazel__complete_target_stdout() {
local cur=$(_bazel__get_cword) word command displacement workspace
# Determine command: "" (startup-options) or one of $BAZEL_COMMAND_LIST.
command="$1"
workspace="$(_bazel__get_workspace_path)"
displacement="$(_bazel__get_displacement ${workspace})"
_bazel__to_compreply "$(_bazel__expand_target_pattern "$workspace" "$displacement" \
"$cur" "$(_bazel__expansion_for $command)")"
}
# default completion for bazel
complete -F _bazel__complete -o nospace "${BAZEL}"
complete -F _bazel__complete -o nospace "${IBAZEL}"
|
#!/usr/bin/env bash
shopt -s expand_aliases
alias echo_on="{ set -x; }"
alias echo_off="{ set +x; } 2>/dev/null"
# Choose colors carefully. If they don't work on both a black
# background and a white background, pick other colors (so white,
# yellow, and black are poor choices).
RED='\033[1;31m'
GRN='\033[1;32m'
BLU='\033[1;34m'
CYN='\033[1;36m'
END='\033[0m'
set -e
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
DBUILD=${DIR}/dbuild.sh
now=$(date +"%H%M%S")
# container name of the builder
BUILDER_CONT_NAME=${BUILDER_CONT_NAME:-"bld-${BUILDER_NAME}-${now}"}
# command for running a container (ie, "docker run")
BUILDER_DOCKER_RUN=${BUILDER_DOCKER_RUN:-docker run}
# the name of the Docker network
# note: this is necessary for connecting the builder to a local k3d/microk8s/kind network (ie, for running tests)
BUILDER_DOCKER_NETWORK=${BUILDER_DOCKER_NETWORK:-${BUILDER_NAME}}
# Do this with `eval` so that we properly interpret quotes.
eval "pytest_args=(${PYTEST_ARGS:-})"
builder() {
docker ps --quiet \
--filter=label=builder \
--filter=label="$BUILDER_NAME"
}
builder_network() { docker network ls -q -f name="${BUILDER_DOCKER_NETWORK}"; }
builder_volume() { docker volume ls -q -f label=builder; }
declare -a dsynced
dsync() {
printf "${GRN}Synchronizing... $*${END}\n"
TIMEFORMAT="(sync took %1R seconds)"
time IFS='|' read -ra dsynced <<<"$(rsync --info=name -aO --blocking-io -e 'docker exec -i' $@ 2> >(fgrep -v 'rsync: failed to set permissions on' >&2) | tr '\n' '|')"
}
dcopy() {
printf "${GRN}Copying... $*${END}\n"
TIMEFORMAT="(copy took %1R seconds)"
time docker cp $@
}
dexec() {
if [[ -t 0 ]]; then
flags=-it
else
flags=-i
fi
docker exec ${flags} $(builder) "$@"
}
# Usage: build_builder_base [--stage1-only]
# Effects:
# 1. Set the `builder_base_image` variable in the parent scope
# 2. Ensure that the `$builder_base_image` Docker image exists (pulling
# it or building it if it doesn't).
# 3. (If $DEV_REGISTRY is set AND we built the image) push the
# `$builder_base_image` Docker image.
#
# Description:
#
# Rebuild (and push if DEV_REGISTRY is set) the builder's base image if
# - `Dockerfile.base` changes
# - `requirements.txt` changes
# - Enough time has passed (The base only has external/third-party
# dependencies, and most of those dependencies are not pinned by
# version, so we rebuild periodically to make sure we don't fall too
# far behind and then get surprised when a rebuild is required for
# Dockerfile changes.) We have defined "enough time" as a few days.
# See the variable "build_every_n_days" below.
#
# The base theory of operation is that we generate a Docker tag name that
# is essentially the tuple
# (rounded_timestamp, hash(Dockerfile.base), hash(requirements.txt))
# then check that tag for existence/pullability using `docker run --rm
# --entrypoint=true`; and build it if it doesn't exist and can't be
# pulled.
#
# OK, now for a wee bit of complexity. We want to use `pip-compile` to
# update `requirements.txt`. Because of Python-version-conditioned
# dependencies, we really want to run it with the image's python3, not
# with the host's python3. And since we're updating `requirements.txt`,
# we don't really want the `pip install` to have already been run. So,
# we split the base image into two stages; stage-1 is everything but
# `COPY requirements.txt` / `pip install -r requirements.txt`, and then
# stage-2 copies in `requirements.txt` and runs the `pip install`. In
# normal operation we just go ahead and build both stages. But if the
# `--stage1-only` flag is given (as it is by the `pip-compile`
# subcommand), then we only build the stage-1, and set the
# `builder_base_image` variable to that.
build_builder_base() {
local builder_base_tag_py='
# Someone please rewrite this in portable Bash. Until then, this code
# works on Python 2.7 and 3.5+.
import datetime, hashlib
build_every_n_days = 5 # Periodic rebuild even if Dockerfile does not change
epoch = datetime.datetime(2017, 4, 13, 1, 30)
age = int((datetime.datetime.now() - epoch).days / build_every_n_days)
age_start = epoch + datetime.timedelta(days=age*build_every_n_days)
dockerfilehash = hashlib.sha256(open("Dockerfile.base", "rb").read()).hexdigest()
stage1 = "%sx%s-%s" % (age_start.strftime("%Y%m%d"), build_every_n_days, dockerfilehash[:16])
requirementshash = hashlib.sha256(open("requirements.txt", "rb").read()).hexdigest()
stage2 = "%s-%s" % (stage1, requirementshash[:16])
print("stage1_tag=%s" % stage1)
print("stage2_tag=%s" % stage2)
'
local stage1_tag stage2_tag
eval "$(cd "$DIR" && python -c "$builder_base_tag_py")" # sets 'stage1_tag' and 'stage2_tag'
local name1="${DEV_REGISTRY:+$DEV_REGISTRY/}builder-base:stage1-${stage1_tag}"
local name2="${DEV_REGISTRY:+$DEV_REGISTRY/}builder-base:stage2-${stage2_tag}"
printf "${GRN}Using stage-1 base ${BLU}${name1}${END}\n"
if ! docker run --rm --entrypoint=true "$name1"; then # skip building if the "$name1" already exists
${DBUILD} -f "${DIR}/Dockerfile.base" -t "${name1}" --target builderbase-stage1 "${DIR}"
if [ -n "$DEV_REGISTRY" ]; then
docker push "$name1"
fi
fi
if [[ $1 = '--stage1-only' ]]; then
builder_base_image="$name1" # not local
return
fi
printf "${GRN}Using stage-2 base ${BLU}${name2}${END}\n"
if ! docker run --rm --entrypoint=true "$name2"; then # skip building if the "$name2" already exists
${DBUILD} --build-arg=builderbase_stage1="$name1" -f "${DIR}/Dockerfile.base" -t "${name2}" --target builderbase-stage2 "${DIR}"
if [ -n "$DEV_REGISTRY" ]; then
docker push "$name2"
fi
fi
builder_base_image="$name2" # not local
}
bootstrap() {
if [ -z "$(builder_volume)" ] ; then
docker volume create --label builder
printf "${GRN}Created docker volume ${BLU}$(builder_volume)${GRN} for caching${END}\n"
fi
if [ -z "$(builder_network)" ]; then
docker network create "${BUILDER_DOCKER_NETWORK}" > /dev/null
printf "${GRN}Created docker network ${BLU}${BUILDER_DOCKER_NETWORK}${END}\n"
else
printf "${GRN}Connecting to existing network ${BLU}${BUILDER_DOCKER_NETWORK}${GRN}${END}\n"
fi
if [ -z "$(builder)" ] ; then
printf "${CYN}==> ${GRN}Bootstrapping builder base image${END}\n"
build_builder_base
printf "${CYN}==> ${GRN}Bootstrapping build image${END}\n"
${DBUILD} --build-arg envoy="${ENVOY_DOCKER_TAG}" --build-arg builderbase="${builder_base_image}" --target builder ${DIR} -t builder
if [ "$(uname -s)" == Darwin ]; then
DOCKER_GID=$(stat -f "%g" /var/run/docker.sock)
else
DOCKER_GID=$(stat -c "%g" /var/run/docker.sock)
fi
if [ -z "${DOCKER_GID}" ]; then
echo "Unable to determine docker group-id"
exit 1
fi
echo_on
$BUILDER_DOCKER_RUN \
--name="$BUILDER_CONT_NAME" \
--network="${BUILDER_DOCKER_NETWORK}" \
--network-alias="builder" \
--group-add="${DOCKER_GID}" \
--detach \
--rm \
--volume=/var/run/docker.sock:/var/run/docker.sock \
--volume="$(builder_volume):/home/dw" \
${BUILDER_MOUNTS} \
--cap-add=NET_ADMIN \
--label=builder \
--label="${BUILDER_NAME}" \
--label="${BUILDER_NAME}" \
${BUILDER_PORTMAPS} \
${BUILDER_DOCKER_EXTRA} \
--env=BUILDER_NAME="${BUILDER_NAME}" \
--entrypoint=tail builder -f /dev/null > /dev/null
echo_off
printf "${GRN}Started build container ${BLU}$(builder)${END}\n"
fi
dcopy ${DIR}/builder.sh $(builder):/buildroot
dcopy ${DIR}/builder_bash_rc $(builder):/home/dw/.bashrc
}
module_version() {
echo MODULE="\"$1\""
echo GIT_BRANCH="\"$(git rev-parse --abbrev-ref HEAD)\""
# The short git commit hash
echo GIT_COMMIT="\"$(git rev-parse --short HEAD)\""
# Whether `git add . && git commit` would commit anything (empty=false, nonempty=true)
if [ -n "$(git status --porcelain)" ]; then
echo GIT_DIRTY="\"dirty\""
dirty="yes"
else
echo GIT_DIRTY="\"\""
dirty=""
fi
# The _previous_ tag, plus a git delta, like 0.36.0-436-g8b8c5d3
echo GIT_DESCRIPTION="\"$(git describe --tags)\""
# We're going to post-process RELEASE_VERSION below. But for now
# what you need to know is: This block is just going to set it to
# the git tag.
#
# The reason that we give precedence to `CIRCLE_TAG` over `git
# describe` is that if there are multiple tags pointing at the
# same commit, then it is non-deterministic which tag `git
# describe` will choose. We want to know which one of those tags
# actually triggered this CI run, so we give precedence to
# CircleCI, since it has information that isn't actually stored in
# Git.
for VAR in "${CIRCLE_TAG}" "$(git describe --tags --always)"; do
if [ -n "${VAR}" ]; then
RELEASE_VERSION="${VAR}"
break
fi
done
# RELEASE_VERSION is an X.Y.Z[-prerelease] (semver) string that we
# will upload/release the image as. It does NOT include a leading 'v'
# (the leading 'v' from the git tag is trimmed off below).
# If this is an RC or EA, then it includes the '-rc.N' or '-ea.N'
# suffix.
#
# BUILD_VERSION is of the same format, but is the version number that
# we build into the image. Because an image built as a "release
# candidate" will ideally get promoted to be the GA image, we trim off
# the '-rc.N' suffix.
if [[ ${RELEASE_VERSION} =~ ^v[0-9]+.*$ ]]; then
RELEASE_VERSION=${RELEASE_VERSION:1}
fi
if [ -n "${dirty}" ]; then
RELEASE_VERSION="${RELEASE_VERSION}-dirty"
fi
echo RELEASE_VERSION="\"${RELEASE_VERSION}\""
echo BUILD_VERSION="\"$(echo "${RELEASE_VERSION}" | sed 's/-rc\.[0-9]*$//')\""
}
sync() {
name=$1
sourcedir=$2
container=$3
real=$(cd ${sourcedir}; pwd)
dexec mkdir -p /buildroot/${name}
if [[ $name == apro ]]; then
# Don't let 'deleting ambassador' cause the sync to be marked dirty
dexec sh -c 'rm -rf apro/ambassador'
fi
dsync $DSYNC_EXTRA --exclude-from=${DIR}/sync-excludes.txt --delete ${real}/ ${container}:/buildroot/${name}
summarize-sync $name "${dsynced[@]}"
if [[ $name == apro ]]; then
# BusyBox `ln` 1.30.1's `-T` flag is broken, and doesn't have a `-t` flag.
dexec sh -c 'if ! test -L apro/ambassador; then rm -rf apro/ambassador && ln -s ../ambassador apro; fi'
fi
(cd ${sourcedir} && module_version ${name} ) | dexec sh -c "cat > /buildroot/${name}.version && cp ${name}.version ambassador/python/"
}
summarize-sync() {
name=$1
shift
lines=("$@")
if [ "${#lines[@]}" != 0 ]; then
dexec touch ${name}.dirty image.dirty
fi
for line in "${lines[@]}"; do
if [[ $line = *.go ]]; then
dexec touch go.dirty
break
fi
done
printf "${GRN}Synced ${#lines[@]} ${BLU}${name}${GRN} source files${END}\n"
PARTIAL="yes"
for i in {0..9}; do
if [ "$i" = "${#lines[@]}" ]; then
PARTIAL=""
break
fi
line="${lines[$i]}"
printf " ${CYN}%s${END}\n" "$line"
done
if [ -n "${PARTIAL}" ]; then
printf " ${CYN}...${END}\n"
fi
}
clean() {
cid=$(builder)
if [ -n "${cid}" ] ; then
printf "${GRN}Killing build container ${BLU}${cid}${END}\n"
docker kill ${cid} > /dev/null 2>&1
docker wait ${cid} > /dev/null 2>&1 || true
fi
nid=$(builder_network)
if [ -n "${nid}" ] ; then
printf "${GRN}Removing docker network ${BLU}${BUILDER_DOCKER_NETWORK} (${nid})${END}\n"
# This will fail if the network has some other endpoints alive: silence any errors
docker network rm ${nid} >/dev/null 2>&1 || true
fi
}
push-image() {
LOCAL="$1"
REMOTE="$2"
if ! ( dexec test -e /buildroot/pushed.log && dexec fgrep -q "${REMOTE}" /buildroot/pushed.log ); then
printf "${CYN}==> ${GRN}Pushing ${BLU}${LOCAL}${GRN}->${BLU}${REMOTE}${END}\n"
docker tag ${LOCAL} ${REMOTE}
docker push ${REMOTE}
echo ${REMOTE} | dexec sh -c "cat >> /buildroot/pushed.log"
else
printf "${CYN}==> ${GRN}Already pushed ${BLU}${LOCAL}${GRN}->${BLU}${REMOTE}${END}\n"
fi
}
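# Example usage (hypothetical image names):
#   push-image ambassador docker.io/example/ambassador:1.9.0
# The remote tag is recorded in /buildroot/pushed.log so repeat pushes are skipped.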
find-modules () {
find /buildroot -mindepth 1 -maxdepth 1 -type d \! -name bin | sort
}
cmd="$1"
case "${cmd}" in
clean)
clean
;;
clobber)
clean
vid=$(builder_volume)
if [ -n "${vid}" ] ; then
printf "${GRN}Killing cache volume ${BLU}${vid}${END}\n"
if ! docker volume rm ${vid} > /dev/null 2>&1 ; then \
printf "${RED}Could not kill cache volume; are other builders still running?${END}\n"
fi
fi
;;
bootstrap)
bootstrap
echo $(builder)
;;
builder|"")
echo $(builder)
;;
sync)
shift
bootstrap
sync $1 $2 $(builder)
;;
release-type)
shift
RELVER="$1"
if [ -z "${RELVER}" ]; then
source <(module_version ${BUILDER_NAME})
RELVER="${RELEASE_VERSION}"
fi
if [[ "${RELVER}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo release
elif [[ "${RELVER}" =~ ^[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]*$ ]]; then
echo rc
else
echo other
fi
;;
release-version)
shift
source <(module_version ${BUILDER_NAME})
echo "${RELEASE_VERSION}"
;;
version)
shift
source <(module_version ${BUILDER_NAME})
echo "${BUILD_VERSION}"
;;
compile)
shift
bootstrap
dexec /buildroot/builder.sh compile-internal
;;
compile-internal)
# This runs inside the builder image
if [[ $(find-modules) != /buildroot/ambassador* ]]; then
echo "Error: ambassador must be the first module to build things correctly"
echo "Modules are: $(find-modules)"
exit 1
fi
for MODDIR in $(find-modules); do
module=$(basename ${MODDIR})
eval "$(grep BUILD_VERSION apro.version 2>/dev/null)" # this will `eval ''` for OSS-only builds, leaving BUILD_VERSION unset; dont embed the version-number in OSS Go binaries
if [ -e ${module}.dirty ] || ([ "$module" != ambassador ] && [ -e go.dirty ]) ; then
if [ -e "${MODDIR}/go.mod" ]; then
printf "${CYN}==> ${GRN}Building ${BLU}${module}${GRN} go code${END}\n"
echo_on
mkdir -p /buildroot/bin
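# -trimpath keeps file-system paths out of the binaries; if BUILD_VERSION is
# set, it is embedded at link time via -ldflags "-X main.Version=...".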
(cd ${MODDIR} && go build -trimpath ${BUILD_VERSION:+ -ldflags "-X main.Version=$BUILD_VERSION" } -o /buildroot/bin ./cmd/...) || exit 1
if [ -e ${MODDIR}/post-compile.sh ]; then (cd ${MODDIR} && bash post-compile.sh); fi
echo_off
fi
fi
if [ -e ${module}.dirty ]; then
if [ -e "${MODDIR}/python" ]; then
if ! [ -e ${MODDIR}/python/*.egg-info ]; then
printf "${CYN}==> ${GRN}Setting up ${BLU}${module}${GRN} python code${END}\n"
echo_on
sudo pip install --no-deps -e ${MODDIR}/python || exit 1
echo_off
fi
chmod a+x ${MODDIR}/python/*.py
fi
rm ${module}.dirty
else
printf "${CYN}==> ${GRN}Already built ${BLU}${module}${GRN}${END}\n"
fi
done
rm -f go.dirty # Do this after _all_ the Go code is built
;;
mypy-internal)
# This runs inside the builder image
shift
op="$1"
# This runs inside the builder image
if [[ $(find-modules) != /buildroot/ambassador* ]]; then
echo "Error: ambassador must be the first module to build things correctly"
echo "Modules are: $(find-modules)"
exit 1
fi
for MODDIR in $(find-modules); do
module=$(basename ${MODDIR})
if [ -e "${MODDIR}/python" ]; then
cd "${MODDIR}"
case "$op" in
start)
if ! dmypy status >/dev/null; then
dmypy start -- --use-fine-grained-cache --follow-imports=skip --ignore-missing-imports
printf "${CYN}==> ${GRN}Started mypy server for ${BLU}$module${GRN} Python code${END}\n"
else
printf "${CYN}==> ${GRN}mypy server already running for ${BLU}$module${GRN} Python code${END}\n"
fi
;;
stop)
printf "${CYN}==> ${GRN}Stopping mypy server for ${BLU}$module${GRN} Python code${END}"
dmypy stop
;;
check)
printf "${CYN}==> ${GRN}Running mypy over ${BLU}$module${GRN} Python code${END}\n"
time dmypy check python
;;
esac
fi
done
;;
pip-compile)
build_builder_base --stage1-only
printf "${GRN}Running pip-compile to update ${BLU}requirements.txt${END}\n"
docker run --rm -i "$builder_base_image" sh -c 'tar xf - && pip-compile --allow-unsafe -q >&2 && cat requirements.txt' \
< <(cd "$DIR" && tar cf - requirements.in requirements.txt) \
> "$DIR/requirements.txt.tmp"
mv -f "$DIR/requirements.txt.tmp" "$DIR/requirements.txt"
;;
pytest-internal)
# This runs inside the builder image
fail=""
for MODDIR in $(find-modules); do
if [ -e "${MODDIR}/python" ]; then
if ! (cd ${MODDIR} && pytest --tb=short -ra "${pytest_args[@]}"); then
fail="yes"
fi
fi
done
if [ "${fail}" = yes ]; then
exit 1
fi
;;
gotest-internal)
# This runs inside the builder image
fail=""
for MODDIR in $(find-modules); do
if [ -e "${MODDIR}/go.mod" ]; then
pkgs=$(cd ${MODDIR} && go list -f='{{ if or (gt (len .TestGoFiles) 0) (gt (len .XTestGoFiles) 0) }}{{ .ImportPath }}{{ end }}' ${GOTEST_PKGS})
if [ -n "${pkgs}" ]; then
if ! (cd ${MODDIR} && go test ${pkgs} ${GOTEST_ARGS}); then
fail="yes"
fi
fi
fi
done
if [ "${fail}" = yes ]; then
exit 1
fi
;;
commit)
shift
name=$1
if [ -z "${name}" ]; then
echo "usage: ./builder.sh commit <image-name>"
exit 1
fi
if dexec test -e /buildroot/image.dirty; then
printf "${CYN}==> ${GRN}Snapshotting ${BLU}builder${GRN} image${END}\n"
build_builder_base
docker rmi -f "${name}" &> /dev/null
docker commit -c 'ENTRYPOINT [ "/bin/bash" ]' $(builder) "${name}"
printf "${CYN}==> ${GRN}Building ${BLU}${BUILDER_NAME}${END}\n"
${DBUILD} ${DIR} --build-arg artifacts=${name} --build-arg envoy="${ENVOY_DOCKER_TAG}" --build-arg builderbase="${builder_base_image}" --target ambassador -t ${BUILDER_NAME}
printf "${CYN}==> ${GRN}Building ${BLU}kat-client${END}\n"
${DBUILD} ${DIR} --build-arg artifacts=${name} --build-arg envoy="${ENVOY_DOCKER_TAG}" --build-arg builderbase="${builder_base_image}" --target kat-client -t kat-client
printf "${CYN}==> ${GRN}Building ${BLU}kat-server${END}\n"
${DBUILD} ${DIR} --build-arg artifacts=${name} --build-arg envoy="${ENVOY_DOCKER_TAG}" --build-arg builderbase="${builder_base_image}" --target kat-server -t kat-server
fi
dexec rm -f /buildroot/image.dirty
;;
push)
shift
push-image ${BUILDER_NAME} "$1"
push-image kat-client "$2"
push-image kat-server "$3"
;;
shell)
bootstrap
printf "\n"
docker exec -it "$(builder)" /bin/bash
;;
*)
echo "usage: builder.sh [bootstrap|builder|clean|clobber|compile|commit|shell]"
exit 1
;;
esac
|
#!/bin/bash
header_paths=(
"/usr/include" \
"/usr/local/include" \
"/usr/local/opt/" \
"/opt/include" \
"/opt/local/include" \
"/usr/include/$(uname -m)-linux-gnu" \
"/usr/local/include/$(uname -m)-linux-gnu" \
"/usr/include/i386-linux-gnu" \
"/usr/local/include/i386-linux-gnu" \
"/mingw32/include/"
# -- Add more locations here --
)
missing_depends=()
function check_header
{
for place in ${header_paths[@]}; do
for name in ${@:2}; do
[ -f "$place/$name" ] && return 0
done
done
missing_depends+=($1); return 1
}
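# Usage: check_header <dependency-name> <candidate header> [more candidates...]
# e.g. check_header openssl openssl/crypto.h openssl/include/openssl/crypto.h
# (as called below); the dependency is recorded in missing_depends if none of
# the candidates exist under any of the header_paths above.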
function check_header_nosys
{
for place in ${header_paths[@]}; do
if [ "${place:0:12}" != "/usr/include" ]; then
for name in ${@:2}; do
[ -f "$place/$name" ] && return 0
done
fi
done
missing_depends+=($1); return 1
}
function check_program
{
binary=${2:-$1}
for place in ${PATH//:/ }; do
[ -x "$place/$binary" ] || [ -x "$place/$binary.exe" ] && return 0
done
missing_depends+=($1); return 1
}
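# Usage: check_program <dependency-name> [binary-name]
# The binary defaults to the dependency name; a matching <binary>.exe also
# counts, so the check works under MinGW/MSYS.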
OSVER=$(uname)
# macOS Catalina no longer ships headers in the default directory
if [ "$(uname)" == "Darwin" ]; then
header_paths+=("`xcrun --show-sdk-path`/usr/include")
fi
check_program git
check_program patch
check_program autoconf
check_program automake
# These checks are skipped on Windows (MinGW), where pacman provides the packages
if [ "${OSVER:0:5}" != MINGW ]; then
check_program python3
check_program pip3
check_program gpgme-config
check_header openssl openssl/crypto.h openssl/include/openssl/crypto.h
check_header libarchive archive.h libarchive/include/archive.h
fi
check_program make
check_program cmake
check_program gcc
check_program g++
check_program libtoolize
if [ ${#missing_depends[@]} -ne 0 ]; then
echo "Couldn't find dependencies:"
for dep in "${missing_depends[@]}"; do
echo " - $dep"
done
exit 1
fi
|
#!/bin/bash
# Make sure the log directory exists before redirecting worker output into it.
mkdir -p log
rqworker node1 &> log/node1_rqworker.log &
rqworker node2 &> log/node2_rqworker.log &
rqworker node3 &> log/node3_rqworker.log &
rqworker node4 &> log/node4_rqworker.log &
rqworker node5 &> log/node5_rqworker.log &
rqworker topic &> log/topic_rqworker.log &
rqworker tester &> log/tester_rqworker.log &
rqworker failed &> log/failed1_rqworker.log &
rqworker failed &> log/failed2_rqworker.log &
rqworker failed &> log/failed3_rqworker.log &
rqworker failed &> log/failed4_rqworker.log &
rqworker failed &> log/failed5_rqworker.log &
rqworker failed &> log/failed6_rqworker.log &
echo "Start rqworker,Finished!"
|
#!/usr/bin/env bash
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Incrementally compiles the BE.
set -euo pipefail
trap 'echo Error in $0 at line $LINENO: $(cd "'$PWD'" && awk "NR == $LINENO" $0)' ERR
: ${IMPALA_TOOLCHAIN=}
BUILD_TESTS=1
CLEAN=0
TARGET_BUILD_TYPE=${TARGET_BUILD_TYPE:-""}
BUILD_SHARED_LIBS=${BUILD_SHARED_LIBS:-""}
# parse command line options
for ARG in "$@"
do
case "$ARG" in
-notests)
BUILD_TESTS=0
;;
-clean)
CLEAN=1
;;
-build_type=*)
TARGET_BUILD_TYPE="${ARG#*=}"
;;
-build_shared_libs)
BUILD_SHARED_LIBS="ON"
;;
-build_static_libs)
BUILD_SHARED_LIBS="OFF"
;;
-help)
echo "make_impala.sh [-build_type=<build type> -notests -clean]"
echo "[-build_type] : Target build type. Examples: Debug, Release, Address_sanitizer."
echo " If omitted, the last build target is built incrementally"
echo "[-build_shared_libs] : Link all executables dynamically"
echo "[-build_static_libs] : Link all executables statically (the default)"
echo "[-notests] : Omits building the tests."
echo "[-clean] : Cleans previous build artifacts."
echo ""
echo "If either -build_type or -build_*_libs is set, cmake will be re-run for the "
echo "project. Otherwise the last cmake configuration will continue to take effect."
exit
;;
esac
done
echo "********************************************************************************"
echo " Building Impala "
if [ "x${TARGET_BUILD_TYPE}" != "x" ];
then
echo " Build type: ${TARGET_BUILD_TYPE} "
if [ "x${BUILD_SHARED_LIBS}" == "x" ]
then
echo " Impala libraries will be STATICALLY linked"
fi
fi
if [ "x${BUILD_SHARED_LIBS}" == "xOFF" ]
then
echo " Impala libraries will be STATICALLY linked"
fi
if [ "x${BUILD_SHARED_LIBS}" == "xON" ]
then
echo " Impala libraries will be DYNAMICALLY linked"
fi
echo "********************************************************************************"
cd ${IMPALA_HOME}
if [ "x${TARGET_BUILD_TYPE}" != "x" ] || [ "x${BUILD_SHARED_LIBS}" != "x" ]
then
rm -f ./CMakeCache.txt
CMAKE_ARGS=()
if [ "x${TARGET_BUILD_TYPE}" != "x" ]; then
CMAKE_ARGS+=(-DCMAKE_BUILD_TYPE=${TARGET_BUILD_TYPE})
fi
if [ "x${BUILD_SHARED_LIBS}" != "x" ]; then
CMAKE_ARGS+=(-DBUILD_SHARED_LIBS=${BUILD_SHARED_LIBS})
fi
if [[ ! -z $IMPALA_TOOLCHAIN ]]; then
if [[ "$TARGET_BUILD_TYPE" == "ADDRESS_SANITIZER" ]]; then
CMAKE_ARGS+=(-DCMAKE_TOOLCHAIN_FILE=$IMPALA_HOME/cmake_modules/asan_toolchain.cmake)
else
CMAKE_ARGS+=(-DCMAKE_TOOLCHAIN_FILE=$IMPALA_HOME/cmake_modules/toolchain.cmake)
fi
fi
cmake . ${CMAKE_ARGS[@]}
fi
if [ $CLEAN -eq 1 ]
then
make clean
fi
$IMPALA_HOME/bin/gen_build_version.py --noclean
cd $IMPALA_HOME/common/function-registry
make
cd $IMPALA_HOME
# With parallelism, make doesn't always make statestored and catalogd correctly if you
# write make -jX impalad statestored catalogd. So we keep them separate and after impalad,
# which they link to.
make -j${IMPALA_BUILD_THREADS:-4} impalad
make statestored
make catalogd
if [ $BUILD_TESTS -eq 1 ]
then
make -j${IMPALA_BUILD_THREADS:-4}
else
make -j${IMPALA_BUILD_THREADS:-4} fesupport loggingsupport ImpalaUdf
fi
|
#!/bin/bash
set -xe
cat >> .env << EOF
# This is a comment
# We can use equal or colon notation
DIR: root
FLAVOUR: none
INSIDE_FOLDER=false
EOF
|
#!/bin/sh -e
#
# Install kustomize packages.
#
BASE_DIR=${BASE_DIR:-$(cd "$(dirname "$0")/../../.." || exit; pwd -P)}
cd "$(dirname "$0")" || exit 1
. "../../dotfiles/lib/utils.sh"
. "../../dotfiles/lib/buildenv.sh"
. "../../dotfiles/lib/buildenv_asdf.sh"
KUSTOMIZE_VERSION=4.4.0
_preflight() {
if ! command -v asdf >/dev/null; then
printe_info "asdf is not installed, skipping kustomize..."
return 1
fi
}
_run() {
printe_h2 "Installing kustomize..."
_install_kustomize
}
_install_kustomize() {
asdf_plugin kustomize https://github.com/Banno/asdf-kustomize
asdf_install kustomize "$KUSTOMIZE_VERSION" global
}
run_with_flavors "$@"
|
#!/bin/bash
cd ../../../../gofrontend/
# Run translator
echo "**************************************"
echo "Running the translator (Go -> Parallely)."
echo "**************************************"
python -m translator.translator -f ../benchmarks/golang/src/pagerank/pagerank.go -o ../benchmarks/golang/src/pagerank/out.par
cd - > /dev/null
# Run sequentializer
echo "**************************************"
echo "Running the sequentializer"
echo "**************************************"
python ../../../../parser/compiler.py -f out.par -o out.seq
# Run translator
cd ../../../../gofrontend/
echo "**************************************"
echo "Generating executable code by renaming function calls with types (Go -> Go)."
python -m translator.typedGoGenerator -f ../benchmarks/golang/src/pagerank/pagerank.go -o ../benchmarks/golang/src/pagerank/pagerank.exec.go
echo "Use 'go run pagerank.exec.go' to run the generated program"
echo "**************************************"
|
#!/bin/bash
UNAME=`uname`
if [ "$UNAME" == "Darwin" ]; then
dsymutil $1
exit 0;
fi
OBJCOPY=objcopy
function usage {
echo "$0 /path/to/input/file [-o /path/to/output/file ]"
echo ""
}
if [ $# == 0 ]; then
usage
exit 2
fi
if [ $(basename $1) == $1 ]; then
INFILEDIR=$PWD
else
INFILEDIR=$(cd ${1%/*} && echo $PWD)
fi
INFILE=$(basename $1)
OUTFILEDIR=$INFILEDIR
OUTFILE=$INFILE.dbg
while getopts "o:" opt; do
case $opt in
o)
OUTFILEDIR=$(cd ${OPTARG%/*} && echo $PWD)
OUTFILE=$(basename $OPTARG)
;;
esac
done
if [ "$OUTFILEDIR" != "$INFILEDIR" ]; then
INFILE=${INFILEDIR}/${INFILE}
OUTFILE=${OUTFILEDIR}/${OUTFILE}
fi
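# First copy the binary (debug info included) to the .dbg file, then record a
# .gnu_debuglink section in the original that points at that file.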
pushd "$INFILEDIR"
$OBJCOPY "$INFILE" "$OUTFILE"
$OBJCOPY --add-gnu-debuglink="$OUTFILE" "$INFILE"
popd
|
#!/bin/sh
set -e
UNSIGNED=$1
SIGNATURE=$2
ARCH=x86_64
ROOTDIR=dist
BUNDLE=${ROOTDIR}/Bullishcoin-Qt.app
TEMPDIR=signed.temp
OUTDIR=signed-app
if [ -z "$UNSIGNED" ]; then
echo "usage: $0 <unsigned app> <signature>"
exit 1
fi
if [ -z "$SIGNATURE" ]; then
echo "usage: $0 <unsigned app> <signature>"
exit 1
fi
rm -rf ${TEMPDIR} && mkdir -p ${TEMPDIR}
tar -C ${TEMPDIR} -xf ${UNSIGNED}
tar -C ${TEMPDIR} -xf ${SIGNATURE}
if [ -z "${PAGESTUFF}" ]; then
PAGESTUFF=${TEMPDIR}/pagestuff
fi
if [ -z "${CODESIGN_ALLOCATE}" ]; then
CODESIGN_ALLOCATE=${TEMPDIR}/codesign_allocate
fi
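# For each detached signature: make room for it in the matching binary with
# codesign_allocate, find the embedded-signature offset with pagestuff, and
# splice the signature bytes in with dd.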
for i in `find ${TEMPDIR} -name "*.sign"`; do
SIZE=`stat -c %s ${i}`
TARGET_FILE=`echo ${i} | sed 's/\.sign$//'`
echo "Allocating space for the signature of size ${SIZE} in ${TARGET_FILE}"
${CODESIGN_ALLOCATE} -i ${TARGET_FILE} -a ${ARCH} ${SIZE} -o ${i}.tmp
OFFSET=`${PAGESTUFF} ${i}.tmp -p | tail -2 | grep offset | sed 's/[^0-9]*//g'`
if [ -z ${QUIET} ]; then
echo "Attaching signature at offset ${OFFSET}"
fi
dd if=$i of=${i}.tmp bs=1 seek=${OFFSET} count=${SIZE} 2>/dev/null
mv ${i}.tmp ${TARGET_FILE}
rm ${i}
echo "Success."
done
mv ${TEMPDIR}/${ROOTDIR} ${OUTDIR}
rm -rf ${TEMPDIR}
echo "Signed: ${OUTDIR}"
|
#!/bin/bash
../bin/pth -cp ../rt-classes:../classes:../lib/polyglot.jar:../lib/accrue.jar:../lib/accrue-infoflow.jar:../lib/JSON-java.jar:../lib/pth.jar pthScript "$@"
|
#!/bin/zsh
script_dir=$(dirname ${0:A})
projectfolder=$(dirname $script_dir)
source ${projectfolder}/Header.sh
CISLevel="1"
audit="2.4.3 Ensure Screen Sharing Is Disabled (Automated)"
orgScore="OrgScore2_4_3"
emptyVariables
# Verify organizational score
runAudit
# If organizational score is 1 or true, check status of client
if [[ "${auditResult}" == "1" ]]; then
method="Script"
remediate="Script > sudo launchctl disable system/com.apple.screensharing"
screenSharing=$(launchctl print-disabled system | grep -c '"com.apple.screensharing" => true')
if [[ "$screenSharing" == "1" ]]; then
result="Passed"
comment="Screen Sharing: Disabled"
else
result="Failed"
comment="Screen Sharing: Enabled"
# Remediation
if [[ "${remediateResult}" == "enabled" ]]; then
sudo launchctl disable system/com.apple.screensharing
# re-check
screenSharing=$(launchctl print-disabled system | grep -c '"com.apple.screensharing" => true')
if [[ "$screenSharing" == "1" ]]; then
result="Passed After Remediation"
comment="Screen Sharing: Disabled"
else
result="Failed After Remediation"
fi
fi
fi
fi
printReport
|