# https://developer.zendesk.com/rest_api/docs/core/requests#list-requests
zdesk_requests_list () {
method=GET
url=/api/v2/requests.json
[ -n "$1" ] && query="$query&status=$1"
shift
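# Usage sketch (assumes the surrounding zdesk CLI helper issues the request
# described by $method, $url and $query after this function returns):
#   zdesk_requests_list open    # GET /api/v2/requests.json?...&status=open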
} |
#!/bin/bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eo pipefail
# Start the releasetool reporter
python3 -m pip install gcp-releasetool
python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script
# Ensure that we have the latest versions of Twine, Wheel, and Setuptools.
python3 -m pip install --upgrade twine wheel setuptools
# Disable buffering, so that the logs stream through.
export PYTHONUNBUFFERED=1
# Move into the package, build the distribution and upload.
TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-1")
cd github/python-iap
python3 setup.py sdist bdist_wheel
twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/*
|
#!/bin/bash
if [ "$(id -u)" -eq '0' ]
then
# Note: bash marks UID readonly, so assigning to it silently fails;
# use distinct variable names for the target IDs instead.
USER_UID=${USER_UID:-9001}
USER_GID=${USER_GID:-9001}
AUDIO_GID=${AUDIO_GID:-9002}
VIDEO_GID=${VIDEO_GID:-9003}
usermod -u ${USER_UID} -g ${USER_GID} -a -G root,${AUDIO_GID},${VIDEO_GID} user > /dev/null 2>&1
service dbus start > /dev/null 2>&1
export HOME=/home/user
chown -R ${USER_UID}:${USER_GID} ${HOME} > /dev/null 2>&1
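# Re-exec this script as the unprivileged "user" account via gosu; on the
# second pass the root branch is skipped and we fall through to the app launch.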
exec gosu user "$0" "$@"
fi
APP="QQ"
APP_NAME="QQ.exe"
/opt/deepinwine/apps/Deepin-${APP}/run.sh > /dev/null 2>&1
sleep 30s
while test -n "$(pidof ${APP_NAME})"
do
sleep 1s
done
exit "$?" |
#!/bin/bash
set -v
# Set Environment
echo ${PATH} | grep -q "${HOME}/bin" || {
echo "Adding ${HOME}/bin to PATH"
export PATH="${PATH}:${HOME}/bin"
}
# Install Go
mkdir -p ~/bin
curl -sL -o ~/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
chmod +x ~/bin/gimme
eval "$(gimme 1.7)"
export GOPATH=$WORKSPACE
# Get the Go dependencies
go get -f -u github.com/axw/gocov/gocov
go get -f -u github.com/mattn/goveralls
go get -f -u golang.org/x/tools/cmd/cover
go get -f -u github.com/golang/lint/golint
export PATH=$PATH:$GOPATH/bin
# speedup govendor sync command
# Gerrit prepends an XSSI guard line to JSON responses; sed '1d' strips it.
REVISION=$(curl -s "https://softwarefactory-project.io/r/changes/5923/detail?O=404" | sed '1d' | jq -r .current_revision)
curl -o /tmp/vendor.tgz "https://softwarefactory-project.io/r/changes/5923/revisions/${REVISION}/archive?format=tgz"
pushd ${GOPATH}/src/github.com/skydive-project/skydive
go get -f -u github.com/kardianos/govendor
govendor sync -n | perl -pe 's|fetch \"(.*)\"$|vendor/\1|g' | sort -u > vendor.fetch.list
xargs tar -xvzf /tmp/vendor.tgz --exclude "vendor/vendor.json" < vendor.fetch.list
# remove installed
find vendor/ -mindepth 2 -type f | xargs dirname | sort -u > vendor.installed.list
echo "package to be removed/cleanup"
diff -u vendor.fetch.list vendor.installed.list | grep '^\+v' | perl -pe 's|^\+(.*)|\1|' | tee /dev/stdout | xargs -n 1 rm -rf
rm -f vendor.fetch.list vendor.installed.list
popd
|
#!/bin/sh
CURRENT_DIR=$PWD
SCRIPT_DIR=$CURRENT_DIR
if [ ! -f "$SCRIPT_DIR/deploy.yml" ]; then
SCRIPT=$(readlink -f "$0")
# Absolute path this script is in
SCRIPT_DIR=$(dirname "$SCRIPT")
fi
PROJECT_DIR=$(dirname "$SCRIPT_DIR")
[ ! -n "$REV_DATE" ] && REV_DATE=$(date)
REV=$(date -d "$REV_DATE" +%Y-%m-%d/%H_%M_%S)
REV_NAME=$(date -d "$REV_DATE" +%Y-%m-%d_%H-%M-%S)
REV_PREFIX=$(date -d "$REV_DATE" +%Y-%m-%d)
EXPIRE_HTTP_SESSION_TOKEN=$(date -d "$REV_DATE" +%d%H%M%S)
DEPLOY_TYPE=0
[ -n "$1" ] && DEPLOY_TYPE=$1
ansible-playbook -u deploy -i $SCRIPT_DIR/hosts.ini \
-e project_dir=$PROJECT_DIR \
-e deploy_rev=rev/$REV \
-e deploy_rev_prefix=rev/$REV_PREFIX \
-e deploy_rev_name=$REV_NAME \
-e deploy_type=$DEPLOY_TYPE \
$SCRIPT_DIR/deploy.yml
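# Example invocation (sketch; the script name is hypothetical, the inventory
# and playbook paths are the ones resolved above):
#   REV_DATE="2021-06-01 12:00:00" ./deploy.sh 1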
|
# {{{ File header. #############################################################
# #
# File informations: #
# - Name: external-tools/ffmpeg.zsh #
# - Summary: Support for ffmpeg. #
# - Authors: #
# - Alessandro Molari <[email protected]> (alem0lars) #
# #
# Project informations: #
# - Homepage: https://github.com/alem0lars/zsh-external-tools #
# - Getting started: see README.md in the project root folder #
# #
# License: Apache v2.0 (see below) #
# #
################################################################################
# #
# Licensed to the Apache Software Foundation (ASF) under one or more #
# contributor license agreements. See the NOTICE file distributed with this #
# work for additional information regarding copyright ownership. The ASF #
# licenses this file to you under the Apache License, Version 2.0 (the #
# "License"); you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
# }}} ##########################################################################
if (( $+commands[ffmpeg] )); then # check if 'ffmpeg' is installed
abbrev-alias record-screen-1st="ffmpeg -f x11grab -s 2880x1620 -i :0.0 -r 25 -vcodec libx264 output.mkv"
abbrev-alias record-screen-2nd="ffmpeg -f x11grab -s 3840x2160 -i :0.0+2880,0 -r 25 -vcodec libx264 output.mkv"
fi
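# Note: the -s capture size and the +x,y offset in -i :0.0+X,Y must match the
# actual monitor layout; `xrandr | grep ' connected'` prints each output's
# WxH+X+Y geometry to copy these values from.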
|
#!/bin/bash
export PATH="/home/ubuntu/anaconda3/bin:$PATH"
export LD_LIBRARY_PATH="/usr/local/MATLAB/MATLAB_Runtime/v93/runtime/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v93/bin/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v93/sys/opengl/lib/glnxa64/"
python /var/www/deco3801-teamanonymous/public/algorithms/combine.py "$1" "$2" "$3" "$4" "$5" |
#!/bin/bash
# Description: demonstration of the basic syntax of "continue".
for i in 1 2
do
echo $i
for j in a b
do
echo $j
done
done
echo "---"
for i in 1 2
do
echo $i
for j in a b
do
[ "$j" == "a" ] && continue
echo $j
done
done
echo "---"
for i in 1 2
do
echo $i
for j in a b
do
[ "$j" == "a" ] && continue 2
echo $j
done
done
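# Expected output: the first block prints 1 a b 2 a b; with "continue" the
# j=a iteration is skipped (1 b 2 b); "continue 2" resumes the OUTER loop,
# so only 1 and 2 are printed.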
|
#!/bin/sh
#
# Simple script to put the Kernel image into a destination folder
# to be booted. The script also copies an initrd and the compiled device
# tree. Usually the destination is a location which can be read while booting
# with U-Boot.
#
# Use this script to populate the first partition of disk images created with
# the simpleimage script of this project.
#
set -e
DEST="$1"
if [ -z "$DEST" ]; then
echo "Usage: $0 <destination-folder> [linux-folder]"
exit 1
fi
BLOBS="../blobs"
LINUX="../linux-a64"
INITRD="./initrd.gz"
BOOTLOGO="../blobs/bootlogo.bmp"
BATTERY="../blobs/bat"
# Targets file names as loaded by U-Boot.
SUBFOLDER="a64"
KERNEL="$SUBFOLDER/Image"
INITRD_IMG="initrd.img"
BOOTLOGO_TARGET="bootlogo.bmp"
BATTERY_TARGET="bat"
if [ "$DEST" = "-" ]; then
DEST="../build"
fi
if [ -n "$2" ]; then
LINUX="$2"
fi
echo "Using Linux from $LINUX ..."
VERSION=$(strings "$LINUX/arch/arm64/boot/Image" | grep "Linux version" | awk '{print $3}')
echo "Kernel build version $VERSION ..."
if [ -z "$VERSION" ]; then
echo "Failed to get build version, correct <linux-folder>?"
exit 1
fi
# Clean up
mkdir -p "$DEST/$SUBFOLDER"
rm -vf "$DEST/$KERNEL"
rm -vf "$DEST/"*.dtb
# Create and copy Kernel
echo -n "Copying Kernel ..."
cp -vf "$LINUX/arch/arm64/boot/Image" "$DEST/$KERNEL"
echo "$VERSION" > "$DEST/Image.version"
echo " OK"
# Copy initrd
echo -n "Copying initrd ..."
cp -vf "$INITRD" "$DEST/$INITRD_IMG"
echo " OK"
# Create and copy binary device tree
# if grep -q sunxi-drm "$LINUX/arch/arm64/boot/Image"; then
# echo "Kernel with DRM driver!"
# basename="pine64drm"
# fi
# Not found, use device tree from BSP.
echo "Compiling device tree from $BLOBS/${basename}.dts"
dtc -Odtb -o "$DEST/$SUBFOLDER/sun50i-a64-teres.dtb" "$BLOBS/sun50i-a64-teres.dts"
# Add bootlogo.
cp -v "$BOOTLOGO" "$DEST/$BOOTLOGO_TARGET"
# Add battery icons.
mkdir -p "$DEST/$BATTERY_TARGET"
cp -v "$BATTERY/bempty.bmp" "$DEST/$BATTERY_TARGET"
cp -v "$BATTERY/low_pwr.bmp" "$DEST/$BATTERY_TARGET"
cp -v "$BATTERY/battery_charge.bmp" "$DEST/$BATTERY_TARGET"
if [ ! -e "$DEST/uEnv.txt" ]; then
cat <<EOF > "$DEST/uEnv.txt"
console=ttyS0,115200n8
selinux=permissive
enforcing=0
optargs=no_console_suspend
kernel_filename=a64/Image
initrd_filename=initrd.img
recovery_initrd_filename=ramdisk-recovery.img
hardware=sun50iw1p1
debug=on
# INFO:
# To enable one of below options,
# uncomment them by removing # in front of name
# To use android recovery:
# Create empty file recovery.txt in root of this partition
# To enable LCD or HDMI, if not changed it will use default (experimental)
# disp_screen0=lcd or hdmi
# disp_screen1=lcd or hdmi
# disp_mode=screen0 or screen1 or dualhead or xinerama or clone or disabled
# USB OTG port mode (experimental)
# otg_mode=device or host or otg
otg_mode=host
# Configure contiguous memory allocation
# This maybe required to be enlarged for 4K displays
cma=384M
# To change HDMI display mode:
# hdmi_mode=480i
# hdmi_mode=576i
# hdmi_mode=480p
# hdmi_mode=576p
# hdmi_mode=720p50
# hdmi_mode=720p60
# hdmi_mode=1080i50
# hdmi_mode=1080i60
# hdmi_mode=1080p24
# hdmi_mode=1080p50
# hdmi_mode=1080p60
# hdmi_mode=2160p30
# hdmi_mode=2160p25
# hdmi_mode=2160p24
# hdmi_mode=800x480p
# hdmi_mode=1024x600p
# To enable DVI compatibility:
# disp_dvi_compat=on
# To enable CSI camera, if not enabled it will use default:
# camera_type=s5k4ec
# camera_type=ov5640
# Configure ethernet speed (Android-only)
eth0_speed=auto
# eth0_speed=1000
# eth0_speed=100
# eth0_speed=10
# If you are having problems running from eMMC (e.g. some SanDisk eMMC),
# this forces SDR mode instead of HS mode.
# Enable eMMC compatibility mode:
# emmc_compat=on
# Enable enhanced eMMC speed (might not work), the HS200/150MHz:
# emmc_compat=150mhz
# Enable enhanced eMMC speed (might not work), the HS200/200MHz:
# emmc_compat=200mhz
# Disable HDMI CEC
# hdmi_cec=0
# Enable experimental HDMI CEC driver
hdmi_cec=2
# Allow to execute user command
user_cmd=
EOF
fi
sync
echo "Done - boot files in $DEST"
|
#!/bin/bash
sudo sed -i -e '/^.*data-store.*$/d' /etc/exports
for environment in "$HOME"/workspace/*/; do
environment=$(basename "$environment")
for appname in "$HOME/workspace/$environment"/*/; do
appname=$(basename "$appname")
shortenv $environment
if [[ ! -d "$HOME/data-store/$appname-$shortenv" ]]; then
mkdir -p "$HOME/data-store/$appname-$shortenv"
fi
echo "$HOME/data-store/$appname-$shortenv "`minikube ip`"(rw,sync,no_root_squash,no_subtree_check)" | sudo tee -a /etc/exports
done
done
sudo systemctl restart nfs-kernel-server
for environment in "$HOME"/workspace/*/; do
environment=$(basename "$environment")
for appname in "$HOME/workspace/$environment"/*/; do
appname=$(basename "$appname")
shortenv $environment
if ! kubectl get namespaces | grep -q "$appname-$shortenv"
then
kubectl create namespace "$appname-$shortenv"
fi
if ! kubectl --namespace $appname-$shortenv get pv | grep -q "$appname-$shortenv"
then
cat <<EOF | kubectl --namespace $appname-$shortenv apply -f -
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-pv-$appname-$shortenv
spec:
storageClassName: manual
capacity:
storage: $HOME_MINIKUBE_PV_SIZE
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
nfs:
server: host.minikube.internal
path: $HOME/data-store/$appname-$shortenv
EOF
fi
until kubectl -n "$appname-$shortenv" get pv | grep -q $appname-$shortenv; do sleep 1; done
if ! kubectl --namespace $appname-$shortenv get pvc | grep -q "$appname-$shortenv"
then
cat <<EOF | kubectl --namespace $appname-$shortenv apply -f -
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: nfs-pvc-$appname-$shortenv
spec:
storageClassName: manual
accessModes:
- ReadWriteMany
resources:
requests:
storage: $HOME_MINIKUBE_PV_SIZE
volumeName: "nfs-pv-$appname-$shortenv"
EOF
fi
until kubectl -n "$appname-$shortenv" get pvc | grep -q $appname-$shortenv; do sleep 1; done
done
done
# mount example
#minikube ssh "sudo mkdir /data-store"
#minikube ssh "sudo mount host.minikube.internal:$HOME/data-store /data-store"
|
TERMUX_SUBPKG_DESCRIPTION="Texlive's collection-langczechslovak"
TERMUX_SUBPKG_DEPENDS="texlive-fontsextra, texlive-luatex"
TERMUX_SUBPKG_INCLUDE=$(python3 $TERMUX_SCRIPTDIR/packages/texlive/parse_tlpdb.py langczechslovak $TERMUX_PKG_TMPDIR/texlive.tlpdb)
TERMUX_SUBPKG_CONFLICTS="texlive-bin (<< 20190410), texlive (<< 20190410)"
termux_step_create_subpkg_debscripts() {
echo "#!$TERMUX_PREFIX/bin/sh" > postinst
echo mktexlsr >> postinst
}
|
export GLUE_DIR=../../glue_data/
export TASK_NAME=MLSA
python3 ./run_sa.py \
--model_type bert \
--model_name_or_path bert-base-multilingual-uncased \
--task_name $TASK_NAME \
--do_train \
--do_eval \
--do_lower_case \
--data_dir $GLUE_DIR/$TASK_NAME \
--max_seq_length 128 \
--per_gpu_eval_batch_size=30 \
--per_gpu_train_batch_size=30 \
--learning_rate 2e-5 \
--num_train_epochs 2 \
--overwrite_output_dir \
--output_dir /tmp/$TASK_NAME/
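# Assumes $GLUE_DIR/MLSA contains GLUE-style train/dev files that run_sa.py's
# data processor can read; checkpoints and eval results land in /tmp/MLSA/.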
|
#!/bin/bash
# Path to location directory and pref
LOCATION_DIR="/Library/Application Support/pinpoint"
LOCATION_PREF="${LOCATION_DIR}/location.plist"
MODULESCRIPT="init_location"
MODULE_NAME="location"
# map controller
CTL="${BASEURL}index.php?/module/${MODULE_NAME}/"
# Get the scripts in the proper directories
"${CURL[@]}" "${CTL}get_script/${MODULESCRIPT}" -o "${MUNKIPATH}preflight.d/${MODULESCRIPT}"
# Check exit status of curl
if [ $? = 0 ]; then
# Make executable
chmod a+x "${MUNKIPATH}preflight.d/${MODULESCRIPT}"
# Set preference to include this file in the preflight check
setreportpref "${MODULE_NAME}" "${LOCATION_PREF}"
else
echo "Failed to download all required components!"
rm -f "${MUNKIPATH}preflight.d/${MODULESCRIPT}"
# Signal that we had an error
ERR=1
fi
# Remove old testing script and cache if they exist
files=( "${MUNKIPATH}preflight.d/location.py" "${MUNKIPATH}preflight.d/cache/location.plist" )
for i in "${files[@]}"
do
/bin/rm -f "$i"
done
|
#!/bin/bash
# build latest app container
docker build -t test-data-store:latest .
# login to docker hub
echo "Logging in to Docker Hub..."
docker login --username caselletuckelle
# tag container
docker tag test-data-store:latest caselletuckelle/test-data-store:latest
# push new app container to docker repo (https://hub.docker.com/r/caselletuckelle/test-data-store)
docker push caselletuckelle/test-data-store:latest |
#!/bin/sh
set -e
set -u
set -o pipefail
function on_error {
echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
# If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
# frameworks to, so exit 0 (signalling the script phase was successful).
exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
BCSYMBOLMAP_DIR="BCSymbolMaps"
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
if [ -d "${source}/${BCSYMBOLMAP_DIR}" ]; then
# Locate and install any .bcsymbolmaps if present, and remove them from the .framework before the framework is copied
find "${source}/${BCSYMBOLMAP_DIR}" -name "*.bcsymbolmap"|while read f; do
echo "Installing $f"
install_bcsymbolmap "$f" "$destination"
rm "$f"
done
rmdir "${source}/${BCSYMBOLMAP_DIR}"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
elif [ -L "${binary}" ]; then
echo "Destination binary is symlinked..."
dirname="$(dirname "${binary}")"
binary="${dirname}/$(readlink "${binary}")"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
install_dsym() {
local source="$1"
warn_missing_arch=${2:-true}
if [ -r "$source" ]; then
# Copy the dSYM into the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .dSYM "$source")"
binary_name="$(ls "$source/Contents/Resources/DWARF")"
binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}"
# Strip invalid architectures from the dSYM.
if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
strip_invalid_archs "$binary" "$warn_missing_arch"
fi
if [[ $STRIP_BINARY_RETVAL == 0 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
mkdir -p "${DWARF_DSYM_FOLDER_PATH}"
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM"
fi
fi
}
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
warn_missing_arch=${2:-true}
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
if [[ "$warn_missing_arch" == "true" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
fi
STRIP_BINARY_RETVAL=1
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary"
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=0
}
# Copies the bcsymbolmap files of a vendored framework
install_bcsymbolmap() {
local bcsymbolmap_path="$1"
local destination="${BUILT_PRODUCTS_DIR}"
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/AppAuth/AppAuth.framework"
install_framework "${BUILT_PRODUCTS_DIR}/BoringSSL-GRPC/openssl_grpc.framework"
install_framework "${BUILT_PRODUCTS_DIR}/FirebaseAuth/FirebaseAuth.framework"
install_framework "${BUILT_PRODUCTS_DIR}/FirebaseCore/FirebaseCore.framework"
install_framework "${BUILT_PRODUCTS_DIR}/FirebaseCoreDiagnostics/FirebaseCoreDiagnostics.framework"
install_framework "${BUILT_PRODUCTS_DIR}/FirebaseFirestore/FirebaseFirestore.framework"
install_framework "${BUILT_PRODUCTS_DIR}/FirebaseInstallations/FirebaseInstallations.framework"
install_framework "${BUILT_PRODUCTS_DIR}/FirebaseStorage/FirebaseStorage.framework"
install_framework "${BUILT_PRODUCTS_DIR}/GTMAppAuth/GTMAppAuth.framework"
install_framework "${BUILT_PRODUCTS_DIR}/GTMSessionFetcher/GTMSessionFetcher.framework"
install_framework "${BUILT_PRODUCTS_DIR}/GoogleDataTransport/GoogleDataTransport.framework"
install_framework "${BUILT_PRODUCTS_DIR}/GoogleSignIn/GoogleSignIn.framework"
install_framework "${BUILT_PRODUCTS_DIR}/GoogleUtilities/GoogleUtilities.framework"
install_framework "${BUILT_PRODUCTS_DIR}/PromisesObjC/FBLPromises.framework"
install_framework "${BUILT_PRODUCTS_DIR}/abseil/absl.framework"
install_framework "${BUILT_PRODUCTS_DIR}/gRPC-C++/grpcpp.framework"
install_framework "${BUILT_PRODUCTS_DIR}/gRPC-Core/grpc.framework"
install_framework "${BUILT_PRODUCTS_DIR}/leveldb-library/leveldb.framework"
install_framework "${BUILT_PRODUCTS_DIR}/nanopb/nanopb.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/AppAuth/AppAuth.framework"
install_framework "${BUILT_PRODUCTS_DIR}/BoringSSL-GRPC/openssl_grpc.framework"
install_framework "${BUILT_PRODUCTS_DIR}/FirebaseAuth/FirebaseAuth.framework"
install_framework "${BUILT_PRODUCTS_DIR}/FirebaseCore/FirebaseCore.framework"
install_framework "${BUILT_PRODUCTS_DIR}/FirebaseCoreDiagnostics/FirebaseCoreDiagnostics.framework"
install_framework "${BUILT_PRODUCTS_DIR}/FirebaseFirestore/FirebaseFirestore.framework"
install_framework "${BUILT_PRODUCTS_DIR}/FirebaseInstallations/FirebaseInstallations.framework"
install_framework "${BUILT_PRODUCTS_DIR}/FirebaseStorage/FirebaseStorage.framework"
install_framework "${BUILT_PRODUCTS_DIR}/GTMAppAuth/GTMAppAuth.framework"
install_framework "${BUILT_PRODUCTS_DIR}/GTMSessionFetcher/GTMSessionFetcher.framework"
install_framework "${BUILT_PRODUCTS_DIR}/GoogleDataTransport/GoogleDataTransport.framework"
install_framework "${BUILT_PRODUCTS_DIR}/GoogleSignIn/GoogleSignIn.framework"
install_framework "${BUILT_PRODUCTS_DIR}/GoogleUtilities/GoogleUtilities.framework"
install_framework "${BUILT_PRODUCTS_DIR}/PromisesObjC/FBLPromises.framework"
install_framework "${BUILT_PRODUCTS_DIR}/abseil/absl.framework"
install_framework "${BUILT_PRODUCTS_DIR}/gRPC-C++/grpcpp.framework"
install_framework "${BUILT_PRODUCTS_DIR}/gRPC-Core/grpc.framework"
install_framework "${BUILT_PRODUCTS_DIR}/leveldb-library/leveldb.framework"
install_framework "${BUILT_PRODUCTS_DIR}/nanopb/nanopb.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
#!/bin/bash
export MACHTYPE=x86_64
export BINDIR=$(pwd)/bin
mkdir -p $BINDIR
(cd kent/src/lib && make)
(cd kent/src/jkOwnLib && make)
(cd kent/src/hg/lib && make)
(cd kent/src/utils/wordLine && make)
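# $PREFIX is assumed to be supplied by the packaging environment (this reads
# like a conda-build build.sh, where $PREFIX is the install prefix).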
mkdir -p $PREFIX/bin
cp bin/wordLine $PREFIX/bin
chmod +x $PREFIX/bin/wordLine
|
# Prints the current weather in Celsius, Fahrenheit, or Kelvin. The forecast is cached and updated with a period of $update_period.
# The update period in seconds.
update_period=600
TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER_DEFAULT="yahoo"
TMUX_POWERLINE_SEG_WEATHER_UNIT_DEFAULT="c"
TMUX_POWERLINE_SEG_WEATHER_UPDATE_PERIOD_DEFAULT="600"
if shell_is_bsd; then
TMUX_POWERLINE_SEG_WEATHER_GREP_DEFAULT="/usr/local/bin/grep"
else
TMUX_POWERLINE_SEG_WEATHER_GREP_DEFAULT="grep"
fi
generate_segmentrc() {
read -d '' rccontents << EORC
# The data provider to use. Currently only "yahoo" is supported.
export TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER="${TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER_DEFAULT}"
# What unit to use. Can be any of {c,f,k}.
export TMUX_POWERLINE_SEG_WEATHER_UNIT="${TMUX_POWERLINE_SEG_WEATHER_UNIT_DEFAULT}"
# How often to update the weather in seconds.
export TMUX_POWERLINE_SEG_WEATHER_UPDATE_PERIOD="${TMUX_POWERLINE_SEG_WEATHER_UPDATE_PERIOD_DEFAULT}"
# Name of GNU grep binary if in PATH, or path to it.
export TMUX_POWERLINE_SEG_WEATHER_GREP="${TMUX_POWERLINE_SEG_WEATHER_GREP_DEFAULT}"
# Your location. Find a code that works for you:
# 1. Go to Yahoo weather http://weather.yahoo.com/
# 2. Find the weather for you location
# 3. Copy the last numbers in that URL. e.g. "http://weather.yahoo.com/united-states/california/newport-beach-12796587/" has the numbers "12796587"
export TMUX_POWERLINE_SEG_WEATHER_LOCATION=""
EORC
echo "$rccontents"
}
run_segment() {
__process_settings
local tmp_file="${TMUX_POWERLINE_DIR_TEMPORARY}/weather_yahoo.txt"
local weather
case "$TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER" in
"yahoo") weather=$(__yahoo_weather) ;;
*)
echo "Unknown weather provider [${$TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER}]";
return 1
esac
if [ -n "$weather" ]; then
echo "$weather"
fi
}
__process_settings() {
if [ -z "$TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER" ]; then
export TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER="${TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER_DEFAULT}"
fi
if [ -z "$TMUX_POWERLINE_SEG_WEATHER_UNIT" ]; then
export TMUX_POWERLINE_SEG_WEATHER_UNIT="${TMUX_POWERLINE_SEG_WEATHER_UNIT_DEFAULT}"
fi
if [ -z "$TMUX_POWERLINE_SEG_WEATHER_UPDATE_PERIOD" ]; then
export TMUX_POWERLINE_SEG_WEATHER_UPDATE_PERIOD="${TMUX_POWERLINE_SEG_WEATHER_UPDATE_PERIOD_DEFAULT}"
fi
if [ -z "$TMUX_POWERLINE_SEG_WEATHER_GREP" ]; then
export TMUX_POWERLINE_SEG_WEATHER_GREP="${TMUX_POWERLINE_SEG_WEATHER_GREP_DEFAULT}"
fi
if [ -z "$TMUX_POWERLINE_SEG_WEATHER_LOCATION" ]; then
echo "No weather location specified.";
exit 8
fi
}
__yahoo_weather() {
degree=""
if [ -f "$tmp_file" ]; then
if shell_is_osx || shell_is_bsd; then
last_update=$(stat -f "%m" ${tmp_file})
elif shell_is_linux; then
last_update=$(stat -c "%Y" ${tmp_file})
fi
time_now=$(date +%s)
up_to_date=$(echo "(${time_now}-${last_update}) < ${update_period}" | bc)
if [ "$up_to_date" -eq 1 ]; then
__read_tmp_file
fi
fi
if [ -z "$degree" ]; then
weather_data=$(curl --max-time 4 -s "https://query.yahooapis.com/v1/public/yql?format=xml&q=SELECT%20*%20FROM%20weather.forecast%20WHERE%20u=%27${TMUX_POWERLINE_SEG_WEATHER_UNIT}%27%20AND%20woeid%20=%20%27${TMUX_POWERLINE_SEG_WEATHER_LOCATION}%27")
if [ "$?" -eq "0" ]; then
error=$(echo "$weather_data" | grep "problem_cause\|DOCTYPE");
if [ -n "$error" ]; then
echo "error"
exit 1
fi
# Assume latest grep is in PATH
gnugrep="${TMUX_POWERLINE_SEG_WEATHER_GREP}"
# <yweather:units temperature="F" distance="mi" pressure="in" speed="mph"/>
unit=$(echo "$weather_data" | "$gnugrep" -Zo "<yweather:units [^<>]*/>" | sed 's/.*temperature="\([^"]*\)".*/\1/')
condition=$(echo "$weather_data" | "$gnugrep" -Zo "<yweather:condition [^<>]*/>")
# <yweather:condition text="Clear" code="31" temp="66" date="Mon, 01 Oct 2012 8:00 pm CST" />
degree=$(echo "$condition" | sed 's/.*temp="\([^"]*\)".*/\1/')
condition=$(echo "$condition" | sed 's/.*text="\([^"]*\)".*/\1/')
# Pull the times for sunrise and sunset so we know when to change the day/night indicator
# <yweather:astronomy sunrise="6:56 am" sunset="6:21 pm"/>
if shell_is_osx || shell_is_bsd; then
date_args=(-j -f "%H:%M %p ")
else
date_args=(-d)
fi
sunrise=$(date "${date_args[@]}" "$(echo "$weather_data" | "$gnugrep" "yweather:astronomy" | sed 's/^\(.*\)sunset.*/\1/' | sed 's/^.*sunrise="\(.*m\)".*/\1/')" +%H%M)
sunset=$(date "${date_args[@]}" "$(echo "$weather_data" | "$gnugrep" "yweather:astronomy" | sed 's/^.*sunset="\(.*m\)".*/\1/')" +%H%M)
elif [ -f "${tmp_file}" ]; then
__read_tmp_file
fi
fi
if [ -n "$degree" ]; then
if [ "$TMUX_POWERLINE_SEG_WEATHER_UNIT" == "k" ]; then
degree=$(echo "${degree} + 273.15" | bc)
fi
condition_symbol=$(__get_condition_symbol "$condition" "$sunrise" "$sunset")
echo "${condition_symbol} ${degree} $(echo "$TMUX_POWERLINE_SEG_WEATHER_UNIT" | tr '[:lower:]' '[:upper:]')" | tee "${tmp_file}"
fi
}
# Get symbol for condition. Available conditions: http://developer.yahoo.com/weather/#codes
__get_condition_symbol() {
local condition=$(echo "$1" | tr '[:upper:]' '[:lower:]')
local sunrise="$2"
local sunset="$3"
echo "$condition"
}
__read_tmp_file() {
if [ ! -f "$tmp_file" ]; then
return
fi
cat "${tmp_file}"
exit
}
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2016:2120
#
# Security announcement date: 2016-10-27 09:19:45 UTC
# Script generation date: 2017-01-31 21:25:43 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fixed as of version:
# - kernel-abi-whitelists.noarch:2.6.32-431.75.1.el6
# - kernel-doc.noarch:2.6.32-431.75.1.el6
# - kernel-firmware.noarch:2.6.32-431.75.1.el6
# - kernel.x86_64:2.6.32-431.75.1.el6
# - kernel-debug.x86_64:2.6.32-431.75.1.el6
# - kernel-debug-debuginfo.x86_64:2.6.32-431.75.1.el6
# - kernel-debug-devel.x86_64:2.6.32-431.75.1.el6
# - kernel-debuginfo.x86_64:2.6.32-431.75.1.el6
# - kernel-debuginfo-common-x86_64.x86_64:2.6.32-431.75.1.el6
# - kernel-devel.x86_64:2.6.32-431.75.1.el6
# - kernel-headers.x86_64:2.6.32-431.75.1.el6
# - perf.x86_64:2.6.32-431.75.1.el6
# - perf-debuginfo.x86_64:2.6.32-431.75.1.el6
# - python-perf-debuginfo.x86_64:2.6.32-431.75.1.el6
# - python-perf.x86_64:2.6.32-431.75.1.el6
#
# Last versions recommended by security team:
# - kernel-abi-whitelists.noarch:2.6.32-504.56.1.el6
# - kernel-doc.noarch:2.6.32-504.56.1.el6
# - kernel-firmware.noarch:2.6.32-504.56.1.el6
# - kernel.x86_64:2.6.32-220.69.1.el6
# - kernel-debug.x86_64:2.6.32-220.69.1.el6
# - kernel-debug-debuginfo.x86_64:2.6.32-220.69.1.el6
# - kernel-debug-devel.x86_64:2.6.32-220.69.1.el6
# - kernel-debuginfo.x86_64:2.6.32-220.69.1.el6
# - kernel-debuginfo-common-x86_64.x86_64:2.6.32-220.69.1.el6
# - kernel-devel.x86_64:2.6.32-220.69.1.el6
# - kernel-headers.x86_64:2.6.32-220.69.1.el6
# - perf.x86_64:2.6.32-220.69.1.el6
# - perf-debuginfo.x86_64:2.6.32-220.69.1.el6
# - python-perf-debuginfo.x86_64:2.6.32-220.69.1.el6
# - python-perf.x86_64:2.6.32-220.69.1.el6
#
# CVE List:
# - CVE-2016-5195
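# (CVE-2016-5195 is the "Dirty COW" copy-on-write privilege escalation.)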
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install kernel-abi-whitelists.noarch-2.6.32 -y
sudo yum install kernel-doc.noarch-2.6.32 -y
sudo yum install kernel-firmware.noarch-2.6.32 -y
sudo yum install kernel.x86_64-2.6.32 -y
sudo yum install kernel-debug.x86_64-2.6.32 -y
sudo yum install kernel-debug-debuginfo.x86_64-2.6.32 -y
sudo yum install kernel-debug-devel.x86_64-2.6.32 -y
sudo yum install kernel-debuginfo.x86_64-2.6.32 -y
sudo yum install kernel-debuginfo-common-x86_64.x86_64-2.6.32 -y
sudo yum install kernel-devel.x86_64-2.6.32 -y
sudo yum install kernel-headers.x86_64-2.6.32 -y
sudo yum install perf.x86_64-2.6.32 -y
sudo yum install perf-debuginfo.x86_64-2.6.32 -y
sudo yum install python-perf-debuginfo.x86_64-2.6.32 -y
sudo yum install python-perf.x86_64-2.6.32 -y
|
#!/bin/bash
# DESCRIPTION
# --------------------------------------------------------------------------------------------------
# This script maps FreeSurfer .annot in fsaverage space to individual T1w (or dwi) space.
#
#
# DATA PREPARATION
# --------------------------------------------------------------------------------------------------
# Data need to be in BIDS format.
#
#
# PRE-ASSUMPTION
# --------------------------------------------------------------------------------------------------
# - FreeSurfer needs to be installed.
#
# - FreeSurfer recon-all needs to be run on all subjects.
#
# - If you want to transfer the atlas to DWI space, the DWI->T1w transformation matrix should be created,
# and converted to MRtrix format by, for example, running
# * mrtrix_tractography_BIDS_Raijin_singleShell.sh *
#
#
# USAGE
# --------------------------------------------------------------------------------------------------
# $1 = path to BIDS project folder
#
# $2 = subject ID (sub-*)
#
# $3 = path to FreeSurfer annot file (either lh or rh, do not use ?h). For example :
#
# /path/to/atlas/lh.myatlas.annot
#
# special case : if using HCP-MMP1 atlas (lh.HCP-MMP1.annot and rh.HCP-MMP1.annot),
# pass 'HCP-MMP1'. If using the Desikan-Killiany atlas, pass 'Desikan'.
#
# $4 = 'yesMap2dwi' or 'noMap2dwi'
#
#
# OUTPUT
# --------------------------------------------------------------------------------------------------
# ${BIDS_folder}/derivatives/atlas/${FSannot}_dwiSpace/${subjID}_${FSannot}_labelCorrected.mif
#
# OR
#
# ${BIDS_folder}/derivatives/atlas/${FSannot}_T1wSpace/${subjID}_${FSannot}_labelCorrected.mif
#
#
#
# NOTES AND REFERENCES
# --------------------------------------------------------------------------------------------------
# This script was modified from the BATMAN MRtrix tutorial appendix.
#
#
# --------------------------------------------------------------------------------------------------
#
# Dr. Jiyang Jiang, February 2019.
#
# --------------------------------------------------------------------------------------------------
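# EXAMPLE (hypothetical script name and paths, for illustration only):
#
# ./FSatlas2dwiSpace.sh /data/myStudy_BIDS sub-01 HCP-MMP1 yesMap2dwi
#
# --------------------------------------------------------------------------------------------------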
FSatlas2dwiSpace(){
BIDS_folder=$1
subjID=$2
FSannot_path=$3
map2dwi_flag=$4
subjects_dir=${BIDS_folder}/derivatives/freesurfer/recon-all
export SUBJECTS_DIR=${subjects_dir}
# --== 3 ==-- Map the annotation files of HCP MMP 1.0 atlas from fsaverage to your subject
case ${FSannot_path} in
HCP-MMP1)
atlas_path=$(dirname $(which $0))
FSannot="HCP-MMP1"
;;
Desikan)
atlas_path="${subjects_dir}/fsaverage/label"
FSannot="Desikan"
;;
*)
atlas_path=$(dirname ${FSannot_path})
FSannot=$(basename ${FSannot_path} | awk -F '.' '{print $2}')
;;
esac
# ------------- #
# output folder #
# ------------- #
case ${map2dwi_flag} in
yesMap2dwi)
if [ ! -d "${BIDS_folder}/derivatives/atlas/${FSannot}_dwiSpace" ]; then
mkdir -p ${BIDS_folder}/derivatives/atlas/${FSannot}_dwiSpace
fi
outputFolder=${BIDS_folder}/derivatives/atlas/${FSannot}_dwiSpace
;;
noMap2dwi)
if [ ! -d "${BIDS_folder}/derivatives/atlas/${FSannot}_T1wSpace" ]; then
mkdir -p ${BIDS_folder}/derivatives/atlas/${FSannot}_T1wSpace
fi
outputFolder=${BIDS_folder}/derivatives/atlas/${FSannot}_T1wSpace
;;
esac
if [ ! -f "${subjects_dir}/fsaverage/label/lh.${FSannot}.annot" ]; then
cp ${atlas_path}/?h.${FSannot}.annot ${subjects_dir}/fsaverage/label/.
fi
mri_surf2surf --srcsubject fsaverage \
--trgsubject ${subjID} \
--hemi lh \
--sval-annot ${subjects_dir}/fsaverage/label/lh.${FSannot}.annot \
--tval ${subjects_dir}/${subjID}/label/lh.${FSannot}.annot
mri_surf2surf --srcsubject fsaverage \
--trgsubject ${subjID} \
--hemi rh \
--sval-annot ${subjects_dir}/fsaverage/label/rh.${FSannot}.annot \
--tval ${subjects_dir}/${subjID}/label/rh.${FSannot}.annot
# We will now map those annotations to the volumetric image, additionally labeling subcortical structures.
# The resulting image will have the atlas-based segmentation, but with more or less random integers assigned.
# MRtrix requires that the integers start with 1 and increase by 1.
# For that, we need to provide two color-lookup tables - one with original integers and one with the ordered
# integers. These are available in $MRtrix3/share/MRtrix3/labelconvert in the latest release, and also in the
# current BATMAN_tutorial folder.
# Finally, we need to coregister the parcellation image to diffusion space.
# --== 4 ==-- Map the HCP MMP 1.0 annotations onto the volumetric image and add FreeSurfer-specific
# subcortical segmentation. Convert the resulting file to .mif format (uint32 - best for MRtrix)
mri_aparc2aseg --old-ribbon \
--s ${subjID} \
--annot ${FSannot} \
--o ${outputFolder}/${subjID}_${FSannot}.mgz
mrconvert -datatype uint32 \
${outputFolder}/${subjID}_${FSannot}.mgz \
${outputFolder}/${subjID}_${FSannot}.mif \
-force
# --== 5 ==-- Replace the random integers of the hcpmmp1.mif file with integers that start from 1 and
# increase by 1
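# NOTE: the labelconvert lookup-table paths below (/short/ba64/...) are
# site-specific; point them at your own MRtrix3 share/mrtrix3/labelconvert.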
if [ "${FSannot_path}" = "HCP-MMP1" ]; then
labelconvert ${outputFolder}/${subjID}_${FSannot}.mif \
/short/ba64/jyj561/Software/mrtrix3/share/mrtrix3/labelconvert/hcpmmp1_original.txt \
/short/ba64/jyj561/Software/mrtrix3/share/mrtrix3/labelconvert/hcpmmp1_ordered.txt \
${outputFolder}/${subjID}_${FSannot}_t1space.mif \
-force
elif [ "${FSannot_path}" = "Desikan" ]; then
# Ref : https://mrtrix.readthedocs.io/en/latest/quantitative_structural_connectivity/labelconvert_tutorial.html
labelconvert ${subjects_dir}/${subjID}/mri/aparc+aseg.mgz \
${FREESURFER_HOME}/FreeSurferColorLUT.txt \
/short/ba64/jyj561/Software/mrtrix3/share/mrtrix3/labelconvert/fs_default.txt \
${outputFolder}/${subjID}_${FSannot}_t1space.mif \
-force
else
mv ${outputFolder}/${subjID}_${FSannot}.mif ${outputFolder}/${subjID}_${FSannot}_t1space.mif
fi
# --== 6 ==-- Register the ordered atlas-based volumetric parcellation to diffusion space
case ${map2dwi_flag} in
yesMap2dwi)
mrtransform ${outputFolder}/${subjID}_${FSannot}_t1space.mif \
-linear ${BIDS_folder}/derivatives/mrtrix/DWI_to_T1w/${subjID}_diff2annat_mrtrix.txt \
-inverse \
-datatype uint32 \
${outputFolder}/${subjID}_${FSannot}_dwiSpace.mif \
-force
;;
noMap2dwi)
# not mapping to dwi space
;;
esac
}
FSatlas2dwiSpace "$1" "$2" "$3" "$4" |
#!/bin/bash -u
# Copyright 2018 ConsenSys AG.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
NO_LOCK_REQUIRED=true
. ./.env
. ./.common.sh
HOST=${DOCKER_PORT_2375_TCP_ADDR:-"localhost"}
docker-compose -f docker-compose.blockscout.yml up --detach
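# Give the Blockscout containers time to start before announcing the endpoint.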
sleep 60
echo "Blockscout service endpoint : http://${HOST}:26000"
|