code (string, 2-1.05M chars) | repo_name (string, 5-110 chars) | path (string, 3-922 chars) | language (1 value) | license (15 values) | size (int64, 2-1.05M)
---|---|---|---|---|---|
#!/bin/bash
# -----------------------------------------------------------------------------
#
# Copyright (C) 2021 CERN & University of Surrey for the benefit of the
# BioDynaMo collaboration. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# See the LICENSE file distributed with this work for details.
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# -----------------------------------------------------------------------------
# exit as soon as an error occurs
set -e
if [[ $# -ne 1 ]]; then
echo "Wrong number of arguments.
DESCRIPTION:
Removes an archived branch locally and from origin
USAGE:
remove.sh ARCHIVE_NAME
ARGUMENTS:
ARCHIVE_NAME Name of the archived branch
Use list.sh to get a list of all archived branch names
Use the name without 'refs/archive/' in the beginning"
exit 1
fi
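# Example (hypothetical archive name): ./remove.sh 2021-06-old-feature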
ARCHIVE_NAME=$1
GIT_FOLDER_PATH=$(git rev-parse --show-toplevel)/.git
echo "Remove locally"
rm ${GIT_FOLDER_PATH}/refs/archive/${ARCHIVE_NAME}
echo "Remove from origin"
git push origin :refs/archive/${ARCHIVE_NAME}
| BioDynaMo/biodynamo | util/git/archive_branch/remove.sh | Shell | apache-2.0 | 1,230 |
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
set -x
source tensorflow/tools/ci_build/release/common.sh
set_bazel_outdir
install_ubuntu_16_pip_deps pip3.8
install_bazelisk
python2.7 tensorflow/tools/ci_build/update_version.py --nightly
# Run configure.
export TF_NEED_GCP=1
export TF_NEED_HDFS=1
export TF_NEED_S3=1
export TF_NEED_CUDA=0
export CC_OPT_FLAGS='-mavx'
export PYTHON_BIN_PATH=$(which python3.8)
yes "" | "$PYTHON_BIN_PATH" configure.py
# Build the pip package
bazel build --config=opt --config=v2 \
--crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \
tensorflow/tools/pip_package:build_pip_package
./bazel-bin/tensorflow/tools/pip_package/build_pip_package pip_pkg --cpu --nightly_flag
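# The nightly CPU wheels are written to ./pip_pkg as tf_nightly_cpu-*.whl; auditwheel below retags them for manylinux2010.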
# Upload the built packages to pypi.
for WHL_PATH in $(ls pip_pkg/tf_nightly_cpu-*dev*.whl); do
WHL_DIR=$(dirname "${WHL_PATH}")
WHL_BASE_NAME=$(basename "${WHL_PATH}")
AUDITED_WHL_NAME="${WHL_DIR}"/$(echo "${WHL_BASE_NAME//linux/manylinux2010}")
auditwheel repair --plat manylinux2010_x86_64 -w "${WHL_DIR}" "${WHL_PATH}"
# test the whl pip package
chmod +x tensorflow/tools/ci_build/builds/nightly_release_smoke_test.sh
./tensorflow/tools/ci_build/builds/nightly_release_smoke_test.sh ${AUDITED_WHL_NAME}
RETVAL=$?
# Upload the PIP package if whl test passes.
if [ ${RETVAL} -eq 0 ]; then
echo "Basic PIP test PASSED, Uploading package: ${AUDITED_WHL_NAME}"
twine upload -r pypi-warehouse "${AUDITED_WHL_NAME}" || echo
else
echo "Basic PIP test FAILED, will not upload ${AUDITED_WHL_NAME} package"
    exit 1
fi
done
| jhseu/tensorflow | tensorflow/tools/ci_build/release/ubuntu_16/cpu_py38_full/nightly_release.sh | Shell | apache-2.0 | 2,289 |
#!/bin/bash -e
set -x
if [ -z "$4" ]; then
echo
echo "Error: Usage is $0 productName fullVersion releaseNumber outBuildDir"
exit 1
fi
FILENAME_PREFIX="$1"
FULL_VERSION="$2"
RELEASE_NUMBER="$3"
OUT_BUILD_DIR="$4"
curDir="`dirname $0`"
curDir="`cd $curDir; pwd`"
RPM_SOURCES_DIR="$OUT_BUILD_DIR/SOURCES"
if [ -z "$OUT_BUILD_DIR" ] || [ ! -d "$OUT_BUILD_DIR" ]; then
echo
echo "Error: The output directory $OUT_BUILD_DIR does not exists!"
exit 1
fi
echo
ARTIFACTORY_VERSION=`echo "$FULL_VERSION" | sed 's/SNAPSHOT/devel/g; s/-/./g;'`
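# e.g. an illustrative FULL_VERSION of 5.4.0-SNAPSHOT becomes an artifactory_version of 5.4.0.devel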
cd $curDir && rpmbuild -bb \
--define="_tmppath $OUT_BUILD_DIR/tmp" \
--define="_topdir $PWD" \
--define="_rpmdir $OUT_BUILD_DIR" \
--define="buildroot $OUT_BUILD_DIR/BUILDROOT" \
--define="_sourcedir $RPM_SOURCES_DIR" \
--define="artifactory_version $ARTIFACTORY_VERSION" \
--define="artifactory_release $RELEASE_NUMBER" \
--define="filename_prefix $FILENAME_PREFIX" \
--define="full_version $FULL_VERSION" \
SPECS/artifactory-oss.spec
| alancnet/artifactory | distribution/rpm/build.rpm.sh | Shell | apache-2.0 | 997 |
#!/bin/bash
set -x
set -e
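# Create the cluster spec plus the master and node instance groups from their manifests, then provision the cluster secrets.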
kops create -f cluster.yml
kops create -f masters.yml
kops create -f nodes.yml
bash ./secret.sh
| reactiveops/pentagon | pentagon/component/kops/files/kops.sh | Shell | apache-2.0 | 123 |
source $TREC_VENV/bin/activate
export DATA=$TREC/data2
source $CUTTSUM/events.sh
source $CUTTSUM/models.sh
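# First pass: score the 'ap' system summaries against the model updates for every 2013 event, sentence-similarity model, and temporal-penalty setting.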
for query_event in "${TS_EVENTS_2013[@]}"; do
    IFS='|'; set ${query_event};
    event=$2
    for model in "${TS_MODELS_2013[@]}"; do
        for dim in "100"; do
            for temp_mode in temp,max,100 temp,agg,100 no-temp,ign,NaN ; do
                IFS=","; set $temp_mode;
                use_temp=$1
                pen_mode=$2
                scale=$3
                SYS_FILE="${DATA}/summarizer-output/ap/${event}/scale.${scale}.${model}.gold.max.l${dim}.${use_temp}.${pen_mode}/updates.txt"
                MOD_FILE=$DATA/summarizer-output/models/${event}/updates.txt
                OFILE=$DATA/evaluation/auto-trec/ap/${event}/scale.${scale}.${model}.gold.max.l${dim}.${use_temp}.${pen_mode}/updates.txt
                if [ ! -f $SYS_FILE ] || [ ! -f $MOD_FILE ]; then
                    continue
                fi
                echo "$SYS_FILE VS $MOD_FILE"
                python -u $CUTTSUM/eval-scripts/auto_trec_eval.py \
                    -s $SYS_FILE \
                    -m $MOD_FILE \
                    --sent-sim-model $DATA/sentence-sim/${model}_spl_model/model_${dim}.p \
                    --sent-sim-vocab $DATA/sentence-sim/${model}_spl_model/vocab.txt \
                    -o $OFILE
            done
        done
    done
done
RANK="${DATA}/summarizer-output/rank"
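# Second pass: score the ranked top-N system summaries the same way, writing per-run .tsv result files.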
for query_event in "${TS_EVENTS_2013[@]}"; do
    IFS='|'; set ${query_event};
    event=$2
    for model in "${TS_MODELS_2013[@]}"; do
        for dim in "100"; do
            for topn in 1 5 10; do
                for temp_mode in temp,max temp,agg no-temp,ign ; do
                    IFS=","; set $temp_mode;
                    use_temp=$1
                    pen_mode=$2
                    SYS_FILE=${RANK}/${event}/top${topn}.${model}.gold.max.l${dim}.${use_temp}.${pen_mode}/updates.txt
                    MOD_FILE=$DATA/summarizer-output/models/${event}/updates.txt
                    OFILE=$DATA/evaluation/auto-trec/rank/${event}/top${topn}.${model}.gold.max.l${dim}.${use_temp}.${pen_mode}.tsv
                    if [ ! -f $SYS_FILE ] || [ ! -f $MOD_FILE ]; then
                        continue
                    fi
                    python -u $CUTTSUM/eval-scripts/auto_trec_eval.py \
                        -s $SYS_FILE \
                        -m $MOD_FILE \
                        --sent-sim-model $DATA/sentence-sim/${model}_spl_model/model_${dim}.p \
                        --sent-sim-vocab $DATA/sentence-sim/${model}_spl_model/vocab.txt \
                        -o $OFILE
                done
            done
        done
    done
done
| kedz/cuttsum | old/eval-scripts/run.sh | Shell | apache-2.0 | 2,148 |
DISPLAY=":0" DBUS_SESSION_BUS_ADDRESS="unix:abstract=/tmp/dbus-VtiCpVkNOX,guid=2190ab4179e97e6607b9e48b56f51b54" notify-send "Rating received $1" -i ~/Downloads/droid_control.png
echo "`date '+%Y-%m-%d.%H:%M:%S'` custom 1 ran" >> ~/control/control.log
| lerignoux/droid_control | scripts/custom_1.sh | Shell | apache-2.0 | 252 |
GBP="Group-Based Policy"
[[ $ENABLE_NFP = True ]] && NFP="Network Function Plugin"
function gbp_configure_nova {
iniset $NOVA_CONF neutron allow_duplicate_networks "True"
}
function gbp_configure_heat {
local HEAT_PLUGINS_DIR="/opt/stack/gbpautomation/gbpautomation/heat"
iniset $HEAT_CONF DEFAULT plugin_dirs "$HEAT_PLUGINS_DIR"
}
function gbp_configure_neutron {
iniset $NEUTRON_CONF group_policy policy_drivers "implicit_policy,resource_mapping,chain_mapping"
iniset $NEUTRON_CONF group_policy extension_drivers "proxy_group"
iniset $NEUTRON_CONF servicechain servicechain_drivers "simplechain_driver"
iniset $NEUTRON_CONF node_composition_plugin node_plumber "stitching_plumber"
iniset $NEUTRON_CONF node_composition_plugin node_drivers "heat_node_driver"
iniset $NEUTRON_CONF quotas default_quota "-1"
iniset $NEUTRON_CONF quotas quota_network "-1"
iniset $NEUTRON_CONF quotas quota_subnet "-1"
iniset $NEUTRON_CONF quotas quota_port "-1"
iniset $NEUTRON_CONF quotas quota_security_group "-1"
iniset $NEUTRON_CONF quotas quota_security_group_rule "-1"
iniset $NEUTRON_CONF quotas quota_router "-1"
iniset $NEUTRON_CONF quotas quota_floatingip "-1"
iniset $NEUTRON_CONF agent extensions "qos"
}
function nfp_configure_neutron {
NEUTRON_ML2_CONF="/etc/neutron/plugins/ml2/ml2_conf.ini"
iniset $NEUTRON_CONF keystone_authtoken project_name "service"
iniset $NEUTRON_CONF keystone_authtoken username "neutron"
iniset $NEUTRON_CONF keystone_authtoken password $ADMIN_PASSWORD
iniset $NEUTRON_CONF node_composition_plugin node_plumber "admin_owned_resources_apic_plumber"
iniset $NEUTRON_CONF node_composition_plugin node_drivers "nfp_node_driver"
iniset $NEUTRON_CONF admin_owned_resources_apic_tscp plumbing_resource_owner_user "neutron"
iniset $NEUTRON_CONF admin_owned_resources_apic_tscp plumbing_resource_owner_password $ADMIN_PASSWORD
iniset $NEUTRON_CONF admin_owned_resources_apic_tscp plumbing_resource_owner_tenant_name "service"
if [[ $EXT_NET_GATEWAY && $EXT_NET_ALLOCATION_POOL_START && $EXT_NET_ALLOCATION_POOL_END && $EXT_NET_CIDR ]]; then
iniset $NEUTRON_CONF group_policy_implicit_policy default_external_segment_name "default"
fi
iniset $NEUTRON_CONF nfp_node_driver is_service_admin_owned "False"
iniset $NEUTRON_CONF nfp_node_driver svc_management_ptg_name "svc_management_ptg"
extn_drivers=$(iniget $NEUTRON_ML2_CONF ml2 extension_drivers)
if [[ -n $extn_drivers ]];then
iniset $NEUTRON_ML2_CONF ml2 extension_drivers $extn_drivers,port_security
else
iniset $NEUTRON_ML2_CONF ml2 extension_drivers port_security
fi
}
function configure_nfp_loadbalancer {
echo "Configuring NFP Loadbalancer plugin driver"
LBAAS_SERVICE_PROVIDER=LOADBALANCERV2:loadbalancerv2:gbpservice.contrib.nfp.service_plugins.loadbalancer.drivers.nfp_lbaasv2_plugin_driver.HaproxyOnVMPluginDriver:default
sudo\
sed\
-i\
'/^service_provider.*:default/'\
's'/\
':default'/\
'\n'\
"service_provider = $LBAAS_SERVICE_PROVIDER"/\
/etc/neutron/neutron_lbaas.conf
}
function configure_nfp_firewall {
echo "Configuring NFP Firewall plugin"
sudo\
sed\
-i\
'/^service_plugins/'\
's'/\
'neutron_fwaas.services.firewall.fwaas_plugin.FirewallPlugin'/\
'gbpservice.contrib.nfp.service_plugins.firewall.nfp_fwaas_plugin.NFPFirewallPlugin'/\
/etc/neutron/neutron.conf
}
function configure_nfp_vpn {
echo "Configuring NFP VPN plugin driver"
sudo\
sed\
-i\
'/^service_provider.*IPsecVPNDriver:default/'\
's'/\
':default'/\
'\n'\
'service_provider = VPN:vpn:gbpservice.contrib.nfp.service_plugins.vpn.drivers.nfp_vpnaas_driver.NFPIPsecVPNDriver:default'/\
/etc/neutron/neutron_vpnaas.conf
}
# Process contract
if is_service_enabled group-policy; then
if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
echo_summary "Preparing $GBP"
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing $GBP"
[[ $ENABLE_APIC_AIM = True || $ENABLE_APIC_AIM_GATE = True ]] && install_apic_aim
if [[ $ENABLE_NFP = True ]]; then
echo_summary "Installing $NFP"
prepare_nfp_image_builder
fi
if [[ $ENABLE_NSX_POLICY = True ]]; then
echo_summary "Installing NSX Policy requirements"
prepare_nsx_policy
fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
echo_summary "Configuring $GBP"
        # REVISIT Ideally, we should be configuring nova, heat and UI as well for GBP in the
# GBP devstack gate job. However, contrary to the documentation, this block
# of code is not being invoked by devstack after the nova, heat and
# dashboard config files have been created. Once this is sorted out, the
# ENABLE_GBP_GATE variable can be eliminated.
[[ $ENABLE_GBP_GATE = False && $ENABLE_APIC_AIM_GATE = False ]] && gbp_configure_nova
[[ $ENABLE_GBP_GATE = False && $ENABLE_APIC_AIM_GATE = False ]] && gbp_configure_heat
gbp_configure_neutron
if [[ $ENABLE_NSX_POLICY = True ]]; then
echo_summary "Configuring NSX"
nsx_configure_neutron
fi
if [[ $ENABLE_NFP = True ]]; then
echo_summary "Configuring $NFP"
nfp_configure_neutron
if [[ $NFP_DEVSTACK_MODE = advanced ]]; then
configure_nfp_loadbalancer
configure_nfp_firewall
configure_nfp_vpn
fi
fi
# REVISIT move installs to install phase?
install_gbpclient
install_gbpservice
[[ $ENABLE_NFP = True ]] && install_nfpgbpservice
init_gbpservice
[[ $ENABLE_NFP = True ]] && init_nfpgbpservice
[[ $ENABLE_GBP_GATE = False && $ENABLE_APIC_AIM_GATE = False ]] && install_gbpheat
[[ $ENABLE_GBP_GATE = False && $ENABLE_APIC_AIM_GATE = False ]] && install_gbpui
[[ $ENABLE_APIC_AIM = True || $ENABLE_APIC_AIM_GATE = True ]] && configure_apic_aim
[[ $ENABLE_GBP_GATE = False && $ENABLE_APIC_AIM_GATE = False ]] && stop_apache_server
[[ $ENABLE_GBP_GATE = False && $ENABLE_APIC_AIM_GATE = False ]] && start_apache_server
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
echo_summary "Initializing $GBP"
if [[ $ENABLE_NFP = True ]]; then
echo_summary "Initializing $NFP"
assign_user_role_credential
create_nfp_gbp_resources
create_nfp_image
[[ $NFP_DEVSTACK_MODE = advanced ]] && launch_configuratorVM
copy_nfp_files_and_start_process
fi
fi
if [[ "$1" == "unstack" ]]; then
echo_summary "Removing $GBP"
fi
if [[ "$1" == "clean" ]]; then
echo_summary "Cleaning $GBP"
fi
fi
| noironetworks/group-based-policy | devstack/plugin.sh | Shell | apache-2.0 | 6,861 |
#!/usr/bin/env bash
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# This script extracts from all jars in the specified directory the NOTICE files and the
# licenses folders. It then concatenates all NOTICE files and collects the contents of all
# licenses folders in the specified output directory.
#
# This tool can be used to generate a rough skeleton for the binary NOTICE file. Be aware,
# that it does not deduplicate contents.
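# Example invocation (illustrative paths): ./collect_license_files.sh ./some-build-dir ./licenses-output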
set -Eeuo pipefail
SRC=${1:-.}
DST=${2:-licenses-output}
PWD=$(pwd)
TMP="${DST}/tmp"
DIR=$(dirname "$0")
NOTICE_BINARY_PREAMBLE="${DIR}/NOTICE-binary_PREAMBLE.txt"
SLF4J_LICENSE="${DIR}/LICENSE.slf4j"
USAGE="collect_license_files <SOURCE_DIRECTORY:-.> <OUTPUT_DIRECTORY:-licenses-output>"
if [ "${SRC}" = "-h" ]; then
echo "${USAGE}"
exit 0
fi
for i in $(find -L "${SRC}" -name "*.jar")
do
DIR="${TMP}/$(basename -- "$i" .jar)"
mkdir -p "${DIR}"
JAR="${PWD}/${i}"
(cd "${DIR}" && jar xf ${JAR} META-INF/NOTICE META-INF/licenses)
done
NOTICE="${DST}/NOTICE-binary"
[ -f "${NOTICE}" ] && rm "${NOTICE}"
cp "${NOTICE_BINARY_PREAMBLE}" "${NOTICE}"
(export LC_ALL=C; find "${TMP}" -name "NOTICE" | sort | xargs cat >> "${NOTICE}")
LICENSES="${DST}/licenses-binary"
[ -f "${LICENSES}" ] && rm -r "${LICENSES}"
find "${TMP}" -name "licenses" -type d -exec cp -r -- "{}" "${DST}" \;
mv "${DST}/licenses" "${LICENSES}"
cp "${SLF4J_LICENSE}" "${LICENSES}"
rm -r "${TMP}"
| fhueske/flink | tools/releasing/collect_license_files.sh | Shell | apache-2.0 | 2,322 |
#!/bin/bash
set -e
exec supervisord -n
| vjekoslav/trusty-ssh | image/entrypoint.sh | Shell | apache-2.0 | 40 |
#!/usr/bin/env bash
# Verify variables
echo -e "Project is ${PROJECT_ID}"
# Create kubeconfig and get cluster creds
export WORKDIR=`pwd`
echo -e "Adding cluster ${CLUSTER} to kubeconfig located at ${WORKDIR}/tempkubeconfig"
echo -e "Creating tempkubeconfig."
touch ./tempkubeconfig
export KUBECONFIG=${WORKDIR}/tempkubeconfig
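# Collect the cluster names and locations into two parallel arrays, then zip them into an associative array keyed by cluster name.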
declare -a CLUSTER_NAMES=()
for i in `gcloud container clusters list --project ${PROJECT_ID} --format="value(name)"`; do
CLUSTER_NAMES+=("$i")
done
declare -a CLUSTER_LOCATIONS=()
for i in `gcloud container clusters list --project ${PROJECT_ID} --format="value(location)"`; do
CLUSTER_LOCATIONS+=("$i")
done
declare -A NAMES_LOCATIONS
for ((i=0; $i<${#CLUSTER_NAMES[@]}; i++))
do
NAMES_LOCATIONS+=( ["${CLUSTER_NAMES[i]}"]="${CLUSTER_LOCATIONS[i]}" )
done
for CLUSTER_NAME in "${!NAMES_LOCATIONS[@]}"; do
gcloud container clusters get-credentials $CLUSTER_NAME --region ${NAMES_LOCATIONS[$CLUSTER_NAME]} --project ${PROJECT_ID}
done
NUM_CONTEXTS=`kubectl config view -o jsonpath='{.users[*].name}' | wc -w`
NUM_CLUSTERS=`gcloud container clusters list --project ${PROJECT_ID} --format="value(name)" | wc -l`
if [[ ${NUM_CONTEXTS} != ${NUM_CLUSTERS} ]]; then
echo -e "There was an error getting credentials for all the gketoolkit clusters"
exit 1
else
echo -e "Kubeconfig is setup with all gketoolkit clusters credentials"
fi
| GoogleCloudPlatform/gke-poc-toolkit | terraform/modules/mcg/scripts/create_kube_config.sh | Shell | apache-2.0 | 1,387 |
#!/usr/bin/env bash
sudo apt update
sudo apt install -y \
python python-gtk2 python-xlib python-dbus python-wnck python-setuptools
QUICKTILE_INSTALL="$HOME/opt/quicktile"
git clone https://github.com/ssokolow/quicktile.git $QUICKTILE_INSTALL
| rafamoreira/dotfiles | bootstrap/debian/quicktile.sh | Shell | bsd-2-clause | 246 |
#!/usr/bin/env bash
MASON_NAME=benchmark
MASON_VERSION=1.0.0
MASON_LIB_FILE=lib/libbenchmark.a
. ${MASON_DIR}/mason.sh
function mason_load_source {
mason_download \
https://github.com/google/benchmark/archive/v1.0.0.tar.gz \
dcf87e5faead951fd1e9ab103cb36a7c8ebe4837
mason_extract_tar_gz
export MASON_BUILD_PATH=${MASON_ROOT}/.build/benchmark-${MASON_VERSION}
}
function mason_compile {
rm -rf build
mkdir -p build
cd build
if [ ${MASON_PLATFORM} == 'ios' ] ; then
# Make sure CMake thinks we're cross-compiling and manually set the exit codes
# because CMake can't run the test programs
echo "set (CMAKE_SYSTEM_NAME Darwin)" > toolchain.cmake
cmake \
-DCMAKE_TOOLCHAIN_FILE=toolchain.cmake \
-DRUN_HAVE_STD_REGEX=1 \
-DRUN_HAVE_POSIX_REGEX=0 \
-DRUN_HAVE_STEADY_CLOCK=0 \
-DCMAKE_CXX_FLAGS="${CFLAGS:-}" \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX="${MASON_PREFIX}" \
-DBENCHMARK_ENABLE_LTO=ON \
-DBENCHMARK_ENABLE_TESTING=OFF \
..
else
cmake \
${MASON_CMAKE_TOOLCHAIN} \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX="${MASON_PREFIX}" \
-DBENCHMARK_ENABLE_LTO=ON \
-DBENCHMARK_ENABLE_TESTING=OFF \
..
fi
make install -j${MASON_CONCURRENCY}
}
function mason_cflags {
echo -isystem ${MASON_PREFIX}/include
}
function mason_ldflags {
echo -lpthread
}
function mason_static_libs {
echo ${MASON_PREFIX}/${MASON_LIB_FILE}
}
mason_run "$@"
| hydrays/osrm-backend | third_party/mason/scripts/benchmark/1.0.0/script.sh | Shell | bsd-2-clause | 1,661 |
#!/bin/bash
python -m py_compile script.py
| jonghough/SimpleWebsocket | test.sh | Shell | bsd-2-clause | 44 |
#!/bin/bash
cd ~/Projects/chippyash/source/Type-Calculator
vendor/bin/phpunit -c test/phpunit.xml --testdox-html contract.html test/
tdconv -t "Chippyash Strong Type Calculator" contract.html docs/Test-Contract.md
rm contract.html
| chippyash/Math-Type-Calculator | build.sh | Shell | bsd-3-clause | 232 |
#!/bin/sh
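# Cross-compile mruby/c for the ST Nucleo-L476RG (Cortex-M4, hard-float) against a NuttX export tree expected at ./nuttx-export next to this script.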
autoreconf -vfi
CC="arm-none-eabi-gcc"
CXX="arm-none-eabi-g++"
LD="arm-none-eabi-ld"
AR="arm-none-eabi-ar"
NUTTX_EXPORT_PATH=`test -d "${0%/*}/nuttx-export" && cd ${0%/*}/nuttx-export ; pwd`
CPPFLAGS="-isystem ${NUTTX_EXPORT_PATH}/include -isystem ${NUTTX_EXPORT_PATH}/include/cxx" \
CFLAGS="-g -O2 -Wall -Wstrict-prototypes -Wshadow -Wundef -fno-builtin -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard" \
CXXFLAGS="-g -O2 -Wall -Wshadow -Wundef -fno-builtin -fno-exceptions -fcheck-new -fno-rtti -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard" \
LDFLAGS="--entry=__start -nostartfiles -nodefaultlibs -L${NUTTX_EXPORT_PATH}/libs -T${NUTTX_EXPORT_PATH}/build/l476rg.ld" \
LIBS="-Wl,--start-group -lapps -lnuttx -lgcc -Wl,--end-group" \
./configure --host=arm-none-eabi
make
| TJ-Hidetaka-Takano/mrubyc | build_st-nucleo-l476rg.sh | Shell | bsd-3-clause | 812 |
#! /bin/bash
set -ex
testsdir=${0%/*}
cd ${testsdir}
PYTHONPATH=.. python ../src/jinja2-cui r -T . -C x.yaml -C yaml:y_yaml -C yaml:conf.d/*.conf -o /tmp/test.out -D b.template
| ssato/python-jinja2-cli | tests/test.sh | Shell | bsd-3-clause | 177 |
#!/bin/bash
:<<'COMMENT'
Author: Chris Duffy
Date: 2015
Name: setup.sh
Purpose: This installation file does the basic installation of PIP, and relevant Python libraries.
Systems: This has only been tested on Kali
Copyright (c) 2015, Christopher Duffy, Jon Fallone, Dev Patel
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY, JON FALLONE, AND/OR DEV PATEL BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
COMMENT
# Installing PIP
#apt-get clean && apt-get update && apt-get upgrade -y && apt-get dist-upgrade -y # Uncomment if necessary
apt-get -y install python-setuptools python-dev python-pip
# Update setup tools
pip install setuptools --upgrade
# Install Python libraries
pip install netifaces python-nmap colorama
# Upgrade requests
pip install requests --upgrade
touch /usr/bin/ranger && rm -f /usr/bin/ranger
rm -rf /opt/ranger
mkdir -m 777 -p /opt/ranger/smb
mkdir -m 777 -p /opt/ranger/web
mkdir -m 777 -p /opt/ranger/log
mkdir -m 777 -p /opt/ranger/results/secrets_dump
mkdir -m 777 -p /opt/ranger/results/invoker
mkdir -m 777 -p /opt/ranger/results/groups
mkdir -m 777 -p /opt/ranger/results/logged_in_users
mkdir -m 777 -p /opt/ranger/results/command
mkdir -m 777 -p /opt/ranger/results/downloader
mkdir -m 777 -p /opt/ranger/results/credentials
mkdir -m 777 -p /opt/ranger/results/recovery
touch /opt/ranger/web/pv.ps1 && rm /opt/ranger/web/pv.ps1
touch /opt/ranger/web/im.ps1 && rm /opt/ranger/web/im.ps1
touch /opt/ranger/smb/pv.ps1 && rm /opt/ranger/smb/pv.ps1
touch /opt/ranger/smb/im.ps1 && rm /opt/ranger/smb/im.ps1
wget https://raw.githubusercontent.com/funkandwagnalls/PowerTools/master/PowerView/powerview.ps1 -O /opt/ranger/web/pv.ps1
wget https://raw.githubusercontent.com/funkandwagnalls/PowerSploit/master/Exfiltration/Invoke-Mimikatz.ps1 -O /opt/ranger/web/im.ps1
cd /opt/ranger/web
chmod a+x pv.ps1 im.ps1
cp -p pv.ps1 im.ps1 /opt/ranger/smb/
cd /opt/ranger
wget https://pypi.python.org/packages/source/i/impacket/impacket-0.9.13.tar.gz -O /opt/ranger/impacket.tar.gz
tar -zxvf impacket.tar.gz
rm -rf impacket.tar.gz
mv impacket-0.9.13 impacket
rm -rf /opt/ranger/build
cd /opt/ranger/impacket
python ./setup.py install
python /opt/ranger/impacket/setup.py install
touch /opt/ranger/impacket/examples/ranger.py && rm -f /opt/ranger/impacket/examples/ranger.py
wget https://raw.githubusercontent.com/funkandwagnalls/ranger/master/ranger.py -O /opt/ranger/impacket/examples/ranger.py && chmod a+x /opt/ranger/impacket/examples/ranger.py
ln -sfT /opt/ranger/impacket/examples/ranger.py /usr/bin/ranger
chmod -R 755 /opt/ranger
chown -R root:root /opt/ranger
| funkandwagnalls/ranger | setup.sh | Shell | bsd-3-clause | 3,953 |
#!/bin/sh
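# Note: the %(HOME)s-style fields are Python string-format placeholders, presumably filled in when this template is rendered (e.g. by Fabric) before deployment.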
cd %(HOME)s/work/OpenTrain/webserver/opentrain
exec gunicorn -p %(HOME)s/opentrain.id \
-b 127.0.0.1:9000 \
-w 3 opentrain.wsgi:application
| hasadna/OpenTrain | webserver/FAB/files/run_gunicorn.sh | Shell | bsd-3-clause | 161 |
#!/bin/bash
# set -x
# https://scriptingosx.com/2021/07/notarize-a-command-line-tool-with-notarytool/
VERSION=$1
if [[ -z "${VERSION}" ]]; then
echo "no version specified."
echo "invoke with 'notarize-macos.sh <version> <cert_id> <keychain_profile>'"
exit
fi
# the value for CERT_ID is the team name of the "Application ID"
# certificate obtained in the Apple developer portal, then imported
# into the system's keychain.
CERT_ID=$2
if [[ -z "${CERT_ID}" ]]; then
echo "no cert_id specified."
echo "invoke with 'notarize-macos.sh <version> <cert_id> <keychain_profile>'"
exit
fi
# the keychain profile is created locally as follows:
# xcrun notarytool store-credentials --apple-id "name@example.com"
# during the creation process you will be prompted for an app-level
# password; this was generated in the apple developer portal and
# saved in your password store. we usually call this keychain
# "musikcube-notarytool"
KEYCHAIN_PROFILE=$3
if [[ -z "${KEYCHAIN_PROFILE}" ]]; then
echo "no keychain_profile specified."
echo "invoke with 'notarize-macos.sh <version> <cert_id> <keychain_profile>'"
exit
fi
ARCH=$(uname -m)
DIR="./dist/${VERSION}/musikcube_standalone_macos_${ARCH}_${VERSION}"
ARCHIVE="./dist/${VERSION}/musikcube_standalone_macos_${ARCH}_${VERSION}.zip"
pushd $DIR
codesign --remove-signature musikcube musikcubed libmusikcore.dylib
codesign --remove-signature lib/*.dylib
codesign --remove-signature plugins/*.dylib
codesign --force --timestamp --options=runtime --sign $CERT_ID musikcube musikcubed
codesign --force --timestamp --sign $CERT_ID libmusikcore.dylib
codesign --force --timestamp --sign $CERT_ID lib/*.dylib
codesign --force --timestamp --sign $CERT_ID plugins/*.dylib
popd
ditto -c -k --keepParent $DIR $ARCHIVE
xcrun notarytool submit $ARCHIVE --keychain-profile "$KEYCHAIN_PROFILE" --wait
| clangen/musikcube | script/notarize-macos.sh | Shell | bsd-3-clause | 1,854 |
#!/bin/bash
#See http://tldp.org/HOWTO/Bash-Prog-Intro-HOWTO.html#toc6
# This function displays an error message and exit
# Usage: exitOnError "error message"
# Returnstatus: 1
function exitOnError
{
echo -e "\033[1;31mERROR:\033[0m $1" 1>&2
exit 1
}
# check if script is run in application root
script_base=$(pwd)
script_dir=$(dirname $0)
if [ $script_dir != '.' ];
then
exitOnError "Script should be run in ./scripts directory.";
fi
cd ..
# @todo for all *.po do... ;-)
cmd="xgettext -L PHP -k_ -ktranslate -o messages.pot --no-wrap --copyright-holder=EgonCommerce --package-name=PremiumCMS $(find . -name "*.php" -o -name "*.phtml")";
eval $cmd;
# abort if the previous command returned a non-zero exit code
if [ "$?" -ne "0" ];
then
cd $script_base;
exitOnError "xgettext could not create a new POT file... exiting"
fi
TRANSLATION_PATH="module/Application/language"
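# Merge the freshly extracted strings into the Dutch (nl_NL) catalog, then compile it to a binary .mo file.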
msgmerge $TRANSLATION_PATH/nl_NL.po messages.pot -o $TRANSLATION_PATH/nl_NL.po
msgfmt -o $TRANSLATION_PATH/nl_NL.mo $TRANSLATION_PATH/nl_NL.po
rm messages.pot
cd $script_base;
| egoncommerce/ember-tutorial | scripts/i18n.sh | Shell | bsd-3-clause | 1,069 |
#!/bin/sh
#untested but hopefully works
#if it doesn't then consider this the install guide
if ! [ $(id -u) = 0 ]; then
echo "Must be run as root. Exiting"
exit 1
fi
make
mv frequencyanalyzer /bin
mv frequencycrack /bin
| 1ndy/frequency_analyzer | install.sh | Shell | bsd-3-clause | 224 |
#!/bin/bash
#
# sample lsf bsub to run an interactive job, optionally on a selected host.
#
# pick a host to land on.
host=${1:-tulgb007}
#
# the -Is says you want an interactive session
# the s says you want a terminal session.
#
# shared_int is the "shared interactive queue"
if [ -z $LSB_BATCH_JID ]; then
set -x
bsub \
-Is \
-n 1 \
-q test_int \
-m $host \
-W 4200 \
/bin/bash
fi
# -q test_int \
# -q shared_int \
# -q excl_int \
| teddylfwu/RandomBinning | randFeatureCodes/codes_binning/qsub_Int.sh | Shell | bsd-3-clause | 455 |
#! /bin/sh
openrc default
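# Switch the 'wifi' runlevel from access-point mode (iptables/dnsmasq/hostapd) to client mode (wpa_supplicant),
# then drop the static wlan0 configuration from dhcpcd.conf and restart dhcpcd.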
rc-update del iptables wifi
rc-update del dnsmasq wifi
rc-update del hostapd wifi
rc-update add wpa_supplicant wifi
sed -i '/^interface wlan0/{s/^interface/#interface/;n;s/^static/#static/}' /etc/dhcpcd.conf
service dhcpcd restart
| fbalakirev/red-pitaya-notes | alpine/wifi/client.sh | Shell | mit | 262 |
#!/bin/bash
# Label Configs location
Github_label_config_directory="$Ash__ACTIVE_MODULE_DIRECTORY/extras/label_configs"
##################################################
# Loads in a config file and handles it
#
# @param $1: The repo name
# @param $2: The labels config name
# @param $3: 1 if this is an import
# 0 if this is the base file
##################################################
Github__labels_handle_config_file(){
# Checking if we've got a valid config file
local label_config_file="$Github_label_config_directory/$2"
if [[ ! -f "$label_config_file" ]]; then
# Import
if [[ $3 -eq 1 ]]; then
Logger__error "Failed to import: $2"
# Base File
else
Logger__error "Requires a valid label config file to be passed in"
Logger__error "Here are the current available label config files:"
ls $Github_label_config_directory
Logger__prompt "Input a label config from above (ex, carrots): "; read label
label_config_file="$Github_label_config_directory/$label"
if [[ ! -f "$label_config_file" ]]; then
# Retry Import
if [[ $3 -eq 1 ]]; then
Logger__error "Failed to import: $label"
else
Logger__error "Label config does not exist."
return
fi
fi
fi
fi
# Adding all labels
while read line; do
# Removing comments
local line=$(echo "$line" | sed 's/\ *#.*//g')
if [[ ${#line} -eq 0 ]]; then
continue
fi
# Handling action
Github__handle_action "$1" "$line"
done < $label_config_file
}
##################################################
# Handles a single line within a config file
#
# @param $1: The repo name
# @param $2: The comment parsed line in a
# label_config file.
##################################################
Github__handle_action(){
# Checking if action
if [[ $2 == -* ]]; then
local action=$(echo $2 | awk -F':' '{print $1}')
# Action is delete
if [[ "$action" == "-delete" ]]; then
local label=$(echo $2 | awk -F':' '{print $2}')
if [[ "$label" == "all" ]]; then
Github__delete_labels "$1"
else
local response=$(Github__delete_label "$1" "$label")
if [[ "$response" = "deleted" ]]; then
Logger__success "Deleted Label: $label"
else
Logger__warning "Failed to delete label: $label"
fi
fi
# Action is import
elif [[ "$action" == "-import" ]]; then
local import_file=$(echo $2 | awk -F':' '{print $2}')
Github__labels_handle_config_file "$1" "$import_file" 1
fi
# Default add line
else
local label=$(echo $2 | awk -F':' '{print $1}')
local color=$(echo $2 | awk -F':' '{print $2}')
local response=$(Github__create_single_label "$1" "$label" "$color")
if [[ "$response" = "added" ]]; then
Logger__success "Added label: $label"
elif [[ "$response" = "updated" ]]; then
Logger__success "Updated label: $label"
else
Logger__warning "Failed to add label: $label"
fi
fi
}
##################################################
# Deletes a single label from a repository
#
# @param $1: The repo name
# @param $2: The label name
#
# @returns: 'failure' if we failed to delete the label
# 'deleted' if we successfully deleted the label
##################################################
Github__delete_label(){
local repo="$1"
local label="$2"
label=$(echo "$label" | sed 's/\ /%20/g')
# Try to delete via DELETE
local delete_response=$(curl \
-s -o /dev/null -w "%{http_code}" \
-H "Authorization: token $GITHUB_TOKEN" \
-X DELETE "https://api.github.com/repos/$repo/labels/$label")
# Checking if DELETE worked
if [[ $delete_response =~ 2.. ]]; then
echo "deleted"
return
fi
echo "failure"
}
##################################################
# Deletes all labels from a repository
#
# @param $1: The repo name
#
# @returns: 'failure' if we failed to delete the label
# 'deleted' if we successfully deleted the label
##################################################
Github__delete_labels(){
local repo="$1"
# fetch all labels
local labels=$(curl \
-s \
-H "Authorization: token $GITHUB_TOKEN" \
-X GET "https://api.github.com/repos/$repo/labels")
echo "$labels" | while IFS='' read -r line || [[ -n "$line" ]]; do
if [[ "$line" =~ (\"name\":)(\s?)(.*)\" ]]; then
if [[ "${BASH_REMATCH[3]}" =~ \"(.*) ]]; then
label="${BASH_REMATCH[1]}"
local delete_response=$(curl \
-s -o /dev/null -w "%{http_code}" \
-H "Authorization: token $GITHUB_TOKEN" \
-X DELETE "https://api.github.com/repos/$repo/labels/$label")
if [[ $delete_response =~ 2.. ]]; then
Logger__success "Deleted Label: $label"
else
Logger__warning "Failed to delete label: $label"
fi
fi
fi
done
}
##################################################
# Creates a single label on a repository
#
# @param $1: Repository name
# @param $2: Label name
# @param $3: Label color
#
# @returns: 'failure' if we failed to add / update the label
# 'added' if we successfully added the label
# 'updated' if we successfully updated the label
##################################################
Github__create_single_label(){
# Try to create via POST
local post_response=$(curl \
-s -o /dev/null -w "%{http_code}" \
-H "Authorization: token $GITHUB_TOKEN" \
-X POST "https://api.github.com/repos/$1/labels" \
-d "{\"name\":\"$2\", \"color\":\"$3\"}")
# Checking if POST worked
if [[ $post_response =~ 2.. ]]; then
echo "added"
return
fi
# Update via PATCH
local patch_response=$(curl \
-s -o /dev/null -w "%{http_code}" \
-H "Authorization: token $GITHUB_TOKEN" \
-X PATCH "https://api.github.com/repos/$1/labels/$2" \
-d "{\"name\":\"$2\", \"color\":\"$3\"}")
# Checking if PATCH worked
if [[ $patch_response =~ 2.. ]]; then
echo "updated"
else
echo "failure"
fi
}
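# Example call (hypothetical repo, label, and color): Github__create_single_label "someuser/somerepo" "bug" "fc2929"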
| carrot/ash-github | lib/labels.sh | Shell | mit | 6,674 |
#!/bin/sh
#
# Copyright (c) 2014 Christian Couder
# MIT Licensed; see the LICENSE file in this repository.
#
test_description="Test mount command"
. lib/test-lib.sh
# if in travis CI, dont test mount (no fuse)
if ! test_have_prereq FUSE; then
skip_all='skipping mount tests, fuse not available'
test_done
fi
test_init_ipfs
test_launch_ipfs_daemon
# test mount failure before mounting properly.
test_expect_success "'ipfs mount' fails when there is no mount dir" '
test_must_fail ipfs mount -f=not_ipfs -n=not_ipns >output 2>output.err
'
test_expect_success "'ipfs mount' output looks good" '
test_must_be_empty output &&
test_should_contain "not_ipns\|not_ipfs" output.err
'
# now mount properly, and keep going
test_mount_ipfs
test_expect_success "mount directories cannot be removed while active" '
test_must_fail rmdir ipfs ipns 2>/dev/null
'
test_kill_ipfs_daemon
test_expect_success "mount directories can be removed after shutdown" '
rmdir ipfs ipns
'
test_done
| srikumarks/go-ipfs | test/sharness/t0030-mount.sh | Shell | mit | 989 |
#!/bin/bash
#
# Run this script from a logged in user - with the user you want to run the Xcode Unit Tests with!
#
# For launchctr related configs check the _launchctl_common.sh file
#
THIS_SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "${THIS_SCRIPT_DIR}"
source _launchctl_common.sh
echo " (i) curr_user_lib_launch_agents_dir: ${curr_user_lib_launch_agents_dir}"
mkdir -p "${curr_user_lib_launch_agents_dir}"
if [ $? -ne 0 ]; then
echo " [!] Failed to create the required LaunchAgents dir at ${curr_user_lib_launch_agents_dir}!"
exit 1
fi
echo " (i) server_full_path: ${server_full_path}"
if [ ! -f "${server_full_path}" ]; then
echo " [!] Server full path is invalid - server not found at path: ${server_full_path}"
exit 1
fi
echo " (i) server_logs_dir_path: ${server_logs_dir_path}"
echo " (i) server_log_file_path: ${server_log_file_path}"
mkdir -p "${server_logs_dir_path}"
if [ $? -ne 0 ]; then
echo " [!] Failed to create the required 'logs' dir at ${server_logs_dir_path}!"
exit 1
fi
echo " (i) server_plist_path: ${server_plist_path}"
cat >"${server_plist_path}" <<EOL
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>${server_label_id}</string>
<key>ProgramArguments</key>
<array>
<string>${server_full_path}</string>
</array>
<key>StandardOutPath</key>
<string>${server_log_file_path}</string>
<key>StandardErrorPath</key>
<string>${server_log_file_path}</string>
<key>KeepAlive</key>
<true/>
</dict>
</plist>
EOL
if [ $? -ne 0 ]; then
echo " [!] Failed to write LaunchAgent plist to path: ${server_plist_path}"
exit 1
fi
echo " (i) LaunchAgent plist content:"
cat "${server_plist_path}"
if [ $? -ne 0 ]; then
echo " [!] Failed to read LaunchAgent plist from path: ${server_plist_path}"
exit 1
fi
echo
echo "==> INSTALL SUCCESS"
echo " * LaunchAgent plist saved to path: ${server_plist_path}"
echo " * You can start (or restart) the server with the reload_server_with_launchctl.sh script"
echo " * You can start the server with: launchctl load "${server_plist_path}""
echo " * You can stop the server with: launchctl unload "${server_plist_path}""
| bitrise-io/xcodebuild-unittest-miniserver | _scripts/install_launchctl_plist_for_current_user.sh | Shell | mit | 2,303 |
#!/bin/bash
# Script Name: AtoMiC SickGear Updater
source "$SCRIPTPATH/inc/commons.sh"
source "$SCRIPTPATH/inc/header.sh"
echo -e "${GREEN}AtoMiC $APPTITLE Update Script$ENDCOLOR"
source "$SCRIPTPATH/inc/pause.sh"
if DoesAppFolderExist; then
source "$SCRIPTPATH/inc/app-stop.sh"
source "$SCRIPTPATH/utils/python/python-installer.sh"
source "$SCRIPTPATH/sickgear/sickgear-constants.sh"
source "$SCRIPTPATH/inc/app-install-deps.sh"
source "$SCRIPTPATH/inc/app-git-update.sh"
source "$SCRIPTPATH/inc/app-start.sh"
source "$SCRIPTPATH/inc/app-update-confirmation.sh"
source "$SCRIPTPATH/inc/thankyou.sh"
fi
source "$SCRIPTPATH/inc/exit.sh"
| htpcBeginner/AtoMiC-ToolKit | sickgear/sickgear-update.sh | Shell | mit | 670 |
#bundle: group files into distribution package
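# Usage (illustrative): sh bundle.sh f1 f2 > files.bundle; later 'sh files.bundle' recreates f1 and f2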
echo '# To unbundle, sh this file'
for i
do
echo "echo $i 1>&2"
echo "cat >$i <<'End of $i'"
cat $i
echo "End of $i"
done
| yimng/LEARN | unixenv/bundle.sh | Shell | mit | 175 |
#!/usr/bin/env bash
## Test for issue1636 - primitive match type: hunk
##
## Copyright (C) Kamil Dworakowski
##
## Permission is hereby granted, free of charge, to any person
## obtaining a copy of this software and associated documentation
## files (the "Software"), to deal in the Software without
## restriction, including without limitation the rights to use, copy,
## modify, merge, publish, distribute, sublicense, and/or sell copies
## of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
## BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
## ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
## CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
. lib # Load some portability helpers.
rm -rf R
darcs init --repo R # Create our test repos.
cd R
echo 'first line' > f
darcs record -lam 'one'
echo 'second line' >> f
darcs record -am 'two'
darcs changes --match 'hunk first' > log
grep one log
not grep two log
darcs changes --match 'hunk line' > log
grep one log
grep two log
darcs changes --match 'hunk one' > log
not grep one log
# test searching for lines in the remove part of the hunk
echo 'first line' > f
darcs record -am 'three'
darcs changes --match 'hunk second' > log
grep three log
grep two log
not grep first log
| DavidAlphaFox/darcs | tests/issue1636-match-hunk.sh | Shell | gpl-2.0 | 1,806 |
#!/usr/bin/env bash
# vim: set sw=4 et sts=4 tw=80 :
# Copyright 2009 Ali Polatel <polatel@gmail.com>
# Distributed under the terms of the GNU General Public License v2
. test-lib.bash
clean_files+=( "arnold.layne.hard" )
# To make sure links are handled correctly, add see.emily.play to
# SYDBOX_WRITE as we're creating a hard link to that file.
export SYDBOX_WRITE="$cwd"/see.emily.play
start_test "t06-link-deny"
sydbox -- ./t06_link
if [[ 0 == $? ]]; then
die "failed to deny link"
fi
end_test
start_test "t06-link-write"
SYDBOX_WRITE="${cwd}" sydbox -- ./t06_link
if [[ 0 != $? ]]; then
die "write didn't allow access"
elif [[ ! -f arnold.layne.hard ]]; then
die "file doesn't exist, write didn't allow access"
fi
end_test
| larsuhartmann/sydbox | tests/progtests/t06-link.bash | Shell | gpl-2.0 | 745 |
#!/bin/bash
# Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
. regression.core.sh
create_mode="false"
echo ""
do_test stopfail2 "Stop Failed - Block "
do_test stopfail3 "Stop Failed - Ignore (1 node)"
do_test stopfail4 "Stop Failed - Ignore (2 node)"
do_test stopfail1 "Stop Failed - STONITH (block)"
do_test stopfail5 "Stop Failed - STONITH (pass)"
do_test stopfail6 "Stop Failed - STONITH (pass2)"
do_test stopfail7 "Stop Failed - STONITH (should fail)"
create_mode="true"
test_results
| ClusterLabs/pacemaker-1.0 | pengine/stonith.sh | Shell | gpl-2.0 | 1,229 |
#!/bin/bash
PG_VERSION=$(psql --version 2> /dev/null | tr -s ' ' | cut -d' ' -f 3)
DS_VERSION=$(dachs --version 2> /dev/null | tr -d '(-)' | cut -d' ' -f 3)
DS_VERSION=${DS_VERSION:-"2.x"}
DS_PORT=$(cat $GAVOSETTINGS | grep 'serverPort' | cut -d' ' -f2)
echo ""
echo "=========================================================="
echo "This image provides dachs & postgresql bundled together,"
echo "same scenario as you would have if installed the package,"
echo "gavodachs-server, on your own linux box"
echo ""
echo "To start DaCHS (and Postgres), type:"
echo "--------------------"
echo " $ /dachs.sh start"
echo "--------------------"
echo "It is just a convenience script to start/stop the services."
echo "See its '--help' for further information about its usage."
echo ""
echo ""
echo "After starting DaCHS, you should see it working at:"
echo " - http://localhost[:$DS_PORT]"
echo ""
echo ""
echo "Use 'gavo/dachs' as usual:"
echo "--------------------"
echo " $ dachs --help"
echo "--------------------"
echo "DaCHS documents are available at:"
echo " - http://dachs-doc.rtfd.io/tutorial.html"
echo ""
echo ""
echo "DaCHS version: $DS_VERSION"
echo "PSQL version: $PG_VERSION"
echo "=========================================================="
echo ""
| chbrandt/dachs | dockerfiles/dachs/bin/help.sh | Shell | gpl-2.0 | 1,261 |
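# Point Meteor at a local MongoDB instance (including its oplog, used for reactive queries) and start with the bundled settings file.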
export DB_SERVER=127.0.0.1
export MONGO_URL="mongodb://$DB_SERVER/steedos"
export MONGO_OPLOG_URL="mongodb://$DB_SERVER/local"
export MULTIPLE_INSTANCES_COLLECTION_NAME=workflow_instances
export ROOT_URL=http://127.0.0.1:3000/
meteor run --settings settings.json
| steedos/apps | start.sh | Shell | gpl-2.0 | 264 |
# ----------------------------------------------------------------------------
# Converts all letters to lowercase, including accented ones.
# Usage: zzminusculas [text]
# Ex.: zzminusculas NÃO ESTOU GRITANDO # via arguments
# echo NÃO ESTOU GRITANDO | zzminusculas # via STDIN
#
# Author: Aurelio Marinho Jargas, www.aurelio.net
# Since: 2003-06-12
# Version: 2
# Requirements: zzzz zztool
# Tags: text, conversion
# ----------------------------------------------------------------------------
zzminusculas ()
{
zzzz -h minusculas "$1" && return
    # Data via STDIN or arguments
zztool multi_stdin "$@" |
sed '
y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/
y/ÀÁÂÃÄÅÈÉÊËÌÍÎÏÒÓÔÕÖÙÚÛÜÇÑ/àáâãäåèéêëìíîïòóôõöùúûüçñ/'
}
| funcoeszz/funcoeszz | zz/zzminusculas.sh | Shell | gpl-2.0 | 813 |
#!/usr/bin/env bash
# See https://raw.githubusercontent.com/wp-cli/scaffold-command/master/templates/install-wp-tests.sh
if [ $# -lt 3 ]; then
echo "usage: $0 <db-name> <db-user> <db-pass> [db-host] [wp-version] [skip-database-creation]"
exit 1
fi
DB_NAME=$1
DB_USER=$2
DB_PASS=$3
DB_HOST=${4-localhost}
WP_VERSION=${5-latest}
SKIP_DB_CREATE=${6-false}
TMPDIR=${TMPDIR-/tmp}
TMPDIR=$(echo $TMPDIR | sed -e "s/\/$//")
WP_TESTS_DIR=${WP_TESTS_DIR-$TMPDIR/wordpress-tests-lib}
WP_CORE_DIR=${WP_CORE_DIR-$TMPDIR/wordpress/}
download() {
if [ `which curl` ]; then
#curl -s "$1" > "$2";
# WordPress.org seems to block requests without proper user agent.
curl -H "User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (K HTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36" -s "$1" > "$2";
elif [ `which wget` ]; then
wget -nv -O "$2" "$1"
fi
}
if [[ $WP_VERSION =~ ^[0-9]+\.[0-9]+\-(beta|RC)[0-9]+$ ]]; then
WP_BRANCH=${WP_VERSION%\-*}
WP_TESTS_TAG="branches/$WP_BRANCH"
elif [[ $WP_VERSION =~ ^[0-9]+\.[0-9]+$ ]]; then
WP_TESTS_TAG="branches/$WP_VERSION"
elif [[ $WP_VERSION =~ [0-9]+\.[0-9]+\.[0-9]+ ]]; then
if [[ $WP_VERSION =~ [0-9]+\.[0-9]+\.[0] ]]; then
# version x.x.0 means the first release of the major version, so strip off the .0 and download version x.x
WP_TESTS_TAG="tags/${WP_VERSION%??}"
else
WP_TESTS_TAG="tags/$WP_VERSION"
fi
elif [[ $WP_VERSION == 'nightly' || $WP_VERSION == 'trunk' ]]; then
WP_TESTS_TAG="trunk"
else
# http serves a single offer, whereas https serves multiple. we only want one
download http://api.wordpress.org/core/version-check/1.7/ /tmp/wp-latest.json
grep '[0-9]+\.[0-9]+(\.[0-9]+)?' /tmp/wp-latest.json
LATEST_VERSION=$(grep -o '"version":"[^"]*' /tmp/wp-latest.json | sed 's/"version":"//')
if [[ -z "$LATEST_VERSION" ]]; then
echo "Latest WordPress version could not be found"
exit 1
fi
WP_TESTS_TAG="tags/$LATEST_VERSION"
fi
set -ex
install_wp() {
if [ -d $WP_CORE_DIR ]; then
return;
fi
mkdir -p $WP_CORE_DIR
if [[ $WP_VERSION == 'nightly' || $WP_VERSION == 'trunk' ]]; then
mkdir -p $TMPDIR/wordpress-trunk
svn export --quiet https://core.svn.wordpress.org/trunk $TMPDIR/wordpress-trunk/wordpress
mv $TMPDIR/wordpress-trunk/wordpress/* $WP_CORE_DIR
else
if [ $WP_VERSION == 'latest' ]; then
local ARCHIVE_NAME='latest'
elif [[ $WP_VERSION =~ [0-9]+\.[0-9]+ ]]; then
# https serves multiple offers, whereas http serves single.
download https://api.wordpress.org/core/version-check/1.7/ $TMPDIR/wp-latest.json
if [[ $WP_VERSION =~ [0-9]+\.[0-9]+\.[0] ]]; then
# version x.x.0 means the first release of the major version, so strip off the .0 and download version x.x
LATEST_VERSION=${WP_VERSION%??}
else
# otherwise, scan the releases and get the most up to date minor version of the major release
local VERSION_ESCAPED=`echo $WP_VERSION | sed 's/\./\\\\./g'`
LATEST_VERSION=$(grep -o '"version":"'$VERSION_ESCAPED'[^"]*' $TMPDIR/wp-latest.json | sed 's/"version":"//' | head -1)
fi
if [[ -z "$LATEST_VERSION" ]]; then
local ARCHIVE_NAME="wordpress-$WP_VERSION"
else
local ARCHIVE_NAME="wordpress-$LATEST_VERSION"
fi
else
local ARCHIVE_NAME="wordpress-$WP_VERSION"
fi
download https://wordpress.org/${ARCHIVE_NAME}.tar.gz $TMPDIR/wordpress.tar.gz
tar --strip-components=1 -zxmf $TMPDIR/wordpress.tar.gz -C $WP_CORE_DIR
fi
download https://raw.github.com/markoheijnen/wp-mysqli/master/db.php $WP_CORE_DIR/wp-content/db.php
}
install_test_suite() {
# portable in-place argument for both GNU sed and Mac OSX sed
if [[ $(uname -s) == 'Darwin' ]]; then
local ioption='-i.bak'
else
local ioption='-i'
fi
# set up testing suite if it doesn't yet exist
if [ ! -d $WP_TESTS_DIR ]; then
# set up testing suite
mkdir -p $WP_TESTS_DIR
svn export --quiet --ignore-externals https://develop.svn.wordpress.org/${WP_TESTS_TAG}/tests/phpunit/includes/ $WP_TESTS_DIR/includes
svn export --quiet --ignore-externals https://develop.svn.wordpress.org/${WP_TESTS_TAG}/tests/phpunit/data/ $WP_TESTS_DIR/data
fi
if [ ! -f wp-tests-config.php ]; then
download https://develop.svn.wordpress.org/${WP_TESTS_TAG}/wp-tests-config-sample.php "$WP_TESTS_DIR"/wp-tests-config.php
# remove all forward slashes in the end
WP_CORE_DIR=$(echo $WP_CORE_DIR | sed "s:/\+$::")
sed $ioption "s:dirname( __FILE__ ) . '/src/':'$WP_CORE_DIR/':" "$WP_TESTS_DIR"/wp-tests-config.php
sed $ioption "s/youremptytestdbnamehere/$DB_NAME/" "$WP_TESTS_DIR"/wp-tests-config.php
sed $ioption "s/yourusernamehere/$DB_USER/" "$WP_TESTS_DIR"/wp-tests-config.php
sed $ioption "s/yourpasswordhere/$DB_PASS/" "$WP_TESTS_DIR"/wp-tests-config.php
sed $ioption "s|localhost|${DB_HOST}|" "$WP_TESTS_DIR"/wp-tests-config.php
fi
}
install_db() {
if [ ${SKIP_DB_CREATE} = "true" ]; then
return 0
fi
# parse DB_HOST for port or socket references
local PARTS=(${DB_HOST//\:/ })
local DB_HOSTNAME=${PARTS[0]};
local DB_SOCK_OR_PORT=${PARTS[1]};
local EXTRA=""
if ! [ -z $DB_HOSTNAME ] ; then
if [ $(echo $DB_SOCK_OR_PORT | grep -e '^[0-9]\{1,\}$') ]; then
EXTRA=" --host=$DB_HOSTNAME --port=$DB_SOCK_OR_PORT --protocol=tcp"
elif ! [ -z $DB_SOCK_OR_PORT ] ; then
EXTRA=" --socket=$DB_SOCK_OR_PORT"
elif ! [ -z $DB_HOSTNAME ] ; then
EXTRA=" --host=$DB_HOSTNAME --protocol=tcp"
fi
fi
# create database
mysqladmin create $DB_NAME --user="$DB_USER" --password="$DB_PASS"$EXTRA
}
install_wp
install_test_suite
install_db
| swissspidy/preferred-languages | bin/install-wp-tests.sh | Shell | gpl-2.0 | 5,558 |
#!/bin/sh
#
# Copyright (C) 2009-2011 OpenWrt.org
#
AR71XX_BOARD_NAME=
AR71XX_MODEL=
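# Dump <size> bytes starting at <offset> from the named mtd partition, printed with the given hexdump format string.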
ar71xx_get_mtd_offset_size_format() {
local mtd="$1"
local offset="$2"
local size="$3"
local format="$4"
local dev
dev=$(find_mtd_part $mtd)
[ -z "$dev" ] && return
dd if=$dev bs=1 skip=$offset count=$size 2>/dev/null | hexdump -v -e "1/1 \"$format\""
}
ar71xx_get_mtd_part_magic() {
local mtd="$1"
ar71xx_get_mtd_offset_size_format "$mtd" 0 4 %02x
}
wndr3700_board_detect() {
local machine="$1"
local magic
local name
name="wndr3700"
magic="$(ar71xx_get_mtd_part_magic firmware)"
case $magic in
"33373030")
machine="NETGEAR WNDR3700"
;;
"33373031")
local model
model=$(ar71xx_get_mtd_offset_size_format art 56 10 %c)
if [ -z "$model" ] || [ "$model" = $'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff' ]; then
machine="NETGEAR WNDR3700v2"
elif [ -z "$model" ] || [ "$model" = $'\xff\xff\xff\xff\xff\xff\xff\xff\xffN' ]; then
machine="NETGEAR WNDRMAC"
else
machine="NETGEAR $model"
fi
;;
esac
AR71XX_BOARD_NAME="$name"
AR71XX_MODEL="$machine"
}
cybertan_get_hw_magic() {
local part
part=$(find_mtd_part firmware)
[ -z "$part" ] && return 1
dd bs=8 count=1 skip=0 if=$part 2>/dev/null | hexdump -v -n 8 -e '1/1 "%02x"'
}
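# TP-Link firmware headers store the hardware id at byte offset 0x40 and the "mid" at 0x44; both are read below as 4-byte hex strings.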
tplink_get_hwid() {
local part
part=$(find_mtd_part firmware)
[ -z "$part" ] && return 1
dd if=$part bs=4 count=1 skip=16 2>/dev/null | hexdump -v -n 4 -e '1/1 "%02x"'
}
tplink_get_mid() {
local part
part=$(find_mtd_part firmware)
[ -z "$part" ] && return 1
dd if=$part bs=4 count=1 skip=17 2>/dev/null | hexdump -v -n 4 -e '1/1 "%02x"'
}
tplink_board_detect() {
local model="$1"
local hwid
local hwver
hwid=$(tplink_get_hwid)
mid=$(tplink_get_mid)
hwver=${hwid:6:2}
hwver="v${hwver#0}"
case "$hwid" in
"015000"*)
model="EasyLink EL-M150"
;;
"015300"*)
model="EasyLink EL-MINI"
;;
"3C0001"*)
model="OOLITE"
;;
"070300"*)
model="TP-Link TL-WR703N"
;;
"071000"*)
model="TP-Link TL-WR710N"
;;
"072001"*)
model="TP-Link TL-WR720N"
;;
"070100"*)
model="TP-Link TL-WA701N/ND"
;;
"073000"*)
model="TP-Link TL-WA730RE"
;;
"074000"*)
model="TP-Link TL-WR740N/ND"
;;
"074100"*)
model="TP-Link TL-WR741N/ND"
;;
"074300"*)
model="TP-Link TL-WR743N/ND"
;;
"075000"*)
model="TP-Link TL-WA750RE"
;;
"751000"*)
model="TP-Link TL-WA7510N"
;;
"080100"*)
model="TP-Link TL-WA801N/ND"
;;
"083000"*)
model="TP-Link TL-WA830RE"
;;
"084100"*)
model="TP-Link TL-WR841N/ND"
;;
"084200"*)
model="TP-Link TL-WR842N/ND"
;;
"085000"*)
model="TP-Link TL-WA850RE"
;;
"090100"*)
model="TP-Link TL-WA901N/ND"
;;
"094100"*)
if [ "$hwid" == "09410002" -a "$mid" == "00420001" ]; then
model="Rosewill RNX-N360RT"
hwver=""
else
model="TP-Link TL-WR941N/ND"
fi
;;
"104100"*)
model="TP-Link TL-WR1041N/ND"
;;
"104300"*)
model="TP-Link TL-WR1043N/ND"
;;
"254300"*)
model="TP-Link TL-WR2543N/ND"
;;
"001001"*)
model="TP-Link TL-MR10U"
;;
"001101"*)
model="TP-Link TL-MR11U"
;;
"001301"*)
model="TP-Link TL-MR13U"
;;
"302000"*)
model="TP-Link TL-MR3020"
;;
"304000"*)
model="TP-Link TL-MR3040"
;;
"322000"*)
model="TP-Link TL-MR3220"
;;
"342000"*)
model="TP-Link TL-MR3420"
;;
"350000"*)
model="TP-Link TL-WDR3500"
;;
"360000"*)
model="TP-Link TL-WDR3600"
;;
"430000"*)
model="TP-Link TL-WDR4300"
;;
"430080"*)
iw reg set IL
model="TP-Link TL-WDR4300 (IL)"
;;
"431000"*)
model="TP-Link TL-WDR4310"
;;
"49000002")
model="TP-Link TL-WDR4900"
;;
"453000"*)
model="MERCURY MW4530R"
;;
*)
hwver=""
;;
esac
AR71XX_MODEL="$model $hwver"
}
ar71xx_board_detect() {
local machine
local name
machine=$(awk 'BEGIN{FS="[ \t]+:[ \t]"} /machine/ {print $2}' /proc/cpuinfo)
case "$machine" in
*"Oolite V1.0")
name="oolite"
;;
*"AirRouter")
name="airrouter"
;;
*"ALFA Network AP96")
name="alfa-ap96"
;;
*"ALFA Network N2/N5")
name="alfa-nx"
;;
*ALL0258N)
name="all0258n"
;;
*ALL0305)
name="all0305"
;;
*ALL0315N)
name="all0315n"
;;
*AP113)
name="ap113"
;;
*AP121)
name="ap121"
;;
*AP121-MINI)
name="ap121-mini"
;;
*"AP132 reference board")
name="ap132"
;;
*"AP136-010 reference board")
name="ap136-010"
;;
*"AP136-020 reference board")
name="ap136-020"
;;
*"AP135-020 reference board")
name="ap135-020"
;;
*AP81)
name="ap81"
;;
*AP83)
name="ap83"
;;
*"Archer C7")
name="archer-c7"
;;
*"Atheros AP96")
name="ap96"
;;
*AW-NR580)
name="aw-nr580"
;;
*CAP4200AG)
name="cap4200ag"
;;
*"DB120 reference board")
name="db120"
;;
*"DIR-505 rev. A1")
name="dir-505-a1"
;;
*"DIR-600 rev. A1")
name="dir-600-a1"
;;
*"DIR-615 rev. E1")
name="dir-615-e1"
;;
*"DIR-615 rev. E4")
name="dir-615-e4"
;;
*"DIR-825 rev. B1")
name="dir-825-b1"
;;
*"DIR-825 rev. C1")
name="dir-825-c1"
;;
*"DIR-835 rev. A1")
name="dir-835-a1"
;;
*"Dragino v2")
name="dragino2"
;;
*EAP7660D)
name="eap7660d"
;;
*EL-M150)
name="el-m150"
;;
*EL-MINI)
name="el-mini"
;;
*JA76PF)
name="ja76pf"
;;
*JA76PF2)
name="ja76pf2"
;;
*"Bullet M")
name="bullet-m"
;;
*"Nanostation M")
name="nanostation-m"
;;
*JWAP003)
name="jwap003"
;;
*"Hornet-UB")
local size
size=$(awk '/firmware/ { print $2 }' /proc/mtd)
if [ "x$size" = "x00790000" ]; then
name="hornet-ub"
fi
if [ "x$size" = "x00f90000" ]; then
name="hornet-ub-x2"
fi
;;
*LS-SR71)
name="ls-sr71"
;;
*MR600v2)
name="mr600v2"
;;
*MR600)
name="mr600"
;;
*"My Net N600")
name="mynet-n600"
;;
*"My Net N750")
name="mynet-n750"
;;
*"WD My Net Wi-Fi Range Extender")
name="mynet-rext"
;;
*MZK-W04NU)
name="mzk-w04nu"
;;
*MZK-W300NH)
name="mzk-w300nh"
;;
*"NBG460N/550N/550NH")
name="nbg460n_550n_550nh"
;;
*"Zyxel NBG6716")
name="nbg6716"
;;
*OM2P)
name="om2p"
;;
*OM2Pv2)
name="om2pv2"
;;
*"OM2P HS")
name="om2p-hs"
;;
*"OM2P HSv2")
name="om2p-hsv2"
;;
*"OM2P LC")
name="om2p-lc"
;;
*PB42)
name="pb42"
;;
*"PB44 reference board")
name="pb44"
;;
*PB92)
name="pb92"
;;
*"RouterBOARD 411/A/AH")
name="rb-411"
;;
*"RouterBOARD 411U")
name="rb-411u"
;;
*"RouterBOARD 433/AH")
name="rb-433"
;;
*"RouterBOARD 433UAH")
name="rb-433u"
;;
*"RouterBOARD 435G")
name="rb-435g"
;;
*"RouterBOARD 450")
name="rb-450"
;;
*"RouterBOARD 450G")
name="rb-450g"
;;
*"RouterBOARD 493/AH")
name="rb-493"
;;
*"RouterBOARD 493G")
name="rb-493g"
;;
*"RouterBOARD 750")
name="rb-750"
;;
*"RouterBOARD 750GL")
name="rb-750gl"
;;
*"RouterBOARD 751")
name="rb-751"
;;
*"RouterBOARD 751G")
name="rb-751g"
;;
*"RouterBOARD 911G-2HPnD")
name="rb-911g-2hpnd"
;;
*"RouterBOARD 911G-5HPnD")
name="rb-911g-5hpnd"
;;
*"RouterBOARD 912UAG-2HPnD")
name="rb-912uag-2hpnd"
;;
*"RouterBOARD 912UAG-5HPnD")
name="rb-912uag-5hpnd"
;;
*"RouterBOARD 951G-2HnD")
name="rb-951g-2hnd"
;;
*"RouterBOARD 951Ui-2HnD")
name="rb-951ui-2hnd"
;;
*"RouterBOARD 2011L")
name="rb-2011l"
;;
*"RouterBOARD 2011UAS")
name="rb-2011uas"
;;
*"RouterBOARD 2011UAS-2HnD")
name="rb-2011uas-2hnd"
;;
*"Rocket M")
name="rocket-m"
;;
*RouterStation)
name="routerstation"
;;
*"RouterStation Pro")
name="routerstation-pro"
;;
*RW2458N)
name="rw2458n"
;;
*TEW-632BRP)
name="tew-632brp"
;;
*TEW-673GRU)
name="tew-673gru"
;;
*TEW-712BR)
name="tew-712br"
;;
*TEW-732BR)
name="tew-732br"
;;
*"TL-WR1041N v2")
name="tl-wr1041n-v2"
;;
*TL-WR1043ND)
name="tl-wr1043nd"
;;
*"TL-WR1043ND v2")
name="tl-wr1043nd-v2"
;;
*TL-WR2543N*)
name="tl-wr2543n"
;;
*"DIR-615 rev. C1")
name="dir-615-c1"
;;
*TL-MR3020)
name="tl-mr3020"
;;
*TL-MR3040)
name="tl-mr3040"
;;
*"TL-MR3040 v2")
name="tl-mr3040-v2"
;;
*TL-MR3220)
name="tl-mr3220"
;;
*"TL-MR3220 v2")
name="tl-mr3220-v2"
;;
*TL-MR3420)
name="tl-mr3420"
;;
*"TL-MR3420 v2")
name="tl-mr3420-v2"
;;
*TL-WA750RE)
name="tl-wa750re"
;;
*"TL-WA7510N v1")
name="tl-wa7510n"
;;
*TL-WA850RE)
name="tl-wa850re"
;;
*"TL-WA801ND v2")
name="tl-wa801nd-v2"
;;
*TL-WA901ND)
name="tl-wa901nd"
;;
*"TL-WA901ND v2")
name="tl-wa901nd-v2"
;;
*"TL-WA901ND v3")
name="tl-wa901nd-v3"
;;
*"TL-WDR3500")
name="tl-wdr3500"
;;
*"TL-WDR3600/4300/4310")
name="tl-wdr4300"
;;
*"TL-WDR4900 v2")
name="tl-wdr4900-v2"
;;
*TL-WR741ND)
name="tl-wr741nd"
;;
*"TL-WR741ND v4")
name="tl-wr741nd-v4"
;;
*"TL-WR841N v1")
name="tl-wr841n-v1"
;;
*"TL-WR841N/ND v7")
name="tl-wr841n-v7"
;;
*"TL-WR841N/ND v8")
name="tl-wr841n-v8"
;;
*"TL-WR841N/ND v9")
name="tl-wr841n-v9"
;;
*"TL-WR842N/ND v2")
name="tl-wr842n-v2"
;;
*TL-WR941ND)
name="tl-wr941nd"
;;
*"TL-WR703N v1")
name="tl-wr703n"
;;
*"TL-WR710N v1")
name="tl-wr710n"
;;
*"TL-WR720N v3")
name="tl-wr720n-v3"
;;
*"TL-MR10U")
name="tl-mr10u"
;;
*"TL-MR11U")
name="tl-mr11u"
;;
*"TL-MR13U")
name="tl-mr13u"
;;
*UniFi)
name="unifi"
;;
*"UniFi AP Pro")
name="uap-pro"
;;
*WHR-G301N)
name="whr-g301n"
;;
*WHR-HP-GN)
name="whr-hp-gn"
;;
*WLAE-AG300N)
name="wlae-ag300n"
;;
*"UniFiAP Outdoor")
name="unifi-outdoor"
;;
*WP543)
name="wp543"
;;
*WPE72)
name="wpe72"
;;
*WNDAP360)
name="wndap360"
;;
*"WNDR3700/WNDR3800/WNDRMAC")
wndr3700_board_detect "$machine"
;;
*"WNDR4300")
name="wndr4300"
;;
*"WNR2000 V3")
name="wnr2000-v3"
;;
*WNR2000)
name="wnr2000"
;;
*WNR2200)
name="wnr2200"
;;
*"WNR612 V2")
name="wnr612-v2"
;;
*WRT160NL)
name="wrt160nl"
;;
*WRT400N)
name="wrt400n"
;;
*"WZR-HP-AG300H/WZR-600DHP")
name="wzr-hp-ag300h"
;;
*WZR-HP-G300NH)
name="wzr-hp-g300nh"
;;
*WZR-HP-G450H)
name="wzr-hp-g450h"
;;
*WZR-HP-G300NH2)
name="wzr-hp-g300nh2"
;;
*WHR-HP-G300N)
name="whr-hp-g300n"
;;
*ZCN-1523H-2)
name="zcn-1523h-2"
;;
*ZCN-1523H-5)
name="zcn-1523h-5"
;;
*EmbWir-Dorin)
name="ew-dorin"
;;
*EmbWir-Dorin-Router)
name="ew-dorin-router"
;;
"8devices Carambola2"*)
name="carambola2"
;;
*"Sitecom WLR-8100")
name="wlr8100"
;;
*"BHU BXU2000n-2 rev. A1")
name="bxu2000n-2-a1"
;;
*"HiWiFi HC6361")
name="hiwifi-hc6361"
;;
esac
case "$machine" in
*TL-WR* | *TL-WA* | *TL-MR* | *TL-WD*)
tplink_board_detect "$machine"
;;
esac
[ -z "$name" ] && name="unknown"
[ -z "$AR71XX_BOARD_NAME" ] && AR71XX_BOARD_NAME="$name"
[ -z "$AR71XX_MODEL" ] && AR71XX_MODEL="$machine"
[ -e "/tmp/sysinfo/" ] || mkdir -p "/tmp/sysinfo/"
echo "$AR71XX_BOARD_NAME" > /tmp/sysinfo/board_name
echo "$AR71XX_MODEL" > /tmp/sysinfo/model
}
ar71xx_board_name() {
local name
[ -f /tmp/sysinfo/board_name ] && name=$(cat /tmp/sysinfo/board_name)
[ -z "$name" ] && name="unknown"
echo "$name"
}
| fevenor/openwrt | target/linux/ar71xx/base-files/lib/ar71xx.sh | Shell | gpl-2.0 | 10,971 |
#!/bin/bash
n=0
until [ $n -ge 5 ]
do
  "$@" && break  # substitute your command here
  n=$((n+1))
echo "Retrying ($n/5): sleeping for 15 ..."
sleep 15
done
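# Illustrative usage (the command and URL below are hypothetical):
#   ./retry-command.sh curl -sfo /dev/null http://localhost:8080/health
# The wrapped command is retried up to 5 times, sleeping 15 seconds between
# attempts, and the loop exits early on the first success.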
| icgc-dcc/dcc-storage | docker/retry-command.sh | Shell | gpl-3.0 | 162 |
#
# restore data with Galaxy
#
# verify that we have a backupset
[ "$GALAXY10_BACKUPSET" ]
StopIfError "Galaxy Backup Set not defined [GALAXY10_BACKUPSET=]."
# create argument file
cat <<EOF >$TMP_DIR/galaxy.restore.options
$(test -r "$GALAXY10_Q_ARGUMENTFILE" && cat "$GALAXY10_Q_ARGUMENTFILE")
[sourceclient]
$HOSTNAME
[level]
1
[options]
QR_PRESERVE_LEVEL
QR_DO_NOT_OVERWRITE_FILE_ON_DISK
$GALAXY10_PIT
[dataagent]
Q_LINUX_FS
[backupset]
$GALAXY10_BACKUPSET
[sourcepaths]
/
[destinationpath]
$TARGET_FS_ROOT
EOF
if [ "x$GALAXY10_ZEIT" != "x" ]; then
cat <<EOF >>$TMP_DIR/galaxy.restore.options
[browseto]
$GALAXY10_ZEIT
EOF
fi
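# For illustration only: with hypothetical values HOSTNAME=client01,
# GALAXY10_BACKUPSET=DefaultBackupSet and TARGET_FS_ROOT=/mnt/local (and no
# extra argument file, PIT or browse time set), the generated file would
# contain roughly:
#   [sourceclient]
#   client01
#   [level]
#   1
#   [options]
#   QR_PRESERVE_LEVEL
#   QR_DO_NOT_OVERWRITE_FILE_ON_DISK
#   [dataagent]
#   Q_LINUX_FS
#   [backupset]
#   DefaultBackupSet
#   [sourcepaths]
#   /
#   [destinationpath]
#   /mnt/local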
# initialize variable
jobstatus=Unknown
if jobid=$(qoperation restore -af $TMP_DIR/galaxy.restore.options) ; then
    jobid=${jobid// /} # remove blanks
LogPrint "Restoring data with Galaxy (job $jobid)"
while true ; do
# output of qlist job -co s -j ## :
# STATUS
# ------
# Pending
# the array gets rid of the line breaks :-)
jobdetails=( $(qlist job -co s -j $jobid) )
StopIfError "Could not receive job details. Check log file."
jobstatus="${jobdetails[2]}"
# stop waiting if the job reached a final status
case "$jobstatus" in
?omplet*)
echo
LogPrint "Restore completed successfully."
break
;;
?uspend*|*end*|?unn*|?ait*)
printf "\r%-79s" "$(date +"%Y-%m-%d %H:%M:%S") job is $jobstatus"
;;
?ail*|?ill*)
echo
Error "Restore job failed or was killed, aborting recovery."
;;
*)
echo
Error "Restore job has an unknown state [$jobstatus], aborting."
;;
esac
sleep 10
done
else
Error "Could not start Galaxy restore job. Check log file."
fi
# create missing directories
pushd $TARGET_FS_ROOT >&8
for dir in opt/simpana/Base/Temp opt/simpana/Updates opt/simpana/iDataAgent/jobResults ; do
mkdir -p "$dir"
done
popd >&8
| terreActive/rear | usr/share/rear/restore/GALAXY10/default/400_restore_with_galaxy.sh | Shell | gpl-3.0 | 1,854 |
#!/bin/bash
/usr/sbin/groupadd tstpamgrpg
/usr/sbin/useradd -p '!!' tstpamgrp
/usr/sbin/useradd -p '!!' tstpamgrp2
./tst-pam_group1
RET=$?
/usr/sbin/userdel -r tstpamgrp 2> /dev/null
/usr/sbin/userdel -r tstpamgrp2 2> /dev/null
/usr/sbin/groupdel tstpamgrpg 2> /dev/null
exit $RET
| KubaKaszycki/kubux | pam/xtests/tst-pam_group1.sh | Shell | gpl-3.0 | 282 |
#!/bin/bash
# Usage : source pretzel/resources/tools/dev/functions_convert.bash
# sp=~/pretzel/resources/tools/dev/snps2Dataset.pl;
# commonName=Chickpea;
# shortName=WGS_SNP;
# platform=WGS_SNP;
# parentName=...
# genBankRename= sed script of the form :
# s/gi|442654316|gb|CM001764.1|/Ca1/
# s/gi|442654315|gb|CM001765.1|/Ca2/
# setup :
# mkdir out out_json
# for i in *.xlsx; do echo $i; ssconvert -S "$i" out/"$i.%s.csv"; done
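# Illustrative batch run after the setup above (all names are hypothetical):
#   cd out
#   parentName=Chickpea_v1 datasetName=WGS_SNPs
#   for i in *.csv; do snp1; done   # writes one ../out_json/<file>.json per sheet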
function snp1() {
echo "$i"; <"$i" tail -n +2 | sed -f $genBankRename | sort -t, -k 2 | \
$sp -d "$parentName.$datasetName" -s "$shortName" -p $parentName -n"$parentName:$platform" -c "$commonName" \
> ../out_json/"$i".json ; ls -gG ../out_json/"$i".json
}
function datasetName2shortName() {
sed 's/_Submission//ig;s/_Gydle//ig;s/SSRs/SSR/;s/SNPs/SNP/;s/^CP_//;s/FieldPea//;s/FABABEAN_//;s/FABA_//;s/^FB_//;s/_FP$//;s/^Len_//;s/Lentil_//;s/inhouse_Pretzel//;s/ (2)//' ; }
function fileName2DatasetName() {
sed -n 's/\.csv$//;s/[ _]*Linkage[ _]*map[_ ]*//ig;s/Pretzel_submission_//ig;s/ $//;s/ map$//i;s/\([^ ls]\)[xX]\([^ ls]\)/\1 x \2/g;s/ x / x /ig;s/.*\.xlsx\.//p;'; }
# The env var $snpFile names the file containing the SNPs that associate the markers in this map file with chromosome names
# See also mapChrsCN()
# usage e.g. snpFile=*mission*CP_EST_SNP-OPA*
function mapChrs() {
lm_c=$( awk -F, ' { print $2; }' "$i" | uniq)
datasetName=$( echo "$i" | fileName2DatasetName ); echo "$datasetName $i";
mkdir chrSnps/"$datasetName"
if [ -f chrSnps/"$datasetName".chrCount ]
then
rm chrSnps/"$datasetName".chrCount
fi
for j in $lm_c; do echo $j; awk -F, "/,$j,/ {print \$1;}" "$i" >chrSnps/"$datasetName"/$j; done
for j in $(cd chrSnps/"$datasetName"; ls ); do suffix=$(echo $j | sed -n "s/.*\(\..*\)/\1/p"); fgrep -f "chrSnps/$datasetName/$j" $snpFile | sed -f $genBankRename | awk -F, '{a[$2]++;} END {for (i in a) print a[i], i;}' | sort -n -r | head -1 | tee -a chrSnps/"$datasetName".chrCount | awk ' {printf("s/,%s,/,%s%s,/\n", "'$j'", $2, "'$suffix'"); }' ; done > chrSnps/"$datasetName".chrRename.sed
}
function map1() {
j=$(echo "$i" | fileName2DatasetName); \
datasetName=$j;
echo "$j"; <"$i" sed -f chrSnps/"$datasetName".chrRename.sed | $sp -d "$j" -p '' -n 'SNP_OPA' -c "$commonName" -g > ../out_json/"$i".json ; ls -gG ../out_json/"$i".json
}
# Convert a linkage / genetic map from csv to Pretzel json.
# Similar to mapChrs() except the column order here is assumed to be
# columnsKeyString="chr name pos"
# i.e. chr is in $1, name is in $2 (awk)
# This also impacts the regexp /^$j
#
# snpFile=*mission*CP_EST_SNP-OPA*
# snpFile=*CP_GBS-TC*
function mapChrsCN() {
lm_c=$( awk -F, ' { print $1; }' "$i" | uniq)
datasetName=$( echo "$i" | fileName2DatasetName ); echo "$datasetName $i";
mkdir chrSnps/"$datasetName"
for j in $lm_c; do echo $j; awk -F, "/^$j,/ {print \$2;}" "$i" >chrSnps/"$datasetName"/$j; done
for j in $(cd chrSnps/"$datasetName"; ls L*); do suffix=$(echo $j | sed -n "s/.*\(\..*\)/\1/p"); fgrep -f "chrSnps/$datasetName/$j" $snpFile | sed -f $genBankRename | awk -F, '{a[$2]++;} END {for (i in a) print a[i], i;}' | sort -n -r | head -1 | awk ' {printf("s/^%s,/%s%s,/\n", "'$j'", $2, "'$suffix'"); }' ; done > chrSnps/"$datasetName".chrRename.sed
}
function CP_GM() {
export columnsKeyString="name chr pos";
for i in *inkage*_LasseterxICC3996* ; do mapChrs; done
export columnsKeyString="chr name pos";
for i in *inkage*_SonalixGenesis* ; do mapChrsCN; done
export columnsKeyString="chr name pos";
for i in *inkage*_SonalixGenesis* ; do map1; done
export columnsKeyString="name chr pos";
for i in *inkage*_LasseterxICC3996* ; do map1; done
}
| plantinformatics/pretzel | resources/tools/dev/functions_convert.bash | Shell | gpl-3.0 | 3,728 |
#!/bin/sh
# 2006 (c) Etersoft www.etersoft.ru
# Public domain
# load common functions, compatible with local and installed script
. `dirname $0`/../share/eterbuild/korinf/common
check()
{
local REPL
echo
echo -n "Source line: '$TEST1' with result '$REAL' "
#REPL=`echo $TEST1 | sed -r -e $NRL`
#REPL=`echo $TEST1 | perl -pi "$NRL"`
[ "$REPL1" != "$REAL" ] && failure || success
echo
}
TEST1="ALT"
REPL1=" "
REAL=`print_spaces_instead_string "$TEST1"`
check
TEST1="ALTLinux/4.0"
REPL1=" "
REAL=`print_spaces_instead_string "$TEST1"`
check
TEST1="ALT Linux 4.0"
REPL1=" "
REAL=`print_spaces_instead_string "$TEST1"`
check
| vitlav/korinf | tests/test_spaces.sh | Shell | agpl-3.0 | 659 |
#!/bin/sh
# -----------------------------------------------------------------------------
# Start/Stop Script for the CATALINA Server
#
# Environment Variable Prerequisites
#
# CATALINA_HOME May point at your Catalina "build" directory.
#
# CATALINA_BASE (Optional) Base directory for resolving dynamic portions
# of a Catalina installation. If not present, resolves to
# the same directory that CATALINA_HOME points to.
#
# CATALINA_OPTS (Optional) Java runtime options used when the "start",
# "stop", or "run" command is executed.
#
# CATALINA_TMPDIR (Optional) Directory path location of temporary directory
# the JVM should use (java.io.tmpdir). Defaults to
# $CATALINA_BASE/temp.
#
# CATALINA_OUT (Optional) Location of the file to which stdout and stderr
# are written. Defaults to $CATALINA_BASE/logs/catalina.out.
#
# JAVA_HOME Must point at your Java Development Kit installation.
#
# JAVA_OPTS (Optional) Java runtime options used when the "start",
# "stop", or "run" command is executed.
#
# JPDA_TRANSPORT (Optional) JPDA transport used when the "jpda start"
# command is executed. The default is "dt_socket".
#
# JPDA_ADDRESS (Optional) Java runtime options used when the "jpda start"
# command is executed. The default is 8000.
#
# JSSE_HOME (Optional) May point at your Java Secure Sockets Extension
# (JSSE) installation, whose JAR files will be added to the
# system class path used to start Tomcat.
#
# CATALINA_PID (Optional) Path of the file which should contain the pid
# of the catalina startup java process when start (fork) is used
#
# $Id: catalina.sh 289059 2004-12-24 17:17:34Z markt $
# -----------------------------------------------------------------------------
# OS specific support. $var _must_ be set to either true or false.
cygwin=false
os400=false
case "`uname`" in
CYGWIN*) cygwin=true;;
OS400*) os400=true;;
esac
# resolve links - $0 may be a softlink
PRG="$0"
while [ -h "$PRG" ]; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '.*/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`/"$link"
fi
done
# Get standard environment variables
PRGDIR=`dirname "$PRG"`
# Only set CATALINA_HOME if not already set
[ -z "$CATALINA_HOME" ] && CATALINA_HOME=`cd "$PRGDIR/.." ; pwd`
if [ -r "$CATALINA_HOME"/bin/setenv.sh ]; then
. "$CATALINA_HOME"/bin/setenv.sh
fi
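# A typical setenv.sh only exports site-specific settings; for example
# (all values below are illustrative, not defaults):
#
#   JAVA_HOME=/usr/lib/jvm/java
#   CATALINA_OPTS="-Xms256m -Xmx512m"
#   CATALINA_PID="$CATALINA_BASE"/logs/catalina.pid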
# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin; then
[ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
[ -n "$CATALINA_HOME" ] && CATALINA_HOME=`cygpath --unix "$CATALINA_HOME"`
[ -n "$CATALINA_BASE" ] && CATALINA_BASE=`cygpath --unix "$CATALINA_BASE"`
[ -n "$CLASSPATH" ] && CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
[ -n "$JSSE_HOME" ] && JSSE_HOME=`cygpath --path --unix "$JSSE_HOME"`
fi
# For OS400
if $os400; then
# Set job priority to standard for interactive (interactive - 6) by using
# the interactive priority - 6, the helper threads that respond to requests
# will be running at the same priority as interactive jobs.
COMMAND='chgjob job('$JOBNAME') runpty(6)'
system $COMMAND
# Enable multi threading
export QIBM_MULTI_THREADED=Y
fi
# Get standard Java environment variables
if [ -r "$CATALINA_HOME"/bin/setclasspath.sh ]; then
BASEDIR="$CATALINA_HOME"
. "$CATALINA_HOME"/bin/setclasspath.sh
else
echo "Cannot find $CATALINA_HOME/bin/setclasspath.sh"
echo "This file is needed to run this program"
exit 1
fi
# Add on extra jar files to CLASSPATH
if [ -n "$JSSE_HOME" ]; then
CLASSPATH="$CLASSPATH":"$JSSE_HOME"/lib/jcert.jar:"$JSSE_HOME"/lib/jnet.jar:"$JSSE_HOME"/lib/jsse.jar
fi
CLASSPATH="$CLASSPATH":"$CATALINA_HOME"/bin/bootstrap.jar
if [ -z "$CATALINA_BASE" ] ; then
CATALINA_BASE="$CATALINA_HOME"
fi
if [ -z "$CATALINA_TMPDIR" ] ; then
# Define the java.io.tmpdir to use for Catalina
CATALINA_TMPDIR="$CATALINA_BASE"/temp
fi
if [ -z "$CATALINA_OUT" ] ; then
# Use default location for redirected stdout/stderr
CATALINA_OUT="$CATALINA_BASE"/logs/catalina.out
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
CATALINA_HOME=`cygpath --path --windows "$CATALINA_HOME"`
CATALINA_BASE=`cygpath --path --windows "$CATALINA_BASE"`
CATALINA_TMPDIR=`cygpath --path --windows "$CATALINA_TMPDIR"`
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
JSSE_HOME=`cygpath --path --windows "$JSSE_HOME"`
JAVA_ENDORSED_DIRS=`cygpath --path --windows "$JAVA_ENDORSED_DIRS"`
fi
# ----- Execute The Requested Command -----------------------------------------
echo "Using CATALINA_BASE: $CATALINA_BASE"
echo "Using CATALINA_HOME: $CATALINA_HOME"
echo "Using CATALINA_TMPDIR: $CATALINA_TMPDIR"
echo "Using CATALINA_OUT: $CATALINA_OUT"
echo "Using JAVA_HOME: $JAVA_HOME"
if [ "$1" = "jpda" ] ; then
if [ -z "$JPDA_TRANSPORT" ]; then
JPDA_TRANSPORT="dt_socket"
fi
if [ -z "$JPDA_ADDRESS" ]; then
JPDA_ADDRESS="8000"
fi
if [ -z "$JPDA_OPTS" ]; then
JPDA_OPTS="-Xdebug -Xrunjdwp:transport=$JPDA_TRANSPORT,address=$JPDA_ADDRESS,server=y,suspend=n"
fi
CATALINA_OPTS="$CATALINA_OPTS $JPDA_OPTS"
shift
fi
if [ "$1" = "debug" ] ; then
if $os400; then
echo "Debug command not available on OS400"
exit 1
else
shift
if [ "$1" = "-security" ] ; then
echo "Using Security Manager"
shift
exec "$_RUNJDB" $JAVA_OPTS $CATALINA_OPTS \
-Djava.endorsed.dirs="$JAVA_ENDORSED_DIRS" -classpath "$CLASSPATH" \
-sourcepath "$CATALINA_HOME"/../../jakarta-tomcat-4.0/catalina/src/share \
-Djava.security.manager \
-Djava.security.policy=="$CATALINA_BASE"/conf/catalina.policy \
-Dcatalina.base="$CATALINA_BASE" \
-Dcatalina.home="$CATALINA_HOME" \
-Djava.io.tmpdir="$CATALINA_TMPDIR" \
org.apache.catalina.startup.Bootstrap "$@" start
else
exec "$_RUNJDB" $JAVA_OPTS $CATALINA_OPTS \
-Djava.endorsed.dirs="$JAVA_ENDORSED_DIRS" -classpath "$CLASSPATH" \
-sourcepath "$CATALINA_HOME"/../../jakarta-tomcat-4.0/catalina/src/share \
-Dcatalina.base="$CATALINA_BASE" \
-Dcatalina.home="$CATALINA_HOME" \
-Djava.io.tmpdir="$CATALINA_TMPDIR" \
org.apache.catalina.startup.Bootstrap "$@" start
fi
fi
elif [ "$1" = "embedded" ] ; then
shift
echo "Embedded Classpath: $CLASSPATH"
exec "$_RUNJAVA" $JAVA_OPTS $CATALINA_OPTS \
-Djava.endorsed.dirs="$JAVA_ENDORSED_DIRS" -classpath "$CLASSPATH" \
-Dcatalina.base="$CATALINA_BASE" \
-Dcatalina.home="$CATALINA_HOME" \
-Djava.io.tmpdir="$CATALINA_TMPDIR" \
org.apache.catalina.startup.Embedded "$@"
elif [ "$1" = "run" ]; then
shift
if [ "$1" = "-security" ] ; then
echo "Using Security Manager"
shift
exec "$_RUNJAVA" $JAVA_OPTS $CATALINA_OPTS \
-Djava.endorsed.dirs="$JAVA_ENDORSED_DIRS" -classpath "$CLASSPATH" \
-Djava.security.manager \
-Djava.security.policy=="$CATALINA_BASE"/conf/catalina.policy \
-Dcatalina.base="$CATALINA_BASE" \
-Dcatalina.home="$CATALINA_HOME" \
-Djava.io.tmpdir="$CATALINA_TMPDIR" \
org.apache.catalina.startup.Bootstrap "$@" start
else
exec "$_RUNJAVA" $JAVA_OPTS $CATALINA_OPTS \
-Djava.endorsed.dirs="$JAVA_ENDORSED_DIRS" -classpath "$CLASSPATH" \
-Dcatalina.base="$CATALINA_BASE" \
-Dcatalina.home="$CATALINA_HOME" \
-Djava.io.tmpdir="$CATALINA_TMPDIR" \
org.apache.catalina.startup.Bootstrap "$@" start
fi
elif [ "$1" = "start" ] ; then
shift
touch "$CATALINA_OUT"
if [ "$1" = "-security" ] ; then
echo "Using Security Manager"
shift
"$_RUNJAVA" $JAVA_OPTS $CATALINA_OPTS \
-Djava.endorsed.dirs="$JAVA_ENDORSED_DIRS" -classpath "$CLASSPATH" \
-Djava.security.manager \
-Djava.security.policy=="$CATALINA_BASE"/conf/catalina.policy \
-Dcatalina.base="$CATALINA_BASE" \
-Dcatalina.home="$CATALINA_HOME" \
-Djava.io.tmpdir="$CATALINA_TMPDIR" \
org.apache.catalina.startup.Bootstrap "$@" start \
>> "$CATALINA_OUT" 2>&1 &
if [ ! -z "$CATALINA_PID" ]; then
echo $! > $CATALINA_PID
fi
else
"$_RUNJAVA" $JAVA_OPTS $CATALINA_OPTS \
-Djava.endorsed.dirs="$JAVA_ENDORSED_DIRS" -classpath "$CLASSPATH" \
-Dcatalina.base="$CATALINA_BASE" \
-Dcatalina.home="$CATALINA_HOME" \
-Djava.io.tmpdir="$CATALINA_TMPDIR" \
org.apache.catalina.startup.Bootstrap "$@" start \
>> "$CATALINA_OUT" 2>&1 &
if [ ! -z "$CATALINA_PID" ]; then
echo $! > $CATALINA_PID
fi
fi
elif [ "$1" = "stop" ] ; then
shift
exec "$_RUNJAVA" $JAVA_OPTS $CATALINA_OPTS \
-Djava.endorsed.dirs="$JAVA_ENDORSED_DIRS" -classpath "$CLASSPATH" \
-Dcatalina.base="$CATALINA_BASE" \
-Dcatalina.home="$CATALINA_HOME" \
-Djava.io.tmpdir="$CATALINA_TMPDIR" \
org.apache.catalina.startup.Bootstrap "$@" stop
else
echo "Usage: catalina.sh ( commands ... )"
echo "commands:"
if $os400; then
echo " debug Start Catalina in a debugger (not available on OS400)"
echo " debug -security Debug Catalina with a security manager (not available on OS400)"
else
echo " debug Start Catalina in a debugger"
echo " debug -security Debug Catalina with a security manager"
fi
echo " embedded Start Catalina in embedded mode"
echo " jpda start Start Catalina under JPDA debugger"
echo " run Start Catalina in the current window"
echo " run -security Start in the current window with security manager"
echo " start Start Catalina in a separate window"
echo " start -security Start in a separate window with security manager"
echo " stop Stop Catalina"
exit 1
fi
| simeshev/parabuild-ci | 3rdparty/apache-tomcat-4.1.39/bin/catalina.sh | Shell | lgpl-3.0 | 10,134 |
#!/usr/bin/env bash
update_node_modules()
{
typeset var root_path="$1"
echo "* updating node deps of $root_path..."
mkdir -p "$root_path/node_modules"
cd "$root_path"
npm update
npm prune
cd -
}
upate_bower_components()
{
typeset var root_path="$1"
echo "* reinstalling bower deps of $root_path..."
mkdir -p "$root_path/bower_components"
cd "$root_path"
bower update
bower prune
cd -
}
update_offirmo_module()
{
typeset var module_path="$1"
echo "* reinstalling deps of module $module_path..."
update_node_modules "$module_path/test_runner"
upate_bower_components "$module_path/test_runner"
}
echo "* updating global npm..."
npm update -g
#npm install -g bower
echo "* updating deps of html_tests..."
#update_offirmo_module base-objects.js
update_offirmo_module html_tests/app/other_components/base-objects.js
#update_offirmo_module extended-exceptions.js
update_offirmo_module html_tests/app/other_components/extended-exceptions.js
#update_offirmo_module network-constants.js
update_offirmo_module html_tests/app/other_components/network-constants.js
#update_offirmo_module restlink.js
update_offirmo_module html_tests/app/other_components/restlink.js
update_node_modules "html_tests"
upate_bower_components "html_tests/app"
| Offirmo/web-tech-experiments | tosort/2014/deps_update.sh | Shell | unlicense | 1,255 |
#!/bin/bash
# Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PRODUCT_HOME="$HOME/products"
SCRIPT_TAG="[SEC_AUTOMATION_UPDATE_PRODUCTS]"
echo "$SCRIPT_TAG [START]"
echo "$SCRIPT_TAG Cleaning WUM products and updates"
rm -rf ~/.wum-wso2/products
rm -rf ~/.wum-wso2/updates
echo "$SCRIPT_TAG Cleaning product home: ($PRODUCT_HOME)"
rm -rf $PRODUCT_HOME
mkdir $PRODUCT_HOME
for product in $(cat $HOME/scripts/config/SupportedProductList.conf)
do
echo "$SCRIPT_TAG Adding $product to WUM"
wum add -yv $product
done
echo "$SCRIPT_TAG Starting WUM update process"
wum update -v
for product in $(cat $HOME/scripts/config/SupportedProductList.conf)
do
IFS='-' read -r -a namesplits <<< "$product"
echo "$SCRIPT_TAG Listing versions of $product available in WUM directory"
ls -ltr $HOME/.wum-wso2/products/${namesplits[0]}/${namesplits[1]} | tr -s ' ' | cut -d ' ' -f9 | grep -v -e '^$' | paste -sd "," -
echo "$SCRIPT_TAG Latest version of $product available in WUM directory"
ls -ltr $HOME/.wum-wso2/products/${namesplits[0]}/${namesplits[1]} | tr -s ' ' | cut -d ' ' -f9 | grep -v -e '^$' | tail -1
latestZip=$(ls -ltr $HOME/.wum-wso2/products/${namesplits[0]}/${namesplits[1]} | tr -s ' ' | cut -d ' ' -f9 | grep -v -e '^$' | tail -1)
unzip -q $HOME/.wum-wso2/products/${namesplits[0]}/${namesplits[1]}/$latestZip -d $PRODUCT_HOME
echo "$SCRIPT_TAG Extracted ${namesplits[0]}/${namesplits[1]}/$latestZip to $PRODUCT_HOME"
done
echo "$SCRIPT_TAG [END]"
| Prakhash/security-tools | internal/automation-scripts/UpdateProducts.sh | Shell | apache-2.0 | 2,037 |
#!/bin/bash
# http://stackoverflow.com/questions/32597209/python-not-working-in-the-command-line-of-git-bash
# http://how-to.wikia.com/wiki/How_to_read_command_line_arguments_in_a_bash_script
winpty python git_update_all.py "$1"
| autodrive/utils3 | git_update_all.sh | Shell | apache-2.0 | 227 |
#!/bin/sh
# https://cloud.google.com/iot/docs/how-tos/credentials/keys
#
# Generate RSA256
openssl genrsa -out rsa_private.pem 2048
openssl rsa -in rsa_private.pem -pubout -out rsa_public.pem
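# Optional sanity check (illustrative): "openssl rsa -check -noout -in rsa_private.pem"
# prints "RSA key ok" when the generated private key is well formed.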
| libopenstorage/openstorage | hack/generate-jwt-rsa-keys.sh | Shell | apache-2.0 | 193 |
#!/usr/bin/env bash
set -e
BUILD_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
source "${BUILD_DIR}/ci/common/common.sh"
source "${BUILD_DIR}/ci/common/neovim.sh"
COVERITY_BRANCH=${COVERITY_BRANCH:-master}
COVERITY_LOG_FILE="${BUILD_DIR}/build/neovim/cov-int/scm_log.txt"
# Check day of week to run Coverity only on Monday, Wednesday, Friday, and Saturday.
is_date_ok() {
local current_weekday=$(date -u +'%u')
if [[ ${current_weekday} == 2 || ${current_weekday} == 4 || ${current_weekday} == 7 ]]; then
echo "Today is $(date -u +'%A'), not triggering Coverity."
echo "Next Coverity build is scheduled for $(date -u -d 'tomorrow' +'%A')."
return 1
fi
}
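# Note: date -u +'%u' yields the ISO weekday (1 = Monday ... 7 = Sunday), so the
# early return above skips Tuesday, Thursday and Sunday, i.e. Coverity runs on
# Monday, Wednesday, Friday and Saturday as stated in the comment.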
trigger_coverity() {
require_environment_variable NEOVIM_DIR "${BASH_SOURCE[0]}" ${LINENO}
cd "${NEOVIM_DIR}"
wget -q -O - https://scan.coverity.com/scripts/travisci_build_coverity_scan.sh |
TRAVIS_BRANCH="${NEOVIM_BRANCH}" \
COVERITY_SCAN_PROJECT_NAME="${NEOVIM_REPO}" \
COVERITY_SCAN_NOTIFICATION_EMAIL="coverity@aktau.be" \
COVERITY_SCAN_BRANCH_PATTERN="${COVERITY_BRANCH}" \
COVERITY_SCAN_BUILD_COMMAND_PREPEND="${MAKE_CMD} deps" \
COVERITY_SCAN_BUILD_COMMAND="${MAKE_CMD} nvim" \
bash
if [[ -f "${COVERITY_LOG_FILE}" ]]; then
echo "Contents of ${COVERITY_LOG_FILE}:"
cat "${COVERITY_LOG_FILE}"
fi
}
is_date_ok && {
clone_neovim
trigger_coverity
}
exit 0
| fwalch/bot-ci | ci/coverity.sh | Shell | apache-2.0 | 1,398 |
#!/bin/sh
set -o errexit -o nounset
# Book
mdbook build -d ../docs/ book
# API docs
cargo doc --no-deps
mv -T target/doc docs/api
| withoutboats/cargonauts | deploy_docs.sh | Shell | apache-2.0 | 132 |
#!/usr/bin/env bash
# Copyright 2012 Citrix Systems, Inc. Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. Citrix Systems, Inc.
# reserves all rights not expressly granted by the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Automatically generated by addcopyright.py at 04/03/2012
# $Id: ipassoc.sh 9804 2010-06-22 18:36:49Z alex $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/scripts/network/domr/ipassoc.sh $
# ipassoc.sh -- associate/disassociate a public ip with an instance
# @VERSION@
source /root/func.sh
lock="biglock"
locked=$(getLockFile $lock)
if [ "$locked" != "1" ]
then
exit 1
fi
usage() {
printf "Usage:\n %s -A -l <public-ip-address> -c <dev> [-f] \n" $(basename $0) >&2
printf " %s -D -l <public-ip-address> -c <dev> [-f] \n" $(basename $0) >&2
}
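# Illustrative invocations (addresses, device and gateway are hypothetical):
#   ipassoc.sh -A -f -l 10.1.1.5/24 -c eth2 -g 10.1.1.1   # add first ip on eth2
#   ipassoc.sh -A -l 10.1.1.6/24 -c eth2 -g 10.1.1.1      # add an additional ip
#   ipassoc.sh -D -l 10.1.1.6/24 -c eth2 -g 10.1.1.1      # remove that ip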
add_fw_chain_for_ip () {
local pubIp=$(echo $1 | awk -F'/' '{print $1}')
if sudo iptables -t mangle -N FIREWALL_$pubIp &> /dev/null
then
logger -t cloud "$(basename $0): created firewall chain for $pubIp"
sudo iptables -t mangle -A FIREWALL_$pubIp -j DROP> /dev/null
#ensure outgoing connections are maintained (first rule in chain)
sudo iptables -t mangle -I FIREWALL_$pubIp -m state --state RELATED,ESTABLISHED -j ACCEPT> /dev/null
#ensure that this table is after VPN chain
sudo iptables -t mangle -I PREROUTING 2 -d $pubIp -j FIREWALL_$pubIp
return $?
fi
logger -t cloud "$(basename $0): firewall chain for $pubIp already exists"
}
add_vpn_chain_for_ip () {
local pubIp=$(echo $1 | awk -F'/' '{print $1}')
if sudo iptables -t mangle -N VPN_$pubIp &> /dev/null
then
logger -t cloud "$(basename $0): created VPN chain for $pubIp"
#ensure outgoing connections are maintained (first rule in chain)
sudo iptables -t mangle -I VPN_$pubIp -m state --state RELATED,ESTABLISHED -j ACCEPT
sudo iptables -t mangle -A VPN_$pubIp -j RETURN
#ensure that this table is the first
sudo iptables -t mangle -I PREROUTING 1 -d $pubIp -j VPN_$pubIp
return $?
fi
logger -t cloud "$(basename $0): VPN chain for $pubIp already exists"
}
del_fw_chain_for_ip () {
local pubIp=$(echo $1 | awk -F'/' '{print $1}')
if ! sudo iptables -t mangle -N FIREWALL_$pubIp &> /dev/null
then
logger -t cloud "$(basename $0): destroying firewall chain for $pubIp"
sudo iptables -t mangle -D PREROUTING -d $pubIp -j FIREWALL_$pubIp
sudo iptables -t mangle -F FIREWALL_$pubIp
sudo iptables -t mangle -X FIREWALL_$pubIp
return $?
fi
# firewall chain got created as a result of testing for the chain, cleanup
sudo iptables -t mangle -F FIREWALL_$pubIp
sudo iptables -t mangle -X FIREWALL_$pubIp
logger -t cloud "$(basename $0): firewall chain did not exist for $pubIp, cleaned up"
}
del_vpn_chain_for_ip () {
local pubIp=$(echo $1 | awk -F'/' '{print $1}')
if ! sudo iptables -t mangle -N VPN_$pubIp &> /dev/null
then
logger -t cloud "$(basename $0): destroying vpn chain for $pubIp"
sudo iptables -t mangle -D PREROUTING -d $pubIp -j VPN_$pubIp
sudo iptables -t mangle -F VPN_$pubIp
sudo iptables -t mangle -X VPN_$pubIp
return $?
fi
# vpn chain got created as a result of testing for the chain, cleanup
sudo iptables -t mangle -F VPN_$pubIp
sudo iptables -t mangle -X VPN_$pubIp
logger -t cloud "$(basename $0): vpn chain did not exist for $pubIp, cleaned up"
}
remove_routing() {
local pubIp=$1
logger -t cloud "$(basename $0):Remove routing $pubIp on interface $ethDev"
local ipNoMask=$(echo $pubIp | awk -F'/' '{print $1}')
local mask=$(echo $pubIp | awk -F'/' '{print $2}')
local tableNo=$(echo $ethDev | awk -F'eth' '{print $2}')
local tableName="Table_$ethDev"
local ethMask=$(ip route list scope link dev $ethDev | awk '{print $1}')
if [ "$ethMask" == "" ]
then
# rules and routes will be deleted for the last ip of the interface.
sudo ip rule delete fwmark $tableNo table $tableName
sudo ip rule delete table $tableName
sudo ip route flush table $tableName
sudo ip route flush cache
logger -t cloud "$(basename $0):Remove routing $pubIp - routes and rules deleted"
fi
}
# copy routes for eth0, eth1 and the current public interface from the main table
copy_routes_from_main() {
local tableName=$1
#get the network masks from the main table
local eth0Mask=$(ip route list scope link dev eth0 | awk '{print $1}')
local eth1Mask=$(ip route list scope link dev eth1 | awk '{print $1}')
local ethMask=$(ip route list scope link dev $ethDev | awk '{print $1}')
   # eth0, eth1 and other known routes are thrown so that the main routing table decides the route for them. This is useful if the interface goes down and comes back up.
sudo ip route add throw $eth0Mask table $tableName proto static
sudo ip route add throw $eth1Mask table $tableName proto static
sudo ip route add throw $ethMask table $tableName proto static
return 0;
}
ip_addr_add() {
local dev="$1"
local ip="$2"
sudo ip addr add dev $dev $ip > /dev/null
}
add_routing() {
local pubIp=$1
logger -t cloud "$(basename $0):Add routing $pubIp on interface $ethDev"
local ipNoMask=$(echo $1 | awk -F'/' '{print $1}')
local mask=$(echo $1 | awk -F'/' '{print $2}')
local tableName="Table_$ethDev"
local tablePresent=$(grep $tableName /etc/iproute2/rt_tables)
local tableNo=$(echo $ethDev | awk -F'eth' '{print $2}')
if [ "$tablePresent" == "" ]
then
   if [ "$tableNo" == "" ]
then
return 0;
fi
sudo echo "$tableNo $tableName" >> /etc/iproute2/rt_tables
fi
copy_routes_from_main $tableName
   # NOTE: this entry will be deleted if the interface goes down without the Management Server knowing; in that case all outbound traffic is sent through the main routing table or the first public NIC.
sudo ip route add default via $defaultGwIP table $tableName proto static
sudo ip route flush cache
local ethMask=$(ip route list scope link dev $ethDev | awk '{print $1}')
local rulePresent=$(ip rule show | grep $ethMask)
if [ "$rulePresent" == "" ]
then
# rules will be added while adding the first ip of the interface
sudo ip rule add from $ethMask table $tableName
sudo ip rule add fwmark $tableNo table $tableName
logger -t cloud "$(basename $0):Add routing $pubIp rules added"
fi
return 0;
}
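# For reference (illustrative only, assuming the first ip 10.1.1.5/24 was just
# added on eth2 with gateway 10.1.1.1), add_routing leaves behind roughly:
#   ip rule: "from 10.1.1.0/24 lookup Table_eth2" and "fwmark 0x2 lookup Table_eth2"
#   ip route show table Table_eth2: throw routes for the eth0/eth1/eth2 subnets
#                                   plus "default via 10.1.1.1"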
add_snat() {
local pubIp=$1
local ipNoMask=$(echo $1 | awk -F'/' '{print $1}')
if [ "$sflag" == "0" ]
then
logger -t cloud "$(basename $0):Remove SourceNAT $pubIp on interface $ethDev if it is present"
sudo iptables -t nat -D POSTROUTING -j SNAT -o $ethDev --to-source $ipNoMask ;
return 0;
fi
logger -t cloud "$(basename $0):Added SourceNAT $pubIp on interface $ethDev"
sudo iptables -t nat -D POSTROUTING -j SNAT -o $ethDev --to-source $ipNoMask ;
sudo iptables -t nat -A POSTROUTING -j SNAT -o $ethDev --to-source $ipNoMask ;
return $?
}
remove_snat() {
if [ "$sflag" == "0" ]
then
return 0;
fi
   local pubIp=$1
   # derive the bare address here; previously $ipNoMask was only inherited from some callers
   local ipNoMask=$(echo $pubIp | awk -F'/' '{print $1}')
   logger -t cloud "$(basename $0):Removing SourceNAT $pubIp on interface $ethDev"
   sudo iptables -t nat -D POSTROUTING -j SNAT -o $ethDev --to-source $ipNoMask;
return $?
}
add_first_ip() {
local pubIp=$1
logger -t cloud "$(basename $0):Adding first ip $pubIp on interface $ethDev"
local ipNoMask=$(echo $1 | awk -F'/' '{print $1}')
local mask=$(echo $1 | awk -F'/' '{print $2}')
sudo ip link show $ethDev | grep "state DOWN" > /dev/null
local old_state=$?
ip_addr_add $ethDev $pubIp
sudo iptables -D FORWARD -i $ethDev -o eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT
sudo iptables -D FORWARD -i eth0 -o $ethDev -j ACCEPT
sudo iptables -A FORWARD -i $ethDev -o eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT
sudo iptables -A FORWARD -i eth0 -o $ethDev -j ACCEPT
add_snat $1
if [ $? -gt 0 -a $? -ne 2 ]
then
logger -t cloud "$(basename $0):Failed adding source nat entry for ip $pubIp on interface $ethDev"
return 1
fi
logger -t cloud "$(basename $0):Added first ip $pubIp on interface $ethDev"
if [ $if_keep_state -ne 1 -o $old_state -ne 0 ]
then
sudo ip link set $ethDev up
sudo arping -c 3 -I $ethDev -A -U -s $ipNoMask $ipNoMask;
fi
add_routing $1
return 0
}
remove_first_ip() {
local pubIp=$1
logger -t cloud "$(basename $0):Removing first ip $pubIp on interface $ethDev"
local ipNoMask=$(echo $1 | awk -F'/' '{print $1}')
local mask=$(echo $1 | awk -F'/' '{print $2}')
local existingIpMask=$(sudo ip addr show dev $ethDev | grep inet | awk '{print $2}' | grep -w $ipNoMask)
[ "$existingIpMask" == "" ] && return 0
[ "$mask" == "" ] && mask="32"
sudo iptables -D FORWARD -i $ethDev -o eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT
sudo iptables -D FORWARD -i eth0 -o $ethDev -j ACCEPT
remove_snat $1
sudo ip addr del dev $ethDev "$ipNoMask/$mask"
if [ $? -gt 0 -a $? -ne 2 ]
then
remove_routing $1
return 1
fi
remove_routing $1
return $?
}
add_an_ip () {
local pubIp=$1
logger -t cloud "$(basename $0):Adding ip $pubIp on interface $ethDev"
local ipNoMask=$(echo $1 | awk -F'/' '{print $1}')
sudo ip link show $ethDev | grep "state DOWN" > /dev/null
local old_state=$?
ip_addr_add $ethDev $pubIp
add_snat $1
if [ $if_keep_state -ne 1 -o $old_state -ne 0 ]
then
sudo ip link set $ethDev up
sudo arping -c 3 -I $ethDev -A -U -s $ipNoMask $ipNoMask;
fi
add_routing $1
return $?
}
remove_an_ip () {
local pubIp=$1
logger -t cloud "$(basename $0):Removing ip $pubIp on interface $ethDev"
remove_snat $1
local existingIpMask=$(sudo ip addr show dev $ethDev | grep "inet " | awk '{print $2}')
sudo ip addr del dev $ethDev $pubIp
for ipMask in $existingIpMask
do
if [ "$ipMask" == "$pubIp" ]
then
continue
fi
sudo ip addr add dev $ethDev $ipMask
done
remove_routing $1
return 0
}
#set -x
sflag=0
lflag=
fflag=
cflag=
op=""
is_master=0
is_redundant=0
if_keep_state=0
grep "redundant_router=1" /var/cache/cloud/cmdline > /dev/null
if [ $? -eq 0 ]
then
is_redundant=1
sudo /root/checkrouter.sh --no-lock|grep "Status: MASTER" > /dev/null 2>&1
if [ $? -eq 0 ]
then
is_master=1
fi
fi
if [ $is_redundant -eq 1 -a $is_master -ne 1 ]
then
if_keep_state=1
fi
while getopts 'sfADa:l:c:g:' OPTION
do
case $OPTION in
A) Aflag=1
op="-A"
;;
D) Dflag=1
op="-D"
;;
f) fflag=1
;;
s) sflag=1
;;
l) lflag=1
publicIp="$OPTARG"
;;
c) cflag=1
ethDev="$OPTARG"
;;
g) gflag=1
defaultGwIP="$OPTARG"
;;
?) usage
unlock_exit 2 $lock $locked
;;
esac
done
if [ "$Aflag$Dflag" != "1" ]
then
usage
unlock_exit 2 $lock $locked
fi
if [ "$lflag$cflag" != "11" ]
then
usage
unlock_exit 2 $lock $locked
fi
if [ "$fflag" == "1" ] && [ "$Aflag" == "1" ]
then
add_first_ip $publicIp &&
add_vpn_chain_for_ip $publicIp &&
add_fw_chain_for_ip $publicIp
unlock_exit $? $lock $locked
fi
if [ "$Aflag" == "1" ]
then
add_an_ip $publicIp &&
add_fw_chain_for_ip $publicIp
unlock_exit $? $lock $locked
fi
if [ "$fflag" == "1" ] && [ "$Dflag" == "1" ]
then
remove_first_ip $publicIp &&
del_fw_chain_for_ip $publicIp &&
del_vpn_chain_for_ip $publicIp
unlock_exit $? $lock $locked
fi
if [ "$Dflag" == "1" ]
then
remove_an_ip $publicIp &&
del_fw_chain_for_ip $publicIp
unlock_exit $? $lock $locked
fi
unlock_exit 0 $lock $locked
| argv0/cloudstack | patches/systemvm/debian/config/root/ipassoc.sh | Shell | apache-2.0 | 11,891 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sets up environment variables for an e2e test specified in JOB_NAME, then
# runs e2e-runner.sh.
set -o errexit
set -o nounset
set -o pipefail
# Join all args with |
# Example: join_regex_allow_empty a b "c d" e => a|b|c d|e
function join_regex_allow_empty() {
local IFS="|"
echo "$*"
}
# Join all args with |, but in case of an empty result print "EMPTY\sSET" instead.
# Example: join_regex_no_empty a b "c d" e => a|b|c d|e
# join_regex_no_empty => EMPTY\sSET
function join_regex_no_empty() {
local IFS="|"
if [ -z "$*" ]; then
echo "EMPTY\sSET"
else
echo "$*"
fi
}
# Properly configure globals for an upgrade step in a GKE or GCE upgrade suite
#
# These suites:
# step1: launch a cluster at $old_version,
# step2: runs $new_version Kubectl e2es,
# step3: upgrades the master to $new_version,
# step4: runs $old_version e2es,
# step5: upgrades the rest of the cluster,
# step6: runs $old_version e2es again, then
# step7: runs $new_version e2es and tears down the cluster.
#
# Assumes globals:
# $JOB_NAME
# $KUBERNETES_PROVIDER
# $GCE_DEFAULT_SKIP_TESTS
# $GCE_FLAKY_TESTS
# $GCE_SLOW_TESTS
#
# Args:
# $1 old_version: the version to deploy a cluster at, and old e2e tests to run
# against the upgraded cluster (should be something like
# 'release/latest', to work with JENKINS_PUBLISHED_VERSION logic)
# $2 new_version: the version to upgrade the cluster to, and new e2e tests to run
# against the upgraded cluster (should be something like
# 'ci/latest', to work with JENKINS_PUBLISHED_VERSION logic)
# $3 cluster_name: determines E2E_CLUSTER_NAME and E2E_NETWORK
# $4 project: determines PROJECT
function configure_upgrade_step() {
local -r old_version="$1"
local -r new_version="$2"
local -r cluster_name="$3"
local -r project="$4"
[[ "${JOB_NAME}" =~ .*-(step[1-7])-.* ]] || {
echo "JOB_NAME ${JOB_NAME} is not a valid upgrade job name, could not parse"
exit 1
}
local -r step="${BASH_REMATCH[1]}"
local -r gce_test_args="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
)"
local -r gke_test_args="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
)"
if [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then
KUBE_GCE_INSTANCE_PREFIX="$cluster_name"
NUM_NODES=5
KUBE_ENABLE_DEPLOYMENTS=true
KUBE_ENABLE_DAEMONSETS=true
fi
E2E_CLUSTER_NAME="$cluster_name"
E2E_NETWORK="$cluster_name"
PROJECT="$project"
case $step in
step1)
# Deploy at old version
JENKINS_PUBLISHED_VERSION="${old_version}"
E2E_UP="true"
E2E_TEST="false"
E2E_DOWN="false"
if [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
E2E_SET_CLUSTER_API_VERSION=y
fi
;;
step2)
# Run new e2e kubectl tests
JENKINS_PUBLISHED_VERSION="${new_version}"
JENKINS_FORCE_GET_TARS=y
E2E_OPT="--check_version_skew=false"
E2E_UP="false"
E2E_TEST="true"
E2E_DOWN="false"
GINKGO_TEST_ARGS="--ginkgo.focus=Kubectl"
;;
step3)
# Use upgrade logic of version we're upgrading to.
JENKINS_PUBLISHED_VERSION="${new_version}"
JENKINS_FORCE_GET_TARS=y
E2E_OPT="--check_version_skew=false"
E2E_UP="false"
E2E_TEST="true"
E2E_DOWN="false"
GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Upgrade\].*upgrade-master --upgrade-target=${new_version}"
;;
step4)
# Run old e2es
JENKINS_PUBLISHED_VERSION="${old_version}"
JENKINS_FORCE_GET_TARS=y
E2E_OPT="--check_version_skew=false"
E2E_UP="false"
E2E_TEST="true"
E2E_DOWN="false"
if [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
GINKGO_TEST_ARGS="${gke_test_args}"
else
GINKGO_TEST_ARGS="${gce_test_args}"
fi
;;
step5)
# Use upgrade logic of version we're upgrading to.
JENKINS_PUBLISHED_VERSION="${new_version}"
JENKINS_FORCE_GET_TARS=y
E2E_OPT="--check_version_skew=false"
E2E_UP="false"
E2E_TEST="true"
E2E_DOWN="false"
GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Upgrade\].*upgrade-cluster --upgrade-target=${new_version}"
;;
step6)
# Run old e2es
JENKINS_PUBLISHED_VERSION="${old_version}"
JENKINS_FORCE_GET_TARS=y
E2E_OPT="--check_version_skew=false"
E2E_UP="false"
E2E_TEST="true"
E2E_DOWN="false"
if [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
GINKGO_TEST_ARGS="${gke_test_args}"
else
GINKGO_TEST_ARGS="${gce_test_args}"
fi
;;
step7)
# Run new e2es
JENKINS_PUBLISHED_VERSION="${new_version}"
JENKINS_FORCE_GET_TARS=y
# TODO(15011): these really shouldn't be (very) version skewed, but
# because we have to get ci/latest again, it could get slightly out of
# whack.
E2E_OPT="--check_version_skew=false"
E2E_UP="false"
E2E_TEST="true"
E2E_DOWN="true"
if [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
GINKGO_TEST_ARGS="${gke_test_args}"
else
GINKGO_TEST_ARGS="${gce_test_args}"
fi
;;
esac
}
echo "--------------------------------------------------------------------------------"
echo "Initial Environment:"
printenv | sort
echo "--------------------------------------------------------------------------------"
if [[ "${CIRCLECI:-}" == "true" ]]; then
JOB_NAME="circleci-${CIRCLE_PROJECT_USERNAME}-${CIRCLE_PROJECT_REPONAME}"
BUILD_NUMBER=${CIRCLE_BUILD_NUM}
WORKSPACE=`pwd`
else
# Jenkins?
export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME
fi
# Additional parameters that are passed to hack/e2e.go
E2E_OPT=${E2E_OPT:-""}
# Set environment variables shared for all of the GCE Jenkins projects.
if [[ ${JOB_NAME} =~ ^kubernetes-.*-gce ]]; then
KUBERNETES_PROVIDER="gce"
: ${E2E_MIN_STARTUP_PODS:="1"}
: ${E2E_ZONE:="us-central1-f"}
: ${NUM_NODES_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel
elif [[ ${JOB_NAME} =~ ^kubernetes-.*-gke ]]; then
KUBERNETES_PROVIDER="gke"
: ${E2E_ZONE:="us-central1-f"}
# By default, GKE tests run against the GKE test endpoint using CI Cloud SDK.
# Release jobs (e.g. prod, staging, and test) override these two variables.
: ${CLOUDSDK_BUCKET:="gs://cloud-sdk-build/testing/staging"}
: ${GKE_API_ENDPOINT:="https://test-container.sandbox.googleapis.com/"}
elif [[ ${JOB_NAME} =~ ^kubernetes-.*-aws ]]; then
KUBERNETES_PROVIDER="aws"
: ${E2E_MIN_STARTUP_PODS:="1"}
: ${E2E_ZONE:="us-east-1a"}
: ${NUM_NODES_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel
fi
if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
if [[ "${PERFORMANCE:-}" == "true" ]]; then
: ${MASTER_SIZE:="m3.xlarge"}
: ${NUM_NODES:="100"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\]"}
else
: ${MASTER_SIZE:="m3.medium"}
: ${NODE_SIZE:="m3.medium"}
: ${NUM_NODES:="3"}
fi
fi
# CURRENT_RELEASE_PUBLISHED_VERSION is the JENKINS_PUBLISHED_VERSION for the
# release we are currently pointing our release testing infrastructure at.
# When 1.2.0-beta.0 comes out, e.g., this will become "ci/latest-1.2"
CURRENT_RELEASE_PUBLISHED_VERSION="ci/latest-1.1"
# Specialized tests which should be skipped by default for projects.
GCE_DEFAULT_SKIP_TESTS=(
"\[Skipped\]"
"\[Feature:.+\]"
)
# Tests which kills or restarts components and/or nodes.
DISRUPTIVE_TESTS=(
"\[Disruptive\]"
)
# The following tests are known to be flaky, and are thus run only in their own
# -flaky- build variants.
GCE_FLAKY_TESTS=(
"\[Flaky\]"
)
# The following tests are known to be slow running (> 2 min), and are
# thus run only in their own -slow- build variants. Note that tests
# can be slow by explicit design (e.g. some soak tests), or slow
# through poor implementation. Please indicate which applies in the
# comments below, and for poorly implemented tests, please quote the
# issue number tracking speed improvements.
GCE_SLOW_TESTS=(
"\[Slow\]"
)
# Tests which are not able to be run in parallel.
#
# TODO(ihmccreery) I'd like to get these combined with DISRUPTIVE_TESTS.
GCE_PARALLEL_SKIP_TESTS=(
"\[Serial\]"
"\[Disruptive\]"
)
# Define environment variables based on the Jenkins project name.
# NOTE: Not all jobs are defined here. The hack/jenkins/e2e.sh in master and
# release branches defines relevant jobs for that particular version of
# Kubernetes.
case ${JOB_NAME} in
# Runs all non-slow, non-serial, non-flaky, tests on GCE in parallel.
kubernetes-e2e-gce)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e"}
: ${E2E_PUBLISH_GREEN_VERSION:="true"}
: ${E2E_NETWORK:="e2e-gce"}
# TODO(ihmccreery) remove [Skipped] once tests are relabeled
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[Skipped\]"}
: ${GINKGO_PARALLEL:="y"}
: ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"}
: ${PROJECT:="k8s-jkns-e2e-gce"}
: ${ENABLE_DEPLOYMENTS:=true}
: ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
;;
# Runs all non-flaky, non-slow tests on AWS, sequentially.
kubernetes-e2e-aws)
: ${E2E_PUBLISH_GREEN_VERSION:=true}
: ${E2E_CLUSTER_NAME:="jenkins-aws-e2e"}
: ${E2E_ZONE:="us-west-2a"}
: ${ZONE:="us-west-2a"}
: ${E2E_NETWORK:="e2e-aws"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX="e2e-aws"}
: ${PROJECT:="k8s-jkns-e2e-aws"}
: ${ENABLE_DEPLOYMENTS:=true}
: ${AWS_CONFIG_FILE:='/var/lib/jenkins/.aws/credentials'}
: ${AWS_SSH_KEY:='/var/lib/jenkins/.ssh/kube_aws_rsa'}
: ${KUBE_SSH_USER:='ubuntu'}
# This is needed to be able to create PD from the e2e test
: ${AWS_SHARED_CREDENTIALS_FILE:='/var/lib/jenkins/.aws/credentials'}
;;
# Runs only the examples tests on GCE.
kubernetes-e2e-gce-examples)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-examples"}
: ${E2E_NETWORK:="e2e-examples"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Feature:Example\]"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-examples"}
: ${PROJECT:="kubernetes-jenkins"}
;;
# Runs only the autoscaling tests on GCE.
kubernetes-e2e-gce-autoscaling)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-autoscaling"}
: ${E2E_NETWORK:="e2e-autoscaling"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Feature:ClusterSizeAutoscaling\]|\[Feature:InitialResources\] \
--ginkgo.skip=\[Flaky\]"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-autoscaling"}
: ${PROJECT:="k8s-jnks-e2e-gce-autoscaling"}
: ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
: ${ENABLE_DEPLOYMENTS:=true}
ADMISSION_CONTROL="NamespaceLifecycle,InitialResources,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
;;
# Runs the flaky tests on GCE, sequentially.
kubernetes-e2e-gce-flaky)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-flaky"}
: ${E2E_NETWORK:="e2e-flaky"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
) --ginkgo.focus=$(join_regex_no_empty \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-flaky"}
: ${PROJECT:="k8s-jkns-e2e-gce-flaky"}
: ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
: ${E2E_DOWN:="true"}
;;
# Runs slow tests on GCE, sequentially.
kubernetes-e2e-gce-slow)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-slow"}
: ${E2E_NETWORK:="e2e-slow"}
    # TODO(ihmccreery) remove [Skipped] once tests are relabeled
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Slow\] \
--ginkgo.skip=\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[Skipped\]"}
: ${GINKGO_PARALLEL:="y"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-slow"}
: ${PROJECT:="k8s-jkns-e2e-gce-slow"}
: ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
;;
# Runs a subset of tests on GCE in parallel. Run against all pending PRs.
kubernetes-pull-build-test-e2e-gce)
: ${E2E_CLUSTER_NAME:="jnks-e2e-gce-${NODE_NAME}-${EXECUTOR_NUMBER}"}
: ${E2E_NETWORK:="e2e-gce-${NODE_NAME}-${EXECUTOR_NUMBER}"}
: ${GINKGO_PARALLEL:="y"}
# This list should match the list in kubernetes-e2e-gce-parallel.
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-gce-${NODE_NAME}-${EXECUTOR_NUMBER}"}
: ${PROJECT:="kubernetes-jenkins-pull"}
: ${ENABLE_DEPLOYMENTS:=true}
# Override GCE defaults
NUM_NODES=${NUM_NODES_PARALLEL}
;;
# Runs all non-flaky tests on AWS in parallel.
kubernetes-e2e-aws-parallel)
: ${E2E_CLUSTER_NAME:="jenkins-aws-e2e-parallel"}
: ${E2E_NETWORK:="e2e-parallel"}
: ${GINKGO_PARALLEL:="y"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
: ${ENABLE_DEPLOYMENTS:=true}
# Override AWS defaults.
NUM_NODES=${NUM_NODES_PARALLEL}
;;
# Runs the flaky tests on GCE in parallel.
kubernetes-e2e-gce-parallel-flaky)
: ${E2E_CLUSTER_NAME:="parallel-flaky"}
: ${E2E_NETWORK:="e2e-parallel-flaky"}
: ${GINKGO_PARALLEL:="y"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
) --ginkgo.focus=$(join_regex_no_empty \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="parallel-flaky"}
: ${PROJECT:="k8s-jkns-e2e-gce-prl-flaky"}
: ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
# Override GCE defaults.
NUM_NODES=${NUM_NODES_PARALLEL}
;;
# Run the [Serial], [Disruptive], and [Feature:Restart] tests on GCE.
kubernetes-e2e-gce-serial)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-serial"}
: ${E2E_NETWORK:="jenkins-gce-e2e-serial"}
: ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Serial\]|\[Disruptive\] \
--ginkgo.skip=\[Flaky\]|\[Feature:.+\]"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-serial"}
: ${PROJECT:="kubernetes-jkns-e2e-gce-serial"}
;;
# Run the [Serial], [Disruptive], and [Feature:Restart] tests on GKE.
kubernetes-e2e-gke-serial)
: ${E2E_CLUSTER_NAME:="jenkins-gke-e2e-serial"}
: ${E2E_NETWORK:="jenkins-gke-e2e-serial"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Serial\]|\[Disruptive\] \
--ginkgo.skip=\[Flaky\]|\[Feature:.+\]"}
: ${PROJECT:="jenkins-gke-e2e-serial"}
;;
# Runs the performance/scalability tests on GCE. A larger cluster is used.
kubernetes-e2e-gce-scalability)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-scalability"}
: ${E2E_NETWORK:="e2e-scalability"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\] \
--gather-resource-usage=true \
--gather-metrics-at-teardown=true \
--gather-logs-sizes=true \
--output-print-type=json"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-scalability"}
: ${PROJECT:="kubernetes-jenkins"}
# Override GCE defaults.
MASTER_SIZE="n1-standard-4"
NODE_SIZE="n1-standard-2"
NODE_DISK_SIZE="50GB"
NUM_NODES="100"
# Reduce logs verbosity
TEST_CLUSTER_LOG_LEVEL="--v=2"
    # TODO: Remove when we figure out the reason for occasional failures #19048
KUBELET_TEST_LOG_LEVEL="--v=4"
# Increase resync period to simulate production
TEST_CLUSTER_RESYNC_PERIOD="--min-resync-period=12h"
;;
# Runs e2e on GCE with flannel and VXLAN.
kubernetes-e2e-gce-flannel)
: ${E2E_CLUSTER_NAME:="jenkins-gce-e2e-flannel"}
: ${E2E_PUBLISH_GREEN_VERSION:="true"}
: ${E2E_NETWORK:="e2e-gce-flannel"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-flannel"}
: ${PROJECT:="kubernetes-flannel"}
# Override GCE defaults.
NETWORK_PROVIDER="flannel"
;;
# Runs the performance/scalability test on huge 1000-node cluster on GCE.
# Flannel is used as network provider.
kubernetes-e2e-gce-enormous-cluster)
: ${E2E_CLUSTER_NAME:="jenkins-gce-enormous-cluster"}
: ${E2E_NETWORK:="e2e-enormous-cluster"}
# TODO: Currently run only density test.
# Once this is stable, run the whole [Performance] suite.
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=starting\s30\spods\sper\snode"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-enormous-cluster"}
: ${PROJECT:="kubernetes-scale"}
# Override GCE defaults.
NETWORK_PROVIDER="flannel"
    # Temporarily switch off Heapster, as this will not schedule anywhere.
# TODO: Think of a solution to enable it.
ENABLE_CLUSTER_MONITORING="none"
E2E_ZONE="asia-east1-a"
MASTER_SIZE="n1-standard-32"
NODE_SIZE="n1-standard-1"
NODE_DISK_SIZE="50GB"
NUM_NODES="1000"
# Reduce logs verbosity
TEST_CLUSTER_LOG_LEVEL="--v=1"
# Increase resync period to simulate production
TEST_CLUSTER_RESYNC_PERIOD="--min-resync-period=12h"
;;
# Sets up the GCE soak cluster weekly using the latest CI release.
kubernetes-soak-weekly-deploy-gce)
: ${E2E_CLUSTER_NAME:="gce-soak-weekly"}
: ${E2E_DOWN:="false"}
: ${E2E_NETWORK:="gce-soak-weekly"}
: ${E2E_TEST:="false"}
: ${E2E_UP:="true"}
: ${KUBE_GCE_INSTANCE_PREFIX:="gce-soak-weekly"}
: ${PROJECT:="kubernetes-jenkins"}
;;
# Runs tests on GCE soak cluster.
kubernetes-soak-continuous-e2e-gce)
: ${E2E_CLUSTER_NAME:="gce-soak-weekly"}
: ${E2E_DOWN:="false"}
: ${E2E_NETWORK:="gce-soak-weekly"}
: ${E2E_UP:="false"}
# Clear out any orphaned namespaces in case previous run was interrupted.
: ${E2E_CLEAN_START:="true"}
# We should be testing the reliability of a long-running cluster. The
# DISRUPTIVE_TESTS kill/restart components or nodes in the cluster,
# defeating the purpose of a soak cluster. (#15722)
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
${DISRUPTIVE_TESTS[@]:+${DISRUPTIVE_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="gce-soak-weekly"}
: ${PROJECT:="kubernetes-jenkins"}
;;
# Runs all non-slow, non-serial, non-flaky, tests on GKE in parallel.
kubernetes-e2e-gke)
: ${E2E_CLUSTER_NAME:="jkns-gke-e2e-ci"}
: ${E2E_NETWORK:="e2e-gke-ci"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-ci"}
: ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
# TODO(ihmccreery) remove [Skipped] once tests are relabeled
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[Skipped\]"}
: ${GINKGO_PARALLEL:="y"}
;;
kubernetes-e2e-gke-slow)
: ${E2E_CLUSTER_NAME:="jkns-gke-e2e-slow"}
: ${E2E_NETWORK:="e2e-gke-slow"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-slow"}
: ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
    # TODO(ihmccreery) remove [Skipped] once tests are relabeled
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Slow\] \
--ginkgo.skip=\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[Skipped\]"}
: ${GINKGO_PARALLEL:="y"}
;;
kubernetes-e2e-gke-flaky)
: ${E2E_CLUSTER_NAME:="kubernetes-gke-e2e-flaky"}
: ${E2E_NETWORK:="gke-e2e-flaky"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-ci-flaky"}
: ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=$(join_regex_no_empty \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
;;
# Sets up the GKE soak cluster weekly using the latest CI release.
kubernetes-soak-weekly-deploy-gke)
: ${E2E_CLUSTER_NAME:="jenkins-gke-soak-weekly"}
: ${E2E_DOWN:="false"}
: ${E2E_NETWORK:="gke-soak-weekly"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${JENKINS_PUBLISHED_VERSION:="ci/latest"}
: ${E2E_TEST:="false"}
: ${E2E_UP:="true"}
: ${PROJECT:="kubernetes-jenkins"}
# Need at least n1-standard-2 nodes to run kubelet_perf tests
NODE_SIZE="n1-standard-2"
;;
# Runs tests on GKE soak cluster.
kubernetes-soak-continuous-e2e-gke)
: ${E2E_CLUSTER_NAME:="jenkins-gke-soak-weekly"}
: ${E2E_NETWORK:="gke-soak-weekly"}
: ${E2E_DOWN:="false"}
: ${E2E_UP:="false"}
# Clear out any orphaned namespaces in case previous run was interrupted.
: ${E2E_CLEAN_START:="true"}
: ${PROJECT:="kubernetes-jenkins"}
: ${E2E_OPT:="--check_version_skew=false"}
# We should be testing the reliability of a long-running cluster. The
# DISRUPTIVE_TESTS kill/restart components or nodes in the cluster,
# defeating the purpose of a soak cluster. (#15722)
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
${DISRUPTIVE_TESTS[@]:+${DISRUPTIVE_TESTS[@]}} \
)"}
;;
# kubernetes-upgrade-gke-1.0-master
#
# Test upgrades from the latest release-1.0 build to the latest master build.
#
# Configurations for step1, step4, and step6 live in the release-1.0 branch.
kubernetes-upgrade-gke-1.0-master-step2-kubectl-e2e-new)
configure_upgrade_step 'configured-in-release-1.0' 'ci/latest' 'upgrade-gke-1-0-master' 'kubernetes-jenkins-gke-upgrade'
;;
kubernetes-upgrade-gke-1.0-master-step3-upgrade-master)
configure_upgrade_step 'configured-in-release-1.0' 'ci/latest' 'upgrade-gke-1-0-master' 'kubernetes-jenkins-gke-upgrade'
;;
kubernetes-upgrade-gke-1.0-master-step5-upgrade-cluster)
configure_upgrade_step 'configured-in-release-1.0' 'ci/latest' 'upgrade-gke-1-0-master' 'kubernetes-jenkins-gke-upgrade'
;;
kubernetes-upgrade-gke-1.0-master-step7-e2e-new)
configure_upgrade_step 'configured-in-release-1.0' 'ci/latest' 'upgrade-gke-1-0-master' 'kubernetes-jenkins-gke-upgrade'
;;
# kubernetes-upgrade-gke-1.1-master
#
# Test upgrades from the latest release-1.1 build to the latest master build.
#
# Configurations for step1, step4, and step6 live in the release-1.1 branch.
kubernetes-upgrade-gke-1.1-master-step2-kubectl-e2e-new)
configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gke-1-1-master' 'kubernetes-jenkins-gke-upgrade'
;;
kubernetes-upgrade-gke-1.1-master-step3-upgrade-master)
configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gke-1-1-master' 'kubernetes-jenkins-gke-upgrade'
;;
kubernetes-upgrade-gke-1.1-master-step5-upgrade-cluster)
configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gke-1-1-master' 'kubernetes-jenkins-gke-upgrade'
;;
kubernetes-upgrade-gke-1.1-master-step7-e2e-new)
configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gke-1-1-master' 'kubernetes-jenkins-gke-upgrade'
;;
# kubernetes-upgrade-gce-1.1-master
#
# Test upgrades from the latest release-1.1 build to the latest master build.
#
# Configurations for step1, step4, and step6 live in the release-1.1 branch.
kubernetes-upgrade-gce-1.1-master-step2-kubectl-e2e-new)
configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gce-1-1-master' 'k8s-jkns-gce-upgrade'
;;
kubernetes-upgrade-gce-1.1-master-step3-upgrade-master)
configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gce-1-1-master' 'k8s-jkns-gce-upgrade'
;;
kubernetes-upgrade-gce-1.1-master-step5-upgrade-cluster)
configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gce-1-1-master' 'k8s-jkns-gce-upgrade'
;;
kubernetes-upgrade-gce-1.1-master-step7-e2e-new)
configure_upgrade_step 'configured-in-release-1.1' 'ci/latest' 'upgrade-gce-1-1-master' 'k8s-jkns-gce-upgrade'
;;
# kubernetes-upgrade-gce-1.0-current-release
#
# This suite:
#
# 1. launches a cluster at ci/latest-1.0,
# 2. upgrades the master to CURRENT_RELEASE_PUBLISHED_VERSION
# 3. runs ci/latest-1.0 e2es,
# 4. upgrades the rest of the cluster,
# 5. runs ci/latest-1.0 e2es again, then
# 6. runs CURRENT_RELEASE_PUBLISHED_VERSION e2es and tears down the cluster.
kubernetes-upgrade-1.0-current-release-gce-step1-deploy)
: ${E2E_CLUSTER_NAME:="gce-upgrade-1-0"}
: ${E2E_NETWORK:="gce-upgrade-1-0"}
: ${JENKINS_PUBLISHED_VERSION:="ci/latest-1.0"}
: ${PROJECT:="k8s-jkns-gce-upgrade"}
: ${E2E_UP:="true"}
: ${E2E_TEST:="false"}
: ${E2E_DOWN:="false"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
: ${NUM_NODES:=5}
;;
kubernetes-upgrade-1.0-current-release-gce-step2-upgrade-master)
: ${E2E_CLUSTER_NAME:="gce-upgrade-1-0"}
: ${E2E_NETWORK:="gce-upgrade-1-0"}
: ${E2E_OPT:="--check_version_skew=false"}
# Use upgrade logic of version we're upgrading to.
: ${JENKINS_PUBLISHED_VERSION:="${CURRENT_RELEASE_PUBLISHED_VERSION}"}
: ${JENKINS_FORCE_GET_TARS:=y}
: ${PROJECT:="k8s-jkns-gce-upgrade"}
: ${E2E_UP:="false"}
: ${E2E_TEST:="true"}
: ${E2E_DOWN:="false"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Feature:Upgrade\].*upgrade-master --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
: ${NUM_NODES:=5}
: ${KUBE_ENABLE_DEPLOYMENTS:=true}
: ${KUBE_ENABLE_DAEMONSETS:=true}
;;
kubernetes-upgrade-1.0-current-release-gce-step3-e2e-old)
: ${E2E_CLUSTER_NAME:="gce-upgrade-1-0"}
: ${E2E_NETWORK:="gce-upgrade-1-0"}
: ${E2E_OPT:="--check_version_skew=false"}
: ${JENKINS_FORCE_GET_TARS:=y}
# Run old e2es
: ${JENKINS_PUBLISHED_VERSION:="ci/latest-1.0"}
: ${PROJECT:="k8s-jkns-gce-upgrade"}
: ${E2E_UP:="false"}
: ${E2E_TEST:="true"}
: ${E2E_DOWN:="false"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
: ${NUM_NODES:=5}
;;
kubernetes-upgrade-1.0-current-release-gce-step4-upgrade-cluster)
: ${E2E_CLUSTER_NAME:="gce-upgrade-1-0"}
: ${E2E_NETWORK:="gce-upgrade-1-0"}
: ${E2E_OPT:="--check_version_skew=false"}
# Use upgrade logic of version we're upgrading to.
: ${JENKINS_PUBLISHED_VERSION:="${CURRENT_RELEASE_PUBLISHED_VERSION}"}
: ${JENKINS_FORCE_GET_TARS:=y}
: ${PROJECT:="k8s-jkns-gce-upgrade"}
: ${E2E_UP:="false"}
: ${E2E_TEST:="true"}
: ${E2E_DOWN:="false"}
: ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Feature:Upgrade\].*upgrade-cluster --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
: ${NUM_NODES:=5}
: ${KUBE_ENABLE_DEPLOYMENTS:=true}
: ${KUBE_ENABLE_DAEMONSETS:=true}
;;
kubernetes-upgrade-1.0-current-release-gce-step5-e2e-old)
: ${E2E_CLUSTER_NAME:="gce-upgrade-1-0"}
: ${E2E_NETWORK:="gce-upgrade-1-0"}
: ${E2E_OPT:="--check_version_skew=false"}
: ${JENKINS_FORCE_GET_TARS:=y}
# Run old e2es
: ${JENKINS_PUBLISHED_VERSION:="ci/latest-1.0"}
: ${PROJECT:="k8s-jkns-gce-upgrade"}
: ${E2E_UP:="false"}
: ${E2E_TEST:="true"}
: ${E2E_DOWN:="false"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
: ${NUM_NODES:=5}
;;
kubernetes-upgrade-1.0-current-release-gce-step6-e2e-new)
: ${E2E_CLUSTER_NAME:="gce-upgrade-1-0"}
: ${E2E_NETWORK:="gce-upgrade-1-0"}
# TODO(15011): these really shouldn't be (very) version skewed, but because
# we have to get CURRENT_RELEASE_PUBLISHED_VERSION again, it could get slightly out of whack.
: ${E2E_OPT:="--check_version_skew=false"}
: ${JENKINS_FORCE_GET_TARS:=y}
: ${JENKINS_PUBLISHED_VERSION:="${CURRENT_RELEASE_PUBLISHED_VERSION}"}
: ${PROJECT:="k8s-jkns-gce-upgrade"}
: ${E2E_UP:="false"}
: ${E2E_TEST:="true"}
: ${E2E_DOWN:="true"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
: ${NUM_NODES:=5}
;;
  # Run a Kubemark test on a fake 100-node cluster for comparison with the real
  # results from the scalability suite.
kubernetes-kubemark-gce)
: ${E2E_CLUSTER_NAME:="kubernetes-kubemark"}
: ${E2E_NETWORK:="kubernetes-kubemark"}
: ${PROJECT:="k8s-jenkins-kubemark"}
: ${E2E_UP:="true"}
: ${E2E_DOWN:="true"}
: ${E2E_TEST:="false"}
: ${USE_KUBEMARK:="true"}
    # Override defaults to be independent of the GCE defaults and set Kubemark parameters
KUBE_GCE_INSTANCE_PREFIX="kubemark100"
NUM_NODES="10"
MASTER_SIZE="n1-standard-2"
NODE_SIZE="n1-standard-1"
E2E_ZONE="asia-east1-a"
KUBEMARK_MASTER_SIZE="n1-standard-4"
KUBEMARK_NUM_NODES="100"
;;
# Run Kubemark test on a fake 500 node cluster to test for regressions on
# bigger clusters
kubernetes-kubemark-500-gce)
: ${E2E_CLUSTER_NAME:="kubernetes-kubemark-500"}
: ${E2E_NETWORK:="kubernetes-kubemark-500"}
: ${PROJECT:="kubernetes-scale"}
: ${E2E_UP:="true"}
: ${E2E_DOWN:="true"}
: ${E2E_TEST:="false"}
: ${USE_KUBEMARK:="true"}
    # Override defaults to be independent of the GCE defaults and set Kubemark parameters
NUM_NODES="6"
MASTER_SIZE="n1-standard-4"
NODE_SIZE="n1-standard-8"
KUBE_GCE_INSTANCE_PREFIX="kubemark500"
E2E_ZONE="us-east1-b"
KUBEMARK_MASTER_SIZE="n1-standard-16"
KUBEMARK_NUM_NODES="500"
;;
  # Run the big Kubemark test; this currently means a 1000-node cluster and a 16-core master
kubernetes-kubemark-gce-scale)
: ${E2E_CLUSTER_NAME:="kubernetes-kubemark-scale"}
: ${E2E_NETWORK:="kubernetes-kubemark-scale"}
: ${PROJECT:="kubernetes-scale"}
: ${E2E_UP:="true"}
: ${E2E_DOWN:="true"}
: ${E2E_TEST:="false"}
: ${USE_KUBEMARK:="true"}
    # Override defaults to be independent of the GCE defaults and set Kubemark parameters
# We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way.
NUM_NODES="11"
MASTER_SIZE="n1-standard-4"
NODE_SIZE="n1-standard-8" # Note: can fit about 17 hollow nodes per core
# so NUM_NODES x cores_per_node should
# be set accordingly.
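    # e.g. with these settings: 11 nodes x 8 cores/node x ~17 hollow nodes/core ~= 1496
    # hollow-node slots, comfortably above KUBEMARK_NUM_NODES=1000 (rough, illustrative estimate).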
KUBE_GCE_INSTANCE_PREFIX="kubemark1000"
E2E_ZONE="us-east1-b"
KUBEMARK_MASTER_SIZE="n1-standard-16"
KUBEMARK_NUM_NODES="1000"
;;
esac
# Skip gcloud update checking
export CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=true
# AWS variables
export KUBE_AWS_INSTANCE_PREFIX=${E2E_CLUSTER_NAME}
export KUBE_AWS_ZONE=${E2E_ZONE}
export AWS_CONFIG_FILE=${AWS_CONFIG_FILE:-}
export AWS_SSH_KEY=${AWS_SSH_KEY:-}
export KUBE_SSH_USER=${KUBE_SSH_USER:-}
export AWS_SHARED_CREDENTIALS_FILE=${AWS_SHARED_CREDENTIALS_FILE:-}
# GCE variables
export INSTANCE_PREFIX=${E2E_CLUSTER_NAME}
export KUBE_GCE_ZONE=${E2E_ZONE}
export KUBE_GCE_NETWORK=${E2E_NETWORK}
export KUBE_GCE_INSTANCE_PREFIX=${KUBE_GCE_INSTANCE_PREFIX:-}
export KUBE_GCE_NODE_PROJECT=${KUBE_GCE_NODE_PROJECT:-}
export KUBE_GCE_NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-}
export KUBE_OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-}
export GCE_SERVICE_ACCOUNT=$(gcloud auth list 2> /dev/null | grep active | cut -f3 -d' ')
export FAIL_ON_GCP_RESOURCE_LEAK="${FAIL_ON_GCP_RESOURCE_LEAK:-false}"
# GKE variables
export CLUSTER_NAME=${E2E_CLUSTER_NAME}
export ZONE=${E2E_ZONE}
export KUBE_GKE_NETWORK=${E2E_NETWORK}
export E2E_SET_CLUSTER_API_VERSION=${E2E_SET_CLUSTER_API_VERSION:-}
export CMD_GROUP=${CMD_GROUP:-}
export MACHINE_TYPE=${NODE_SIZE:-} # GKE scripts use MACHINE_TYPE for the node vm size
export CLOUDSDK_BUCKET="${CLOUDSDK_BUCKET:-}"
if [[ ! -z "${GKE_API_ENDPOINT:-}" ]]; then
export CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER=${GKE_API_ENDPOINT}
fi
# Shared cluster variables
export E2E_MIN_STARTUP_PODS=${E2E_MIN_STARTUP_PODS:-}
export KUBE_ENABLE_CLUSTER_MONITORING=${ENABLE_CLUSTER_MONITORING:-}
export KUBE_ENABLE_CLUSTER_REGISTRY=${ENABLE_CLUSTER_REGISTRY:-}
export KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER=${ENABLE_HORIZONTAL_POD_AUTOSCALER:-}
export KUBE_ENABLE_DEPLOYMENTS=${ENABLE_DEPLOYMENTS:-}
export KUBE_ENABLE_EXPERIMENTAL_API=${ENABLE_EXPERIMENTAL_API:-}
export MASTER_SIZE=${MASTER_SIZE:-}
export NODE_SIZE=${NODE_SIZE:-}
export NODE_DISK_SIZE=${NODE_DISK_SIZE:-}
export NUM_NODES=${NUM_NODES:-}
export TEST_CLUSTER_LOG_LEVEL=${TEST_CLUSTER_LOG_LEVEL:-}
export KUBELET_TEST_LOG_LEVEL=${KUBELET_TEST_LOG_LEVEL:-}
export TEST_CLUSTER_RESYNC_PERIOD=${TEST_CLUSTER_RESYNC_PERIOD:-}
export PROJECT=${PROJECT:-}
export NETWORK_PROVIDER=${NETWORK_PROVIDER:-}
export JENKINS_PUBLISHED_VERSION=${JENKINS_PUBLISHED_VERSION:-'ci/latest'}
export KUBE_ADMISSION_CONTROL=${ADMISSION_CONTROL:-}
export KUBERNETES_PROVIDER=${KUBERNETES_PROVIDER}
export PATH=${PATH}:/usr/local/go/bin
export KUBE_SKIP_UPDATE=y
export KUBE_SKIP_CONFIRMATIONS=y
# Kubemark
export USE_KUBEMARK="${USE_KUBEMARK:-false}"
export KUBEMARK_MASTER_SIZE="${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}"
export KUBEMARK_NUM_NODES="${KUBEMARK_NUM_NODES:-$NUM_NODES}"
# E2E Control Variables
export E2E_OPT="${E2E_OPT:-}"
export E2E_UP="${E2E_UP:-true}"
export E2E_TEST="${E2E_TEST:-true}"
export E2E_DOWN="${E2E_DOWN:-true}"
export E2E_CLEAN_START="${E2E_CLEAN_START:-}"
export E2E_PUBLISH_GREEN_VERSION="${E2E_PUBLISH_GREEN_VERSION:-false}"
# Used by hack/ginkgo-e2e.sh to enable ginkgo's parallel test runner.
export GINKGO_PARALLEL=${GINKGO_PARALLEL:-}
export GINKGO_PARALLEL_NODES=${GINKGO_PARALLEL_NODES:-}
export GINKGO_TEST_ARGS="${GINKGO_TEST_ARGS:-}"
# If we are on PR Jenkins merging into master, use the local e2e.sh. Otherwise, use the latest on github.
if [[ "${ghprbTargetBranch:-}" == "master" ]]; then
source "hack/jenkins/e2e-runner.sh"
else
source <(curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh")
fi
| BugRoger/kubernetes | hack/jenkins/e2e.sh | Shell | apache-2.0 | 36,599 |
#!/bin/bash
# Copyright 2015-2016 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage
if [ "$#" -lt 3 ]
then
echo "$0: Benchmark memory usage of IoT.js"
echo ""
echo "Usage: $0 [-d] IOTJS IOTJS_MEMSTATS BENCHMARK..."
echo ""
echo "Positional arguments:"
echo " IOTJS path to IoT.js engine built without memory"
echo " statistics support"
echo " IOTJS_MEMSTATS path to IoT.js engine built with memory statistics"
echo " support"
echo " BENCHMARK... paths to JavaScript programs to be used as the"
echo " benchmark suite"
echo ""
echo "Optional arguments:"
echo " -d generate semicolon-delimited output (default:"
echo " formatted human-readable output)"
echo ""
echo "The tool benchmarks the memory usage of IoT.js with the help of two"
echo "different builds and a suite of JavaScript programs. Each benchmark"
echo "script is executed by both builds: the \"memstats\" build reports"
echo "statistics retrieved from JerryScript, while the \"normal\" build"
echo "reports RSS results."
exit 1
fi
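# Example invocation (paths are illustrative):
#   ./tools/mem_stats.sh -d build/bin/iotjs build-memstats/bin/iotjs test/benchmark/*.js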
# Choosing table or semicolon-separated output mode
if [ "$1" == "-d" ]
then
TABLE="no"
PRINT_TEST_NAME_AWK_SCRIPT='{printf "%s;", $1}'
PRINT_TOTAL_AWK_SCRIPT='{printf "%d;%d\n", $1, $2 * 1024}'
shift
else
PRINT_TEST_NAME_AWK_SCRIPT='{printf "%30s", $1}'
PRINT_TOTAL_AWK_SCRIPT='{printf "%25d%25d\n", $1, $2 * 1024}'
TABLE="yes"
fi
function fail_msg
{
echo "$1"
exit 1
}
# Engine
# Check if the specified build supports memory statistics options
function is_mem_stats_build
{
[ -x "$1" ] || fail_msg "Engine '$1' is not executable"
tmpfile=`mktemp`
"$1" $tmpfile --memstat 2>&1 | \
grep -- "Ignoring memory statistics option" 2>&1 > /dev/null
code=$?
rm $tmpfile
return $code
}
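# Note: per the grep above, this returns 0 (success) when the engine ignores
# --memstat, i.e. when it was built WITHOUT memory statistics support.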
IOTJS=$(readlink -f "$1")
shift
is_mem_stats_build "$IOTJS" || fail_msg \
"First engine specified should be built without memory statistics support"
IOTJS_MEM_STATS=$(readlink -f "$1")
shift
is_mem_stats_build "$IOTJS_MEM_STATS" && fail_msg \
"Second engine specified should be built with memory statistics support"
# Benchmarks list
BENCHMARKS=""
while [ $# -ne 0 ]
do
BENCHMARKS="$BENCHMARKS $1"
shift
done
# Running
if [ "$TABLE" == "yes" ]
then
awk 'BEGIN {printf "%30s%25s%25s\n", "Test name", "Peak Heap (jerry)", \
"Maximum RSS"}'
echo
fi
STARTDIR=$(pwd)
for bench in $BENCHMARKS
do
bench_name=$(basename -s '.js' $bench)
bench_canon=$(readlink -f $bench)
cd `dirname $bench_canon`
echo "$bench_name" | awk "$PRINT_TEST_NAME_AWK_SCRIPT"
MEM_STATS=$("$IOTJS_MEM_STATS" $bench_canon --memstat | \
grep -e "Peak allocated =" | grep -o "[0-9]*")
RSS=$($STARTDIR/deps/jerry/tools/rss-measure.sh "$IOTJS" $bench_canon | \
tail -n 1 | grep -o "[0-9]*")
echo $MEM_STATS $RSS | xargs | awk "$PRINT_TOTAL_AWK_SCRIPT"
cd $STARTDIR
done
| lemmaa/iotjs | tools/mem_stats.sh | Shell | apache-2.0 | 3,488 |
#!/bin/bash
# 1: REV
function validateRev() {
validateNumber "SvnRev" $1
}
function validateRevRange() {
local RANGE=$1
if ! echo $RANGE | grep -E '^[0-9]+(:[0-9]+)?$' >/dev/null; then
echo "Illegal Revision Range: $RANGE"
exit 1
fi
}
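# e.g. validateRevRange "100:200" passes silently, while validateRevRange "r100"
# prints "Illegal Revision Range" and exits (illustrative).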
# 1: CMD
function validateCmd() {
local CMD=$1
if ! echo $CMD | grep -E "(export|checkout)" >/dev/null; then
echo "Illegal Svn Command: $CMD"
exit 1
fi
}
function getCommitter() {
local SRCDIR=$1
local REV=$2
svn log -q -r $REV $SRCDIR \
| grep '^r.*|' \
| cut -d '|' -f2 \
| sed -e 's/^ *//g' -e 's/ *$//g'
}
function getLog() {
local SRCDIR=$1
local REV1=$2
local REV2=$3
if [ -z "$REV2" ]; then
REVRANGE="${REV1}"
else
REVRANGE="${REV1}:${REV2}"
fi
svn log -r $REVRANGE $SRCDIR
}
function exportURL() {
local REV=$1
local URL=$2
local DIR=$3
dirMustNotExist $DIR
getBranchUrl $BRANCH
$SSHCMD svn $VCSEXPORTCMD -q $VCSBASE/$BRANCHURL/$URL@$REV $DIR
} | drptbl/dash | scripts/shared/vcs/svn.sh | Shell | apache-2.0 | 971 |
arm-none-eabi-objcopy -O binary ../../../output/da15000/bin/arm-none-eabi-ot-cli-ftd ../../../output/da15000/bin/arm-none-eabi-ot-cli-ftd.bin
| jjlee9/openthread | third_party/dialog/DialogTools/imgprep.sh | Shell | bsd-3-clause | 145 |
#!/bin/bash
mkdir -p /sbin
for i in sbin/dhcpcanon-script; do install "$i" /sbin; done
mkdir -p /share/doc/dhcpcanon
for i in README.md LICENSE; do install -m 644 "$i" /share/doc/dhcpcanon; done
mkdir -p /share/man/man8
for i in man/dhcpcanon.8; do install -m 644 "$i" /share/man/man8; done
python3 setup.py install --record installed.txt --install-scripts=/sbin
adduser --system dhcpcanon
mkdir -p /lib/systemd/system
cp systemd/dhcpcanon.service /lib/systemd/system/dhcpcanon.service
mkdir -p /lib/tmpfiles.d
for i in tmpfiles.d/dhcpcanon.conf; do install -m 644 "$i" /lib/tmpfiles.d; done
systemctl enable /lib/systemd/system/dhcpcanon.service
systemd-tmpfiles --create --root=/lib/tmpfiles.d/dhcpcanon.conf
mkdir -p /lib/systemd/network
for i in systemd/network/90-dhcpcanon.link; do install -m 644 "$i" /lib/systemd/network; done
mkdir -p /etc/apparmor.d
for i in apparmor.d/sbin.dhcpcanon; do install -m 644 "$i" /etc/apparmor.d; done
for i in apparmor.d/sbin.dhcpcanon; do aa-complain /etc/apparmor.d/"$i"; done
| juxor/dhcpcanon_debian | install.sh | Shell | mit | 1,022 |
#!/bin/bash
if [ $# -eq 0 ];
then
echo "Usage: $0 \"cores\" num_repetitions value_to_keep \"executable1 excutable2 ...\" [params]";
echo " where \"cores\" can be the actual thread num to use, such as \"1 10 20\", or"
echo " one of the predefined specifications for that platform (e.g., socket -- see "
echo " scripts/config)";
echo " and value_to_keep can be the min, max, or median";
exit;
fi;
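# Example invocation (binaries and extra parameters are illustrative):
#   ./scripts/scalability_rep_simple.sh "1 10 20" 5 median "./bin/lf-ll ./bin/lb-ll" -i1024 -u10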
cores=$1;
shift;
reps=$1;
shift;
source scripts/lock_exec;
source scripts/config;
source scripts/help;
result_type=$1;
if [ "$result_type" = "max" ];
then
run_script="./scripts/run_rep_max.sh $reps";
echo "# Result from $reps repetitions: max";
shift;
elif [ "$result_type" = "min" ];
then
run_script="./scripts/run_rep_min.sh $reps";
echo "# Result from $reps repetitions: min";
shift;
elif [ "$result_type" = "median" ];
then
run_script="./scripts/run_rep_med.sh $reps";
echo "# Result from $reps repetitions: median";
shift;
else
run_script="./scripts/run_rep_max.sh $reps";
echo "# Result from $reps repetitions: max (default). Available: min, max, median";
fi;
progs="$1";
shift;
progs_num=$(echo $progs | wc -w);
params="$@";
progs_stripped=$(echo $progs | sed -e 's/bin//g' -e 's/[\.\/]//g');
print_n "# " "%-12s " "$progs_stripped" "\n"
print_rep "#cores " $progs_num "throughput " "\n"
printf "%-8d" 1;
thr1="";
for p in $progs;
do
thr=$($run_script ./$p $params -n1);
thr1="$thr1 $thr";
printf "%-12d " $thr;
# printf "%-8.2f" 100.00;
# printf "%-12d" 1;
done;
echo "";
for c in $cores
do
if [ $c -eq 1 ]
then
continue;
fi;
printf "%-8d" $c;
i=0;
for p in $progs;
do
i=$(($i+1));
thr1p=$(get_n $i "$thr1");
thr=$($run_script ./$p $params -n$c);
printf "%-12d " $thr;
# scl=$(echo "$thr/$thr1p" | bc -l);
# linear_p=$(echo "100*(1-(($c-$scl)/$c))" | bc -l);
# printf "%-8.2f" $linear_p;
# printf "%-12.2f" $scl;
done;
echo "";
done;
source scripts/unlock_exec;
| egeyar/ASCYLIB | scripts/scalability_rep_simple.sh | Shell | gpl-2.0 | 2,036 |
#///////////////////////////////////////////////////////////////////////////////
# This file was originally written by Michael J. Bojazi and Bradley S. Meyer.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
#///////////////////////////////////////////////////////////////////////////////
#///////////////////////////////////////////////////////////////////////////////
#//!
#//! \file net_view.sh
#//! \brief A short shell script to generate network view graphs.
#//!
#///////////////////////////////////////////////////////////////////////////////
#/bin/bash
#///////////////////////////////////////////////////////////////////////////////
# Check input.
#///////////////////////////////////////////////////////////////////////////////
if [ ! $# -eq 5 -a "$1" == "--example" ]
then
echo -e "\n$0 ../network/my_output.xml \"\" \"\" \"[z >= 20 and z <= 30 and a - z >= 20 and a - z <= 30]\" my_network_view.pdf\n"
exit
fi
if [ ! $# -eq 5 ]
then
echo -e "\nUsage: $0 input_xml nuc_xpath reac_xpath induced_nuc_xpath out_pdf\n"
echo -e " input_xml = network xml file\n"
echo -e " nuc_xpath = xpath to select nuclides for valid reactions\n"
echo -e " reac_xpath = xpath to select reactions\n"
echo -e " induced_nuc_xpath = xpath to induce a subgraph\n"
echo -e " out_pdf = output pdf file\n"
echo -e "For an example, type: $0 --example\n"
exit
fi
if ! echo $5 | grep --quiet '.pdf'
then
echo
echo "name of output pdf file must contain a single instance .pdf"
echo
exit
fi
full_string=$5
base_string=`echo ${full_string%*.pdf*}`
if [ $full_string != $base_string.pdf ]
then
echo
echo "output pdf file name needs to end with .pdf"
echo
exit
fi
#///////////////////////////////////////////////////////////////////////////////
# Process files.
#///////////////////////////////////////////////////////////////////////////////
./net_view_graph "$1" "$2" "$3" "$4" ${base_string}.dot
neato -n -Txdot ${base_string}.dot | dot2tex -tmath --crop > ${base_string}.tex
pdflatex ${base_string}
rm -rf ${base_string}.log
rm -rf ${base_string}.aux
#//////////////////////////////////////////////////////////////////////////////
# Remove or comment out these lines if you want to retain the individual dot
# and/or tex files.
#///////////////////////////////////////////////////////////////////////////////
rm -rf ${base_string}.tex
rm -rf ${base_string}.dot
| shenyp09/nucnet-tools-code | my_examples/graph/net_view.sh | Shell | gpl-3.0 | 3,094 |
rsync -rvu --perms \
"/cygdrive/D/User/Dropbox/Applications/SoftwareVersioning/MultiModServer/plugins/" \
"/cygdrive/F/SteamCMD/steamapps/common/Half-Life/czero/"
| evandrocoan/Galileo | installers/install_multimod_plugins.sh | Shell | gpl-3.0 | 170 |
#!/bin/sh
mkdir -p build/aux || exit 1
mkdir -p build/m4 || exit 1
autoreconf -vfi "$@" || exit 1
| scengine/core | autogen.sh | Shell | gpl-3.0 | 113 |
#!/bin/bash
set -ex
###########################################################
# UTILS
###########################################################
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get install --no-install-recommends -y tzdata ca-certificates net-tools libxml2-utils git curl libudev1 libxml2-utils iptables iproute2 jq unzip
ln -fs /usr/share/zoneinfo/UTC /etc/localtime
dpkg-reconfigure --frontend noninteractive tzdata
rm -rf /var/lib/apt/lists/*
curl https://raw.githubusercontent.com/spring-io/concourse-java-scripts/v0.0.4/concourse-java.sh > /opt/concourse-java.sh
curl --output /opt/concourse-release-scripts.jar https://repo.spring.io/release/io/spring/concourse/releasescripts/concourse-release-scripts/0.3.2/concourse-release-scripts-0.3.2.jar
###########################################################
# JAVA
###########################################################
JDK_URL=$( ./get-jdk-url.sh $1 )
mkdir -p /opt/openjdk
cd /opt/openjdk
curl -L ${JDK_URL} | tar zx --strip-components=1
test -f /opt/openjdk/bin/java
test -f /opt/openjdk/bin/javac | spring-io/initializr | ci/images/setup.sh | Shell | apache-2.0 | 1,092 |
#!/usr/bin/env bash
# **install_prereqs.sh**
# Install system package prerequisites
#
# install_prereqs.sh [-f]
#
# -f Force an install run now
FORCE_PREREQ=""
while getopts ":f" opt; do
case $opt in
f)
FORCE_PREREQ=1
;;
esac
done
# If TOP_DIR is set we're being sourced rather than running stand-alone
# or in a sub-shell
if [[ -z "$TOP_DIR" ]]; then
# Keep track of the devstack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
# Import common functions
source $TOP_DIR/functions
# Determine what system we are running on. This provides ``os_VENDOR``,
# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
# and ``DISTRO``
GetDistro
# Needed to get ``ENABLED_SERVICES``
source $TOP_DIR/stackrc
# Prereq dirs are here
FILES=$TOP_DIR/files
fi
# Minimum wait time
PREREQ_RERUN_MARKER=${PREREQ_RERUN_MARKER:-$TOP_DIR/.prereqs}
PREREQ_RERUN_HOURS=${PREREQ_RERUN_HOURS:-2}
PREREQ_RERUN_SECONDS=$((60*60*$PREREQ_RERUN_HOURS))
NOW=$(date "+%s")
LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0")
DELTA=$(($NOW - $LAST_RUN))
if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE_PREREQ" ]]; then
echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining) "
echo "and FORCE_PREREQ not set; exiting..."
return 0
fi
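# e.g. with the default PREREQ_RERUN_HOURS=2, a second run within 7200 seconds is
# skipped unless -f is passed to force a re-install (illustrative of the check above).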
# Make sure the proxy config is visible to sub-processes
export_proxy_variables
# Install Packages
# ================
# Install package requirements
PACKAGES=$(get_packages general $ENABLED_SERVICES)
if is_ubuntu && echo $PACKAGES | grep -q dkms ; then
# ensure headers for the running kernel are installed for any DKMS builds
PACKAGES="$PACKAGES linux-headers-$(uname -r)"
fi
install_package $PACKAGES
if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then
if is_ubuntu || is_fedora; then
install_package rsyslog-relp
elif is_suse; then
install_package rsyslog-module-relp
else
exit_distro_not_supported "rsyslog-relp installation"
fi
fi
# Mark end of run
# ---------------
date "+%s" >$PREREQ_RERUN_MARKER
date >>$PREREQ_RERUN_MARKER
| sc68cal/devstack | tools/install_prereqs.sh | Shell | apache-2.0 | 2,170 |
#!/bin/bash
if [[ $EUID -ne 0 ]]; then
echo "$0 must be run as root"
exit 1
fi
RELEASE=`grep -1 -A 0 -B 0 '<version>' /vagrant/pom.xml | head -n 1 | awk '{print $1}' | sed -e 's/.*<version>//' | sed -e 's/<\/version>.*//'`
echo "`date` - Starting nimbus!!" > start-nimbus.log
cd /vagrant/_release/storm-mesos-${RELEASE}-*
# kill existing MesosNimbus and storm UI processes
kill `ps aux | grep MesosNimbu[s] | awk '{print $2}'` &> /dev/null || /bin/true
# Grr... the below *was* working, but now the jar paths are too long with the long package version name.
kill `ps aux | grep org.apache.storm.ui.cor[e] | awk '{print $2}'` &> /dev/null || /bin/true
# So using this more aggressive form now.
kill `ps aux | grep stor[m] | grep -v grep | awk '{print $2}'` &> /dev/null || /bin/true
# Start storm nimbus, which also acts as the mesos scheduler in this case.
# Point the STORM_CONF_DIR to where the repo's storm.yaml lives, so we can modify it
# without having to rebuild the framework tarball and fully reprovision.
MESOS_NATIVE_JAVA_LIBRARY=/usr/lib/libmesos.so STORM_CONF_DIR=/vagrant bin/storm-mesos nimbus &
# Start storm UI
bin/storm ui &
| changreytang/storm-mesos | vagrant/start-nimbus.sh | Shell | apache-2.0 | 1,156 |
#!/bin/bash
set -e
# Find the directory we exist within
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
cd ${DIR}/../../build
cat << EOF
# Tools
Metrictank comes with a bunch of helper tools.
Here is an overview of them all.
This file is generated by [tools-to-doc](https://github.com/grafana/metrictank/blob/master/scripts/dev/tools-to-doc.sh)
---
EOF
for tool in mt-*; do
echo
echo "## $tool"
echo
echo '```'
./$tool -h 2>&1 | sed 's#run at most n tests in parallel (default .*)#run at most n tests in parallel (default num-processors)#'
echo '```'
echo
done
| raintank/raintank-metric | scripts/dev/tools-to-doc.sh | Shell | apache-2.0 | 582 |
#!/usr/bin/env sh
rootfolder=/nfs/hn46/xiaolonw/cnncode/caffe-3dnormal_r_n
GLOG_logtostderr=1 $rootfolder/build/examples/3dnormal/convert_normal_win_high_test.bin /nfs/ladoga_no_backups/users/xiaolonw/seg_cls/sliding_window_edge_high_test/images/ /nfs/ladoga_no_backups/users/xiaolonw/seg_cls/sliding_window_edge_high_test/locs/testLoc1.txt /home/xiaolonw/3ddata/3dnormal_win_cls_high/leveldb_fusetest/3d_localedge_test_db1 0 0 55 55 /nfs/ladoga_no_backups/users/xiaolonw/seg_cls/sliding_window_edge_high_test/reg_coarse_tri/ /nfs/ladoga_no_backups/users/xiaolonw/seg_cls/sliding_window_edge_high_test/reg_localedge_tri/
| xiaolonw/caffe-3dnormal_joint_pose | scripts/3dnormal_win_fuse_tri_2fc/testnet_locedge/convert_3dnormal_win_tri_test.sh | Shell | bsd-2-clause | 624 |
#!/usr/bin/env sh
# test_net_seg.bin test_proto pre_train_model label.txt outputfolder [CPU/GPU]
ROOTFILE=/nfs/hn46/xiaolonw/cnncode/caffe-3dnormal_r_n
GLOG_logtostderr=1 /nfs/hn46/xiaolonw/cnncode/caffe-3dnormal_r_n/build/tools/test_net_3dnormal_win.bin /nfs/hn46/xiaolonw/cnncode/caffe-3dnormal_r_n/prototxt/3dnormal_win_cls_denoise_fc2/testontrain/seg_test_2fc_3dnormal2.prototxt /nfs/ladoga_no_backups/users/xiaolonw/seg_cls/sliding_window/models/3dnormal_win_cls_denoise_fc2/3dnormal__iter_280000 /nfs/ladoga_no_backups/users/xiaolonw/seg_cls/sliding_window/testLabels_ontrain/trainLabels2.txt /home/xiaolonw/3ddata/3dnormal_win_cls/train2
| xiaolonw/caffe-3dnormal_joint_past | scripts/3dnormal_win_cls_denoise_fc2/testontrain/test_3dnet2.sh | Shell | bsd-2-clause | 749 |
#!/bin/bash
FN="pd.mogene.2.0.st_3.14.1.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/annotation/src/contrib/pd.mogene.2.0.st_3.14.1.tar.gz"
"https://bioarchive.galaxyproject.org/pd.mogene.2.0.st_3.14.1.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.mogene.2.0.st/bioconductor-pd.mogene.2.0.st_3.14.1_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.mogene.2.0.st/bioconductor-pd.mogene.2.0.st_3.14.1_src_all.tar.gz"
)
MD5="71326d6bd85de02490171696f923d053"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| Luobiny/bioconda-recipes | recipes/bioconductor-pd.mogene.2.0.st/post-link.sh | Shell | mit | 1,459 |
#!/bin/sh
cmake -DCHECK=off -DARITH=gmp -DBN_PRECI=4096 -DALLOC=DYNAMIC -DCFLAGS="-O3 -march=native -mtune=native -fomit-frame-pointer" -DWITH="DV;BN;MD;CP" -DSHLIB=off $1
| PIVX-Project/PIVX | src/chiabls/contrib/relic/preset/gmp-paillier-4096.sh | Shell | mit | 173 |
SWITCH=$1
TABLE=$2
watch -n 1 -d "flowcount.sh $1 $2"
| TheOranges/gbp-devstack | devstack-scripts/pollflows.sh | Shell | epl-1.0 | 54 |
cd /opt/rur-ple/
python rur_lessons.py
| tectronics/rur-ple | linux_packaging/rur-lessons.sh | Shell | gpl-2.0 | 39 |
#!/bin/sh
xgettext -C -f po/POTFILES.in -d gninjam -p po -o gninjam.pot --keyword=_ --copyright-holder="Tobias Gehrig"
for lang in de ; do
grep -v "^#:" po/$lang.po > po/$lang.po.tmp
mv po/$lang.po.tmp po/$lang.po
xgettext -C --omit-header -j -f po/POTFILES.in -d gninjam -p po -o $lang.po --keyword=_
done | bill-auger/gninjam | update-potfile.sh | Shell | gpl-2.0 | 319 |
#
# Copyright (C) 2009 OpenWrt.org
#
. /lib/ar71xx.sh
PART_NAME=firmware
RAMFS_COPY_DATA=/lib/ar71xx.sh
CI_BLKSZ=65536
CI_LDADR=0x80060000
platform_find_partitions() {
local first dev size erasesize name
while read dev size erasesize name; do
name=${name#'"'}; name=${name%'"'}
case "$name" in
vmlinux.bin.l7|kernel|linux|rootfs)
if [ -z "$first" ]; then
first="$name"
else
echo "$erasesize:$first:$name"
break
fi
;;
esac
done < /proc/mtd
}
platform_find_kernelpart() {
local part
for part in "${1%:*}" "${1#*:}"; do
case "$part" in
vmlinux.bin.l7|kernel|linux)
echo "$part"
break
;;
esac
done
}
platform_do_upgrade_combined() {
local partitions=$(platform_find_partitions)
local kernelpart=$(platform_find_kernelpart "${partitions#*:}")
local erase_size=$((0x${partitions%%:*})); partitions="${partitions#*:}"
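	# The dd offsets below assume a combined-image header whose bytes 2-9 hold the
	# kernel length and bytes 10-17 the rootfs length, both as ASCII hex strings
	# (descriptive note on the existing parsing; layout not re-verified here).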
local kern_length=0x$(dd if="$1" bs=2 skip=1 count=4 2>/dev/null)
local kern_blocks=$(($kern_length / $CI_BLKSZ))
local root_blocks=$((0x$(dd if="$1" bs=2 skip=5 count=4 2>/dev/null) / $CI_BLKSZ))
if [ -n "$partitions" ] && [ -n "$kernelpart" ] && \
[ ${kern_blocks:-0} -gt 0 ] && \
[ ${root_blocks:-0} -gt ${kern_blocks:-0} ] && \
[ ${erase_size:-0} -gt 0 ];
then
local append=""
[ -f "$CONF_TAR" -a "$SAVE_CONFIG" -eq 1 ] && append="-j $CONF_TAR"
( dd if="$1" bs=$CI_BLKSZ skip=1 count=$kern_blocks 2>/dev/null; \
dd if="$1" bs=$CI_BLKSZ skip=$((1+$kern_blocks)) count=$root_blocks 2>/dev/null ) | \
mtd -r $append -F$kernelpart:$kern_length:$CI_LDADR,rootfs write - $partitions
fi
}
platform_check_image() {
local board=$(ar71xx_board_name)
local magic="$(get_magic_word "$1")"
[ "$ARGC" -gt 1 ] && return 1
case "$board" in
ap81 | ap83 | dir-600-a1 | dir-615-c1 | dir-825-b1 | mzk-w04nu | mzk-w300nh | tew-632brp | wrt400n | bullet-m | nanostation-m | rocket-m | wzr-hp-g300nh )
[ "$magic" != "2705" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
tl-wr741nd | tl-wr841n-v1 | tl-wr941nd | tl-wr1043nd)
[ "$magic" != "0100" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
wndr3700)
[ "$magic" != "3337" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
wrt160nl)
[ "$magic" != "4e4c" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
routerstation | routerstation-pro | ls-sr71 | pb42 | pb44)
[ "$magic" != "4349" ] && {
echo "Invalid image. Use *-sysupgrade.bin files on this board"
return 1
}
local md5_img=$(dd if="$1" bs=2 skip=9 count=16 2>/dev/null)
local md5_chk=$(dd if="$1" bs=$CI_BLKSZ skip=1 2>/dev/null | md5sum -); md5_chk="${md5_chk%% *}"
if [ -n "$md5_img" -a -n "$md5_chk" ] && [ "$md5_img" = "$md5_chk" ]; then
return 0
else
echo "Invalid image. Contents do not match checksum (image:$md5_img calculated:$md5_chk)"
return 1
fi
return 0
;;
esac
echo "Sysupgrade is not yet supported on $board."
return 1
}
platform_do_upgrade() {
local board=$(ar71xx_board_name)
case "$board" in
routerstation | routerstation-pro | ls-sr71)
platform_do_upgrade_combined "$ARGV"
;;
*)
default_do_upgrade "$ARGV"
;;
esac
}
disable_watchdog() {
killall watchdog
( ps | grep -v 'grep' | grep '/dev/watchdog' ) && {
echo 'Could not disable watchdog'
return 1
}
}
append sysupgrade_pre_upgrade disable_watchdog
| alicemirror/Backfire-EA3250 | target/linux/ar71xx/base-files/lib/upgrade/platform.sh | Shell | gpl-2.0 | 3,387 |
#!/bin/sh
PATH=/usr/sbin:/usr/bin:/sbin:/bin
[ -s /.resume -a -b "$resume" ] && {
# First try user level resume; it offers splash etc
case "$splash" in
quiet)
a_splash="-P splash=y"
;;
*)
a_splash="-P splash=n"
;;
esac
[ -x "$(command -v resume)" ] && command resume "$a_splash" "$resume"
(readlink -fn "$resume" > /sys/power/resume) > /.resume
}
| haraldh/dracut | modules.d/95resume/resume.sh | Shell | gpl-2.0 | 434 |
#!/bin/sh
# Copyright (C) 1999-2006 ImageMagick Studio LLC
#
# This program is covered by multiple licenses, which are described in
# LICENSE. You should have received a copy of LICENSE with this
# package; otherwise see http://www.imagemagick.org/script/license.php.
. ${srcdir}/tests/common.shi
${RUNENV} ${MEMCHECK} ./rwfile ${SRCDIR}/input_pallette.miff PAM
| ipwndev/DSLinux-Mirror | user/imagemagick/src/tests/rwfile_PAM_pallette.sh | Shell | gpl-2.0 | 363 |
#rm js/sql.js
EMSCRIPTEN=/home/olojkine/Téléchargements/emscripten/emscripten/ make $1
| GadflyBSD/thinkAPP | app_www/lib/sql.js/gh-pages/compile.sh | Shell | apache-2.0 | 89 |
#!/bin/bash
FN="pd.celegans_3.12.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.13/data/annotation/src/contrib/pd.celegans_3.12.0.tar.gz"
"https://bioarchive.galaxyproject.org/pd.celegans_3.12.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.celegans/bioconductor-pd.celegans_3.12.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.celegans/bioconductor-pd.celegans_3.12.0_src_all.tar.gz"
)
MD5="b90ea2e071522bb340c103a1c8270205"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| phac-nml/bioconda-recipes | recipes/bioconductor-pd.celegans/post-link.sh | Shell | mit | 1,424 |
#!/bin/bash
#SBATCH -p debug
#SBATCH --ntasks=4
#SBATCH --ntasks-per-node=4
#SBATCH -t 60
#SBATCH --output=OUTPUT_FILES/%j.o
#SBATCH --job-name=go_database
umask 0022
cd $SLURM_SUBMIT_DIR
# script to generate databases
# read Par_file to get information about the run
NPROC=`grep ^NPROC DATA/Par_file | grep -v -E '^[[:space:]]*#' | cut -d = -f 2`
mkdir -p OUTPUT_FILES
# backup files used for this simulation
cp go_generate_databases_slurm.bash OUTPUT_FILES/
cp DATA/Par_file OUTPUT_FILES/
# save a complete copy of source files
#rm -rf OUTPUT_FILES/src
#cp -rp ./src OUTPUT_FILES/
# obtain job information
cat $SLURM_JOB_NODELIST > OUTPUT_FILES/compute_nodes
echo "$SLURM_JOBID" > OUTPUT_FILES/jobid
echo starting MPI mesher on $NPROC processors
echo " "
sleep 2
mpiexec -np $NPROC ./bin/xgenerate_databases
echo "done "
| QuLogic/specfem3d | utils/Cluster/slurm/go_generate_databases_slurm.bash | Shell | gpl-2.0 | 835 |
#!/bin/bash
# network root script for anaconda.
# runs in the "online" hook, every time an interface comes online.
command -v getarg >/dev/null || . /lib/dracut-lib.sh
. /lib/anaconda-lib.sh
# initqueue/online hook passes interface name as $1
netif="$1"
# get repo info
splitsep ":" "$root" prefix repo
# repo not set? make sure we are using fresh repo information
if [ -z "$repo" ]; then
. "$hookdir"/cmdline/*parse-anaconda-repo.sh
splitsep ":" "$root" prefix repo
fi
# no repo? non-net root? we're not needed here.
[ "$prefix" = "anaconda-net" ] && [ -n "$repo" ] || return 0
# already done? don't run again.
[ -e /dev/root ] && return 0
# user requested a specific network device, but this isn't it - bail out
[ -n "$ksdevice" ] && [ "$ksdevice" != "$netif" ] && return 0
# user didn't request a specific device, so the first one online wins!
[ -z "$ksdevice" ] && ksdevice="$netif"
command -v config_get >/dev/null || . /lib/anaconda-lib.sh
case $repo in
nfs*)
. /lib/nfs-lib.sh
info "anaconda mounting NFS repo at $repo"
# Replace hex space with a real one. All uses of repo need to be quoted
# after this point.
repo=${repo//\\x20/ }
# Convert nfs4 to nfs:nfsvers=4
#
# The reason for this is because anaconda's nfs and dracut's nfs are different.
# dracut expects options at the end, anaconda puts them after nfs:
# dracut's nfs_to_var has a special case to handle anaconda's nfs: form but not nfs4:
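        # e.g. "nfs4:server:/path" becomes "nfs:nfsvers=4:server:/path" when no
        # explicit vers= option is present (illustrative).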
if str_starts "$repo" "nfs4:"; then
repo=nfs:${repo#nfs4:}
nfs_to_var "$repo" "$netif"
if ! strstr "$options" "vers="; then
repo="nfs:${options:+$options,}nfsvers=4:$server:$path"
fi
else
# HACK: work around some Mysterious NFS4 Badness (#811242 and friends)
# by defaulting to nfsvers=3 when no version is requested
nfs_to_var "$repo" "$netif"
if ! strstr "$options" "vers="; then
repo="nfs:${options:+$options,}nfsvers=3:$server:$path"
fi
# END HACK. FIXME: Figure out what is up with nfs4, jeez
fi
if [ "${repo%.iso}" == "$repo" ]; then
mount_nfs "$repo" "$repodir" "$netif" || warn "Couldn't mount $repo"
anaconda_live_root_dir "$repodir"
else
iso="${repo##*/}"
mount_nfs "${repo%$iso}" "$repodir" "$netif" || \
warn "Couldn't mount $repo"
anaconda_live_root_dir "$repodir" "$iso"
fi
;;
http*|ftp*)
info "anaconda: stage2 locations are: $repo"
anaconda_net_root "$repo"
;;
urls)
# Use the locations from the file.
# We will try them one by one until we succeed.
locations="$(</tmp/stage2_urls)"
info "anaconda: stage2 locations are: $locations"
for repo in $locations; do
anaconda_net_root "$repo" && break
done
;;
*)
warn_critical "unknown network repository URL: $repo"
return 1
;;
esac
echo "$netif" >> /tmp/anaconda_netroot.done # mark it done
| jkonecny12/anaconda | dracut/anaconda-netroot.sh | Shell | gpl-2.0 | 3,163 |
#!/usr/bin/env bash
set -ev
wd=`cd $(dirname $0); pwd`
. $wd/artifacts.sh
ls -la $share_folder/build
ALL_MODULES=`find $share_folder/build/ -name "*.whl"`
pip install -e ./tools
[ -d privates ] && pip install -qqq privates/*.whl
pip install $ALL_MODULES
pip install "sphinx==1.6.7" -q
echo "Installed."
cd doc/sphinx; python ./__main__.py
python $wd/test_help_doc_arguments.py "./_build/xml/ind.xml"
echo "OK."
| yugangw-msft/azure-cli | scripts/ci/test_ref_doc.sh | Shell | mit | 422 |
#!/bin/sh
EXPECTED_ARGS=1
if [ $# -ne $EXPECTED_ARGS ]
then
echo "Usage: `basename $0` <fsa_input_in_utf8>"
echo "The input for this script is usually the output of prepare_fsa_format.sh"
echo "A directory 's_fsa' must exist (or a link must exist) with the FSA from "
echo "http://www.eti.pg.gda.pl/katedry/kiw/pracownicy/Jan.Daciuk/personal/fsa.html"
exit 1
fi
LANG=POSIX
OUTPUT=src/resource/de/german.dict
cat $1 src/resource/de/added.txt | iconv -f utf8 -t latin1 | sort -u | gawk -f s_fsa/morph_data.awk | s_fsa/fsa_ubuild -O -o $OUTPUT
echo "Output written to $OUTPUT"
| ManolitoOctaviano/Language-Identification | src/resource/de/make-dict-de.sh | Shell | lgpl-2.1 | 586 |
#!/bin/sh
OCP3_DNS_NAMESERVER=${OCP3_DNS_NAMESERVER:-8.8.8.8}
PUBLIC_NETWORK=${PUBLIC_NETWORK:-public_network}
CONTROL_SUBNET_CIDR=${CONTROL_SUBNET_CIDR:-172.18.10.0/24}
openstack network create control-network
openstack subnet create --network control-network --subnet-range ${CONTROL_SUBNET_CIDR} \
--dns-nameserver ${OCP3_DNS_NAMESERVER} control-subnet
openstack router create control-router
openstack router add subnet control-router control-subnet
neutron router-gateway-set control-router ${PUBLIC_NETWORK}
| Tlacenka/openshift-ansible-contrib | reference-architecture/osp-cli/ch5.3_control_network.sh | Shell | apache-2.0 | 515 |
#!/bin/bash
FN="bladderbatch_1.32.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.14/data/experiment/src/contrib/bladderbatch_1.32.0.tar.gz"
"https://bioarchive.galaxyproject.org/bladderbatch_1.32.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-bladderbatch/bioconductor-bladderbatch_1.32.0_src_all.tar.gz"
)
MD5="aa6e7e141e4144037f2c6edd1637270a"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| cokelaer/bioconda-recipes | recipes/bioconductor-bladderbatch/post-link.sh | Shell | mit | 1,312 |
#!/bin/bash
# This script generates the combined tbx.js JavaScript file for use when `mode="prod"`.
# Running this will save a file at /src/html/assets/javascripts/releases/$VERSION/tbx.js.
# TODO: Add link to documentation that describes how static assets are cached.
# https://github.com/photo/documentation/issues/40
# These JS assets are what you can see in /src/html/assets/themes/fabrizio1.0/templates/template.php.
# The order in which these files are combined needs to match what's in template.php
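# Each line of js-assets.txt is a path relative to $JSDIR, e.g. "/libraries/jquery.js"
# (illustrative entry; see the loop below).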
VERSION=$(grep 'currentCodeVersion' ../configs/defaults.ini | awk -F '=' '{print $2}')
CONFIGFILE="../configs/js-assets.txt"
JSDIR="../html/assets/themes/fabrizio1.0/javascripts"
OUTFILE="../html/assets/javascripts/releases/$VERSION/tbx.js"
echo -n "Truncating file $OUTFILE..."
cat /dev/null > $OUTFILE
echo "OK"
echo -n "Writing new asset file $OUTFILE..."
while IFS= read -r file; do
if [ -f "$JSDIR$file" ];
then
printf "\n/* $file */\n" >> $OUTFILE
cat $JSDIR$file >> $OUTFILE
fi
done < $CONFIGFILE
echo -n "OK"
| meatcar/frontend | src/scripts/tbx-dot-js.sh | Shell | apache-2.0 | 1,044 |
#!/usr/bin/env bash
set -e
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
# Load the helpers.
. helpers.bash
function execute() {
>&2 echo "++ $@"
eval "$@"
}
# Tests to run. Defaults to all.
TESTS=${@:-. compose discovery api nodemanagement mesos/api mesos/compose mesos/zk}
# Generate a temporary binary for the tests.
export SWARM_BINARY=`mktemp`
# Download docker-compose
execute time curl -L --silent https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
execute chmod +x /usr/local/bin/docker-compose
# Build Swarm.
execute time go build -o "$SWARM_BINARY" ../..
# Start the docker engine.
execute docker daemon --log-level=panic \
--storage-driver="$STORAGE_DRIVER" &
DOCKER_PID=$!
# Wait for it to become reachable.
tries=10
until docker version &> /dev/null; do
(( tries-- ))
if [ $tries -le 0 ]; then
echo >&2 "error: daemon failed to start"
exit 1
fi
sleep 1
done
# Pre-fetch the test image.
execute time docker pull ${DOCKER_IMAGE}:${DOCKER_VERSION} > /dev/null
# Run the tests using the same client provided by the test image.
id=`execute docker create ${DOCKER_IMAGE}:${DOCKER_VERSION}`
tmp=`mktemp -d`
execute docker cp "${id}:/usr/local/bin/docker" "$tmp"
execute docker rm -f "$id" > /dev/null
export DOCKER_BINARY="${tmp}/docker"
# Run the tests.
execute time bats --tap $TESTS
| echupriyanov/swarm | test/integration/test_runner.sh | Shell | apache-2.0 | 1,413 |
#!/bin/bash
set -eo pipefail
dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
image="$1"
# Use a client image with curl for testing
clientImage='buildpack-deps:buster-curl'
# ensure the clientImage is ready and available
if ! docker image inspect "$clientImage" &> /dev/null; then
docker pull "$clientImage" > /dev/null
fi
serverImage="$1"
# Create an instance of the container-under-test
cid="$(docker run -d "$serverImage")"
trap "docker rm -vf $cid > /dev/null" EXIT
_request() {
local url="${1#/}"
shift
docker run --rm \
--link "$cid":open-liberty \
"$clientImage" \
curl -fsSL "$@" "http://open-liberty:9080/$url"
}
# Make sure that Open Liberty is listening
. "$dir/../../retry.sh" '_request / &> /dev/null'
# Check that we can request /
[ -n "$(_request '/')" ]
# Check that the version.js file can be retrieved.
helloWorld="$(_request '/version.js')"
[[ "$helloWorld" == *'var current'* ]]
| dinogun/official-images | test/tests/open-liberty-hello-world/run.sh | Shell | apache-2.0 | 919 |
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Runs tests for kubectl diff
run_kubectl_diff_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl diff"
# Test that it works when the live object doesn't exist
output_message=$(! kubectl diff -f hack/testdata/pod.yaml)
kube::test::if_has_string "${output_message}" 'test-pod'
kubectl apply -f hack/testdata/pod.yaml
# Make sure that diffing the resource right after returns nothing (0 exit code).
kubectl diff -f hack/testdata/pod.yaml
# Make sure that:
# 1. the exit code for diff is 1 because it found a difference
# 2. the difference contains the changed image
output_message=$(kubectl diff -f hack/testdata/pod-changed.yaml || test $? -eq 1)
kube::test::if_has_string "${output_message}" 'k8s.gcr.io/pause:3.0'
# Test that we have a return code bigger than 1 if there is an error when diffing
kubectl diff -f hack/testdata/invalid-pod.yaml || test $? -gt 1
kubectl delete -f hack/testdata/pod.yaml
set +o nounset
set +o errexit
}
run_kubectl_diff_same_names() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Test kubectl diff with multiple resources with the same name"
output_message=$(KUBECTL_EXTERNAL_DIFF="find" kubectl diff -Rf hack/testdata/diff/)
kube::test::if_has_string "${output_message}" 'v1\.Pod\..*\.test'
kube::test::if_has_string "${output_message}" 'apps\.v1\.Deployment\..*\.test'
kube::test::if_has_string "${output_message}" 'v1\.ConfigMap\..*\.test'
kube::test::if_has_string "${output_message}" 'v1\.Secret\..*\.test'
set +o nounset
set +o errexit
}
| aveshagarwal/kubernetes | test/cmd/diff.sh | Shell | apache-2.0 | 2,353 |
########################################################################
# Test resurrect table locks
########################################################################
. inc/common.sh
require_server_version_higher_than 5.6.0
################################################################################
# Start an uncommitted transaction and pause "indefinitely" to keep the
# connection open
################################################################################
function start_uncomitted_transaction()
{
run_cmd $MYSQL $MYSQL_ARGS sakila <<EOF
START TRANSACTION;
DELETE FROM payment;
SELECT SLEEP(10000);
EOF
}
start_server
load_sakila
start_uncomitted_transaction &
job_master=$!
sleep 5;
innobackupex --no-timestamp --include="sakila.actor" $topdir/backup
kill -SIGKILL $job_master
stop_server
innobackupex --apply-log $topdir/backup
| tplavcic/percona-xtrabackup | storage/innobase/xtrabackup/test/t/bug1340717.sh | Shell | gpl-2.0 | 867 |
#!/bin/bash
FN="KEGGdzPathwaysGEO_1.16.0.tar.gz"
URLS=(
"http://bioconductor.org/packages/3.6/data/experiment/src/contrib/KEGGdzPathwaysGEO_1.16.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-keggdzpathwaysgeo/bioconductor-keggdzpathwaysgeo_1.16.0_src_all.tar.gz"
)
MD5="814209c8cae15c1ecdc7efddb8a1b090"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
wget -O- -q $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| colinbrislawn/bioconda-recipes | recipes/bioconductor-keggdzpathwaysgeo/post-link.sh | Shell | mit | 1,269 |
# --- Running LAMMPS ---
# -- Prerequisites: --
# The "run.in.nvt" file is a LAMMPS input script containing
# references to the input scripts and data files
# you hopefully have created earlier with moltemplate.sh:
# system.in.init, system.in.settings, system.data
# If not, carry out the instructions in "README_setup.sh".
#
# -- Instructions: --
# If "lmp_mpi" is the name of the command you use to invoke lammps,
# then you would run lammps on these files this way:
lmp_mpi -i run.in.nvt # Run a simulation at constant volume
# If you have compiled the MPI version of lammps, you can run lammps in parallel
# (But for a system of this small size, it should not be necessary.)
#mpirun -np 4 lmp_mpi -i run.in.nvt
#or
#mpirun -np 4 lmp_mpi -i run.in.npt
# (assuming you have 4 processors available)
| quang-ha/lammps | tools/moltemplate/examples/coarse_grained/abstract_lennard_jones_tube/README_run.sh | Shell | gpl-2.0 | 810 |
#!/bin/bash
# example:
# show help:
# bash runCECP.sh -h
# show alignment GUI (and download the PDB files automatically if they don't exist)
# bash runCECP.sh -pdb1 3cna.A -pdb2 2pel.A -pdbFilePath /tmp/ -autoFetch -show3d
# print output as XML
# bash runCECP.sh -pdb1 3cna.A -pdb2 2pel.A -pdbFilePath /tmp/ -printXML
# print output in CE style
# bash runCECP.sh -pdb1 3cna.A -pdb2 2pel.A -pdbFilePath /tmp/ -printCE
# print output in FatCat style
# bash runCECP.sh -pdb1 3cna.A -pdb2 2pel.A -pdbFilePath /tmp/ -printFatCat
# load files from the local file system. Note: this aligns the whole file, i.e. all chains. If you want to break this up into regions, you need to manipulate the files manually first.
# bash runCECP.sh -file1 /tmp/cn/pdb3cna.ent.gz -file2 file:///tmp/pe/pdb2pel.ent.gz -show3d
### Execute jar ###
# Get the base directory of the argument.
# Can resolve single symlinks if readlink is installed
function scriptdir {
cd "$(dirname "$1")"
cd "$(dirname "$(readlink "$1" 2>/dev/null || basename "$1" )")"
pwd
}
DIR="$(scriptdir "$0" )"
# send the arguments to the java app
java -Xmx500M -cp "$DIR/${project.build.finalName}.jar" org.biojava.nbio.structure.align.ce.CeCPMain "$@"
| andreasprlic/biojava | biojava-protein-comparison-tool/src/main/assembly/runCECP.sh | Shell | lgpl-2.1 | 1,221 |
SCRIPT=$(basename $0)
mkdir -p /var/vcap/sys/log/monit
exec 1>> /var/vcap/sys/log/monit/$SCRIPT.out.log
exec 2>> /var/vcap/sys/log/monit/$SCRIPT.err.log
pid_guard() {
pidfile=$1
name=$2
if [ -f "$pidfile" ]; then
pid=$(head -1 "$pidfile")
if [ -n "$pid" ] && [ -e /proc/$pid ]; then
echo "$name is already running, please stop it first"
exit 1
fi
echo "Removing stale pidfile..."
rm $pidfile
fi
}
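# Example (pidfile path and process name are illustrative):
#   pid_guard /var/vcap/sys/run/nats/nats.pid "nats"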
wait_pidfile() {
pidfile=$1
try_kill=$2
timeout=${3:-0}
force=${4:-0}
countdown=$(( $timeout * 10 ))
if [ -f "$pidfile" ]; then
pid=$(head -1 "$pidfile")
if [ -z "$pid" ]; then
echo "Unable to get pid from $pidfile"
exit 1
fi
if [ -e /proc/$pid ]; then
if [ "$try_kill" = "1" ]; then
echo "Killing $pidfile: $pid "
kill $pid
fi
while [ -e /proc/$pid ]; do
sleep 0.1
[ "$countdown" != '0' -a $(( $countdown % 10 )) = '0' ] && echo -n .
if [ $timeout -gt 0 ]; then
if [ $countdown -eq 0 ]; then
if [ "$force" = "1" ]; then
echo -ne "\nKill timed out, using kill -QUIT on $pid... "
kill -QUIT $pid
sleep 0.5
fi
break
else
countdown=$(( $countdown - 1 ))
fi
fi
done
if [ -e /proc/$pid ]; then
echo "Timed Out"
else
echo "Stopped"
fi
else
echo "Process $pid is not running"
fi
rm -f $pidfile
else
echo "Pidfile $pidfile doesn't exist"
fi
}
kill_and_wait() {
pidfile=$1
# Monit default timeout for start/stop is 30s
# Append 'with timeout {n} seconds' to monit start/stop program configs
timeout=${2:-25}
force=${3:-1}
wait_pidfile $pidfile 1 $timeout $force
}
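# --- Illustrative usage (not part of the original helpers; names and paths are hypothetical) ---
# A job's ctl script would typically source this file and wrap its start/stop logic
# with pid_guard / kill_and_wait, e.g.:
#
#   source /var/vcap/packages/pid_utils/pid_utils.sh   # hypothetical install path
#   PIDFILE=/var/vcap/sys/run/myjob/myjob.pid
#
#   case $1 in
#     start)
#       pid_guard $PIDFILE "myjob"          # refuse to start if a live pid already exists
#       echo $$ > $PIDFILE                  # record our pid, then exec the server in place
#       exec /var/vcap/packages/myjob/bin/myjob
#       ;;
#     stop)
#       kill_and_wait $PIDFILE              # kill, wait up to 25s, then kill -QUIT if forced
#       ;;
#   esac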
| sedouard/concourse | src/pid_utils.sh | Shell | apache-2.0 | 1,808 |
#!/bin/sh
SNV_VERSION=`git show -s --format=%H`
echo -n "Git-$SNV_VERSION"
| lowflyerUK/motion | git-commit-version.sh | Shell | gpl-2.0 | 77 |
#!/bin/sh
#
# update.sh
#
# update copyright dates in files
# Copyright (c) 2001-2006, Cisco Systems, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# Neither the name of the Cisco Systems, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
a=`find . -name "*.[ch]"`
for x in $a; do
sed 's/(c) 2001-2005/(c) 2001-2006/' $x > $x.tmp;
mv $x.tmp $x;
done
| wangscript/libjingle-1 | trunk/third_party/libsrtp/srtp/update.sh | Shell | bsd-3-clause | 1,758 |
#!/bin/sh
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
for p in python2.7 python2.6 python2 python not_found ; do
python=`which $p || echo not_found`
if [ -x "$python" ] ; then
break
fi
done
if [ -x "$python" ] ; then
  exec $python "$@"
else
echo "No acceptable version of python found on the system"
exit 1
fi
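# Illustrative invocation (the wrapper simply forwards its arguments; the script
# name below is hypothetical):
#   tools/distrib/python_wrapper.sh some_script.py --some-flag
# runs some_script.py under the first acceptable interpreter found above.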
| yongni/grpc | tools/distrib/python_wrapper.sh | Shell | apache-2.0 | 863 |
#!/bin/bash
#
# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Show n largest objects in a git repo's pack files.
#
# usage:
# $ reposize.sh 100 # find and list biggest 100 objects
#
# derived from
# http://stubbisms.wordpress.com/2009/07/10/git-script-to-show-largest-pack-objects-and-trim-your-waist-line/
if [ ! $# == 1 ]; then
echo "
Usage: $0 <number of biggest objects to show>
 If there are loose objects, the script will run 'git gc' to move all data to packs
"
exit
fi
# find git repository directory
gitdir=$(git rev-parse --git-dir 2>.error.log)
if [ $? -ne 0 ]; then
echo $(cat .error.log)
rm .error.log
exit
fi
rm .error.log
object_count=$(git count-objects -v | grep count: | cut -f 2 -d ' ')
if [ $object_count -gt 1 ]; then
echo "-------------------------------------------------------"
echo "$object_count loose objects found in repository $gitdir"
echo "-> running git gc to move all data to packs"
git gc
echo "-------------------------------------------------------"
fi
# set the internal field separator to line break, so that we can iterate easily over the verify-pack output
IFS=$'\n';
# list all objects including their size, sort by size, take top $1 biggest blobs
objects=$(git verify-pack -v $gitdir/objects/pack/pack-*.idx | grep -v chain | sort -k3nr | head -n $1)
echo "All sizes are in KiB. The pack column is the size of the object, compressed, inside the pack file."
output="size,pack,SHA,location"
for y in $objects
do
# extract the size in bytes
size=$(($(echo $y | cut -f 5 -d ' ') / 1024))
# extract the compressed size in bytes
compressedSize=$(($(echo $y | cut -f 6 -d ' ') / 1024))
# extract the SHA
sha=$(echo $y | cut -f 1 -d ' ')
# find the objects location in the repository tree
other=$(git rev-list --all --objects | grep $sha)
output="${output}\n${size},${compressedSize},${other}"
done
echo -e $output | column -t -s ', '
| midnightradio/gerrit | gerrit-server/src/main/resources/com/google/gerrit/server/tools/root/scripts/reposize.sh | Shell | apache-2.0 | 2,522 |
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
export KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
go get -u gopkg.in/mikedanese/gazel.v13/gazel
if ! "${GOPATH}/bin/gazel" -validate -print-diff -root="$(kube::realpath ${KUBE_ROOT})" ; then
echo
echo "Run ./hack/update-bazel.sh"
exit 1
fi
| jawnsy/cri-o | vendor/k8s.io/kubernetes/hack/verify-bazel.sh | Shell | apache-2.0 | 939 |
R CMD REMOVE --library=$PREFIX/lib/R/library/ mouse4302cdf
| joachimwolff/bioconda-recipes | recipes/bioconductor-mouse4302cdf/pre-unlink.sh | Shell | mit | 59 |
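# Key (inferred from the operand codes used below; not part of the original file):
# s = single precision, d = double precision, r = real, c = complex --
# so e.g. "sr" is a single-precision real matrix and "dc" a double-precision complex one.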
# RR
# SS
valgrind ./repop_testcases sr 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sr 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sr 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sr 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
#SD
valgrind ./repop_testcases sr 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sr 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sr 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sr 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
#DS
valgrind ./repop_testcases dr 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dr 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dr 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dr 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
#DD
valgrind ./repop_testcases dr 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dr 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dr 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dr 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
# RC
# SS
valgrind ./repop_testcases sr 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sr 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sr 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sr 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
#SD
valgrind ./repop_testcases sr 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sr 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sr 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sr 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
#DS
valgrind ./repop_testcases dr 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dr 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dr 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dr 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
#DD
valgrind ./repop_testcases dr 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dr 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dr 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dr 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
# CR
# SS
valgrind ./repop_testcases sc 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sc 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sc 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sc 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
#SD
valgrind ./repop_testcases sc 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sc 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sc 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sc 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
#DS
valgrind ./repop_testcases dc 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dc 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dc 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dc 11x29x51 rand sr 11x29x51 rand '+' > /dev/null
#DD
valgrind ./repop_testcases dc 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dc 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dc 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dc 11x29x51 rand dr 11x29x51 rand '+' > /dev/null
# CC
# SS
valgrind ./repop_testcases sc 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sc 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sc 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sc 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
#SD
valgrind ./repop_testcases sc 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sc 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sc 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases sc 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
#DS
valgrind ./repop_testcases dc 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dc 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dc 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dc 11x29x51 rand sc 11x29x51 rand '+' > /dev/null
#DD
valgrind ./repop_testcases dc 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dc 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dc 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
valgrind ./repop_testcases dc 11x29x51 rand dc 11x29x51 rand '+' > /dev/null
| distrep/DMLT | external/svm/repop/valgrind.sh | Shell | gpl-3.0 | 5,019 |
#!/bin/sh
# PCSX2 - PS2 Emulator for PCs
# Copyright (C) 2002-2011 PCSX2 Dev Team
#
# PCSX2 is free software: you can redistribute it and/or modify it under the terms
# of the GNU Lesser General Public License as published by the Free Software Found-
# ation, either version 3 of the License, or (at your option) any later version.
#
# PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PCSX2.
# If not, see <http://www.gnu.org/licenses/>.
# Probably self-explanatory: This script compiles a single source image into a
# CPP header file for use by pcsx2.
#
#   bin2app.sh SrcRoot SrcImage
#
# Parameters
#   SrcRoot  - Directory containing tools/bin/bin2cpp.
#   SrcImage - Complete filename with extension.
#
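# Example (hypothetical paths, shown only for illustration):
#   sh bin2app.sh /path/to/pcsx2 /path/to/SomeImage.png
#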
$1/tools/bin/bin2cpp $2
| tacchinotacchi/pcsx2 | tools/bin2app.sh | Shell | gpl-2.0 | 940 |
alias brewp='brew pin'
alias brews='brew list -1'
alias brewsp='brew list --pinned'
alias bubo='brew update && brew outdated'
alias bubc='brew upgrade && brew cleanup'
alias bubu='bubo && bubc'
alias buf='brew upgrade --formula'
alias bcubo='brew update && brew outdated --cask'
alias bcubc='brew upgrade --cask && brew cleanup'
| okubax/dotfiles | zsh/oh-my-zsh/plugins/brew/brew.plugin.zsh | Shell | gpl-3.0 | 329 |
#!/bin/bash
if [ -f .veewee_params ]
then
. .veewee_params
fi
date > /etc/vagrant_box_build_time
# Set up sudo to allow no-password sudo for the "sudo" group
usermod -a -G sudo vagrant
# Installing vagrant keys
mkdir /home/vagrant/.ssh
chmod 700 /home/vagrant/.ssh
cd /home/vagrant/.ssh
groupadd -r admin
usermod -a -G admin vagrant
wget --no-check-certificate 'https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub' -O authorized_keys
chmod 600 /home/vagrant/.ssh/authorized_keys
chown -R vagrant /home/vagrant/.ssh
| akshaykarle/veewee | templates/ubuntu-10.04.4-server-i386/vagrant.sh | Shell | mit | 523 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2011:0910
#
# Security announcement date: 2011-06-28 17:38:03 UTC
# Script generation date: 2017-01-01 21:13:11 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - ruby-debuginfo.i686:1.8.7.299-7.el6_1.1
# - ruby-libs.i686:1.8.7.299-7.el6_1.1
# - ruby.x86_64:1.8.7.299-7.el6_1.1
# - ruby-debuginfo.x86_64:1.8.7.299-7.el6_1.1
# - ruby-irb.x86_64:1.8.7.299-7.el6_1.1
# - ruby-libs.x86_64:1.8.7.299-7.el6_1.1
# - ruby-devel.i686:1.8.7.299-7.el6_1.1
# - ruby-devel.x86_64:1.8.7.299-7.el6_1.1
# - ruby-docs.x86_64:1.8.7.299-7.el6_1.1
# - ruby-rdoc.x86_64:1.8.7.299-7.el6_1.1
# - ruby-ri.x86_64:1.8.7.299-7.el6_1.1
# - ruby-static.x86_64:1.8.7.299-7.el6_1.1
# - ruby-tcltk.x86_64:1.8.7.299-7.el6_1.1
#
#   Last versions recommended by security team:
# - ruby-debuginfo.i686:1.8.7.374-3.el6_6
# - ruby-libs.i686:1.8.7.374-3.el6_6
# - ruby.x86_64:1.8.7.374-3.el6_6
# - ruby-debuginfo.x86_64:1.8.7.374-3.el6_6
# - ruby-irb.x86_64:1.8.7.374-3.el6_6
# - ruby-libs.x86_64:1.8.7.374-3.el6_6
# - ruby-devel.i686:1.8.7.374-3.el6_6
# - ruby-devel.x86_64:1.8.7.374-3.el6_6
# - ruby-docs.x86_64:1.8.7.374-3.el6_6
# - ruby-rdoc.x86_64:1.8.7.374-3.el6_6
# - ruby-ri.x86_64:1.8.7.374-3.el6_6
# - ruby-static.x86_64:1.8.7.374-3.el6_6
# - ruby-tcltk.x86_64:1.8.7.374-3.el6_6
#
# CVE List:
# - CVE-2011-0188
# - CVE-2011-1004
# - CVE-2011-1005
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install ruby-debuginfo.i686-1.8.7.374 -y
sudo yum install ruby-libs.i686-1.8.7.374 -y
sudo yum install ruby.x86_64-1.8.7.374 -y
sudo yum install ruby-debuginfo.x86_64-1.8.7.374 -y
sudo yum install ruby-irb.x86_64-1.8.7.374 -y
sudo yum install ruby-libs.x86_64-1.8.7.374 -y
sudo yum install ruby-devel.i686-1.8.7.374 -y
sudo yum install ruby-devel.x86_64-1.8.7.374 -y
sudo yum install ruby-docs.x86_64-1.8.7.374 -y
sudo yum install ruby-rdoc.x86_64-1.8.7.374 -y
sudo yum install ruby-ri.x86_64-1.8.7.374 -y
sudo yum install ruby-static.x86_64-1.8.7.374 -y
sudo yum install ruby-tcltk.x86_64-1.8.7.374 -y
| Cyberwatch/cbw-security-fixes | Red_Hat_6/x86_64/2011/RHSA-2011:0910.sh | Shell | mit | 2,249 |