| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
git clone https://github.com/nlpie/nlptab-webapp.git
cd nlptab-webapp
npm install
gulp
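# Repoint the webapp at the Elasticsearch endpoint on port 58032 before deploying.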
sed -i.bak "s/esServer: 'localhost:9200'/esServer: 'localhost:58032'/g" app/config.js
cp -R app/* /var/www/html/
ufw allow http
| NLPIE/NLP-TAB-vm | scripts/nlptab-webapp.sh | Shell | apache-2.0 | 215 |
#!/bin/bash
# Creates shallow haddocks for GitHub pages.
set -eu -o pipefail
IMAGE_NAME=tensorflow/haskell:v0
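# Wrap stack so every command below runs inside the pinned Docker image.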
STACK="stack --docker --docker-image=$IMAGE_NAME"
$STACK haddock --no-haddock-deps tensorflow*
DOC_ROOT=$($STACK path --local-doc-root)
DOCS=docs/haddock
git rm -fr $DOCS
mkdir -p $DOCS
cp $DOC_ROOT/{*.html,*js,*.png,*.gif,*.css} $DOCS
cp -a $DOC_ROOT/tensorflow* $DOCS
rm -f $DOCS/*/*.haddock
git add $DOCS
| cem3394/haskell | tools/haddock.sh | Shell | apache-2.0 | 422 |
# opendaylight.sh - DevStack extras script
if is_service_enabled odl-server odl-compute; then
# Initial source
[[ "$1" == "source" ]] && source $TOP_DIR/lib/opendaylight
fi
if is_service_enabled odl-server; then
if [[ "$1" == "source" ]]; then
# no-op
:
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
install_opendaylight
configure_opendaylight
init_opendaylight
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
# This has to start before Neutron
start_opendaylight
elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
# no-op
:
fi
if [[ "$1" == "unstack" ]]; then
stop_opendaylight
cleanup_opendaylight
fi
if [[ "$1" == "clean" ]]; then
# no-op
:
fi
fi
if is_service_enabled odl-compute; then
if [[ "$1" == "source" ]]; then
# no-op
:
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
install_opendaylight-compute
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
create_nova_conf_neutron
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
echo_summary "Initializing OpenDaylight"
ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP}
ODL_MGR_PORT=${ODL_MGR_PORT:-6640}
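# Capture the UUID of the Open_vSwitch table row; the 'ovs-vsctl set' below needs it.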
read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . _uuid)
sudo ovs-vsctl set-manager tcp:$ODL_MGR_IP:$ODL_MGR_PORT
sudo ovs-vsctl set Open_vSwitch $ovstbl other_config={"local_ip"="$ODL_LOCAL_IP"}
elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
# no-op
:
fi
if [[ "$1" == "unstack" ]]; then
sudo ovs-vsctl del-manager
BRIDGES=$(sudo ovs-vsctl list-br)
for bridge in $BRIDGES ; do
sudo ovs-vsctl del-controller $bridge
done
stop_opendaylight-compute
fi
if [[ "$1" == "clean" ]]; then
# no-op
:
fi
fi
| HeidCloud/devstack | extras.d/80-opendaylight.sh | Shell | apache-2.0 | 1,958 |
#!/bin/sh
# _
# | |
# ___ _ _ _ __ ___ _ _| |_ _ ___
# / __| | | | '_ ` _ \| | | | | | | / __|
# | (__| |_| | | | | | | |_| | | |_| \__ \
# \___|\__,_|_| |_| |_|\__,_|_|\__,_|___/
#
#
#$ -S /bin/bash
#$ -N dummy-123432423
#$ -l gpus=2
cd $SGE_O_WORKDIR
ls
sleep 20
mpirun -n 1000000 parallel
| Kitware/cumulus | tests/cases/fixtures/job/sge_submission_script_gpus.sh | Shell | apache-2.0 | 360 |
#!/bin/sh
# Generate CSV for a scatter plot of CPU times.
table-generator --correct-only -f csv -x scatter.xml
# Generate CSV for a scatter plot where color indicates frequency of data points
# (not useful with the example data in this directory).
cut -f 3,7 < scatter.table.csv \
| sort -n \
| uniq -c \
> scatter.counted.csv
# Generate CSV for a quantile plot of CPU times.
for i in *.results.xml.bz2 ; do
./quantile-generator.py --correct-only $i > ${i%.results.xml.bz2}.quantile.csv
done
# Generate CSV for a score-based quantile plot of CPU times.
for i in *.results.xml.bz2 ; do
./quantile-generator.py --score-based $i > ${i%.results.xml.bz2}.quantile-score.csv
done
# Commands for generating plots with Gnuplot:
gnuplot scatter.gp
gnuplot scatter-counted.gp
gnuplot quantile.gp
gnuplot quantile-split.gp
gnuplot quantile-score.gp
# Commands for generating plots with LaTeX (not necessary if included in other LaTeX file):
pdflatex scatter.tex
pdflatex scatter-counted.tex
pdflatex quantile.tex
pdflatex quantile-score.tex
# Special command for generating plots as PNG files (just for recreating the demo files)
# for f in *.tex; do
# pdflatex -shell-escape "\PassOptionsToClass{convert}{standalone}\input{$f}"
# done
| martin-neuhaeusser/benchexec | contrib/plots/generate-plots.sh | Shell | apache-2.0 | 1,240 |
#!/bin/bash
# Windows Azure OS X Package: Create packages script
# Copyright (C) 2012 Microsoft Corporation. All Rights Reserved.
#
# This builds the package as well as prepares the tarball file, etc.
# This script is only used at build time, it is not part of the package.
#
CURRENT_NODE_DISTRIBUTION_VERSION=v0.6.17
# Check for Apple's PackageMaker
# ------------------------------
if [ ! -f /Applications/Utilities/PackageMaker.app/Contents/MacOS/PackageMaker ]; then
echo PackageMaker needs to be installed in the Utilities folder on your Mac.
echo If you do not yet have PackageMaker, please download it from the Apple Dev Center.
echo
echo If you need to download it:
echo open http://adcdownload.apple.com/Developer_Tools/auxiliary_tools_for_xcode__february_2012/auxiliary_tools_for_xcode.dmg
echo
echo If you already have it, just drag it into the Utilities folder since this is hard-coded in the script.
echo
exit 1
fi
# Node.js validation
# ------------------
if [ ! -f /usr/local/bin/node ]; then
echo Node.js is not installed on this machine.
echo Please download and install it from http://nodejs.org/
open http://nodejs.org/
exit 1
fi
export NODE_VERSION=`/usr/local/bin/node -v`
echo The current Node.js version we are shipping is $CURRENT_NODE_DISTRIBUTION_VERSION
if [ ! "$NODE_VERSION" = "$CURRENT_NODE_DISTRIBUTION_VERSION" ]; then
echo Your Node.js version $NODE_VERSION does not match the version to distribute.
echo Aborting package preparation.
exit 1
fi
# Ensure that all modules are present
# -----------------------------------
pushd ../../
echo Running npm update to make sure that all modules are present locally...
npm update
popd
# Tarball creation
# ----------------
scripts/createTarball.sh
# Node.js binary
# --------------
# Copy the OS node into our local out folder for packaging
cp /usr/local/bin/node out/
echo Copied your local Node.js binary version $NODE_VERSION into the output folder
# OS X Package creation
# ---------------------
echo Building "Windows Azure SDK.pkg"
/Applications/Utilities/PackageMaker.app/Contents/MacOS/PackageMaker --doc sdk.pmdoc --out "./out/Windows Azure SDK.pkg"
echo
echo The package has been built and can be found in the ./out/ folder.
open ./out
| egamma/azure-sdk-for-node | tools/osx-setup/build.sh | Shell | apache-2.0 | 2,249 |
#!/usr/bin/env bash
# Stop script if unbound variable found (use ${var:-} if intentional)
set -u
# Stop script if command returns non-zero exit code.
# Prevents hidden errors caused by missing error code propagation.
set -e
usage()
{
echo "Common settings:"
echo " --configuration <value> Build configuration: 'Debug' or 'Release' (short: -c)"
echo " --verbosity <value> Msbuild verbosity: q[uiet], m[inimal], n[ormal], d[etailed], and diag[nostic] (short: -v)"
echo " --binaryLog Create MSBuild binary log (short: -bl)"
echo ""
echo "Actions:"
echo " --restore Restore dependencies (short: -r)"
echo " --build Build all projects (short: -b)"
echo " --rebuild Rebuild all projects"
echo " --test Run all unit tests (short: -t)"
echo " --sign Sign build outputs"
echo " --publish Publish artifacts (e.g. symbols)"
echo " --pack Package build outputs into NuGet packages and Willow components"
echo " --help Print help and exit (short: -h)"
echo ""
echo "Advanced settings:"
echo " --projects <value> Project or solution file(s) to build"
echo " --ci Set when running on CI server"
echo " --prepareMachine Prepare machine for CI run, clean up processes after build"
echo " --nodeReuse <value> Sets nodereuse msbuild parameter ('true' or 'false')"
echo " --warnAsError <value> Sets warnaserror msbuild parameter ('true' or 'false')"
echo ""
echo "Command line arguments starting with '/p:' are passed through to MSBuild."
}
source="${BASH_SOURCE[0]}"
# resolve $source until the file is no longer a symlink
while [[ -h "$source" ]]; do
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
source="$(readlink "$source")"
# if $source was a relative symlink, we need to resolve it relative to the path where the
# symlink file was located
[[ $source != /* ]] && source="$scriptroot/$source"
done
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
restore=false
build=false
rebuild=false
test=false
pack=false
publish=false
integration_test=false
performance_test=false
sign=false
public=false
ci=false
warn_as_error=true
node_reuse=true
binary_log=false
projects=''
configuration='Debug'
prepare_machine=false
verbosity='minimal'
properties=''
while [[ $# -gt 0 ]]; do
opt="$(echo "$1" | awk '{print tolower($0)}')"
case "$opt" in
--help|-h)
usage
exit 0
;;
--configuration|-c)
configuration=$2
shift
;;
--verbosity|-v)
verbosity=$2
shift
;;
--binarylog|-bl)
binary_log=true
;;
--restore|-r)
restore=true
;;
--build|-b)
build=true
;;
--rebuild)
rebuild=true
;;
--pack)
pack=true
;;
--test|-t)
test=true
;;
--integrationtest)
integration_test=true
;;
--performancetest)
performance_test=true
;;
--sign)
sign=true
;;
--publish)
publish=true
;;
--preparemachine)
prepare_machine=true
;;
--projects)
projects=$2
shift
;;
--ci)
ci=true
;;
--warnaserror)
warn_as_error=$2
shift
;;
--nodereuse)
node_reuse=$2
shift
;;
/p:*|/m:*|/bl:*)
properties="$properties $1"
;;
*)
echo "Invalid argument: $1"
usage
exit 1
;;
esac
shift
done
if [[ "$ci" == true ]]; then
binary_log=true
node_reuse=false
fi
. "$scriptroot/tools.sh"
function InitializeCustomToolset {
local script="$eng_root/restore-toolset.sh"
if [[ -a "$script" ]]; then
. "$script"
fi
}
function Build {
InitializeToolset
InitializeCustomToolset
if [[ ! -z "$projects" ]]; then
properties="$properties /p:Projects=$projects"
fi
local bl=""
if [[ "$binary_log" == true ]]; then
bl="/bl:\"$log_dir/Build.binlog\""
fi
MSBuild $_InitializeToolset \
$bl \
/p:Configuration=$configuration \
/p:RepoRoot="$repo_root" \
/p:Restore=$restore \
/p:Build=$build \
/p:Rebuild=$rebuild \
/p:Test=$test \
/p:Pack=$pack \
/p:IntegrationTest=$integration_test \
/p:PerformanceTest=$performance_test \
/p:Sign=$sign \
/p:Publish=$publish \
$properties
ExitWithExitCode 0
}
# Import custom tools configuration, if present in the repo.
configure_toolset_script="$eng_root/configure-toolset.sh"
if [[ -a "$configure_toolset_script" ]]; then
. "$configure_toolset_script"
fi
# TODO: https://github.com/dotnet/arcade/issues/1468
# Temporary workaround to avoid breaking change.
# Remove once repos are updated.
if [[ -n "${useInstalledDotNetCli:-}" ]]; then
use_installed_dotnet_cli="$useInstalledDotNetCli"
fi
Build
| VSadov/roslyn | eng/common/build.sh | Shell | apache-2.0 | 5,023 |
lesson_title "Redirection"
test_redirecting_stdout_to_file() {
output_stdout > tmp/redirect_test.txt
local contents=$(cat tmp/redirect_test.txt)
assertEqual "$contents" __
}
test_redirecting_stderr_to_file() {
output_stderr 2> tmp/redirect_test2.txt
local contents=$(cat tmp/redirect_test2.txt)
assertEqual "$contents" __
}
test_redirecting_stdout_to_stderr() {
output_stdout 2> tmp/redirect_test3.txt 1>&2
local contents=$(cat tmp/redirect_test3.txt)
assertEqual "$contents" __
}
test_redirecting_stderr_to_stdout() {
output_stderr 1> tmp/redirect_test4.txt 2>&1
local contents=$(cat tmp/redirect_test4.txt)
assertEqual "$contents" __
}
test_redirecting_stdout_and_stderr_to_file() {
output_both 1> tmp/redirect_test5.txt 2> tmp/redirect_test6.txt
local contents5=$(cat tmp/redirect_test5.txt)
local contents6=$(cat tmp/redirect_test6.txt)
assertEqual "$contents5" __
assertEqual "$contents6" __
}
| nadavc/bash_koans | src/00_about_redirection.sh | Shell | mit | 953 |
#!/bin/sh
#FG_ROOT=
if [ -n "$1" ]; then
FG_ROOT="$1"
fi
if [ -z "${FG_ROOT}" ]; then
echo "Please set FG_ROOT or pass a FG_ROOT path"
exit 1
fi
if [ ! -d "${FG_ROOT}" ]; then
echo "${FG_ROOT} is not a directory."
exit 1
fi
APT=${FG_ROOT}/Airports/apt.dat.gz
NAV=${FG_ROOT}/Navaids/nav.dat.gz
FIX=${FG_ROOT}/Navaids/fix.dat.gz
AWY=${FG_ROOT}/Navaids/awy.dat.gz
TACAN=${FG_ROOT}/Navaids/TACAN_freq.dat.gz
if [ ! -f ${APT} ]; then
echo "${APT} doesn't exist."
exit 1
fi
if [ ! -f ${NAV} ]; then
echo "${NAV} doesn't exist."
exit 1
fi
if [ ! -f ${FIX} ]; then
echo "${FIX} doesn't exist."
exit 1
fi
if [ ! -f ${AWY} ]; then
echo "${AWY} doesn't exist."
exit 1
fi
if [ ! -f ${TACAN} ]; then
echo "${TACAN} doesn't exist."
exit 1
fi
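# Each dataset is gunzipped, transcoded latin1 -> utf8, and piped through its matching *2sql converter.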
echo Processing ${APT}
zcat ${APT} | iconv -f latin1 -t utf8 | ./apt2sql > apt.sql
echo Processing ${NAV}
zcat ${NAV} | iconv -f latin1 -t utf8 | ./nav2sql > nav.sql
echo Processing ${FIX}
zcat ${FIX} | iconv -f latin1 -t utf8 | ./fix2sql > fix.sql
echo Processing ${AWY}
zcat ${AWY} | iconv -f latin1 -t utf8 | ./awy2sql > awy.sql
echo Processing ${TACAN}
zcat ${TACAN} | iconv -f latin1 -t utf8 | ./tacan2sql > tacan.sql
| FlightGear/fgmap | scripts/all2sql.sh | Shell | gpl-2.0 | 1,236 |
#!/bin/sh
#
# Copyright (C) 2013 OpenWrt.org
#
MPC85XX_BOARD_NAME=
MPC85XX_MODEL=
mpc85xx_board_detect() {
local model
local name
model=$(awk 'BEGIN{FS="[ \t]+:[ \t]"} /model/ {print $2}' /proc/cpuinfo)
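# Pull the 'model' field out of /proc/cpuinfo; it identifies the board below.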
case "$model" in
*"HiveAP-330")
name="hiveap-330"
;;
*"TL-WDR4900 v1")
name="tl-wdr4900-v1"
;;
esac
[ -z "$name" ] && name="unknown"
[ -z "$MPC85XX_BOARD_NAME" ] && MPC85XX_BOARD_NAME="$name"
[ -z "$MPC85XX_MODEL" ] && MPC85XX_MODEL="$model"
[ -e "/tmp/sysinfo/" ] || mkdir -p "/tmp/sysinfo/"
echo "$MPC85XX_BOARD_NAME" > /tmp/sysinfo/board_name
echo "$MPC85XX_MODEL" > /tmp/sysinfo/model
}
| SolidRun/lede-project | target/linux/mpc85xx/base-files/lib/mpc85xx.sh | Shell | gpl-2.0 | 623 |
#!/usr/bin/env bash
# This file is part of The RetroPie Project
#
# The RetroPie Project is the legal property of its developers, whose names are
# too numerous to list here. Please refer to the COPYRIGHT.md file distributed with this source.
#
# See the LICENSE.md file at the top-level directory of this distribution and
# at https://raw.githubusercontent.com/RetroPie/RetroPie-Setup/master/LICENSE.md
#
rp_module_id="minecraft"
rp_module_desc="Minecraft - Pi Edition"
rp_module_section="exp"
rp_module_flags="!mali !x86"
function depends_minecraft() {
getDepends xorg matchbox
}
function install_bin_minecraft() {
[[ -f "$md_inst/minecraft-pi" ]] && rm -rf "$md_inst/"*
aptInstall minecraft-pi
}
function remove_minecraft() {
aptRemove minecraft-pi
}
function configure_minecraft() {
addPort "$md_id" "minecraft" "Minecraft" "xinit $md_inst/Minecraft.sh"
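# Minecraft Pi needs an X session: disable screen blanking, start a bare matchbox WM, then launch the binary.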
cat >"$md_inst/Minecraft.sh" << _EOF_
#!/bin/bash
xset -dpms s off s noblank
matchbox-window-manager &
/usr/bin/minecraft-pi
_EOF_
chmod +x "$md_inst/Minecraft.sh"
}
| j-r0dd/RetroPie-Setup | scriptmodules/ports/minecraft.sh | Shell | gpl-3.0 | 1,059 |
#!/bin/bash
#
# Copyright 2016 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x
set -o errexit
if [ -z "$A8_TEST_DOCKER" ]; then
A8_TEST_DOCKER="true"
fi
#if [ -z "$A8_TEST_K8S" ]; then
# A8_TEST_K8S="true"
#fi
SCRIPTDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
$SCRIPTDIR/build-scripts/build-amalgam8.sh
if [ "$A8_TEST_DOCKER" == "true" ]; then
$SCRIPTDIR/docker/test-docker.sh
fi
if [ "$A8_TEST_K8S" == "true" ]; then
$SCRIPTDIR/kubernetes/test-kubernetes.sh
fi
| craigyam/amalgam8 | testing/build_and_run.sh | Shell | apache-2.0 | 1,032 |
#!/usr/bin/env bash
# Shutdown script for azkaban solo server
set -o nounset
source "$(dirname $0)/util.sh"
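# util.sh provides the kill_process_with_retry helper used below.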
installdir="$(dirname $0)/.."
maxattempt=3
pid=`cat ${installdir}/currentpid`
pname="solo server"
kill_process_with_retry "${pid}" "${pname}" "${maxattempt}" && rm -f ${installdir}/currentpid
| tianjianinline/azkaban-psbc | azkaban-solo-server/src/main/bash/azkaban-solo-shutdown.sh | Shell | apache-2.0 | 302 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Use /etc/os-release to determine Linux Distro
if [ -f /etc/os-release ]; then
. /etc/os-release
else
if [ -f /etc/redhat-release ]; then
if grep "CentOS release 6" /etc/redhat-release >/dev/null ; then
ID=centos
VERSION_ID=6
fi
else
echo "Unknown Linux Distribution."
exit 1
fi
fi
case ${ID}-${VERSION_ID} in
fedora-20*)
# Work around issue in fedora:20 docker image
yum -y install yum-utils; yum-config-manager --enable updates-testing
rpm -ivh https://yum.puppetlabs.com/puppetlabs-release-fedora-20.noarch.rpm
yum -y update
yum -y install hostname curl sudo unzip wget puppet
;;
ubuntu-14.04)
# BIGTOP-2003. A workaround to install newer hiera to get rid of hiera 1.3.0 bug.
apt-get -y install wget
wget -O /tmp/puppetlabs-release-trusty.deb https://apt.puppetlabs.com/puppetlabs-release-trusty.deb && dpkg -i /tmp/puppetlabs-release-trusty.deb
rm -f /tmp/puppetlabs-release-trusty.deb
apt-get update
apt-get -y install curl sudo unzip puppet software-properties-common
;;
ubuntu-15*)
apt-get update
apt-get -y install curl sudo unzip wget puppet software-properties-common
;;
debian-8*)
apt-get update
apt-get -y install curl sudo unzip wget puppet
;;
opensuse-13.2)
zypper --gpg-auto-import-keys install -y curl sudo unzip wget puppet suse-release ca-certificates-mozilla net-tools tar
;;
centos-6*)
rpm -ivh http://yum.puppetlabs.com/puppetlabs-release-el-6.noarch.rpm
yum -y install curl sudo unzip wget puppet tar
;;
centos-7*)
rpm -ivh http://yum.puppetlabs.com/puppetlabs-release-el-7.noarch.rpm
yum -y install hostname curl sudo unzip wget puppet
;;
*)
echo "Unsupported OS ${ID}-${VERSION_ID}."
exit 1
esac
| minggLu/bigtop | bigtop_toolchain/bin/puppetize.sh | Shell | apache-2.0 | 2,534 |
# Set passwords
# Set passwords
function kibanapw() {
  if [ $# -lt 2 ]; then
    echo -e "Usage: kibanapw USER PASSWORD\nUsers will be added to /etc/lighttpd/rock-htpasswd.user"
  else
    egrep "^${1}:" /etc/lighttpd/rock-htpasswd.user > /dev/null 2>&1
    if [[ $? -eq 0 ]]; then
      sudo sed -i "/${1}\:/d" /etc/lighttpd/rock-htpasswd.user
    fi
    printf "${1}:$(echo ${2} | openssl passwd -apr1 -stdin)\n" | sudo tee -a /etc/lighttpd/rock-htpasswd.user > /dev/null 2>&1
  fi
}
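# Example usage (hypothetical credentials): kibanapw kibana 'S3cretPass'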
| CyberAnalyticDevTeam/SimpleRock | roles/kibana/files/profile.d-kibanapw.sh | Shell | bsd-3-clause | 438 |
#!/usr/bin/env bash
clean_slds_files() {
rm -fv $1/assets/fonts/License-for-font.txt
rm -fv $1/assets/fonts/*.ttf
rm -fv $1/assets/fonts/webfonts/*.eot
rm -fv $1/assets/fonts/webfonts/*.svg
rm -fv $1/assets/fonts/webfonts/SalesforceSans-Thin.woff
rm -fv $1/assets/fonts/webfonts/SalesforceSans-Thin.woff2
rm -fv $1/assets/fonts/webfonts/SalesforceSans-ThinItalic.woff
rm -fv $1/assets/fonts/webfonts/SalesforceSans-ThinItalic.woff2
rm -fv $1/assets/icons/action/*.png
rm -fv $1/assets/icons/action-sprite/symbols.html
rm -fv $1/assets/icons/custom/*.png
rm -fv $1/assets/icons/custom-sprite/symbols.html
rm -fv $1/assets/icons/doctype/*.png
rm -fv $1/assets/icons/doctype-sprite/symbols.html
rm -fv $1/assets/icons/standard/*.png
rm -fv $1/assets/icons/standard-sprite/symbols.html
rm -fv $1/assets/icons/utility/*.png
rm -fv $1/assets/icons/utility-sprite/symbols.html
rm -fv $1/assets/icons/README
rm -fv $1/assets/icons/License-for-icons.txt
rm -fv $1/assets/images/License-for-images.txt
rm -fv $1/assets/styles/salesforce-lightning-design-system-ltng.css
rm -fv $1/assets/styles/salesforce-lightning-design-system-scoped.css
rm -fv $1/assets/styles/salesforce-lightning-design-system-vf.css
rm -fv $1/assets/styles/salesforce-lightning-design-system.css
rm -fv $1/README.md
rm -rfv $1/scss
rm -rfv $1/swatches
}
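# Stage both SLDS versions in a temp dir, strip the unused assets above, then zip them into a single static resource.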
SLDS_TMPDIR=`mktemp -d -t sldsclean.XXXXXXXX`
unzip -d $SLDS_TMPDIR/0_12_2 salesforce-lightning-design-system-0.12.2.zip
unzip -d $SLDS_TMPDIR/1_0_3 salesforce-lightning-design-system-1.0.3.zip
clean_slds_files $SLDS_TMPDIR/0_12_2
clean_slds_files $SLDS_TMPDIR/1_0_3
rm -fv $SLDS_TMPDIR/1_0_3/package.json
SLDS_OUTDIR=`pwd`
pushd $SLDS_TMPDIR
zip -r $SLDS_OUTDIR/SLDS.resource 0_12_2 1_0_3
popd
| cdcarter/Cumulus | scripts/make_clean_slds_resource.sh | Shell | bsd-3-clause | 1,830 |
#!/bin/sh
cd $(dirname $0)
node -pe "'Name: ' + require('../index').getPlatform().name"
node -pe "'Version: ' + require('../index').getPlatform().bonescript"
node -pe "require('../index').digitalRead('P8_19')"
| beagleboard/bonescript | test/TODO/basic_sanity.sh | Shell | mit | 210 |
#! /bin/sh
PATH="$(dirname "$0")/../Resources":$PATH
gorunner goimports
| ascarter/Go.bbpackage | src/Text Filters/goimports.sh | Shell | mit | 73 |
#!/bin/sh
cp package.json import_today_ext/package.json.backup
cp import_today_ext/ta.empty.package.json package.json
ionic state reset
cordova plugin add https://github.com/DavidStrausz/cordova-plugin-today-widget.git
cp config.xml import_today_ext/config.xml.backup
cp import_today_ext/ta.import.config.xml config.xml
cordova platform rm ios; cordova platform add ios
#cordova plugin rm cordova-plugin-today-widget
#cp -a ../ta.ios/TodayAir/*.lproj platforms/ios/TodayAir/
#mv import_today_ext/package.json.backup package.json
#mv import_today_ext/config.xml.backup config.xml
#open platforms/ios/TodayAir.xcodeproj
| kimalec/TodayWeather | client/import_today_ext/ta.run.sh | Shell | mit | 617 |
#! /usr/bin/env bash
#--------------------
# Standard options:
export COMMON_FF_CFG_FLAGS=
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --prefix=PREFIX"
# Licensing options:
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-gpl"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-version3"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-nonfree"
# Configuration options:
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-static"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-shared"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-small"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-runtime-cpudetect"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-gray"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-swscale-alpha"
# Program options:
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-programs"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-ffmpeg"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-ffplay"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-ffprobe"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-ffserver"
# Documentation options:
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-doc"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-htmlpages"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-manpages"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-podpages"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-txtpages"
# Component options:
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-avdevice"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-avcodec"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-avformat"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-avutil"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-swresample"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-swscale"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-postproc"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-avfilter"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-avresample"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-pthreads"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-w32threads"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-os2threads"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-network"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-dct"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-dwt"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-lsp"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-lzo"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-mdct"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-rdft"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-fft"
# Hardware accelerators:
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-dxva2"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-vaapi"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-vda"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-vdpau"
# Individual component options:
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-everything"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-encoders"
# ./configure --list-decoders
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-decoders"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=aac"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=aac_latm"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=ac3"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=flv"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=h263"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=h263i"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=h263p"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=h264"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=mp3*"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=vc1"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=vorbis"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=vp6"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=vp6a"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=vp6f"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=vp8"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-decoder=webp"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-hwaccels"
# ./configure --list-muxers
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-muxers"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-muxer=mpegts"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-muxer=mp4"
# ./configure --list-demuxers
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-demuxers"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=aac"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=ac3"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=concat"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=data"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=flv"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=hls"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=latm"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=live_flv"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=loas"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=m4v"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=mov"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=mp3"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=mpegps"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=mpegts"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-demuxer=mpegvideo"
# ./configure --list-parsers
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-parsers"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-parser=aac"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-parser=aac_latm"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-parser=ac3"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-parser=h263"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-parser=h264"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-parser=vc1"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-parser=vorbis"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-parser=vp8"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-parser=vp9"
# ./configure --list-bsf
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-bsfs"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-bsf=mjpeg2jpeg"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-bsf=mjpeg2jpeg"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-bsf=mjpega_dump_header"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-bsf=mov2textsub"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-bsf=text2movsub"
# ./configure --list-protocols
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-protocols"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=bluray"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=ffrtmpcrypt"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-protocol=ffrtmphttp"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=gopher"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=librtmp*"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=libssh"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=mmsh"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=mmst"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=pipe"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=rtmp*"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-protocol=rtmp"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-protocol=rtmpt"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=rtp"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=sctp"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=srtp"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-protocol=unix"
#
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-devices"
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-filters"
# External library support:
export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-iconv"
# ...
# Advanced options (experts only):
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --cross-prefix=${FF_CROSS_PREFIX}-"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-cross-compile"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --sysroot=PATH"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --sysinclude=PATH"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --target-os=TAGET_OS"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --target-exec=CMD"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --target-path=DIR"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --toolchain=NAME"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --nm=NM"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --ar=AR"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --as=AS"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --yasmexe=EXE"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --cc=CC"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --cxx=CXX"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --dep-cc=DEPCC"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --ld=LD"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --host-cc=HOSTCC"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --host-cflags=HCFLAGS"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --host-cppflags=HCPPFLAGS"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --host-ld=HOSTLD"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --host-ldflags=HLDFLAGS"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --host-libs=HLIBS"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --host-os=OS"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --extra-cflags=ECFLAGS"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --extra-cxxflags=ECFLAGS"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --extra-ldflags=ELDFLAGS"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --extra-libs=ELIBS"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --extra-version=STRING"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --optflags=OPTFLAGS"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --build-suffix=SUFFIX"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --malloc-prefix=PREFIX"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --progs-suffix=SUFFIX"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --arch=ARCH"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --cpu=CPU"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-pic"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-sram"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-thumb"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-symver"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-hardcoded-tables"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-safe-bitstream-reader"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-memalign-hack"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-lto"
# Optimization options (experts only):
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-asm"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-altivec"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-amd3dnow"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-amd3dnowext"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-mmx"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-mmxext"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-sse"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-sse2"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-sse3"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-ssse3"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-sse4"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-sse42"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-avx"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-fma4"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-armv5te"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-armv6"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-armv6t2"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-vfp"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-neon"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-vis"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-inline-asm"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-yasm"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-mips32r2"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-mipsdspr1"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-mipsdspr2"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-mipsfpu"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-fast-unaligned"
# Developer options (useful when working on FFmpeg itself):
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-coverage"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-debug"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-debug=LEVEL"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-optimizations"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-extra-warnings"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-stripping"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --assert-level=level"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-memory-poisoning"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --valgrind=VALGRIND"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-ftrapv"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --samples=PATH"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-xmm-clobber-test"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-random"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-random"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --enable-random=LIST"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --disable-random=LIST"
# export COMMON_FF_CFG_FLAGS="$COMMON_FF_CFG_FLAGS --random-seed=VALUE"
| abirjepatil/iOS | config/module-lite.sh | Shell | gpl-2.0 | 15,079 |
#!/bin/bash
# testsuite for testlib
if [ -n "$PBUILDER_CHECKOUT" ]; then
. "$PBUILDER_CHECKOUT/testlib.sh"
else
# these currently don't need to be exported
PBUILDER_TEST_ROOT="${PBUILDER_ROOT:-}"
PBUILDER_TEST_PKGLIBDIR="${PBUILDER_PKGLIBDIR:-$PBUILDER_ROOT/usr/lib/pbuilder}"
. "$PBUILDER_TEST_PKGLIBDIR/testlib.sh"
fi
test_success() {
exit 0
}
test_fail() {
exit 1
}
test_options() {
echo "$@"
exit 1
}
test_output() {
echo "$@"
}
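# Exercise the harness: expect_success/expect_fail assert on exit codes, expect_output on captured stdout.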
expect_success test_success
expect_fail test_fail
expect_fail test_options "hello world"
expect_output "foo bar" test_output "foo" "bar"
testlib_summary
| chensuchun/pbuilder | test_testlib.sh | Shell | gpl-2.0 | 632 |
#!/bin/sh
TEST_SCRIPT=./VMake/executableTester.sh
until test -r ${TEST_SCRIPT} ; do
TEST_SCRIPT=../${TEST_SCRIPT}
done
. ${TEST_SCRIPT}
runAndHandleSystemTest "testComponentMetadata " "$0" "$@"
| bmi-forum/bmi-pyre | StGermain/Base/Automation/tests/testComponentMetadata.0of1.sh | Shell | gpl-2.0 | 204 |
#!/bin/bash
#================
# FILE : config.sh
#----------------
# PROJECT : openSUSE KIWI Image System
# COPYRIGHT : (c) 2006 SUSE LINUX Products GmbH. All rights reserved
# :
# AUTHOR : Marcus Schaefer <ms@suse.de>
# :
# BELONGS TO : Operating System images
# :
# DESCRIPTION : configuration script for SUSE based
# : operating systems
# :
# :
# STATUS : BETA
#----------------
#======================================
# Functions...
#--------------------------------------
test -f /.kconfig && . /.kconfig
test -f /.profile && . /.profile
#======================================
# Greeting...
#--------------------------------------
echo "Configure image: [$kiwi_iname]..."
#======================================
# Setup baseproduct link
#--------------------------------------
suseSetupProduct
#======================================
# Activate services
#--------------------------------------
suseActivateDefaultServices
suseInsertService boot.device-mapper
#======================================
# SuSEconfig
#--------------------------------------
suseConfig
#======================================
# Umount kernel filesystems
#--------------------------------------
baseCleanMount
exit 0
| rjschwei/kiwi | doc/examples/suse-13.1/suse-vm-guest/config.sh | Shell | gpl-2.0 | 1,327 |
#!/bin/sh
# Make sure cp --attributes-only doesn't truncate existing data
# Copyright 2012-2014 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ cp
printf '1' > file1
printf '2' > file2
printf '2' > file2.exp
cp --attributes-only file1 file2 || fail=1
cmp file2 file2.exp || fail=1
Exit $fail
| Distrotech/coreutils | tests/cp/attr-existing.sh | Shell | gpl-3.0 | 975 |
#!/bin/bash
#
# Build file to set up and run tests
set -ex
# Install Bazel 4.0.0.
use_bazel.sh 4.0.0
bazel version
# Print bazel testlogs to stdout when tests failed.
function print_test_logs {
# TODO(yannic): Only print logs of failing tests.
testlogs_dir=$(bazel info bazel-testlogs)
testlogs=$(find "${testlogs_dir}" -name "*.log")
for log in $testlogs; do
cat "${log}"
done
}
# Change to repo root
cd $(dirname $0)/../../..
git submodule update --init --recursive
# Disabled for now, re-enable if appropriate.
# //:build_files_updated_unittest \
trap print_test_logs EXIT
bazel test -k --copt=-Werror --host_copt=-Werror \
//java:tests \
//:protoc \
//:protobuf \
//:protobuf_python \
//:protobuf_test \
@com_google_protobuf//:cc_proto_blacklist_test
trap - EXIT
pushd examples
bazel build //...
popd
# Verify that we can build successfully from generated tar files.
./autogen.sh && ./configure && make -j$(nproc) dist
DIST=`ls *.tar.gz`
tar -xf $DIST
cd ${DIST//.tar.gz}
bazel build //:protobuf //:protobuf_java
| grpc/grpc-ios | native_src/third_party/protobuf/kokoro/linux/bazel/build.sh | Shell | apache-2.0 | 1,053 |
#!/bin/sh
#/**
# @@@ START COPYRIGHT @@@
#
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
#
# @@@ END COPYRIGHT @@@
# */
# This is used for starting multiple servers on the same machine.
# run it from 'bin/wms'
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin" >/dev/null && pwd`
if [ $# -lt 2 ]; then
S=`basename "${BASH_SOURCE-$0}"`
echo "Usage: $S [start|stop] offset(s)"
echo ""
echo " e.g. $S start 1 2"
exit
fi
export WMS_SERVER_OPTS=" "
run_server () {
DN=$2
export WMS_IDENT_STRING="$USER-$DN"
"$bin"/wms-daemon.sh $1 server
}
cmd=$1
shift;
for i in $*
do
run_server $cmd $i
done
| apache/incubator-trafodion | wms/bin/local-servers.sh | Shell | apache-2.0 | 1,338 |
rm *.so
export PATH=../../../../scripts:$PATH
export PYTHONPATH=../../../..
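# Baseline: derive a plain-'sum' variant (ssum0.py) so the first timeit measures pure Python/NumPy before the Pythran builds.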
sed 's/vsum/sum/' sum0.py > ssum0.py
python -m timeit -s 'from ssum0 import sum0 as s; import numpy as np ; r = np.random.rand(1000000)' 's(r)'
rm -f ssum0.py
pythran -O2 sum0.py
python -m timeit -s 'from sum0 import sum0 as s; import numpy as np ; r = np.random.rand(1000000)' 's(r)'
pythran -O2 -DUSE_BOOST_SIMD -march=native sum0.py
python -m timeit -s 'from sum0 import sum0 as s; import numpy as np ; r = np.random.rand(1000000)' 's(r)'
| serge-sans-paille/pythran | docs/papers/wpmvp14/experiments/run_xp_sum0.sh | Shell | bsd-3-clause | 518 |
#! /bin/sh
# Check that compressed modules work correctly; based on 02simple.sh
[ -n "$CONFIG_HAVE_ZLIB" ] || exit 0
for ENDIAN in $TEST_ENDIAN; do
for BITNESS in $TEST_BITS; do
rm -rf tests/tmp/*
# Copy modules instead of linking, so we can compress them
MODULE_DIR=tests/tmp/lib/modules/$MODTEST_UNAME
mkdir -p $MODULE_DIR
cp tests/data/$BITNESS$ENDIAN/normal/export_dep-$BITNESS.ko \
tests/data/$BITNESS$ENDIAN/normal/noexport_dep-$BITNESS.ko \
tests/data/$BITNESS$ENDIAN/normal/export_nodep-$BITNESS.ko \
tests/data/$BITNESS$ENDIAN/normal/noexport_nodep-$BITNESS.ko \
tests/data/$BITNESS$ENDIAN/normal/noexport_doubledep-$BITNESS.ko \
$MODULE_DIR
gzip `find $MODULE_DIR -name '*.ko'`
touch tests/tmp/empty
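# The empty file stands in for System.map in the 'depmod -e -F /empty' invocations below.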
# First time, there is no modules.dep, so it will be generated.
# Expect no output.
[ "`depmod -A 2>&1`" = "" ]
# Check modules.dep results: expect 5 lines
[ `grep -vc '^#' < $MODULE_DIR/modules.dep` = 5 ]
[ "`grep -w export_dep-$BITNESS.ko.gz: $MODULE_DIR/modules.dep`" = "export_dep-$BITNESS.ko.gz: export_nodep-$BITNESS.ko.gz" ]
[ "`grep -w noexport_dep-$BITNESS.ko.gz: $MODULE_DIR/modules.dep`" = "noexport_dep-$BITNESS.ko.gz: export_nodep-$BITNESS.ko.gz" ]
[ "`grep -w export_nodep-$BITNESS.ko.gz: $MODULE_DIR/modules.dep`" = "export_nodep-$BITNESS.ko.gz:" ]
[ "`grep -w noexport_nodep-$BITNESS.ko.gz: $MODULE_DIR/modules.dep`" = "noexport_nodep-$BITNESS.ko.gz:" ]
[ "`grep -w noexport_doubledep-$BITNESS.ko.gz: $MODULE_DIR/modules.dep`" = "noexport_doubledep-$BITNESS.ko.gz: export_dep-$BITNESS.ko.gz export_nodep-$BITNESS.ko.gz" ]
# Check modules.symbols results: expect 3 lines
[ `grep -vc '^#' < $MODULE_DIR/modules.symbols` = 3 ]
[ "`grep -w symbol:exported1 $MODULE_DIR/modules.symbols`" = "alias symbol:exported1 export_nodep_$BITNESS" ]
[ "`grep -w symbol:exported2 $MODULE_DIR/modules.symbols`" = "alias symbol:exported2 export_nodep_$BITNESS" ]
[ "`grep -w symbol:exported3 $MODULE_DIR/modules.symbols`" = "alias symbol:exported3 export_dep_$BITNESS" ]
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
# Synonyms
[ "`depmod $MODTEST_UNAME`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
[ "`depmod -a`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
[ "`depmod -a $MODTEST_UNAME`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
[ "`depmod -A`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
[ "`depmod -A $MODTEST_UNAME`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
[ "`depmod -e -F /empty -A`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
[ "`depmod -e -F /empty -A $MODTEST_VERSION`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
[ "`depmod --all`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
[ "`depmod --quick`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
[ "`depmod -e -F /empty --quick`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
[ "`depmod -e -F /empty --quick $MODTEST_VERSION`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
[ "`depmod --errsyms -F /empty --quick`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
[ "`depmod --errsyms -F /empty --quick $MODTEST_VERSION`" = "" ]
diff -u $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.dep >/dev/null
mv $MODULE_DIR/modules.dep $MODULE_DIR/modules.dep.old
diff -u $MODULE_DIR/modules.symbols.old $MODULE_DIR/modules.symbols >/dev/null
mv $MODULE_DIR/modules.symbols $MODULE_DIR/modules.symbols.old
# Combined should form stdout versions.
grep -vh '^#' $MODULE_DIR/modules.dep.old $MODULE_DIR/modules.symbols.old > $MODULE_DIR/modules.all.old
# Stdout versions.
depmod -n | grep -v '^#' > $MODULE_DIR/modules.all
diff -u $MODULE_DIR/modules.all.old $MODULE_DIR/modules.all >/dev/null
mv $MODULE_DIR/modules.all $MODULE_DIR/modules.all.old
depmod -a -n | grep -v '^#' > $MODULE_DIR/modules.all
diff -u $MODULE_DIR/modules.all.old $MODULE_DIR/modules.all >/dev/null
mv $MODULE_DIR/modules.all $MODULE_DIR/modules.all.old
depmod -n -a $MODTEST_VERSION | grep -v '^#' > $MODULE_DIR/modules.all
diff -u $MODULE_DIR/modules.all.old $MODULE_DIR/modules.all >/dev/null
mv $MODULE_DIR/modules.all $MODULE_DIR/modules.all.old
depmod -e -F /empty -n -A $MODTEST_VERSION | grep -v '^#' > $MODULE_DIR/modules.all
diff -u $MODULE_DIR/modules.all.old $MODULE_DIR/modules.all >/dev/null
mv $MODULE_DIR/modules.all $MODULE_DIR/modules.all.old
done
done
| pexip/os-module-init-tools | tests/test-depmod/12compressed.sh | Shell | gpl-2.0 | 7,085 |
$ cd ./cassandra/data/data/catalogkeyspace/journal-296a2d30c22a11e9b1350d927649052c/snapshots && ls -l
| belliottsmith/cassandra | doc/modules/cassandra/examples/BASH/find_two_snapshots.sh | Shell | apache-2.0 | 103 |
MACHINE=
SCRIPT_NAME=elf
OUTPUT_FORMAT="elf32-littlearm"
BIG_OUTPUT_FORMAT="elf32-bigarm"
LITTLE_OUTPUT_FORMAT="elf32-littlearm"
TEXT_START_ADDR=0x8000
TEMPLATE_NAME=elf32
EXTRA_EM_FILE=armelf
OTHER_TEXT_SECTIONS='*(.glue_7t) *(.glue_7) *(.vfp11_veneer)'
OTHER_BSS_SYMBOLS='__bss_start__ = .;'
OTHER_BSS_END_SYMBOLS='_bss_end__ = . ; __bss_end__ = . ;'
OTHER_END_SYMBOLS='__end__ = . ;'
OTHER_SECTIONS='.note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }'
ATTRS_SECTIONS='.ARM.attributes 0 : { KEEP (*(.ARM.attributes)) KEEP (*(.gnu.attributes)) }'
OTHER_READONLY_SECTIONS="
.ARM.extab ${RELOCATING-0} : { *(.ARM.extab${RELOCATING+* .gnu.linkonce.armextab.*}) }
${RELOCATING+ __exidx_start = .; }
.ARM.exidx ${RELOCATING-0} : { *(.ARM.exidx${RELOCATING+* .gnu.linkonce.armexidx.*}) }
${RELOCATING+ __exidx_end = .; }"
DATA_START_SYMBOLS='__data_start = . ;';
GENERATE_SHLIB_SCRIPT=yes
ARCH=arm
MACHINE=
MAXPAGESIZE="CONSTANT (MAXPAGESIZE)"
ENTRY=_start
EMBEDDED=yes
# This sets the stack to the top of the simulator memory (2^19 bytes).
STACK_ADDR=0x80000
# ARM does not support .s* sections.
NO_SMALL_DATA=yes
| seank/FreeScale-s12x-binutils-jm | ld/emulparams/armelf.sh | Shell | gpl-2.0 | 1,130 |
#!/bin/sh
#
# This test is for checking network interface
# For the moment it tests only ethernet interface (but wifi could be easily added)
#
# We assume that all network driver are loaded
# if not they probably have failed earlier in the boot process and their logged error will be catched by another test
#
# this function will try to up the interface
# if already up, nothing done
# arg1: network interface name
kci_net_start()
{
netdev=$1
ip link show "$netdev" |grep -q UP
if [ $? -eq 0 ];then
echo "SKIP: $netdev: interface already up"
return 0
fi
ip link set "$netdev" up
if [ $? -ne 0 ];then
echo "FAIL: $netdev: Fail to up interface"
return 1
else
echo "PASS: $netdev: set interface up"
NETDEV_STARTED=1
fi
return 0
}
# this function will try to setup an IP and MAC address on a network interface
# Doing nothing if the interface was already up
# arg1: network interface name
kci_net_setup()
{
netdev=$1
# do nothing if the interface was already up
if [ $NETDEV_STARTED -eq 0 ];then
return 0
fi
MACADDR='02:03:04:05:06:07'
ip link set dev $netdev address "$MACADDR"
if [ $? -ne 0 ];then
echo "FAIL: $netdev: Cannot set MAC address"
else
ip link show $netdev |grep -q "$MACADDR"
if [ $? -eq 0 ];then
echo "PASS: $netdev: set MAC address"
else
echo "FAIL: $netdev: Cannot set MAC address"
fi
fi
#check that the interface did not already have an IP
ip address show "$netdev" |grep '^[[:space:]]*inet'
if [ $? -eq 0 ];then
echo "SKIP: $netdev: already have an IP"
return 0
fi
# TODO what ipaddr to set ? DHCP ?
echo "SKIP: $netdev: set IP address"
return 0
}
# test an ethtool command
# arg1: return code for not supported (see ethtool code source)
# arg2: summary of the command
# arg3: command to execute
kci_netdev_ethtool_test()
{
if [ $# -le 2 ];then
echo "SKIP: $netdev: ethtool: invalid number of arguments"
return 1
fi
$3 >/dev/null
ret=$?
if [ $ret -ne 0 ];then
if [ $ret -eq "$1" ];then
echo "SKIP: $netdev: ethtool $2 not supported"
else
echo "FAIL: $netdev: ethtool $2"
return 1
fi
else
echo "PASS: $netdev: ethtool $2"
fi
return 0
}
# test ethtool commands
# arg1: network interface name
kci_netdev_ethtool()
{
netdev=$1
#check presence of ethtool
ethtool --version 2>/dev/null >/dev/null
if [ $? -ne 0 ];then
echo "SKIP: ethtool not present"
return 1
fi
TMP_ETHTOOL_FEATURES="$(mktemp)"
if [ ! -e "$TMP_ETHTOOL_FEATURES" ];then
echo "SKIP: Cannot create a tmp file"
return 1
fi
ethtool -k "$netdev" > "$TMP_ETHTOOL_FEATURES"
if [ $? -ne 0 ];then
echo "FAIL: $netdev: ethtool list features"
rm "$TMP_ETHTOOL_FEATURES"
return 1
fi
echo "PASS: $netdev: ethtool list features"
#TODO for each non fixed features, try to turn them on/off
rm "$TMP_ETHTOOL_FEATURES"
kci_netdev_ethtool_test 74 'dump' "ethtool -d $netdev"
kci_netdev_ethtool_test 94 'stats' "ethtool -S $netdev"
return 0
}
# stop a netdev
# arg1: network interface name
kci_netdev_stop()
{
netdev=$1
if [ $NETDEV_STARTED -eq 0 ];then
echo "SKIP: $netdev: interface kept up"
return 0
fi
ip link set "$netdev" down
if [ $? -ne 0 ];then
echo "FAIL: $netdev: stop interface"
return 1
fi
echo "PASS: $netdev: stop interface"
return 0
}
# run all test on a netdev
# arg1: network interface name
kci_test_netdev()
{
NETDEV_STARTED=0
IFACE_TO_UPDOWN="$1"
IFACE_TO_TEST="$1"
#check for VLAN interface
MASTER_IFACE="$(echo $1 | cut -d@ -f2)"
if [ ! -z "$MASTER_IFACE" ];then
IFACE_TO_UPDOWN="$MASTER_IFACE"
IFACE_TO_TEST="$(echo $1 | cut -d@ -f1)"
fi
NETDEV_STARTED=0
kci_net_start "$IFACE_TO_UPDOWN"
kci_net_setup "$IFACE_TO_TEST"
kci_netdev_ethtool "$IFACE_TO_TEST"
kci_netdev_stop "$IFACE_TO_UPDOWN"
return 0
}
#check for needed privileges
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
exit 0
fi
ip link show 2>/dev/null >/dev/null
if [ $? -ne 0 ];then
echo "SKIP: Could not run test without the ip tool"
exit 0
fi
TMP_LIST_NETDEV="$(mktemp)"
if [ ! -e "$TMP_LIST_NETDEV" ];then
echo "FAIL: Cannot create a tmp file"
exit 1
fi
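# Collect candidate interface names (ethN / enpXsY) from 'ip link show' into the temp list.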
ip link show |grep '^[0-9]' | grep -oE '[[:space:]].*eth[0-9]*:|[[:space:]].*enp[0-9]s[0-9]:' | cut -d\ -f2 | cut -d: -f1> "$TMP_LIST_NETDEV"
while read netdev
do
kci_test_netdev "$netdev"
done < "$TMP_LIST_NETDEV"
rm "$TMP_LIST_NETDEV"
exit 0
| uoaerg/linux-dccp | tools/testing/selftests/net/netdevice.sh | Shell | gpl-2.0 | 4,346 |
#!/bin/sh
TZ=GMT export TZ
enckey=0x4043434545464649494a4a4c4c4f4f515152525454575758
authkey=0x87658765876587658765876587658765
ipsec spi --del --af inet --edst 205.150.200.252 --spi 0x12345678 --proto esp
ipsec spi --del --af inet --edst 205.150.200.252 --spi 0x12345678 --proto tun
ipsec spi --af inet --edst 205.150.200.252 --spi 0x12345678 --proto esp --src 205.150.200.232 --esp 3des-md5-96 --enckey $enckey --authkey $authkey
ipsec spi --af inet --edst 205.150.200.252 --spi 0x12345678 --proto tun --src 205.150.200.232 --dst 205.150.200.252 --ip4
ipsec spigrp inet 205.150.200.252 0x12345678 tun inet 205.150.200.252 0x12345678 esp
ipsec eroute --del --eraf inet --src 205.150.200.163/32 --dst 205.150.200.252/32
ipsec eroute --add --eraf inet --src 205.150.200.163/32 --dst 205.150.200.252/32 --said tun0x12345678@205.150.200.252
# pin the route to the peer through ipsec0 with an explicit source address
# so outgoing packets match the eroute installed above
ip route add 205.150.200.252 via 205.150.200.238 src 205.150.200.163 dev ipsec0
| qianguozheng/Openswan | testing/kunit/xmit-02/testspi1.sh | Shell | gpl-2.0 | 951 |
#!/bin/bash
# usage:
#   api-expect-exec-success.sh ID [expectstatus]
# tests that an execution's status matches the expected value (default: succeeded)
execid="$1"
shift
expectstatus=${1:-succeeded}
shift
# arg to include.sh
set -- -
DIR=$(cd `dirname $0` && pwd)
source $DIR/include.sh
# now submit req
runurl="${APIURL}/execution/${execid}"
params=""
# get listing
docurl ${runurl}?${params} > $DIR/curl.out
if [ 0 != $? ] ; then
errorMsg "ERROR: failed query request ${runurl}?${params}"
exit 2
fi
$SHELL $SRC_DIR/api-test-success.sh $DIR/curl.out || { echo "${runurl}?${params}"; exit 2; }
#Check execution count
itemcount=$($XMLSTARLET sel -T -t -v "/result/executions/@count" $DIR/curl.out)
assert "1" "$itemcount" "execution count should be 1"
status=$($XMLSTARLET sel -T -t -v "//execution[@id=$execid]/@status" $DIR/curl.out)
assert "$expectstatus" "$status" "execution status should be succeeded"
exit 0 | tjordanchat/rundeck | test/api/api-expect-exec-success.sh | Shell | apache-2.0 | 881 |
#!/bin/bash
# Arguments
# 1 - run list
# 2 - start run
# 3 - end run
runList=$1
startRun=$2
endRun=$3
echo runList=$runList
echo startRun=$startRun
echo endRun=$endRun
#
workdir=${GUI_OUTDIR}/tmp/tmp${startRun}-${endRun}
backupdir=`pwd`/
mkdirhier $workdir
cp $runList $workdir
cd $workdir
source guiEnv.sh
source $ALICE_ROOT/TPC/scripts/halloWorld.sh
#
aliroot -q -b $SCRIPTDIR/ConfigOCDB.C\($2\) $SCRIPTDIR/CalibEnv.C+\(\"$runList\",$startRun,$endRun\)
echo End of job:
echo pwd=`pwd`
echo ls=
ls -alrt
echo cp dcsTime.root $GUI_OUTDIR/time/calibTreeTime_${startRun}_${endRun}.root
cp dcsTime.root $GUI_OUTDIR/time/calibTreeTime_${startRun}_${endRun}.root
cd $backupdir
| mkrzewic/AliRoot | TPC/scripts/OCDBscan/makeCalibTree.sh | Shell | bsd-3-clause | 675 |
################################################################################
# Bug #1174314
# Test xtrabackup --stats with server dir
################################################################################
. inc/common.sh
logdir=${TEST_VAR_ROOT}/logs
mkdir $logdir
MYSQLD_EXTRA_MY_CNF_OPTS="
innodb_log_group_home_dir=$logdir
"
start_server
run_cmd $MYSQL $MYSQL_ARGS test <<EOF
CREATE TABLE t1(a INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1), (2), (3);
EOF
shutdown_server
# There is an inconsistency between shutdown_server and stop_server:
# stop_server sets XB_ARGS="--no-defaults", while shutdown_server
# doesn't.
# We pass all necessary options as arguments, so if this ever
# changes, the test will still work.
xtrabackup --stats --datadir=${MYSQLD_DATADIR} \
--innodb_log_group_home_dir=$logdir
vlog "stats did not fail"
| janlindstrom/percona-xtrabackup | storage/innobase/xtrabackup/test/t/xb_stats_datadir.sh | Shell | gpl-2.0 | 864 |
#! /bin/sh
# $1 is expected to be $TRAVIS_OS_NAME
./Configure dist
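# presumably OS X's BSD tar rejects the GNU-style options used by the default
# dist target, hence the explicit TAR_COMMAND override below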
if [ "$1" == osx ]; then
make NAME='_srcdist' TARFILE='_srcdist.tar' \
TAR_COMMAND='$(TAR) $(TARFLAGS) -cvf -' tar
else
make TARFILE='_srcdist.tar' NAME='_srcdist' dist
fi
| openweave/openweave-core | third_party/openssl/openssl/.travis-create-release.sh | Shell | apache-2.0 | 258 |
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
TITLE=ConcurrentWriteACLTest
BENCH="ConcurrentWriteACLTest"
ADMIN="true"
RUNTIME=10
RANDOM_USER="true"
FIXS="Oak-Mongo" # Jackrabbit"
THREADS="1,2,4,8,10,15,20,50"
PROFILE=false
NUM_ITEMS=10
LOG=$TITLE"_$(date +'%Y%m%d_%H%M%S').csv"
echo "Benchmarks: $BENCH" > $LOG
echo "Fixtures: $FIXS" >> $LOG
echo "Admin User: $ADMIN" >> $LOG
echo "Runtime: $RUNTIME" >> $LOG
echo "Num Items: $NUM_ITEMS" >> $LOG
echo "Concurrency: $THREADS" >> $LOG
echo "Random User: $RANDOM_USER" >> $LOG
echo "Profiling: $PROFILE" >> $LOG
echo "--------------------------------------" >> $LOG
for bm in $BENCH
do
for user in $ADMIN
do
# we start new VMs for each fixture to minimize memory impacts between them
for fix in $FIXS
do
echo "Executing benchmarks as admin: $user on $fix" | tee -a $LOG
echo "-----------------------------------------------------------" | tee -a $LOG
rm -rf target/Jackrabbit-* target/Oak-Tar-*
cmd="java -Xmx2048m -Dprofile=$PROFILE -Druntime=$RUNTIME -Dwarmup=5 -jar target/oak-run-*-SNAPSHOT.jar benchmark --itemsToRead $NUM_ITEMS --csvFile $LOG --concurrency $THREADS --runAsAdmin $user --report false --randomUser $RANDOM_USER $bm $fix"
echo $cmd
$cmd
done
done
done
echo "-----------------------------------------"
echo "Benchmark completed. see $LOG for details:"
cat $LOG
| Kast0rTr0y/jackrabbit-oak | oak-run/run_writeacl.sh | Shell | apache-2.0 | 2,200 |
# cleanup
for node in druid-historical druid-coordinator druid-overlord druid-router druid-broker druid-middlemanager druid-zookeeper-kafka druid-metadata-storage;
do
docker stop $node
docker rm $node
done
# environment variables
DIR=$(cd $(dirname $0) && pwd)
DOCKERDIR=$DIR/docker
SHARED_DIR=${HOME}/shared
SUPERVISORDIR=/usr/lib/druid/conf
RESOURCEDIR=$DIR/src/test/resources
# so docker IP addr will be known during docker build
echo $DOCKER_IP > $DOCKERDIR/docker_ip
# Make directories if they don't exist
mkdir -p $SHARED_DIR/logs
mkdir -p $SHARED_DIR/tasklogs
# install druid jars
rm -rf $SHARED_DIR/docker
cp -R docker $SHARED_DIR/docker
mvn -B dependency:copy-dependencies -DoutputDirectory=$SHARED_DIR/docker/lib
# Build Druid Cluster Image
docker build -t druid/cluster $SHARED_DIR/docker
# Start zookeeper and kafka
docker run -d --name druid-zookeeper-kafka -p 2181:2181 -p 9092:9092 -v $SHARED_DIR:/shared -v $DOCKERDIR/zookeeper.conf:$SUPERVISORDIR/zookeeper.conf -v $DOCKERDIR/kafka.conf:$SUPERVISORDIR/kafka.conf druid/cluster
# Start MYSQL
docker run -d --name druid-metadata-storage -v $SHARED_DIR:/shared -v $DOCKERDIR/metadata-storage.conf:$SUPERVISORDIR/metadata-storage.conf druid/cluster
# Start Overlord
docker run -d --name druid-overlord -p 8090:8090 -v $SHARED_DIR:/shared -v $DOCKERDIR/overlord.conf:$SUPERVISORDIR/overlord.conf --link druid-metadata-storage:druid-metadata-storage --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster
# Start Coordinator
docker run -d --name druid-coordinator -p 8081:8081 -v $SHARED_DIR:/shared -v $DOCKERDIR/coordinator.conf:$SUPERVISORDIR/coordinator.conf --link druid-overlord:druid-overlord --link druid-metadata-storage:druid-metadata-storage --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster
# Start Historical
docker run -d --name druid-historical -v $SHARED_DIR:/shared -v $DOCKERDIR/historical.conf:$SUPERVISORDIR/historical.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster
# Start Middlemanager
docker run -d --name druid-middlemanager -p 8100:8100 -p 8101:8101 -p 8102:8102 -p 8103:8103 -p 8104:8104 -p 8105:8105 -v $RESOURCEDIR:/resources -v $SHARED_DIR:/shared -v $DOCKERDIR/middlemanager.conf:$SUPERVISORDIR/middlemanager.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-overlord:druid-overlord druid/cluster
# Start Broker
docker run -d --name druid-broker -p 8082:8082 -v $SHARED_DIR:/shared -v $DOCKERDIR/broker.conf:$SUPERVISORDIR/broker.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-middlemanager:druid-middlemanager --link druid-historical:druid-historical druid/cluster
# Start Router
docker run -d --name druid-router -p 8888:8888 -v $SHARED_DIR:/shared -v $DOCKERDIR/router.conf:$SUPERVISORDIR/router.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-coordinator:druid-coordinator --link druid-broker:druid-broker druid/cluster
| erikdubbelboer/druid | integration-tests/run_cluster.sh | Shell | apache-2.0 | 2,942 |
#!/bin/sh
test_description='CRLF renormalization'
TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
test_expect_success setup '
git config core.autocrlf false &&
printf "LINEONE\nLINETWO\nLINETHREE\n" >LF.txt &&
printf "LINEONE\r\nLINETWO\r\nLINETHREE\r\n" >CRLF.txt &&
printf "LINEONE\r\nLINETWO\nLINETHREE\n" >CRLF_mix_LF.txt &&
git add . &&
git commit -m initial
'
test_expect_success 'renormalize CRLF in repo' '
echo "*.txt text=auto" >.gitattributes &&
git add --renormalize "*.txt" &&
cat >expect <<-\EOF &&
i/lf w/crlf attr/text=auto CRLF.txt
i/lf w/lf attr/text=auto LF.txt
i/lf w/mixed attr/text=auto CRLF_mix_LF.txt
EOF
git ls-files --eol |
sed -e "s/ / /g" -e "s/ */ /g" |
sort >actual &&
test_cmp expect actual
'
test_expect_success 'ignore-errors not mistaken for renormalize' '
git reset --hard &&
echo "*.txt text=auto" >.gitattributes &&
git ls-files --eol >expect &&
git add --ignore-errors "*.txt" &&
git ls-files --eol >actual &&
test_cmp expect actual
'
test_done
| abg1979/git | t/t0025-crlf-renormalize.sh | Shell | gpl-2.0 | 1,012 |
#!/bin/bash
#
# Copyright (C) 1997-2003 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
export LC_ALL=C
export LANG=C
export PATH=/bin:/sbin:/usr/bin:/usr/sbin
. $(dirname $0)/ocf-shellfuncs
. $(dirname $0)/utils/config-utils.sh
. $(dirname $0)/utils/messages.sh
. $(dirname $0)/utils/ra-skelet.sh
declare PSQL_POSTMASTER="/usr/bin/postmaster"
declare PSQL_CTL="/usr/bin/pg_ctl"
declare PSQL_pid_file="`generate_name_for_pid_file`"
declare PSQL_conf_dir="`generate_name_for_conf_dir`"
declare PSQL_gen_config_file="$PSQL_conf_dir/postgresql.conf"
declare PSQL_kill_timeout="5"
declare PSQL_stop_timeout="15"
if [ -z "$OCF_RESKEY_startup_wait" ]; then
OCF_RESKEY_startup_wait=10
fi
verify_all()
{
clog_service_verify $CLOG_INIT
if [ -z "$OCF_RESKEY_name" ]; then
clog_service_verify $CLOG_FAILED "Invalid Name Of Service"
return $OCF_ERR_ARGS
fi
if [ -z "$OCF_RESKEY_service_name" ]; then
clog_service_verify $CLOG_FAILED_NOT_CHILD
return $OCF_ERR_ARGS
fi
if [ -z "$OCF_RESKEY_config_file" ]; then
clog_check_file_exist $CLOG_FAILED_INVALID "$OCF_RESKEY_config_file"
clog_service_verify $CLOG_FAILED
return $OCF_ERR_ARGS
fi
if [ ! -r "$OCF_RESKEY_config_file" ]; then
clog_check_file_exist $CLOG_FAILED_NOT_READABLE $OCF_RESKEY_config_file
clog_service_verify $CLOG_FAILED
return $OCF_ERR_ARGS
fi
if [ -z "$OCF_RESKEY_postmaster_user" ]; then
		clog_service_verify $CLOG_FAILED "Invalid User"
return $OCF_ERR_ARGS
fi
clog_service_verify $CLOG_SUCCEED
return 0
}
generate_config_file()
{
declare original_file="$1"
declare generated_file="$2"
declare ip_addressess="$3"
declare ip_comma="";
if [ -f "$generated_file" ]; then
sha1_verify "$generated_file"
if [ $? -ne 0 ]; then
clog_check_sha1 $CLOG_FAILED
return 0
fi
fi
clog_generate_config $CLOG_INIT "$original_file" "$generated_file"
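	# build a comma-separated value for listen_addresses out of the
	# space-separated IP list, stripping any /prefix from each address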
declare x=1
for i in $ip_addressess; do
i=`echo $i | sed -e 's/\/.*$//'`
if [ $x -eq 1 ]; then
x=0
ip_comma=$i
else
ip_comma=$ip_comma,$i
fi
done
generate_configTemplate "$generated_file" "$1"
echo "external_pid_file = '$PSQL_pid_file'" >> "$generated_file"
echo "listen_addresses = '$ip_comma'" >> "$generated_file"
echo >> "$generated_file"
sed 's/^[[:space:]]*external_pid_file/### external_pid_file/i;s/^[[:space:]]*listen_addresses/### listen_addresses/i' < "$original_file" >> "$generated_file"
sha1_addToFile "$generated_file"
clog_generate_config $CLOG_SUCCEED "$original_file" "$generated_file"
return 0;
}
start()
{
declare pguser_group
declare count=0
clog_service_start $CLOG_INIT
create_pid_directory
create_conf_directory "$PSQL_conf_dir"
check_pid_file "$PSQL_pid_file"
if [ $? -ne 0 ]; then
clog_check_pid $CLOG_FAILED "$PSQL_pid_file"
clog_service_start $CLOG_FAILED
return $OCF_ERR_GENERIC
fi
#
# Create an empty PID file for the postgres user and
# change it to be owned by the postgres user so that
# postmaster doesn't complain.
#
pguser_group=`groups $OCF_RESKEY_postmaster_user | cut -f3 -d ' '`
touch $PSQL_pid_file
	chown $OCF_RESKEY_postmaster_user:$pguser_group $PSQL_pid_file
clog_looking_for $CLOG_INIT "IP Addresses"
get_service_ip_keys "$OCF_RESKEY_service_name"
ip_addresses=`build_ip_list`
if [ -z "$ip_addresses" ]; then
clog_looking_for $CLOG_FAILED_NOT_FOUND "IP Addresses"
return $OCF_ERR_GENERIC
fi
clog_looking_for $CLOG_SUCCEED "IP Addresses"
generate_config_file "$OCF_RESKEY_config_file" "$PSQL_gen_config_file" "$ip_addresses"
su - "$OCF_RESKEY_postmaster_user" -c "$PSQL_POSTMASTER -c config_file=\"$PSQL_gen_config_file\" \
$OCF_RESKEY_postmaster_options" &> /dev/null &
# We need to sleep briefly to allow pg_ctl to detect that we've started.
# We need to fetch "-D /path/to/pgsql/data" from $OCF_RESKEY_postmaster_options
until [ "$count" -gt "$OCF_RESKEY_startup_wait" ] ||
[ `su - "$OCF_RESKEY_postmaster_user" -c \
"$PSQL_CTL status $OCF_RESKEY_postmaster_options" &> /dev/null; echo $?` = '0' ]
do
sleep 1
let count=$count+1
done
if [ "$count" -gt "$OCF_RESKEY_startup_wait" ]; then
clog_service_start $CLOG_FAILED
return $OCF_ERR_GENERIC
fi
clog_service_start $CLOG_SUCCEED
return 0;
}
stop()
{
clog_service_stop $CLOG_INIT
## Send -INT to close connections and stop. -QUIT is used if -INT signal does not stop process.
stop_generic_sigkill "$PSQL_pid_file" "$PSQL_stop_timeout" "$PSQL_kill_timeout" "-INT"
if [ $? -ne 0 ]; then
clog_service_stop $CLOG_FAILED
return $OCF_ERR_GENERIC
fi
clog_service_stop $CLOG_SUCCEED
return 0;
}
status()
{
clog_service_status $CLOG_INIT
status_check_pid "$PSQL_pid_file"
if [ $? -ne 0 ]; then
clog_service_status $CLOG_FAILED "$PSQL_pid_file"
return $OCF_ERR_GENERIC
fi
clog_service_status $CLOG_SUCCEED
return 0
}
case $1 in
meta-data)
cat `echo $0 | sed 's/^\(.*\)\.sh$/\1.metadata/'`
exit 0
;;
validate-all)
verify_all
exit $?
;;
start)
verify_all && start
exit $?
;;
stop)
verify_all && stop
exit $?
;;
status|monitor)
verify_all
status
exit $?
;;
restart)
verify_all
stop
start
exit $?
;;
*)
echo "Usage: $0 {start|stop|status|monitor|restart|meta-data|validate-all}"
exit $OCF_ERR_UNIMPLEMENTED
;;
esac
| ingted/resource-agents | rgmanager/src/resources/postgres-8.sh | Shell | gpl-2.0 | 6,052 |
#!/bin/bash
# This file is part of the rsyslog project, released under GPLv3
# this test is currently not included in the testbench as libdbi
# itself seems to have a memory leak
echo ===============================================================================
echo \[libdbi-basic.sh\]: basic test for libdbi-basic functionality via mysql
. $srcdir/diag.sh init
mysql --user=rsyslog --password=testbench < testsuites/mysql-truncate.sql
. $srcdir/diag.sh startup-vg-noleak libdbi-basic.conf
. $srcdir/diag.sh injectmsg 0 5000
. $srcdir/diag.sh shutdown-when-empty
. $srcdir/diag.sh wait-shutdown-vg
. $srcdir/diag.sh check-exit-vg
# note "-s" is requried to suppress the select "field header"
mysql -s --user=rsyslog --password=testbench < testsuites/mysql-select-msg.sql > rsyslog.out.log
. $srcdir/diag.sh seq-check 0 4999
. $srcdir/diag.sh exit
| RomeroMalaquias/rsyslog | tests/libdbi-basic-vg.sh | Shell | gpl-3.0 | 852 |
mkdir -p $PREFIX/bin
#Copying perl script to bin folder
cp LINKS $PREFIX/bin
chmod +x $PREFIX/bin/LINKS
#Recompiling C code
cd lib/bloomfilter/swig/
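# locate Perl's core headers (archlibexp/CORE) so the SWIG wrapper can be
# compiled and linked against libperl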
PERL5DIR=`(perl -e 'use Config; print $Config{archlibexp}, "\n";') 2>/dev/null`
swig -Wall -c++ -perl5 BloomFilter.i
g++ -c BloomFilter_wrap.cxx -I$PERL5DIR/CORE -fPIC -Dbool=char -O3
g++ -Wall -shared BloomFilter_wrap.o -o BloomFilter.so -O3
#Installing included perl module
h2xs -n BloomFilter -O -F -'I ../../../'
cd BloomFilter
perl Makefile.PL
make
make install
| dmaticzka/bioconda-recipes | recipes/links/build.sh | Shell | mit | 524 |
# either plug in your device and copy the files via the USB-detected device,
# or use the following method when starting an ssh server like sshdroid
URL=192.168.0.102
GH=/sdcard/graphhopper/maps/
# if you install sshdroid you can scp your files to your android device
# wget http://mapsforge.googlecode.com/files/berlin.map
# alternatives: http://download.mapsforge.org/maps/
scp -P 2222 berlin.map root@$URL:$GH
# wget http://download.geofabrik.de/osm/europe/germany/berlin.osm.bz2
# bunzip2 berlin.osm.bz2
# cd ../graphhopper
# ./run.sh /media/SAMSUNG/maps/berlin.osm
scp -r -P 2222 berlin-gh/ root@$URL:$GH | tsammons/SMART-GH | android/scripts/deploy-maps.sh | Shell | apache-2.0 | 607 |
#!/bin/sh
# Package
PACKAGE="lirc"
DNAME="LIRC"
# Others
INSTALL_DIR="/usr/local/${PACKAGE}"
INSTALLER_SCRIPT=`dirname $0`/installer
PATH="${PATH}:${INSTALL_DIR}/bin:/usr/local/bin:/bin:/usr/bin:/usr/syno/bin"
DAEMON="${INSTALL_DIR}/sbin/lircd"
PID_FILE="${INSTALL_DIR}/var/lircd.pid"
CONF_FILE="${INSTALL_DIR}/etc/lirc/lircd.conf"
IREXEC="${INSTALL_DIR}/bin/irexec"
LIRCRC_FILE="${INSTALL_DIR}/etc/lirc/lircrc"
LOG_FILE="${INSTALL_DIR}/var/log/lircd"
VERSION_FILE="${INSTALL_DIR}/etc/DSM_VERSION"
SELECTED_LIRC_DRIVER=@driver@
load_unload_drivers ()
{
case $1 in
load)
case $2 in
mceusb)
insmod ${INSTALL_DIR}/lib/modules/lirc_dev.ko
insmod ${INSTALL_DIR}/lib/modules/lirc_${2}.ko
;;
uirt)
insmod ${INSTALL_DIR}/lib/modules/lirc_dev.ko
insmod /lib/modules/usbserial.ko
insmod /lib/modules/ftdi_sio.ko
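            # UIRT serial line settings: 1200 baud, sane defaults, even
            # parity (evenp parenb), 7 data bits (cs7), no RTS/CTS flow
            # control (-crtscts); the uirt2 branch below uses the same setup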
stty -F /dev/usb/ttyUSB0 1200 sane evenp parenb cs7 -crtscts
LIRC_STARTUP_PARAMS="--device=/dev/usb/ttyUSB0 --driver=usb_uirt_raw"
;;
uirt2)
insmod ${INSTALL_DIR}/lib/modules/lirc_dev.ko
insmod /lib/modules/usbserial.ko
insmod /lib/modules/ftdi_sio.ko
stty -F /dev/usb/ttyUSB0 1200 sane evenp parenb cs7 -crtscts
LIRC_STARTUP_PARAMS="--device=/dev/usb/ttyUSB0 --driver=uirt2_raw"
;;
irtoy)
# Not yet supported. Here for example only.
;;
*)
# Not yet supported.
;;
esac
;;
unload)
case $2 in
mceusb)
rmmod ${INSTALL_DIR}/lib/modules/lirc_${2}.ko
rmmod ${INSTALL_DIR}/lib/modules/lirc_dev.ko
;;
uirt|uirt2)
rmmod /lib/modules/ftdi_sio.ko
rmmod /lib/modules/usbserial.ko
rmmod ${INSTALL_DIR}/lib/modules/lirc_dev.ko
;;
irtoy)
# Not yet supported. Here for example only.
;;
*)
# Not yet supported.
;;
esac
;;
esac
}
start_daemon ()
{
# Call function to load driver - validation happens inside
load_unload_drivers load $SELECTED_LIRC_DRIVER
${DAEMON} ${LIRC_STARTUP_PARAMS} ${CONF_FILE} --pidfile=${PID_FILE} --logfile=${LOG_FILE}
if [ -e ${LIRCRC_FILE} ]; then
${IREXEC} -d ${LIRCRC_FILE}
fi
}
stop_daemon ()
{
killall irexec >/dev/null 2>&1
if daemon_status; then
echo Stopping ${DNAME} ...
kill `cat ${PID_FILE}`
wait_for_status 1 20 || kill -9 `cat ${PID_FILE}`
else
echo ${DNAME} is not running
exit 0
fi
    rm -f ${PID_FILE}
# Call function to unload driver - validation happens inside
load_unload_drivers unload $SELECTED_LIRC_DRIVER
}
daemon_status ()
{
if [ -f ${PID_FILE} ] && kill -0 `cat ${PID_FILE}` > /dev/null 2>&1; then
return
fi
rm -f ${PID_FILE}
return 1
}
wait_for_status ()
{
counter=$2
while [ ${counter} -gt 0 ]; do
daemon_status
[ $? -eq $1 ] && return
let counter=counter-1
sleep 1
done
return 1
}
check_dsm_version ()
{
if [ -f ${VERSION_FILE} ]; then
diff -qw /etc.defaults/VERSION ${VERSION_FILE} 2>&1 >/dev/null
if [ $? -ne 0 ]; then
echo -n "DSM version has changed, re-running driver setup..."
. ${INSTALLER_SCRIPT}
lirc_install_drivers ${SELECTED_LIRC_DRIVER}
cp /etc.defaults/VERSION ${VERSION_FILE}
echo done.
fi
else
echo "First time starting, capturing DSM version"
cp /etc.defaults/VERSION ${VERSION_FILE}
fi
}
case $1 in
start)
if daemon_status; then
echo ${DNAME} is already running
exit 0
else
# Check if DSM was upgraded
check_dsm_version
echo Starting ${DNAME} ...
start_daemon
exit $?
fi
;;
stop)
stop_daemon
exit $?
;;
restart)
stop_daemon
start_daemon
exit $?
;;
status)
if daemon_status; then
echo ${DNAME} is running
exit 0
else
echo ${DNAME} is not running
exit 1
fi
;;
log)
echo ${LOG_FILE}
;;
driver)
echo ${SELECTED_LIRC_DRIVER}
;;
*)
exit 1
;;
esac
| cdcabrera/spksrc | spk/lirc/src/dsm-control.sh | Shell | bsd-3-clause | 4,844 |
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script performs disaster recovery of etcd from the backup data.
# Assumptions:
# - backup was done using etcdctl command:
# a) in case of etcd2
# $ etcdctl backup --data-dir=<dir>
# produced .snap and .wal files
# b) in case of etcd3
# $ etcdctl --endpoints=<address> snapshot save
# produced .db file
# - version.txt file is in the current directory (if it isn't it will be
# defaulted to "3.0.17/etcd3"). Based on this file, the script will
# decide to which version we are restoring (procedures are different
# for etcd2 and etcd3).
# - in case of etcd2 - *.snap and *.wal files are in current directory
# - in case of etcd3 - *.db file is in the current directory
# - the script is run as root
# - for event etcd, we only support clearing it - to do it, you need to
# set RESET_EVENT_ETCD=true env var.
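#
# A hypothetical invocation from the master node, with the backup files
# already copied into the current directory, could look like:
#   VERSION_CONTENTS="3.0.17/etcd3" RESET_EVENT_ETCD=true ./restore-from-backup.sh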
set -o errexit
set -o nounset
set -o pipefail
# Version file contains information about current version in the format:
# <etcd binary version>/<etcd api mode> (e.g. "3.0.12/etcd3").
#
# If the file doesn't exist we assume "3.0.17/etcd3" configuration is
# the current one and create a file with such configuration.
# The restore procedure is chosen based on this information.
VERSION_FILE="version.txt"
# Make it possible to overwrite version file (or default version)
# with VERSION_CONTENTS env var.
if [ -n "${VERSION_CONTENTS:-}" ]; then
echo "${VERSION_CONTENTS}" > "${VERSION_FILE}"
fi
if [ ! -f "${VERSION_FILE}" ]; then
echo "3.0.17/etcd3" > "${VERSION_FILE}"
fi
VERSION_CONTENTS="$(cat ${VERSION_FILE})"
ETCD_VERSION="$(echo "$VERSION_CONTENTS" | cut -d '/' -f 1)"
ETCD_API="$(echo "$VERSION_CONTENTS" | cut -d '/' -f 2)"
# Name is used only in case of etcd3 mode, to appropriately set the
# metadata for the etcd data.
# NOTE: NAME HAS TO BE EQUAL TO WHAT WE USE IN --name flag when starting etcd.
NAME="${NAME:-etcd-$(hostname)}"
INITIAL_CLUSTER="${INITIAL_CLUSTER:-${NAME}=http://localhost:2380}"
INITIAL_ADVERTISE_PEER_URLS="${INITIAL_ADVERTISE_PEER_URLS:-http://localhost:2380}"
# Port on which etcd is exposed.
etcd_port=2379
event_etcd_port=4002
# Wait until both etcd instances are up
wait_for_etcd_up() {
port=$1
# TODO: As of 3.0.x etcd versions, all 2.* and 3.* versions return
# {"health": "true"} on /health endpoint in healthy case.
  # However, we should come up with a regex for it to avoid future breakage.
health_ok="{\"health\": \"true\"}"
for _ in $(seq 120); do
# TODO: Is it enough to look into /health endpoint?
health=$(curl --silent "http://127.0.0.1:${port}/health")
if [ "${health}" == "${health_ok}" ]; then
return 0
fi
sleep 1
done
return 1
}
# Wait until apiserver is up.
wait_for_cluster_healthy() {
for _ in $(seq 120); do
cs_status=$(kubectl get componentstatuses -o template --template='{{range .items}}{{with index .conditions 0}}{{.type}}:{{.status}}{{end}}{{"\n"}}{{end}}') || true
componentstatuses=$(echo "${cs_status}" | grep -c 'Healthy:') || true
healthy=$(echo "${cs_status}" | grep -c 'Healthy:True') || true
if [ "${componentstatuses}" -eq "${healthy}" ]; then
return 0
fi
sleep 1
done
return 1
}
# Wait until etcd and apiserver pods are down.
wait_for_etcd_and_apiserver_down() {
for _ in $(seq 120); do
etcd=$(docker ps | grep -c etcd-server)
apiserver=$(docker ps | grep -c apiserver)
# TODO: Theoretically it is possible, that apiserver and or etcd
# are currently down, but Kubelet is now restarting them and they
# will reappear again. We should avoid it.
if [ "${etcd}" -eq "0" ] && [ "${apiserver}" -eq "0" ]; then
return 0
fi
sleep 1
done
return 1
}
# Move the manifest files to stop etcd and kube-apiserver
# while we swap the data out from under them.
MANIFEST_DIR="/etc/kubernetes/manifests"
MANIFEST_BACKUP_DIR="/etc/kubernetes/manifests-backups"
mkdir -p "${MANIFEST_BACKUP_DIR}"
echo "Moving etcd(s) & apiserver manifest files to ${MANIFEST_BACKUP_DIR}"
# If those files were already moved (e.g. during previous
# try of backup) don't fail on it.
mv "${MANIFEST_DIR}/kube-apiserver.manifest" "${MANIFEST_BACKUP_DIR}" || true
mv "${MANIFEST_DIR}/etcd.manifest" "${MANIFEST_BACKUP_DIR}" || true
mv "${MANIFEST_DIR}/etcd-events.manifest" "${MANIFEST_BACKUP_DIR}" || true
# Wait for the pods to be stopped
echo "Waiting for etcd and kube-apiserver to be down"
if ! wait_for_etcd_and_apiserver_down; then
# Couldn't kill etcd and apiserver.
echo "Downing etcd and apiserver failed"
exit 1
fi
read -rsp $'Press enter when all etcd instances are down...\n'
# Create the sort of directory structure that etcd expects.
# If this directory already exists, remove it.
BACKUP_DIR="/var/tmp/backup"
rm -rf "${BACKUP_DIR}"
if [ "${ETCD_API}" == "etcd2" ]; then
echo "Preparing etcd backup data for restore"
# In v2 mode, we simply copy both snap and wal files to a newly created
# directory. After that, we start etcd with --force-new-cluster option
# that (according to the etcd documentation) is required to recover from
# a backup.
echo "Copying data to ${BACKUP_DIR} and restoring there"
mkdir -p "${BACKUP_DIR}/member/snap"
mkdir -p "${BACKUP_DIR}/member/wal"
# If the cluster is relatively new, there can be no .snap file.
mv ./*.snap "${BACKUP_DIR}/member/snap/" || true
mv ./*.wal "${BACKUP_DIR}/member/wal/"
# TODO(jsz): This won't work with HA setups (e.g. do we need to set --name flag)?
echo "Starting etcd ${ETCD_VERSION} to restore data"
if ! image=$(docker run -d -v ${BACKUP_DIR}:/var/etcd/data \
--net=host -p ${etcd_port}:${etcd_port} \
"k8s.gcr.io/etcd:${ETCD_VERSION}" /bin/sh -c \
"/usr/local/bin/etcd --data-dir /var/etcd/data --force-new-cluster"); then
echo "Docker container didn't started correctly"
exit 1
fi
echo "Container ${image} created, waiting for etcd to report as healthy"
if ! wait_for_etcd_up "${etcd_port}"; then
echo "Etcd didn't come back correctly"
exit 1
fi
# Kill that etcd instance.
echo "Etcd healthy - killing ${image} container"
docker kill "${image}"
elif [ "${ETCD_API}" == "etcd3" ]; then
echo "Preparing etcd snapshot for restore"
mkdir -p "${BACKUP_DIR}"
echo "Copying data to ${BACKUP_DIR} and restoring there"
number_files=$(find . -maxdepth 1 -type f -name "*.db" | wc -l)
if [ "${number_files}" -ne "1" ]; then
echo "Incorrect number of *.db files - expected 1"
exit 1
fi
mv ./*.db "${BACKUP_DIR}/"
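  # exactly one *.db file was moved in (validated above), so ls yields the
  # snapshot file name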
snapshot="$(ls ${BACKUP_DIR})"
# Run etcdctl snapshot restore command and wait until it is finished.
  # The --name passed to the restore has to match the --name flag set in
  # the etcd manifest file (see the NAME comment above); otherwise the
  # restored member metadata will not line up.
if ! docker run -v ${BACKUP_DIR}:/var/tmp/backup --env ETCDCTL_API=3 \
"k8s.gcr.io/etcd:${ETCD_VERSION}" /bin/sh -c \
"/usr/local/bin/etcdctl snapshot restore ${BACKUP_DIR}/${snapshot} --name ${NAME} --initial-cluster ${INITIAL_CLUSTER} --initial-advertise-peer-urls ${INITIAL_ADVERTISE_PEER_URLS}; mv /${NAME}.etcd/member /var/tmp/backup/"; then
echo "Docker container didn't started correctly"
exit 1
fi
rm -f "${BACKUP_DIR}/${snapshot}"
fi
# Also copy version.txt file.
cp "${VERSION_FILE}" "${BACKUP_DIR}"
export MNT_DISK="/mnt/disks/master-pd"
# Save the corrupted data (clean directory if it is already non-empty).
rm -rf "${MNT_DISK}/var/etcd-corrupted"
mkdir -p "${MNT_DISK}/var/etcd-corrupted"
echo "Saving corrupted data to ${MNT_DISK}/var/etcd-corrupted"
mv /var/etcd/data "${MNT_DISK}/var/etcd-corrupted"
# Replace the corrupted data dir with the restored data.
echo "Copying restored data to /var/etcd/data"
mv "${BACKUP_DIR}" /var/etcd/data
if [ "${RESET_EVENT_ETCD:-}" == "true" ]; then
echo "Removing event-etcd corrupted data"
EVENTS_CORRUPTED_DIR="${MNT_DISK}/var/etcd-events-corrupted"
# Save the corrupted data (clean directory if it is already non-empty).
rm -rf "${EVENTS_CORRUPTED_DIR}"
mkdir -p "${EVENTS_CORRUPTED_DIR}"
mv /var/etcd/data-events "${EVENTS_CORRUPTED_DIR}"
fi
# Start etcd and kube-apiserver again.
echo "Restarting etcd and apiserver from restored snapshot"
mv "${MANIFEST_BACKUP_DIR}"/* "${MANIFEST_DIR}/"
rm -rf "${MANIFEST_BACKUP_DIR}"
# Verify that etcd is back.
echo "Waiting for etcd to come back"
if ! wait_for_etcd_up "${etcd_port}"; then
echo "Etcd didn't come back correctly"
exit 1
fi
# Verify that event etcd is back.
echo "Waiting for event etcd to come back"
if ! wait_for_etcd_up "${event_etcd_port}"; then
echo "Event etcd didn't come back correctly"
exit 1
fi
# Verify that kube-apiserver is back and cluster is healthy.
echo "Waiting for apiserver to come back"
if ! wait_for_cluster_healthy; then
echo "Apiserver didn't come back correctly"
exit 1
fi
echo "Cluster successfully restored!"
| mkumatag/origin | vendor/k8s.io/kubernetes/cluster/restore-from-backup.sh | Shell | apache-2.0 | 9,389 |
#!/bin/bash
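# make the conda environment's headers and libraries visible to the compiler
# and linker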
export C_INCLUDE_PATH=${PREFIX}/include
export LIBRARY_PATH=${PREFIX}/lib
make
mkdir -p $PREFIX/bin
cp SweepFinder2 $PREFIX/bin
| dmaticzka/bioconda-recipes | recipes/sweepfinder2/build.sh | Shell | mit | 142 |
#!/usr/bin/env bash
OS=`cat /etc/os-release | grep "^ID=" | sed 's/ID=//g' | sed 's/["]//g' | awk '{print $1}'`
echo -e "Operating System: ${OS}\n"
realm="TEST.COREFX.NET"
domain="TEST"
principal1="TESTHOST/testfqdn.test.corefx.net"
principal2="TESTHTTP/localhost"
krb_user="krb_user"
krb_password="password"
ntlm_user="ntlm_user"
ntlm_password="ntlm_password"
kadmin="kadmin.local"
krb5kdc="krb5kdc"
kdb5_util="kdb5_util"
krb_conf="krb5.conf"
krb_conf_location="/etc/krb5.conf"
keytabfile="/etc/krb5.keytab"
# NTLM credentials file
ntlm_user_file="/var/tmp/ntlm_user_file"
PROGNAME=$(basename $0)
usage()
{
echo "This script must be run with super-user privileges."
echo "Usage: ${PROGNAME} [-h|--help] [-y|--yes] [-u|--uninstall]";
}
# Cleanup config files and uninstall KDC
clean_up()
{
echo "Stopping KDC.."
if pgrep krb5kdc 2> /dev/null; then pkill krb5kdc 2> /dev/null ; fi
case ${OS} in
"ubuntu" | "debian")
kdc_conf_location="/etc/krb5kdc/kdc.conf"
dpkg -s krb5-kdc >/dev/null 2>&1
if [ $? -eq 0 ]; then
echo "Uninstalling krb5-kdc"
apt-get -y purge krb5-kdc
fi
;;
"centos" | "rhel" | "fedora")
kdc_conf_location="/var/kerberos/krb5kdc/kdc.conf"
yum list installed krb5-server >/dev/null 2>&1
if [ $? -eq 0 ]; then
echo "Uninstalling krb5-server"
yum -y remove krb5-server
fi
;;
"opensuse")
kdc_conf_location="/var/lib/kerberos/krb5kdc/kdc.conf"
zypper search --installed-only krb5-server >/dev/null 2>&1
if [ $? -eq 0 ]; then
echo "Uninstalling krb5-server"
zypper --non-interactive remove krb5-server >/dev/null 2>&1
fi
;;
*)
echo "This is an unsupported operating system"
exit 1
;;
esac
echo "Removing config files"
if [ -f ${krb_conf_location} ]; then
rm -f ${krb_conf_location}
fi
if [ -f ${kdc_conf_location} ]; then
rm -f ${kdc_conf_location}
fi
echo "Removing KDC database"
rm -f ${database_files}
if [ -f ${keytabfile} ]; then
rm -f ${keytabfile}
fi
echo "Removing NTLM credentials file"
if [ -f ${ntlm_user_file} ]; then
rm -f ${ntlm_user_file}
fi
echo "Cleanup completed"
}
error_exit()
{
echo "${1:-"Unknown Error"}"
echo "Aborting"
clean_up
exit 1
}
# Common function across linux distros to configure KDC post installation
configure_kdc()
{
echo "Stopping KDC.."
if pgrep krb5kdc 2> /dev/null; then pkill krb5kdc ; fi
# Remove database files if exist
rm -f ${database_files}
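    # -pw supplies the principal's password up front so kadmin.local never
    # prompts interactively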
add_principal_cmd="add_principal -pw ${krb_password}"
# Create/copy krb5.conf and kdc.conf
echo "Copying krb5.conf and kdc.conf.."
cp ${krb_conf} ${krb_conf_location} || \
error_exit "Cannot copy ${krb_conf} to ${krb_conf_location}"
cp ${kdc_conf} ${kdc_conf_location} || \
error_exit "Cannot copy ${kdc_conf} to ${kdc_conf_location}"
echo "Creating KDC database for realm ${realm}.."
${kdb5_util} create -r ${realm} -P ${krb_password} -s || \
error_exit "Cannot create KDC database for realm ${realm}"
echo "Adding principal ${principal1}.."
${kadmin} -q "${add_principal_cmd} ${principal1}@${realm}" || \
error_exit "Cannot add ${principal1}"
echo "Adding principal ${principal2}.."
${kadmin} -q "${add_principal_cmd} ${principal2}@${realm}" || \
error_exit "Cannot add ${principal2}"
echo "Adding user ${krb_user}.."
${kadmin} -q "${add_principal_cmd} ${krb_user}@${realm}" || \
error_exit "Cannot add ${krb_user}"
echo "Exporting keytab for ${principal1}"
${kadmin} -q "ktadd -norandkey ${principal1}@${realm}" || \
error_exit "Cannot export kytab for ${principal1}"
echo "Exporting keytab for ${principal2}"
${kadmin} -q "ktadd -norandkey ${principal2}@${realm}" || \
error_exit "Cannot export kytab for ${principal2}"
echo "Exporting keytab for ${krb_user}"
${kadmin} -q "ktadd -norandkey ${krb_user}@${realm}" || \
error_exit "Cannot export kytab for ${krb_user}"
}
# check the invoker of this script
if [ $EUID -ne 0 ]; then
usage
exit 1
fi
# Parse command-line arguments
TEMP=`getopt -o p:hyu --long password:,help,yes,uninstall -n 'test.sh' -- "$@"`
[ $? -eq 0 ] || {
usage
exit 1
}
eval set -- "$TEMP"
uninstall=0
force=0
while true; do
case $1 in
-h|--help) usage; exit 0;;
-y|--yes) force=1; shift ;;
-u|--uninstall) uninstall=1; shift;;
-p|--password) shift; krb_password=$1; shift;;
--) shift; break;;
*) usage; exit 1;;
esac
done
# Uninstallation
if [ $uninstall -eq 1 ]; then
if [ $force -eq 0 ]; then
echo "This will uninstall KDC from your machine and cleanup the related config files."
read -p "Do you want to continue? ([Y]es/[N]o)? " choice
case $(echo $choice | tr '[A-Z]' '[a-z]') in
y|yes) clean_up;;
*) echo "Skipping uninstallation";;
esac
else
clean_up
fi
exit 0
fi
# Installation
if [ $force -eq 0 ]; then
read -p "This will install KDC on your machine and create KDC principals. Do you want to continue? ([Y]es/[N]o)? " choice
case $(echo $choice | tr '[A-Z]' '[a-z]') in
y|yes) ;;
*) echo "Skipping installation"; exit 0;;
esac
fi
case ${OS} in
"ubuntu" | "debian")
kdc_conf="kdc.conf.ubuntu"
kdc_conf_location="/etc/krb5kdc/kdc.conf"
database_files="/var/lib/krb5kdc/principal*"
dpkg -s krb5-kdc >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Installing krb5-kdc.."
export DEBIAN_FRONTEND=noninteractive
apt-get -y install krb5-kdc krb5-admin-server
if [ $? -ne 0 ]; then
echo "Error occurred during installation, aborting"
exit 1
fi
else
echo "krb5-kdc already installed.."
exit 2
fi
configure_kdc
echo "Starting KDC.."
${krb5kdc}
;;
"centos" | "rhel" | "fedora" )
kdc_conf="kdc.conf.centos"
kdc_conf_location="/var/kerberos/krb5kdc/kdc.conf"
database_files="/var/kerberos/krb5kdc/principal*"
yum list installed krb5-server >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Installing krb5-server.."
yum -y install krb5-server krb5-libs krb5-workstation
if [ $? -ne 0 ]; then
echo "Error occurred during installation, aborting"
exit 1
fi
else
echo "krb5-server already installed.."
exit 2
fi
configure_kdc
echo "Starting KDC.."
systemctl start krb5kdc.service
systemctl enable krb5kdc.service
;;
"opensuse")
# the following is a workaround for opensuse
# details at https://groups.google.com/forum/#!topic/comp.protocols.kerberos/3itzZQ4fETA
# and http://lists.opensuse.org/opensuse-factory/2013-10/msg00099.html
export KRB5CCNAME=$PWD
krb5kdc="/usr/lib/mit/sbin/krb5kdc"
kadmin="/usr/lib/mit/sbin/kadmin.local"
kdb5_util="/usr/lib/mit/sbin/kdb5_util"
kdc_conf="kdc.conf.opensuse"
kdc_conf_location="/var/lib/kerberos/krb5kdc/kdc.conf"
database_files="/var/lib/kerberos/krb5kdc/principal*"
zypper search --installed-only krb5-mini >/dev/null 2>&1
if [ $? -eq 0 ]; then
echo "removing krb5-mini which conflicts with krb5-server and krb5-devel"
zypper --non-interactive remove krb5-mini
fi
zypper search --installed-only krb5-server >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Installing krb5-server.."
zypper --non-interactive install krb5-server krb5-client krb5-devel
if [ $? -ne 0 ]; then
echo "Error occurred during installation, aborting"
exit 1
fi
else
echo "krb5-server already installed.."
exit 2
fi
configure_kdc
echo "Starting KDC..${krb5kdc}"
${krb5kdc}
;;
*)
echo "This is an unsupported operating system"
exit 1
;;
esac
# Create NTLM credentials file (only when the gss-ntlmssp mechanism is installed)
grep -ir gssntlmssp.so /etc/gss/mech.d > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo "$domain:$ntlm_user:$ntlm_password" > $ntlm_user_file
echo "$realm:$krb_user:$krb_password" >> $ntlm_user_file
chmod +r $ntlm_user_file
fi
chmod +r ${keytabfile}
| nbarbettini/corefx | src/System.Net.Security/tests/Scripts/Unix/setup-kdc.sh | Shell | mit | 8,851 |
. ${srcdir}/emulparams/armelf.sh
MAXPAGESIZE="CONSTANT (MAXPAGESIZE)"
TEXT_START_ADDR=0x00008000
TARGET2_TYPE=got-rel
unset STACK_ADDR
unset EMBEDDED
| jlspyaozhongkai/Uter | third_party_backup/binutils-2.25/ld/emulparams/armelf_nbsd.sh | Shell | gpl-3.0 | 151 |