Dataset columns:
  code        string   (length 2 – 1.05M)
  repo_name   string   (length 5 – 110)
  path        string   (length 3 – 922)
  language    string   (1 class)
  license     string   (15 classes)
  size        int64    (2 – 1.05M)
#! /bin/bash
# [:VIM_EVAL:]expand('%:t')[:END_EVAL:]

set -euo pipefail

readonly PROGNAME=$(basename "$0")
readonly PROGDIR=$(readlink -m "$(dirname "$0")")
readonly ARGS=("$@")

#readonly DIR=$(dirname "$(readlink -f "$0")")
#if [ -e "${DIR}" ]; then
#    readonly WORK_DIR=$(mktemp -d -p "${DIR}")
#fi
#function interrupt {
#    echo "Removing temporary directory ${WORK_DIR}"
#    rm -rf "${WORK_DIR}"
#}
#trap interrupt INT SIGINT TERM SIGTERM EXIT
gotmanyacc/dotfiles
vim/templates/skel.sh
Shell
gpl-3.0
453
#!/bin/bash
#FILE ipfs-add-my-hash.sh
#DATE 2020-06-06

set -euxo pipefail

HASH='ipfs/QmNm45RagXiktLp7C46rnFKsjGTNKatTRLfEM4BEKmeXsJ'
GW_LIST='/tmp/ipfs-gw.list'
OUTPUT='/tmp/ipfs-gw-hash.list'

for gw in $(cat ${GW_LIST}); do
    URL="${gw}/${HASH}"
    echo "$URL" | tee -a $OUTPUT
done
exit
mdrights/Myscripts
os-opt/ipfs-add-my-hash.sh
Shell
gpl-3.0
297
#!/bin/bash
# Note: "sudo echo N > /proc/sys/..." does not work, because the redirection is
# performed by the unprivileged shell; write through "sudo tee" instead.
echo 10   | sudo tee /proc/sys/vm/dirty_ratio
echo 5    | sudo tee /proc/sys/vm/dirty_background_ratio
echo 3000 | sudo tee /proc/sys/vm/dirty_expire_centisecs
echo 500  | sudo tee /proc/sys/vm/dirty_writeback_centisecs
echo 100  | sudo tee /proc/sys/vm/vfs_cache_pressure
dewtx29/linux_script
s/os/sysctl4.sh
Shell
gpl-3.0
258
#! /usr/bin/env bash

slice=/sys/fs/cgroup/cpuset/machine.slice

crm_cpus=$(/usr/sbin/crm node utilization $HOSTNAME show cpu | perl -ne '/value=(\d+)$/ && print $1')
server_cpus=$(grep -c ^proc /proc/cpuinfo)
server_cpus=$((server_cpus-1))
first_allowed_cpu=$(echo $server_cpus - $crm_cpus | bc)

/usr/bin/cluster_set_cpuset -p $slice -e -s "${first_allowed_cpu}-${server_cpus}"

if [ $? -eq 0 ] ; then
    echo "Set cpu_cpus and cpu_exclusive... OK"
else
    echo "Set cpu_cpus and cpu_exclusive... FAILED"
    exit 1
fi

exit 0
mgrzybek/ansible-ha-cluster
files/machine-slice-configuration.sh
Shell
gpl-3.0
521
lupdate CenGen.pro -ts cengen_ru.ts cengen_en.ts
linguist cengen_ru.ts
cp cengen_ru.qm ../cengen-build-desktop/
mihailikus/cengen
tr.sh
Shell
gpl-3.0
113
#!/bin/bash
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

ADB="/usr/bin/adb"
FASTBOOT="/usr/bin/fastboot"
UDEV="/etc/udev/rules.d/51-android.rules"

# get sudo
echo "[INFO] Nexus Tools 2.6.5"
echo "[INFO] Please enter sudo password for uninstall."
sudo echo "[ OK ] Sudo access granted." || { echo "[ERROR] No sudo access."; exit 1; }

# remove files
if [ -f $ADB ]; then
    sudo rm $ADB
    echo "[ OK ] ADB removed."
else
    echo "[INFO] ADB not found in /usr/bin, skipping uninstall."
fi

if [ -f $FASTBOOT ]; then
    sudo rm $FASTBOOT
    echo "[ OK ] Fastboot removed."
else
    echo "[INFO] Fastboot not found in /usr/bin, skipping uninstall."
fi

if [ -f $UDEV ]; then
    sudo rm $UDEV
    echo "[ OK ] Udev list removed."
else
    echo "[INFO] Udev list not found in /etc/udev/rules.d/, skipping uninstall."
fi

echo "[ OK ] Done uninstalling."
echo " "
youbiteme/nexus-tools
uninstall.sh
Shell
gpl-3.0
1,476
#!/bin/sh

echo -n 'Preparing files...'
cd ..

rm -f kapow.desktop.in
cp kapow.desktop kapow.desktop.in
sed -e '/^Name\[/ d' \
    -e '/^GenericName\[/ d' \
    -e '/^Comment\[/ d' \
    -e '/^Icon/ d' \
    -e '/^Keywords/ d' \
    -i kapow.desktop.in

rm -f kapow.appdata.xml.in
cp kapow.appdata.xml kapow.appdata.xml.in
sed -e '/p xml:lang/ d' \
    -e '/summary xml:lang/ d' \
    -e '/name xml:lang/ d' \
    -e '/<developer_name>/ d' \
    -i kapow.appdata.xml.in

cd po
echo ' DONE'

echo -n 'Extracting messages...'
xgettext --from-code=UTF-8 --output=description.pot \
    --package-name='Kapow' --copyright-holder='Graeme Gott' \
    ../*.in
sed 's/CHARSET/UTF-8/' -i description.pot
echo ' DONE'

echo -n 'Cleaning up...'
cd ..
rm -f kapow.desktop.in
rm -f kapow.appdata.xml.in
echo ' DONE'
gottcode/kapow
icons/po/update-pot.sh
Shell
gpl-3.0
774
#! /usr/bin/env bash

set -x
set -e

testdir=$(mktemp -d -t pijul-test.XXXXXXXXXX)
test ! -z $testdir
cd $testdir

mkdir a
cd a
$pijul init
echo saluton > file
echo malbão >> file
echo probolon >> file
$pijul add file
$pijul record -a --author "Jean-Paul" --name "L'existentialisme"
cd ..

$pijul get a b
cd b
echo saluton > file
echo probolon >> file
$pijul record -a --author "Albert" --name "J'en ai fait quand j'étais jeune"
echo ynyyy | $pijul rollback --author "Albert" --name "J'en ai fait quand j'étais jeune"

cd ../a
echo saluton > file
echo probolon >> file
$pijul record -a --author "Jean-Paul" --name "Bof"
$pijul pull -a ../b
if test ! -z $pdf_out
then
    $pijul debug
    dot -Tpdf -o $pdf_out/test.pdf debug
fi
cd ..

$pijul get a c
cd c
cat file

rm -rf $testdir
bitemyapp/Pijul
tests/zombie.sh
Shell
gpl-3.0
789
python -m unittest discover tests -p "*_test.py"
beslave/space-king
test.sh
Shell
gpl-3.0
49
#!/bin/bash
. PACKAGE
echo "Creating Plasmoid package: $NAME-$VERSION.plasmoid"
zip -r ../$NAME-$VERSION.plasmoid . -x@zip_excluded.lst
tanghus/currency-converter-plasmoid
make_plasmoid_pkg.sh
Shell
gpl-3.0
136
#!/usr/bin/env bash
# -*- ENCODING: UTF-8 -*-
##
## @author      Raúl Caro Pastorino
## @copyright   Copyright © 2019 Raúl Caro Pastorino
## @license     https://wwww.gnu.org/licenses/gpl.txt
## @email       raul@fryntiz.dev
## @web         https://fryntiz.es
## @gitlab      https://gitlab.com/fryntiz
## @github      https://github.com/fryntiz
##
## Applied Style Guide:
## @style       https://gitlab.com/fryntiz/bash-guide-style

############################
##     INSTRUCTIONS       ##
############################
##

###########################
##       FUNCTIONS       ##
###########################

aplicaciones_pentesting() {
    echo -e "$VE Aplicaciones Pentesting y Redes$CL"
    actualizarRepositorios
    repararGestorPaquetes
    instalarSoftwareLista "$SOFTLIST/Pentesting/db.lst"
    instalarSoftwareLista "$SOFTLIST/Pentesting/frameworks.lst"
    instalarSoftwareLista "$SOFTLIST/Pentesting/redes.lst"
    instalarSoftwareLista "$SOFTLIST/Pentesting/sistema.lst"
    instalarSoftwareLista "$SOFTLIST/Pentesting/bluetooth.lst"
    instalarSoftwareLista "$SOFTLIST/Pentesting/crackers.lst"
    repararGestorPaquetes
}
fryntiz/preparar_entorno
Apps/pentesting.sh
Shell
gpl-3.0
1,146
#! /bin/bash # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. ######################################################################## # # tests/pk11_gtests/pk11_gtests.sh # # Script to drive the PKCS#11 gtest unit tests # # needs to work on all Unix and Windows platforms # # special strings # --------------- # FIXME ... known problems, search for this string # NOTE .... unexpected behavior # ######################################################################## ############################## pk11_gtest_init ########################## # local shell function to initialize this script ######################################################################## pk11_gtest_init() { SCRIPTNAME=pk11_gtest.sh # sourced - $0 would point to all.sh if [ -z "${CLEANUP}" ] ; then # if nobody else is responsible for CLEANUP="${SCRIPTNAME}" # cleaning this script will do it fi if [ -z "${INIT_SOURCED}" -o "${INIT_SOURCED}" != "TRUE" ]; then cd ../common . ./init.sh fi SCRIPTNAME=pk11_gtest.sh html_head PKCS\#11 Gtests if [ ! -d "${PK11GTESTDIR}" ]; then mkdir -p "${PK11GTESTDIR}" fi cd "${PK11GTESTDIR}" } ########################## pk11_gtest_start ######################### # Local function to actually start the test #################################################################### pk11_gtest_start() { if [ ! -f ${BINDIR}/pk11_gtest ]; then html_unknown "Skipping pk11_gtest (not built)" return fi # Temporarily disable asserts for PKCS#11 slot leakage (Bug 1168425) unset NSS_STRICT_SHUTDOWN PK11GTESTREPORT="${PK11GTESTDIR}/report.xml" ${BINDIR}/pk11_gtest -d "${PK11GTESTDIR}" --gtest_output=xml:"${PK11GTESTREPORT}" html_msg $? 0 "pk11_gtest run successfully" sed -f ${COMMON}/parsegtestreport.sed "${PK11GTESTREPORT}" | \ while read result name; do if [ "$result" = "notrun" ]; then echo "$name" SKIPPED elif [ "$result" = "run" ]; then html_passed "$name" > /dev/null else html_failed "$name" fi done } pk11_gtest_cleanup() { cd ${QADIR} . common/cleanup.sh } ################## main ################################################# cd "$(dirname "$0")" pk11_gtest_init pk11_gtest_start pk11_gtest_cleanup
chombourger/efup
external/nss/tests/pk11_gtests/pk11_gtests.sh
Shell
mpl-2.0
2,411
useradd vagrant -u 5000 -m -d /home/vagrant

echo "Defaults !requiretty
vagrant ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/90-vagrant
chmod 0440 /etc/sudoers.d/90-vagrant

sed -i -r 's/.*UseDNS.*/UseDNS no/' /etc/ssh/sshd_config

mkdir -p /root/.ssh
cat /root/vagrant_pub_key >> /root/.ssh/authorized_keys
chown -R root: /root
chmod 0700 /root/.ssh
chmod 0600 /root/.ssh/authorized_keys

mkdir -p /home/vagrant/.ssh
cat /root/vagrant_pub_key >> /home/vagrant/.ssh/authorized_keys
chown -R vagrant: /home/vagrant
chmod 0700 /home/vagrant/.ssh
chmod 0600 /home/vagrant/.ssh/authorized_keys
jistr/vagrant-libvirt-image-tools
files/prepare_vm.sh
Shell
agpl-3.0
585
#!/bin/bash # Definitions readonly SCRIPT_DIR=`dirname $0` readonly ARGC="$#" # First, try to set up env if [ -f "${SCRIPT_DIR}/CloudGateway_env.sh" ]; then . "${SCRIPT_DIR}/CloudGateway_env.sh" else echo "Unable to find Cloud Gateway environment file, exiting." 1>&2 exit 1 fi function usage { print_error "Usage: $0 [-v[v]] [-r]" exit 22 } VERBOSE=0 RAW=0 while true ; do if [ $# -eq 0 ]; then break; fi case "$1" in -v) VERBOSE=1; shift ;; -vv) VERBOSE=2; shift ;; -r) RAW=1; shift ;; --) shift ; break ;; *) usage; esac done # Get dirty / deleting files count from DB nb_dirty_files=0 nb_files_to_be_deleted=0 # Dirty value=$( get_psql_single_value 'select count(*) from inodes_instances WHERE status = 1;' ) result=$? if [ $? -eq 0 -a -n "${value}" ]; then readonly nb_dirty_files=${value} else print_error "Error getting dirty files count: ${result}" readonly nb_dirty_files=0 fi # Deleting value=$( get_psql_single_value 'select count(*) from inodes_instances WHERE status = 2;' ) result=$? if [ $? -eq 0 -a -n "${value}" ]; then readonly nb_files_to_be_deleted=${value} else print_error "Error getting deleting files count: ${result}" readonly nb_files_to_be_deleted=0 fi # If we have neither dirty nor deleted files, exit with 0. if [ ${nb_dirty_files} -eq 0 -a ${nb_files_to_be_deleted} -eq 0 ]; then exit 0 fi # Print counters if [ ${nb_dirty_files} -gt 0 ]; then echo "${nb_dirty_files} files to upload."; fi if [ ${nb_files_to_be_deleted} -gt 0 ]; then echo "${nb_files_to_be_deleted} files to delete." fi # If we are in verbose mode, query then print content if [ ${VERBOSE} -gt 0 ]; then if [ ${nb_files_to_be_deleted} -gt 0 ]; then result=$( get_psql_values "select iil.fs_id as FS, iil.inode_number, instances.instance_name, CASE WHEN deleting='t' THEN 'deleting' ELSE 'to_be_deleted' END AS status FROM inodes_instances AS ii INNER JOIN inodes_instances_link AS iil ON (iil.inode_instance_id = ii.inode_instance_id) INNER JOIN instances ON (ii.instance_id = instances.instance_id) WHERE status = 2;" ${DISPLAY_PSQL_TABLE}) returned_value=$? 
echo "${result}" >&2 fi if [ ${nb_dirty_files} -gt 0 ]; then if [ ${VERBOSE} -gt 1 ]; then full_path=" (SELECT sub.path from (WITH RECURSIVE path(name, path, parent, entry_id, parent_id) AS ( SELECT name, '/', NULL, entries.entry_id, CAST(0 as BIGINT) FROM entries WHERE entries.parent_entry_id IS NULL UNION SELECT entries.name, parentpath.path || CASE parentpath.path WHEN '/' THEN '' ELSE '/' END || entries.name, parentpath.path, entries.entry_id, parent_entry_id as parent_id FROM entries, path as parentpath WHERE entries.parent_entry_id = parentpath.entry_id) SELECT * FROM path WHERE entry_id = entries.entry_id) as sub) as path " else full_path="entries.name"; fi time_column="TO_CHAR(TO_TIMESTAMP(inodes.mtime), 'YYYY/MM/DD HH24:MI:SS') as mtime" if [ ${RAW} -eq 1 ]; then time_column="inodes.mtime" fi size_column="pg_size_pretty(inodes.size) AS size" if [ ${RAW} -eq 1 ]; then size_column="inodes.size AS size" fi result=$( get_psql_values " select filesystems.fs_name as FS, inodes.inode_number, instance_name, ${time_column}, ${size_column}, CASE WHEN status=1 THEN CASE WHEN uploading='t' THEN 'uploading' ELSE 'to_be_uploaded' END WHEN status=2 THEN CASE WHEN deleting='t' THEN 'deleting' ELSE 'to_be_deleted' END END AS status, $full_path FROM inodes_instances AS ii INNER JOIN inodes_instances_link AS iil ON (ii.inode_instance_id = iil.inode_instance_id) INNER JOIN instances ON (ii.instance_id = instances.instance_id) INNER JOIN inodes ON (inodes.inode_number = iil.inode_number AND inodes.fs_id = iil.fs_id) INNER JOIN entries ON (entries.inode_number = iil.inode_number AND entries.fs_id = iil.fs_id) INNER JOIN filesystems ON (entries.fs_id = filesystems.fs_id) WHERE status != 0 ORDER BY mtime;" ${DISPLAY_PSQL_TABLE}) returned_value=$? echo "${result}" >&2 fi fi # Different exit codes for each situation (dirty files only, deleted files only, both) if [ ${nb_dirty_files} -gt 0 -a ${nb_files_to_be_deleted} -gt 0 ]; then exit 3 elif [ ${nb_files_to_be_deleted} -gt 0 ]; then exit 2 elif [ ${nb_dirty_files} -gt 0 ]; then exit 1 fi exit 0
nuagelabsfr/cloud-gateway
src/scripts/CloudGatewayStatus.sh
Shell
agpl-3.0
4,664
#!/bin/bash
RSYNC_PASSWORD=$LUG_password rsync -rtlivH --delete-after --delay-updates --safe-links --max-delete=1000 --contimeout=60 $LUG_username@sync.repo.archlinuxcn.org::repo $LUG_path
sjtug/mirror-docker
lug/worker-script/archlinux-cn.sh
Shell
agpl-3.0
190
# copy cloudbrain to vagrant home
vagrant scp ../../../cloudbrain :/Users/vagrant/.

# Build popy in VM
vagrant ssh -c "cd /Users/vagrant/cloudbrain/scripts/osx && ./build-popy.sh"

# Copy popy.tar.gz
rm popy.tar.gz
vagrant scp :/Users/vagrant/cloudbrain/scripts/osx/popy.tar.gz .
flysonic10/cloudbrain
.vagrant/osx/vagrant-build-popy.sh
Shell
agpl-3.0
281
#!/bin/bash
# simulated partition formatting
lista_menu(){
    for p in $( fdisk -l 2>/dev/null | grep '^/dev/' | cut -d' ' -f1 ) ; do
        echo "TRUE $p "
    done
}
menu(){
    zenity --title "Formatowanie Dysków" --text "bedzie formacik panie?" \
        --width=400 --height=300 \
        --list --checklist \
        --column="zaznacz" --column "partycja" \
        $(lista_menu) \
        --separator " " --multiple \
        --print-column=2
}
for d in $(menu) ; do
    echo "mkfs.ext4 $d"
done
borzole/borzole
bin/zmkfs.ext3.sh
Shell
lgpl-3.0
458
#Parameter mapping
#string tmpName
#string tempDir
#string intermediateDir
#string projectVariantCallsSnpEff_Annotated
#string projectVariantCallsSnpEff_SummaryHtml
#string projectBatchGenotypedAnnotatedVariantCalls
#string project
#string logsDir
#string groupname
#string tmpDataDir
#string snpEffVersion
#string javaVersion

makeTmpDir "${projectVariantCallsSnpEff_Annotated}"
tmpProjectVariantCallsSnpEff_Annotated="${MC_tmpFile}"

module load "${snpEffVersion}"
module list

if [ -f "${projectBatchGenotypedAnnotatedVariantCalls}" ]
then
	#
	##
	###Annotate with SnpEff
	##
	#

	#Run snpEff
	java -XX:ParallelGCThreads=1 -Djava.io.tmpdir="${tempDir}" -Xmx3g -jar \
	"${EBROOTSNPEFF}/snpEff.jar" \
	-v hg19 \
	-csvStats "${tmpProjectVariantCallsSnpEff_Annotated}.csvStats.csv" \
	-noLog \
	-lof \
	-stats "${projectVariantCallsSnpEff_SummaryHtml}" \
	-canon \
	-ud 0 \
	-c "${EBROOTSNPEFF}/snpEff.config" \
	"${projectBatchGenotypedAnnotatedVariantCalls}" \
	> "${tmpProjectVariantCallsSnpEff_Annotated}"

	mv "${tmpProjectVariantCallsSnpEff_Annotated}" "${projectVariantCallsSnpEff_Annotated}"
	mv "${tmpProjectVariantCallsSnpEff_Annotated}.csvStats.csv" "${projectVariantCallsSnpEff_Annotated}.csvStats.csv"
	echo "mv ${tmpProjectVariantCallsSnpEff_Annotated} ${projectVariantCallsSnpEff_Annotated}"
else
	echo "skipped"
fi
RoanKanninga/NGS_DNA
protocols/SnpEff.sh
Shell
lgpl-3.0
1,339
#!/bin/bash # # Apache License # Version 2.0, January 2004 # http://www.apache.org/licenses/ # # TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION # # 1. Definitions. # # "License" shall mean the terms and conditions for use, reproduction, # and distribution as defined by Sections 1 through 9 of this document. # # "Licensor" shall mean the copyright owner or entity authorized by # the copyright owner that is granting the License. # # "Legal Entity" shall mean the union of the acting entity and all # other entities that control, are controlled by, or are under common # control with that entity. For the purposes of this definition, # "control" means (i) the power, direct or indirect, to cause the # direction or management of such entity, whether by contract or # otherwise, or (ii) ownership of fifty percent (50%) or more of the # outstanding shares, or (iii) beneficial ownership of such entity. # # "You" (or "Your") shall mean an individual or Legal Entity # exercising permissions granted by this License. # # "Source" form shall mean the preferred form for making modifications, # including but not limited to software source code, documentation # source, and configuration files. # # "Object" form shall mean any form resulting from mechanical # transformation or translation of a Source form, including but # not limited to compiled object code, generated documentation, # and conversions to other media types. # # "Work" shall mean the work of authorship, whether in Source or # Object form, made available under the License, as indicated by a # copyright notice that is included in or attached to the work # (an example is provided in the Appendix below). # # "Derivative Works" shall mean any work, whether in Source or Object # form, that is based on (or derived from) the Work and for which the # editorial revisions, annotations, elaborations, or other modifications # represent, as a whole, an original work of authorship. For the purposes # of this License, Derivative Works shall not include works that remain # separable from, or merely link (or bind by name) to the interfaces of, # the Work and Derivative Works thereof. # # "Contribution" shall mean any work of authorship, including # the original version of the Work and any modifications or additions # to that Work or Derivative Works thereof, that is intentionally # submitted to Licensor for inclusion in the Work by the copyright owner # or by an individual or Legal Entity authorized to submit on behalf of # the copyright owner. For the purposes of this definition, "submitted" # means any form of electronic, verbal, or written communication sent # to the Licensor or its representatives, including but not limited to # communication on electronic mailing lists, source code control systems, # and issue tracking systems that are managed by, or on behalf of, the # Licensor for the purpose of discussing and improving the Work, but # excluding communication that is conspicuously marked or otherwise # designated in writing by the copyright owner as "Not a Contribution." # # "Contributor" shall mean Licensor and any individual or Legal Entity # on behalf of whom a Contribution has been received by Licensor and # subsequently incorporated within the Work. # # 2. Grant of Copyright License. 
Subject to the terms and conditions of # this License, each Contributor hereby grants to You a perpetual, # worldwide, non-exclusive, no-charge, royalty-free, irrevocable # copyright license to reproduce, prepare Derivative Works of, # publicly display, publicly perform, sublicense, and distribute the # Work and such Derivative Works in Source or Object form. # # 3. Grant of Patent License. Subject to the terms and conditions of # this License, each Contributor hereby grants to You a perpetual, # worldwide, non-exclusive, no-charge, royalty-free, irrevocable # (except as stated in this section) patent license to make, have made, # use, offer to sell, sell, import, and otherwise transfer the Work, # where such license applies only to those patent claims licensable # by such Contributor that are necessarily infringed by their # Contribution(s) alone or by combination of their Contribution(s) # with the Work to which such Contribution(s) was submitted. If You # institute patent litigation against any entity (including a # cross-claim or counterclaim in a lawsuit) alleging that the Work # or a Contribution incorporated within the Work constitutes direct # or contributory patent infringement, then any patent licenses # granted to You under this License for that Work shall terminate # as of the date such litigation is filed. # # 4. Redistribution. You may reproduce and distribute copies of the # Work or Derivative Works thereof in any medium, with or without # modifications, and in Source or Object form, provided that You # meet the following conditions: # # (a) You must give any other recipients of the Work or # Derivative Works a copy of this License; and # # (b) You must cause any modified files to carry prominent notices # stating that You changed the files; and # # (c) You must retain, in the Source form of any Derivative Works # that You distribute, all copyright, patent, trademark, and # attribution notices from the Source form of the Work, # excluding those notices that do not pertain to any part of # the Derivative Works; and # # (d) If the Work includes a "NOTICE" text file as part of its # distribution, then any Derivative Works that You distribute must # include a readable copy of the attribution notices contained # within such NOTICE file, excluding those notices that do not # pertain to any part of the Derivative Works, in at least one # of the following places: within a NOTICE text file distributed # as part of the Derivative Works; within the Source form or # documentation, if provided along with the Derivative Works; or, # within a display generated by the Derivative Works, if and # wherever such third-party notices normally appear. The contents # of the NOTICE file are for informational purposes only and # do not modify the License. You may add Your own attribution # notices within Derivative Works that You distribute, alongside # or as an addendum to the NOTICE text from the Work, provided # that such additional attribution notices cannot be construed # as modifying the License. # # You may add Your own copyright statement to Your modifications and # may provide additional or different license terms and conditions # for use, reproduction, or distribution of Your modifications, or # for any such Derivative Works as a whole, provided Your use, # reproduction, and distribution of the Work otherwise complies with # the conditions stated in this License. # # 5. Submission of Contributions. 
Unless You explicitly state otherwise, # any Contribution intentionally submitted for inclusion in the Work # by You to the Licensor shall be under the terms and conditions of # this License, without any additional terms or conditions. # Notwithstanding the above, nothing herein shall supersede or modify # the terms of any separate license agreement you may have executed # with Licensor regarding such Contributions. # # 6. Trademarks. This License does not grant permission to use the trade # names, trademarks, service marks, or product names of the Licensor, # except as required for reasonable and customary use in describing the # origin of the Work and reproducing the content of the NOTICE file. # # 7. Disclaimer of Warranty. Unless required by applicable law or # agreed to in writing, Licensor provides the Work (and each # Contributor provides its Contributions) on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied, including, without limitation, any warranties or conditions # of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A # PARTICULAR PURPOSE. You are solely responsible for determining the # appropriateness of using or redistributing the Work and assume any # risks associated with Your exercise of permissions under this License. # # 8. Limitation of Liability. In no event and under no legal theory, # whether in tort (including negligence), contract, or otherwise, # unless required by applicable law (such as deliberate and grossly # negligent acts) or agreed to in writing, shall any Contributor be # liable to You for damages, including any direct, indirect, special, # incidental, or consequential damages of any character arising as a # result of this License or out of the use or inability to use the # Work (including but not limited to damages for loss of goodwill, # work stoppage, computer failure or malfunction, or any and all # other commercial damages or losses), even if such Contributor # has been advised of the possibility of such damages. # # 9. Accepting Warranty or Additional Liability. While redistributing # the Work or Derivative Works thereof, You may choose to offer, # and charge a fee for, acceptance of support, warranty, indemnity, # or other liability obligations and/or rights consistent with this # License. However, in accepting such obligations, You may act only # on Your own behalf and on Your sole responsibility, not on behalf # of any other Contributor, and only if You agree to indemnify, # defend, and hold each Contributor harmless for any liability # incurred by, or claims asserted against, such Contributor by reason # of your accepting any such warranty or additional liability. # # END OF TERMS AND CONDITIONS # # APPENDIX: How to apply the Apache License to your work. # # To apply the Apache License to your work, attach the following # boilerplate notice, with the fields enclosed by brackets "{}" # replaced with your own identifying information. (Don't include # the brackets!) The text should be enclosed in the appropriate # comment syntax for the file format. We also recommend that a # file or class name and description of purpose be included on the # same "printed page" as the copyright notice for easier # identification within third-party archives. # # Copyright {yyyy} {name of copyright owner} # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # set -x # Updates the internal mirror from contents in s3 source /etc/rtwsrc if [ -z "$RTWS_APT_MIRROR_ROOT_LOCATION" ]; then RTWS_APT_MIRROR_ROOT_LOCATION="/mnt/rdafs/" echo "Defaulting RTWS_APT_MIRROR_ROOT_LOCATION to $RTWS_APT_MIRROR_ROOT_LOCATION" fi if [ ! -d $RTWS_APT_MIRROR_ROOT_LOCATION ]; then mkdir -pv $RTWS_APT_MIRROR_ROOT_LOCATION fi if [[ "$(whoami)" != "root" ]]; then echo "ERROR: You must be root to proceed..." exit 1 fi if [ -z "$RTWS_MOUNT_DEVICE" ]; then echo "ERROR: RTWS_MOUNT_DEVICE not defined..." exit 1 fi s3cmd -c /home/rtws/.s3cfg -rf get s3://$RTWS_MOUNT_DEVICE/mirror $RTWS_APT_MIRROR_ROOT_LOCATION # Link mirrored domain content excluding their domain prefix cd $RTWS_APT_MIRROR_ROOT_LOCATION/mirror/ for domain in $(/bin/ls -d *) do cd $domain for dir in $(/bin/ls -d *) do ln -sf $RTWS_APT_MIRROR_ROOT_LOCATION/mirror/$domain/$dir /var/www/. done cd - done
deleidos/digitaledge-platform
commons-core/src/main/script/boot/apt_mirror_update.sh
Shell
apache-2.0
12,672
echo "apt-cache search touch | sed 's/^/sudo apt-get install -y -y /g;s/ - / #- /g' "
sudo apt-get install -y -y gimp #- The GNU Image Manipulation Program
sudo apt-get install -y -y coreutils #- GNU core utilities
sudo apt-get install -y -y libpam0g #- Pluggable Authentication Modules library
sudo apt-get install -y -y libgrip0 #- Shared library providing multitouch gestures to GTK+ apps.
sudo apt-get install -y -y libusbmuxd1 #- USB multiplexor daemon for iPhone and iPod Touch devices #- library
sudo apt-get install -y -y libutouch-evemu1 #- Kernel Device Emulation Library
sudo apt-get install -y -y libts-0.0-0 #- touch screen library
sudo apt-get install -y -y libimobiledevice2 #- Library for communicating with the iPhone and iPod Touch
sudo apt-get install -y -y libutouch-frame1 #- Touch Frame Library
sudo apt-get install -y -y xserver-xorg-input-synaptics #- Synaptics TouchPad driver for X.Org server
sudo apt-get install -y -y gimp-plugin-registry #- repository of optional extensions for GIMP
sudo apt-get install -y -y lockfile-progs #- Programs for locking and unlocking files and mailboxes
sudo apt-get install -y -y libutouch-geis1 #- Gesture engine interface support
sudo apt-get install -y -y ginn #- Gesture Injector: No-GEIS, No-Toolkits
sudo apt-get install -y -y tsconf #- touch screen library common files
sudo apt-get install -y -y libmtdev1 #- Multitouch Protocol Translation Library #- shared library
sudo apt-get install -y -y libutouch-grail1 #- Gesture Recognition And Instantiation Library
sudo apt-get install -y -y ptouch-driver #- CUPS/Foomatic driver for Brother P-touch label printers
sudo apt-get install -y -y inputattach #- utility to connect serial-attached peripherals to the input subsystem
sudo apt-get install -y -y usbmuxd #- USB multiplexor daemon for iPhone and iPod Touch devices
bayvictor/distributed-polling-system
bin/aptget_touch_support.sh
Shell
apache-2.0
1,838
cat ./tues-afternoon.bench | awk '{if (length($0) > 0) { print $2"."$4,$8,$10}}' > /tmp/bench.data
cat /tmp/bench.data | awk '{print > $1".dat"}'
dgraph-io/experiments
intersects/gnuplot/convert.sh
Shell
apache-2.0
146
# Install puppet agent
yum -y install http://yum.puppetlabs.com/puppetlabs-release-el-6.noarch.rpm
yum -y install puppet-2.7.23-1.el6.noarch

# Prepare puppet configuration file
cat > /bigtop-puppet/config/site.csv << EOF
hadoop_head_node,$1
hadoop_storage_dirs,/data/1,/data/2
bigtop_yumrepo_uri,http://bigtop.s3.amazonaws.com/releases/0.7.0/redhat/6/x86_64
jdk_package_name,java-1.7.0-openjdk-devel.x86_64
components,hadoop,hbase
EOF

mkdir -p /data/{1,2}
spotify/bigtop
bigtop-deploy/vm/vagrant-puppet/provision.sh
Shell
apache-2.0
458
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -eo pipefail

export NPM_CONFIG_PREFIX=/home/node/.npm-global

if [ -f ${KOKORO_KEYSTORE_DIR}/73713_github-magic-proxy-url-release-please ]; then
  # Groom the release PR as new commits are merged.
  npx release-please release-pr --token=${KOKORO_KEYSTORE_DIR}/73713_github-magic-proxy-token-release-please \
    --repo-url=googleapis/java-accessapproval \
    --package-name="accessapproval" \
    --api-url=${KOKORO_KEYSTORE_DIR}/73713_github-magic-proxy-url-release-please \
    --proxy-key=${KOKORO_KEYSTORE_DIR}/73713_github-magic-proxy-key-release-please \
    --release-type=java-yoshi
fi
googleapis/java-accessapproval
.kokoro/continuous/propose_release.sh
Shell
apache-2.0
1,189
curl -XPUT 'http://192.168.99.100:9200/spark/' -d '{ "settings" : { "index" : { "number_of_shards" : 1, "number_of_replicas" : 0 } }, "mappings" : { "netflow": { "properties": { "port_src_well_known_service": { "type": "string" }, "ts": { "format": "yyyy/MM/dd HH:mm:ss.SSS||yyyy/MM/dd", "type": "date" }, "geoip_src_country": { "type": "string" }, "asset_dst_priority": { "index": "not_analyzed", "type": "integer" }, "threat_dst_infrastructure": { "type": "string" }, "reason_for_flow": { "type": "string" }, "threat_src_infrastructure": { "type": "string" }, "te": { "type": "string" }, "geoip_src_lat": { "type": "string" }, "src_port": { "type": "long" }, "asset_src_org_type": { "type": "string" }, "geoip_src_subdivisions": { "type": "string" }, "threat_src_campaign": { "type": "string" }, "threat_src_type": { "type": "string" }, "geoip_dst_city": { "type": "string" }, "geoip_dst_country": { "type": "string" }, "geoip_dst_long": { "type": "string" }, "threat_dst_campaign": { "type": "string" }, "tcp_flag_a": { "type": "string" }, "threat_dst_malware": { "type": "string" }, "asset_dst_country": { "type": "string" }, "sensor_priority": { "type": "string" }, "sensor_id": { "type": "string" }, "asset_src_priority": { "type": "string" }, "dest_port": { "type": "integer" }, "asset_src_site": { "type": "string" }, "tcp_flag_s": { "type": "string" }, "geoip_src_as": { "type": "string" }, "tcp_flag_r": { "type": "string" }, "tcp_flag_p": { "type": "string" }, "yyyy": { "type": "string" }, "src_ip": { "type": "ip" }, "dst_ip": { "type": "ip" }, "asset_dst_org_sector": { "type": "string" }, "asset_src_org_sector": { "type": "string" }, "sensor_country": { "type": "string" }, "sensor_site": { "type": "string" }, "threat_dst_type": { "type": "string" }, "tcp_flag_f": { "type": "string" }, "tcp_flag_u": { "type": "string" }, "bytes": { "type": "long" }, "packets": { "type": "long" }, "geoip_src_city": { "type": "string" }, "hh": { "type": "string" }, "threat_src_malware": { "type": "string" }, "sensor_org_type": { "type": "string" }, "dd": { "type": "string" }, "tos": { "type": "long" }, "asset_dst_org_type": { "type": "string" }, "dest_ip": { "type": "ip" }, "asset_dst_org_name": { "type": "string" }, "geoip_src_long": { "type": "string" }, "asset_dst_site": { "type": "string" }, "sensor_org_name": { "type": "string" }, "threat_src_attacker": { "type": "string" }, "threat_dst_attacker": { "type": "string" }, "geoip_dst_as": { "type": "string" }, "geoip_dst_as_org": { "type": "string" }, "sensor_org_sector": { "type": "string" }, "mi": { "type": "string" }, "mm": { "type": "string" }, "protocol": { "type": "long" }, "ESDateStr": { "type": "string" }, "geoip_dst_lat": { "type": "string" }, "asset_src_country": { "type": "string" }, "geoip_dst_isp_org": { "type": "string" }, "port_dst_well_known_service": { "type": "string" }, "duration": { "type": "string" }, "geoip_dst_subdivisions": { "type": "string" }, "geoip_src_as_org": { "type": "string" }, "geoip_src_isp_org": { "type": "string" }, "ip_version": { "type": "string" }, "asset_src_org_name": { "type": "string" }, "asset_dst_priority": { "type": "string" } } } } }'
faganpe/KafkaStreamingPOC
src/scripts/es/create_index_with_mappings.sh
Shell
apache-2.0
6,544
#!/bin/bash # # Copyright 2016 Pascual Martinez-Gomez # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Usage: # # ./en/eacl2017exp.sh <ncores> <split> <templates.yaml> # # Example: # # ./en/eacl2017exp.sh 10 train en/semantic_templates_en_event.yaml # sick=en/SICK.semeval.txt # How many processes in parallel you want to run. # The maximum number should be inferior to the number of cores in your machine. # Default: 3 cores=${1:-3} # Split of the data (default train): # train (4439 problems), # test (4906 problems), # trial (495 problems). dataset=${2:-"train"} templates=$3 plain_dir=plain results_dir=results # Extract training and test data from SICK dataset, removing the header line. if [ ! -d ${plain_dir} ]; then mkdir -p ${plain_dir} fi echo "Extracting problems from the SICK file." tail -n +2 $sick | \ tr -d '\r' | \ awk -F'\t' -v tdir=${plain_dir} \ '{pair_id=$1; sub(/\.$/,"",$2); sub(/\.$/,"",$3); premise=$2; conclusion=$3; if($5 == "CONTRADICTION"){ judgement="no"; } else if ($5 == "ENTAILMENT") { judgement="yes"; } else if ($5 == "NEUTRAL") { judgement="unknown"; } set=$NF; printf "%s.\n%s.\n", premise, conclusion > tdir"/sick_"tolower(set)"_"pair_id".txt"; printf "%s\n", judgement > tdir"/sick_"tolower(set)"_"pair_id".answer"; }' # Create files that list all filenames of training, testing and trial. for dset in {train,test,trial}; do ls -v ${plain_dir}/sick_${dset}_*.txt > ${plain_dir}/sick_${dset}.files done # Split filename entries into several files, for parallel processing: ntrain=`cat ${plain_dir}/sick_train.files | wc -l` ntest=`cat ${plain_dir}/sick_test.files | wc -l` ntrial=`cat ${plain_dir}/sick_trial.files | wc -l` train_lines_per_split=`python -c "from math import ceil; print(int(ceil(float(${ntrain})/${cores})))"` test_lines_per_split=`python -c "from math import ceil; print(int(ceil(float(${ntest})/${cores})))"` trial_lines_per_split=`python -c "from math import ceil; print(int(ceil(float(${ntrial})/${cores})))"` rm -f ${plain_dir}/sick_{train,test,trial}.files_?? split -l $train_lines_per_split ${plain_dir}/sick_train.files ${plain_dir}/sick_train.files_ split -l $test_lines_per_split ${plain_dir}/sick_test.files ${plain_dir}/sick_test.files_ split -l $trial_lines_per_split ${plain_dir}/sick_trial.files ${plain_dir}/sick_trial.files_ # Copy a coq static library and compile it. cp en/coqlib_sick.v coqlib.v coqc coqlib.v cp en/tactics_coq_sick.txt tactics_coq.txt # Run pipeline for each entailment problem. for ff in ${plain_dir}/sick_${dataset}.files_??; do for f in `cat ${ff}`; do ./en/rte_en_mp.sh $f $templates; done & done # Wait for the parallel processes to finish. wait total=0 correct=0 for f in ${plain_dir}/sick_${dataset}_*.answer; do let total++ base_filename=${f##*/} sys_filename=${results_dir}/${base_filename/.answer/.txt.answer} gold_answer=`head -1 $f` if [ ! -e ${sys_filename} ]; then sys_answer="unknown" else sys_answer=`head -1 ${sys_filename}` if [ ! "${sys_answer}" == "unknown" ] && [ ! "${sys_answer}" == "yes" ] && [ ! 
"${sys_answer}" == "no" ]; then sys_answer="unknown" fi fi if [ "${gold_answer}" == "${sys_answer}" ]; then let correct++ fi echo -e $f"\t"$gold_answer"\t"$sys_answer done accuracy=`echo "scale=3; $correct / $total" | bc -l` echo "Accuracy: "$correct" / "$total" = "$accuracy # Print a summary (precision, recall, f-score) of the errors at individual problems, # per problem category and a global score. echo "Evaluating." echo "<!doctype html> <html lang='en'> <head> <meta charset='UTF-8'> <title>Evaluation results of "$category_templates"</title> <style> body { font-size: 1.5em; } </style> </head> <body> <table border='1'> <tr> <td>sick problem</td> <td>gold answer</td> <td>system answer</td> <td>proving time</td> </tr>" > $results_dir/main_${dataset}.html total_observations=0 correct_recognitions=0 attempts=0 total_proving_time=0 red_color="rgb(255,0,0)" green_color="rgb(0,255,0)" white_color="rgb(255,255,255)" gray_color="rgb(136,136,136)" for gold_filename in `ls -v ${plain_dir}/sick_${dataset}_*.answer`; do base_filename=${gold_filename##*/} # this line obtains the filename, without the directory path. system_filename=${results_dir}/${base_filename/.answer/.txt.answer} gold_answer=`cat $gold_filename` system_answer=`cat $system_filename` time_filename=${results_dir}/${base_filename/.answer/.txt.time} proving_time=`cat $time_filename` total_proving_time=`echo "$total_proving_time + $proving_time" | bc -l` total_number=$((total_number + 1)) color=$white_color if [ "$gold_answer" == "yes" ] || [ "$gold_answer" == "no" ]; then total_observations=$((total_observations + 1)) if [ "$gold_answer" == "$system_answer" ]; then correct_recognitions=$((correct_recognitions + 1)) color=$green_color else color=$red_color fi if [ "$system_answer" == "yes" ] || [ "$system_answer" == "no" ]; then attempts=$((attempts + 1)) else color=$gray_color fi fi echo ' <tr> <td><a style="background-color:'$color';" href="'${base_filename/.answer/.txt.html}'">'${base_filename/.answer/}'</a></td> <td>'$gold_answer'</td> <td>'$system_answer'</td> <td>'$proving_time's</td> </tr>' >> $results_dir/main_${dataset}.html done average_proving_time=`echo "scale=2; $total_proving_time / $total_number" | bc -l` echo " <h4><font color="red">Accuracy: "$correct" / "$total" = "$accuracy" </font></h4> <h4><font color="red">Average proving time: "${average_proving_time}" </font></h4> </body> </html> " >> $results_dir/main_${dataset}.html ./ja/accuracy.sh ${results_dir}/main_${dataset}.html > ${results_dir}/score.txt
mynlp/ccg2lambda
en/eacl2017exp.sh
Shell
apache-2.0
6,306
#!/bin/sh set -e set -u set -o pipefail function on_error { echo "$(realpath -mq "${0}"):$1: error: Unexpected failure" } trap 'on_error $LINENO' ERR # Used as a return value for each invocation of `strip_invalid_archs` function. STRIP_BINARY_RETVAL=0 # Strip invalid architectures strip_invalid_archs() { binary="$1" warn_missing_arch=${2:-true} # Get architectures for current target binary binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)" # Intersect them with the architectures we are building for intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)" # If there are no archs supported by this binary then warn the user if [[ -z "$intersected_archs" ]]; then if [[ "$warn_missing_arch" == "true" ]]; then echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)." fi STRIP_BINARY_RETVAL=1 return fi stripped="" for arch in $binary_archs; do if ! [[ "${ARCHS}" == *"$arch"* ]]; then # Strip non-valid architectures in-place lipo -remove "$arch" -output "$binary" "$binary" stripped="$stripped $arch" fi done if [[ "$stripped" ]]; then echo "Stripped $binary of architectures:$stripped" fi STRIP_BINARY_RETVAL=0 } # This protects against multiple targets copying the same framework dependency at the same time. The solution # was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????") # Copies and strips a vendored dSYM install_dsym() { local source="$1" warn_missing_arch=${2:-true} if [ -r "$source" ]; then # Copy the dSYM into the targets temp dir. echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\"" rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}" local basename basename="$(basename -s .dSYM "$source")" binary_name="$(ls "$source/Contents/Resources/DWARF")" binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}" # Strip invalid architectures from the dSYM. if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then strip_invalid_archs "$binary" "$warn_missing_arch" fi if [[ $STRIP_BINARY_RETVAL == 0 ]]; then # Move the stripped file into its final destination. echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\"" rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}" else # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing. 
mkdir -p "${DWARF_DSYM_FOLDER_PATH}" touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM" fi fi } # Copies the bcsymbolmap files of a vendored framework install_bcsymbolmap() { local bcsymbolmap_path="$1" local destination="${BUILT_PRODUCTS_DIR}" echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"" rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}" } install_dsym "${PODS_ROOT}/../../Pod/Library/PLMediaStreamingKit.framework.dSYM"
pili-engineering/PLMediaStreamingKit
Example/Pods/Target Support Files/PLMediaStreamingKit/PLMediaStreamingKit-copy-dsyms.sh
Shell
apache-2.0
4,410
#!/bin/bash
(python setup.py bdist_wheel;scp dist/mpos-1.0.0-py2-none-any.whl pi@pi:~/mpos.zip; ssh pi@pi 'unzip -uoq mpos.zip'; ssh pi@pi 'chmod +x ~/mpos/web/runprod.sh'; ssh pi@pi '~/mpos/web/runprod.sh';)
cackharot/ngen-milk-pos
deploy.sh
Shell
apache-2.0
209
#!/sbin/sh
/sbin/mount -a
chmod -R 0755 /system/bin/0*.sh
chmod -R 0755 /system/bin/9*.sh
chmod 0755 /system/xbin/sqlite3
AustinCanlin/NDCustomPart3Rom
samsumg/g9200/tomato/tmp/setsh.sh
Shell
apache-2.0
127
#!/usr/bin/env bash
sh stop.sh
sleep 1
export TF_CONFIG='{"cluster": {"worker": ["localhost:2222"], "ps": ["localhost:2223"], "chief": ["localhost:2224"]}, "task": {"type": "chief", "index": 0}}'
python movielens-100k-estimator.py --mode warmup &
sleep 1
export TF_CONFIG='{"cluster": {"worker": ["localhost:2222"], "ps": ["localhost:2223"], "chief": ["localhost:2224"]}, "task": {"type": "worker", "index": 0}}'
python movielens-100k-estimator.py --mode warmup &
sleep 1
export TF_CONFIG='{"cluster": {"worker": ["localhost:2222"], "ps": ["localhost:2223"], "chief": ["localhost:2224"]}, "task": {"type": "ps", "index": 0}}'
python movielens-100k-estimator.py --mode warmup &
echo "ok"
tensorflow/recommenders-addons
demo/dynamic_embedding/movielens-100k-estimator-with-warmup/warmup_train.sh
Shell
apache-2.0
692
#!/bin/bash
echo "I am the predeploy script that on the ginux-dev as root"
killall vzcontrol
exit 0
gamingrobot/ginux
ops/root_predeploy.sh
Shell
apache-2.0
99
# extract text
$JAVA_BIN -jar $scriptpath"lib/tika-app.jar" -t tmp/$uuid/tmp.cumulative.html > tmp/$uuid/cumulative.txt

echo "completed text extraction from html to tmp/$uuid/cumulative.txt" | tee --append $sfb_log
fredzannarbor/pagekicker-community
scripts/includes/text-extraction-from-html.sh
Shell
apache-2.0
219
#!/bin/bash
node ../sElect/tools/config2js.js config.json > webapp/js/config.js configRaw
node ../sElect/tools/config2js.js ../_configFiles_/handlerConfigFile.json > webapp/js/ElectionConfigFile.js electionConfigRaw
node ../sElect/tools/config2js.js ../_configFiles_/serverAddresses.json > webapp/js/serverAddresses.js sAddressesRaw 2>/dev/null
escapin/ElectionManager
ElectionHandler/refreshConfig.sh
Shell
apache-2.0
348
#!/bin/bash
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Runs all the tests & stores the results in the array - list_logs
# Displays all the results at the end
# Whenever a new test_file.py is added it should be added here

# function to show errors
err() {
  echo "[$(date +'%Y-%m-%dT%H:%M:%S%z')]: $*" >&2
}

# empty array initialized
list_logs=()

if ! python3 test_date_detection.py; then
  # if this test fails, echo the error and also store it in the array
  err "test_date_detection failed"
  list_logs+=("test_date_detection failed")
else
  # if it does not fail, just store the result in the array
  list_logs+=("test_date_detection passed")
fi

if ! python3 util/test_min_max_date.py; then
  err "util/test_min_max_date failed"
  list_logs+=("util/test_min_max_date failed")
else
  list_logs+=("util/test_min_max_date passed")
fi

echo 'All tests completed '
echo 'Results -'

list_logs_length=${#list_logs[@]}

# Displaying all the results at the end
for ((i=0;i<list_logs_length;i++)); do
  echo "${list_logs[i]}"
done
googleinterns/debaised-analysis
date_detection/test_all.sh
Shell
apache-2.0
1,554
#!/usr/bin/env bash

docker build --no-cache -t vfetc .

docker run -it --rm -v $(pwd)/data/tmp:/out vfetc files=data/vendor/agilent/batches.zip outputfile=/out/agilent_from_zip.txt && cat data/tmp/agilent_from_zip.txt
docker run -it --rm -v $(pwd)/data/tmp:/out vfetc files=data/vendor/agilent/example_batch1.txt,data/vendor/agilent/example_batch2.txt outputfile=/out/agilent.txt && cat data/tmp/agilent.txt
docker run -it --rm -v $(pwd)/data/tmp:/out vfetc files=data/vendor/sciex/example_batch1.txt outputfile=/out/sciex.txt && cat data/tmp/sciex.txt
docker run -it --rm -v $(pwd)/data/tmp:/out vfetc files=data/vendor/shimadzu/example_batch1.txt,data/vendor/shimadzu/example_batch2.txt,data/vendor/shimadzu/example_batch3.txt outputfile=/out/shimadzu.txt && cat data/tmp/shimadzu.txt
docker run -it --rm -v $(pwd)/data/tmp:/out vfetc files=data/vendor/waters/example_batch1.txt,data/vendor/waters/example_batch2.txt outputfile=/out/waters.txt && cat data/tmp/waters.txt
leidenuniv-lacdr-abs/ms-vfetc
rebuild_and_run.sh
Shell
apache-2.0
971
cp .theanorc_gpu ~/.theanorc
erikjandevries/ConfigLM17ML
extra/prep-TheanoForGPU.sh
Shell
apache-2.0
30
#!/bin/bash
echo "Creating UrbanCode Enterprise Platform Services"
echo "`ls -la`"
cd ./compose/urbancode
docker-compose -f bluemix-compose.yml -p UrbanCodePlatform up -d
stackinabox/stackinabox.io
scripts/deploy-to-bluemix/deploy_compose.sh
Shell
apache-2.0
173
#!/usr/bin/env bash # # Build script for CircleCi # # Inputs: # CIRCLE_OS_NAME - "linux" or "osx" # MODE - "static", "dynamic", "windows", "raspberrypi", or "minimal" # # Static builds use scripts to download libarchive and libconfuse, so those are # only installed on shared library builds. # set -e set -v FWUP_VERSION=$(cat VERSION) # Create ./configure ./autogen.sh case "${CIRCLE_OS_NAME}-${MODE}" in *-static) # If this is a static build, run 'build_pkg.sh' bash -v scripts/build_pkg.sh exit 0 ;; linux-minimal) bash -v scripts/build_and_test_minimal.sh exit 0 ;; linux-dynamic) ./configure --enable-gcov ;; linux-singlethread) # The verify-syscalls script can't follow threads, so # single thread builds are the only way to verify that # the issued read and write calls follow the expected # alignment, size and order ./configure --enable-gcov --without-pthreads ;; osx-dynamic) PKG_CONFIG_PATH="$(brew --prefix libarchive)/lib/pkgconfig:$(brew --prefix)/lib/pkgconfig:$PKG_CONFIG_PATH" ./configure ;; linux-windows) CC=x86_64-w64-mingw32-gcc \ CROSS_COMPILE=x86_64-w64-mingw32 \ bash -v scripts/build_pkg.sh exit 0 ;; linux-raspberrypi) CC=arm-linux-gnueabihf-gcc \ CROSS_COMPILE=arm-linux-gnueabihf \ PATH=~/tools/arm-bcm2708/gcc-linaro-arm-linux-gnueabihf-raspbian-x64/bin:$PATH \ QEMU_LD_PREFIX=~/tools/arm-bcm2708/gcc-linaro-arm-linux-gnueabihf-raspbian-x64/arm-linux-gnueabihf/libc/lib/arm-linux-gnueabihf \ bash -v scripts/build_pkg.sh exit 0 ;; *) echo "Unexpected build option: ${CIRCLE_OS_NAME}-${MODE}" exit 1 esac # Normal build make -j4 if ! make -j4 check; then cat tests/test-suite.log echo "git source 'make check' failed. See log above" exit 1 fi make dist # Check that the distribution version works by building it again mkdir distcheck cd distcheck tar xf ../fwup-$FWUP_VERSION.tar.gz cd fwup-$FWUP_VERSION if [ "$CIRCLE_OS_NAME" = "linux" ]; then ./configure; else PKG_CONFIG_PATH="$(brew --prefix libarchive)/lib/pkgconfig:$(brew --prefix)/lib/pkgconfig:$PKG_CONFIG_PATH" ./configure fi make -j4 if ! make -j4 check; then cat tests/test-suite.log echo "Distribution 'make check' failed. See log above" exit 1 fi cd ../..
fhunleth/fwup
scripts/ci_build.sh
Shell
apache-2.0
2,508
pkg_name=libcap-ng
pkg_origin=core
pkg_version=0.7.8
pkg_source=http://people.redhat.com/sgrubb/$pkg_name/$pkg_name-$pkg_version.tar.gz
pkg_shasum=c21af997445cd4107a55d386f955c5ea6f6e96ead693e9151277c0ab5f97d05f
pkg_maintainer="The Habitat Maintainers <humans@habitat.sh>"
pkg_description="The libcap-ng library is intended to make programming with posix capabilities much easier than the traditional libcap library"
pkg_upstream_url="https://people.redhat.com/sgrubb/libcap-ng/"
pkg_license=('GPL-2.0' 'LGPL-2.1')
pkg_deps=(core/glibc lilian/python)
pkg_build_deps=(lilian/make lilian/gcc)
pkg_bin_dirs=(bin)
pkg_lib_dirs=(lib)
pkg_include_dirs=(include)

source ../defaults.sh

do_build() {
  ./configure --prefix="${pkg_prefix}" --enable-static=no --with-python="$(pkg_path_for python)"
  make -j "$(nproc)"
}
be-plans/be
libcap-ng/plan.sh
Shell
apache-2.0
813
#!/bin/bash # Copyright 2020 The FedLearner Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ACCESS_KEY_ID=$1 ACCESS_KEY_SECRET=$2 IMAGE_HUB_URL=$3 IMAGE_HUB_USERNAME=$4 IMAGE_HUB_PASSWORD=$5 EXTERNAL_NAME=$6 GRPC_SSL_NAME=$7 DB_PASSWORD=$8 DOMAIN_URL=$9 REGION="cn-beijing" ZONE_ID="cn-beijing-h" GENERATER_NAME="fedlearnerwins" function echo_exit { echo $1 exit 1 } function echo_log { msg=$1 echo $msg echo $msg >> upgrade.log } function json2yaml { python -c 'import json; open("config", "w").write(json.load(open("./tmp","r"))["config"]);' } function install_cli { # Download kubectl kubectl help >/dev/null 2>&1 if [ $? -ne 0 ] then echo_log "Download kubectl." curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/darwin/amd64/kubectl mv kubectl /usr/local/bin/ chmod 755 /usr/local/bin/kubectl fi # Download helm helm version | grep Version:\"v3 >/dev/null 2>&1 if [ $? -ne 0 ] then echo_log "Download helm." curl -LO https://get.helm.sh/helm-v3.2.3-darwin-amd64.tar.gz tar -zxvf helm-v3.2.3-darwin-amd64.tar.gz mv darwin-amd64/helm /usr/local/bin/ chmod 755 /usr/local/bin/helm rm -rf darwin-amd64 helm-v3.2.3-darwin-amd64.tar.gz fi # Download aliyun cli aliyun version >/dev/null 2>&1 if [ $? -ne 0 ] then echo_log "Download aliyun cli." curl -LO https://aliyuncli.alicdn.com/aliyun-cli-macosx-3.0.32-amd64.tgz tar -zxvf aliyun-cli-macosx-3.0.32-amd64.tgz mv aliyun /usr/local/bin chmod 755 /usr/local/bin/aliyun rm -rf aliyun-cli-macosx-3.0.32-amd64.tgz fi # Configure aliyun cli aliyun auto-completion aliyun configure set --profile akProfile --region $REGION --access-key-id $ACCESS_KEY_ID --access-key-secret $ACCESS_KEY_SECRET --language en if [ $? -ne 0 ] then echo_exit "Failed to initiate aliyun cli." fi } function upgrade { cat ../../charts/fedlearner-add-on/configuration-snippet.txt | grep grpc_set_header >/dev/null 2>&1 if [ $? -ne 0 ] then echo "grpc_set_header Host $GRPC_SSL_NAME;" >> ../../charts/fedlearner-add-on/configuration-snippet.txt fi cat ../../charts/fedlearner-add-on/server-snippet.txt | grep grpc_ssl_name >/dev/null 2>&1 if [ $? -ne 0 ] then echo "grpc_ssl_name $GRPC_SSL_NAME;" >> ../../charts/fedlearner-add-on/server-snippet.txt fi CLUSTER_ID=`aliyun cs DescribeClusters | grep -A 1 name | grep -A 1 $GENERATER_NAME | grep cluster_id | awk -F "\"" '{print $4}'` if [ $? -ne 0 ] then echo_exit "Failed to get k8s cluster." fi rm -rf tmp config echo "Creating config file in current dir, you can move it to ~/.kube/config." aliyun cs GET /k8s/$CLUSTER_ID/user_config > ./tmp if [ $? -ne 0 ] then echo_exit "Failed to get k8s cluster config." 
fi json2yaml CURRENT_DIR=`pwd` export KUBECONFIG="$CURRENT_DIR/config" helm upgrade fedlearner-add-on ../../charts/fedlearner-add-on \ --set imageCredentials.registry=$IMAGE_HUB_URL \ --set imageCredentials.username=$IMAGE_HUB_USERNAME \ --set imageCredentials.password=$IMAGE_HUB_PASSWORD \ --set service.externalName=$EXTERNAL_NAME FILE_SYSTEM_ID=`aliyun nas DescribeFileSystems --Description $GENERATER_NAME | grep FileSystemId | awk -F "\"" '{print $4}'` if [ -n "$FILE_SYSTEM_ID" ] then MOUNT_TARGET_DOMAIN=`aliyun nas DescribeMountTargets --FileSystemId $FILE_SYSTEM_ID | grep MountTargetDomain | awk -F "\"" '{print $4}'` helm upgrade fedlearner-stack ../../charts/fedlearner-stack --set nfs-server-provisioner.enabled=false \ --set nfs-client-provisioner.enabled=true \ --set nfs-client-provisioner.nfs.server=$MOUNT_TARGET_DOMAIN \ --set mariadb.enabled=false \ --set 'ingress-nginx.controller.extraVolumeMounts[0].name=fedlearner-proxy-client' \ --set 'ingress-nginx.controller.extraVolumeMounts[0].mountPath=/etc/ingress-nginx/client/' \ --set 'ingress-nginx.controller.extraVolumes[0].name=fedlearner-proxy-client' \ --set 'ingress-nginx.controller.extraVolumes[0].secret.secretName=fedlearner-proxy-client' else echo_exit "Failed to update fedlearner-stack since missing MOUNT_TARGET_DOMAIN." fi VPC_ID=`aliyun vpc DescribeVpcs --VpcName $GENERATER_NAME | grep VpcId | awk -F "\"" '{print $4}'` if [[ $VPC_ID == "vpc"* ]] then DB_INSTANCE_ID=`aliyun rds DescribeDBInstances --VpcId $VPC_ID | grep \"DBInstanceId\" | awk -F "\"" '{print $4}'` if [ -n "$DB_INSTANCE_ID" ] then DB_URL=`aliyun rds DescribeDBInstanceNetInfo --DBInstanceId $DB_INSTANCE_ID | grep ConnectionString\" | awk -F "\"" '{print $4}'` helm upgrade fedlearner ../../charts/fedlearner \ --set fedlearner-web-console.cluster.env.DB_USERNAME=fedlearner \ --set fedlearner-web-console.cluster.env.DB_PASSWORD=$DB_PASSWORD \ --set fedlearner-web-console.cluster.env.DB_HOST=$DB_URL \ --set fedlearner-web-console.cluster.env.DB_PORT=3306 \ --set fedlearner-operator.extraArgs.ingress-extra-host-suffix=$DOMAIN_URL \ --set fedlearner-operator.extraArgs.ingress-client-auth-secret-name="default/ca-secret" \ --set fedlearner-operator.extraArgs.ingress-enabled-client-auth=true \ --set fedlearner-operator.extraArgs.ingress-secret-name=fedlearner-proxy-server else echo_exit "Failed to update fedlearner-stack since missing DB_INSTANCE_ID." fi else echo_exit "Failed to update fedlearner-stack since missing VPC_ID." 
fi } function usage { echo "Usage: " echo " ./upgrade-add-on.sh access_key_id access_key_secret image_hub_url image_hub_username image_hub_password external_name grpc_ssl_name db_password domain_url" echo "" echo "Params:" echo "" echo " access_key_id: the access key id provided by aliyun, required" echo " access_key_secret: the access key secret provided by aliyun, required" echo " image_hub_url: the docker image hub url, required" echo " image_hub_username: the docker image hub username, required" echo " image_hub_password: the docker image hub password, required" echo " external_name: the ip address for external service, required" echo " grpc_ssl_name: the grpc ssl name, required" echo " db_password: the database password, required" echo " domain_url: the domain url, required" } if [[ -z $ACCESS_KEY_ID ]] || [[ -z $ACCESS_KEY_SECRET ]] || [[ -z $IMAGE_HUB_URL ]] || [[ -z $IMAGE_HUB_USERNAME ]] || [[ -z $IMAGE_HUB_PASSWORD ]] || [[ -z $EXTERNAL_NAME ]] || [[ -z $GRPC_SSL_NAME ]] || [[ -z $DOMAIN_URL ]] then usage exit 1 else install_cli upgrade fi
bytedance/fedlearner
deploy/scripts/aliyun/upgrade-add-on.sh
Shell
apache-2.0
7,606
#!/bin/sh
#
# Purpose:
# Enable Reads on standby (ROS) for Database using HADR.
#
# Usage:
# ThisScript <DbName>
#
# exit: 0
# 1 - Input parameter error.
#
# Notice:
# 1. When calling this script manually by logging as DB2 user.
# a. This is the optional way to do backup temporarily.
# b. It is ok to set up environment with "db2profile" or not. Because the environment has already been set up by logging as DB2 user.
#
# 2. When calling this script from cron job created for DB2 user.
# a. This is the preferred way to do backup periodically.
# b. It must call "db2profile" to set up environment.
#
# 3. When calling this script from cron job created for root user.
# a. It must "su - <DB2 user>" first.
# b. It must call "db2profile" to set up environment.
#
# Update log: (date / version / author : comments)
# 2015-03-25 / 1.0.0 / Du Jiang : Creation

. /home/db2inst1/sqllib/db2profile

echo "============================================================"
echo "Begin set DB config: `date`"
echo "----------------------------------------"
echo "Before:"
echo
db2set -all
echo "----------------------------------------"

db2set DB2_HADR_ROS=ON
db2set DB2_STANDBY_ISO=UR

echo "----------------------------------------"
echo "After:"
echo
db2set -all
echo "----------------------------------------"
echo "End set DB config: `date`"
echo "============================================================"
djsilenceboy/LearnTest
DB_Test/DB2/HADR/Enable_ROS.sh
Shell
apache-2.0
1,417
## ======================================================================== ## ## Copyright 2015-2016 Intel Corporation ## ## ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## ## you may not use this file except in compliance with the License. ## ## You may obtain a copy of the License at ## ## ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## ## ## Unless required by applicable law or agreed to in writing, software ## ## distributed under the License is distributed on an "AS IS" BASIS, ## ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## ## See the License for the specific language governing permissions and ## ## limitations under the License. ## ## ======================================================================== ## #!/bin/bash # to make sure we do not include nor link against wrong TBB export CPATH= export LIBRARY_PATH= export LD_LIBRARY_PATH= mkdir -p build_release cd build_release # set release settings cmake -L \ -G "Visual Studio 12 2013 Win64" \ -T "Intel C++ Compiler 16.0" \ -D OSPRAY_ZIP_MODE=OFF \ -D OSPRAY_BUILD_ISA=ALL \ -D OSPRAY_BUILD_MIC_SUPPORT=OFF \ -D OSPRAY_USE_EXTERNAL_EMBREE=ON \ -D embree_DIR=../../embree/lib/cmake/embree-2.9.0 \ -D USE_IMAGE_MAGICK=OFF \ -D CMAKE_INSTALL_INCLUDEDIR=include \ -D CMAKE_INSTALL_LIBDIR=lib \ -D CMAKE_INSTALL_DATAROOTDIR= \ -D CMAKE_INSTALL_DOCDIR=doc \ -D CMAKE_INSTALL_BINDIR=bin \ .. # -D TBB_ROOT=%TBB_PATH_LOCAL% \ # compile and create installers # option '--clean-first' somehow conflicts with options after '--' for msbuild cmake --build . --config Release --target PACKAGE -- -m -nologo # create ZIP files cmake -D OSPRAY_ZIP_MODE=ON .. cmake --build . --config Release --target PACKAGE -- -m -nologo cd ..
Twinklebear/OSPRay
scripts/release/win.sh
Shell
apache-2.0
2,152
#!/bin/bash

rep="photos/"
cd $rep

JHEAD=jhead
SED=sed
CONVERT=convert

# rotation
jhead -autorot *jpg
jhead -autorot *JPG

#rm small/*JPG
for fichier in `ls *JPG`
do
    if [ ! -f ../small/$fichier ]
    then
        echo "small - "$fichier
        convert $fichier -resize 25% ../small/$fichier
    fi
    if [ ! -f ../too_small/$fichier ]
    then
        echo "too_small - "$fichier
        convert $fichier -resize 5% ../too_small/$fichier
    fi
done

# rotation
jhead -autorot ../small/*JPG
jhead -autorot ../too_small/*JPG

#rm small/*jpg
for fichier in `ls *jpg`
do
    if [ ! -f ../small/$fichier ]
    then
        echo $fichier
        convert $fichier -resize 60% ../small/$fichier
    fi
    if [ ! -f ../too_small/$fichier ]
    then
        convert $fichier -resize 15% ../too_small/$fichier
    fi
done

# rotation
jhead -autorot ../small/*jpg
jhead -autorot ../too_small/*jpg

cd ..
chmod -Rf 777 photos

#lftp ftp://login:pwd@host -e "mirror -e -R /var/www/diapo/small /www/diapo/small ; quit"
#lftp ftp://login:pwd@host -e "mirror -e -R /var/www/diapo/photos /www/diapo/photos ; quit"
#lftp ftp://login:pwd@host -e "mirror -e -R /var/www/diapo/too_small /www/diapo/too_small ; quit"
julnegre/diapoo
convert.sh
Shell
apache-2.0
1,131
#!/bin/sh
. ./_project.sh

docker exec -i -t ${DOKER_NAME_PREFIX}redis-cluster redis-cli -p 7000 -c
dbflute-example/dbflute-example-with-non-rdb
etc/tools/rediscluster/redis-cli.sh
Shell
apache-2.0
100
jekyll build --destination /vol/websites/sjcom

# remember where we started so we can come back at the end
cpwd=$(pwd)
cd /vol/websites/sjcom

cd publications/
mogrify -resize 200x -format png */*.png
mogrify -resize 200x -format jpg */*.jpg
cd ..

cd images/
mogrify -resize '1000x>' -format png */*.png
mogrify -resize '1000x>' -format jpg */*.jpg
cd ..

git add *
# use the current date as the commit message
val=$(date)
git commit -m "$val"
git push --force

cd "$cpwd"
StTu/sjcom
deploy.sh
Shell
apache-2.0
355
#!/bin/bash set +x # Load environment. . env_openstack.sh # Install command-line tools. pip install python-neutronclient python-openstackclient -U # Import CoreOS / CirrOS images. cd /tmp wget http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img wget https://stable.release.core-os.net/amd64-usr/current/coreos_production_openstack_image.img.bz2 bunzip2 coreos_production_openstack_image.img.bz2 openstack image create CoreOS --container-format bare --disk-format qcow2 --file /tmp/coreos_production_openstack_image.img --public openstack image create CirrOS --container-format bare --disk-format qcow2 --file /tmp/cirros-0.3.3-x86_64-disk.img --public rm coreos_production_openstack_image.img rm cirros-0.3.3-x86_64-disk.img # Create external network. openstack network create ext-net --external --provider-physical-network physnet1 --provider-network-type flat openstack subnet create ext-subnet --no-dhcp --allocation-pool start=172.16.0.2,end=172.16.0.249 --network=ext-net --subnet-range 172.16.0.0/24 --gateway 172.16.0.1 # Create default flavors. openstack flavor create --public m1.tiny --ram 512 --disk 1 --vcpus 1 openstack flavor create --public m1.small --ram 2048 --disk 20 --vcpus 1 openstack flavor create --public m1.medium --ram 4096 --disk 40 --vcpus 2 openstack flavor create --public m1.large --ram 8192 --disk 80 --vcpus 4 openstack flavor create --public m1.xlarge --ram 16384 --disk 160 --vcpus 8 # Create a demo tenant network, router and security group. openstack network create demo-net openstack subnet create demo-subnet --allocation-pool start=192.168.0.2,end=192.168.0.254 --network demo-net --subnet-range 192.168.0.0/24 --gateway 192.168.0.1 --dns-nameserver 8.8.8.8 --dns-nameserver 8.8.4.4 openstack router create demo-router neutron router-interface-add demo-router $(openstack subnet show demo-subnet -c id -f value) neutron router-gateway-set demo-router ext-net #openstack security group rule create default --protocol icmp #openstack security group rule create default --protocol tcp --dst-port 22 # Create keypair. openstack keypair create demo-key > ./stackanetes.id_rsa # Create a CoreOS instance. openstack server create demo-coreos \ --image $(openstack image show CoreOS -c id -f value) \ --flavor $(openstack flavor show m1.small -c id -f value) \ --nic net-id=$(openstack network show demo-net -c id -f value) \ --key-name demo-key # Allocate and attach a floating IP to the instance. openstack ip floating add $(openstack ip floating create ext-net -c floating_ip_address -f value) demo-coreos # Create a volume and attach it to the instance. openstack volume create demo-volume --size 10 sleep 10 openstack server add volume demo-coreos demo-volume # Live migrate the instance. # openstack server migrate --live b1-3 demo-coreos # Boot instance from volume. openstack volume create demo-boot-volume --image $(openstack image show CoreOS -c id -f value) --size 10 openstack server create demo-coreos-boot-volume \ --volume $(openstack volume show demo-boot-volume -c id -f value) \ --flavor $(openstack flavor show m1.small -c id -f value) \ --nic net-id=$(openstack network show demo-net -c id -f value) \ --key-name demo-key
stackanetes/stackanetes
demo_openstack.sh
Shell
apache-2.0
3,223
#!/usr/bin/env bash

export CLASS_NAME="com.splicemachine.tutorials.spark.SparkStreamingKafka"
export APPLICATION_JAR="splice-tutorial-file-spark-2.6.1.1736.jar"
export SPARK_JARS_DIR="${SPARK_HOME}/jars"

CURRENT_IP=$(ifconfig eth0 | grep inet | awk '{print $2}')
SPARK_IMAGE=${SPARK_IMAGE:-"splicemachine/tutorial-spark-kafka-consumer:2.0.3"}

echo "spark.driver.host $CURRENT_IP" >> $SPARK_HOME/conf/spark-defaults.conf
echo "spark.mesos.executor.docker.image $SPARK_IMAGE" >> $SPARK_HOME/conf/spark-defaults.conf

exec "${SPARK_HOME}"/bin/spark-submit \
    --class ${CLASS_NAME} \
    --files ${SPARK_HOME}/conf/hbase-site.xml,${SPARK_HOME}/conf/core-site.xml,${SPARK_HOME}/conf/hdfs-site.xml \
    "${APPLICATION_JAR}" \
    "$@"
splicemachine/splice-community-sample-code
spark-streaming-splice-adapter/src/main/resources/scripts/run-kafka-spark-streaming.sh
Shell
apache-2.0
745
#!/bin/sh set -e if [ "$1" = "configure" ]; then if [ ! -f "/etc/orientdb/orientdb-server-log.properties" ] then # The configuration file /etc/orientdb/orientdb-server-log.properties does not already exist cat /usr/share/doc/orientdb/examples/orientdb-server-log.properties.gz | gunzip > /etc/orientdb/orientdb-server-log.properties else echo "An old configuration file was found: /etc/orientdb/orientdb-server-log.properties" echo "We are not touching the file. You could have to modify it by yourself" fi if [ ! -f "/etc/orientdb/orientdb-client-log.properties" ] then # The configuration file /etc/orientdb/orientdb-client-log.properties does not already exist cat /usr/share/doc/orientdb/examples/orientdb-client-log.properties.gz | gunzip > /etc/orientdb/orientdb-client-log.properties else echo "An old configuration file was found: /etc/orientdb/orientdb-client-log.properties" echo "We are not touching the file. You could have to modify it by yourself" fi if [ ! -f "/etc/orientdb/orientdb-server-config.xml" ] then # The configuration file /etc/orientdb/orientdb-server-config.xml does not already exist cat /usr/share/doc/orientdb/examples/orientdb-server-config.xml.gz | gunzip > /etc/orientdb/orientdb-server-config.xml chmod 640 /etc/orientdb/orientdb-server-config.xml else echo "An old configuration file was found: /etc/orientdb/orientdb-server-config.xml" echo "We are not touching the file. You could have to modify it by yourself" fi if [ ! -f "/etc/orientdb/orientdb-dserver-config.xml" ] then # The configuration file /etc/orientdb/orientdb-dserver-config.xml does not already exist cat /usr/share/doc/orientdb/examples/orientdb-dserver-config.xml.gz | gunzip > /etc/orientdb/orientdb-dserver-config.xml chmod 640 /etc/orientdb/orientdb-dserver-config.xml else echo "An old configuration file was found: /etc/orientdb/orientdb-dserver-config.xml" echo "We are not touching the file. You could have to modify it by yourself" fi update-rc.d orientdb defaults echo echo "To start orientdb run:" echo "# service orientdb start" echo echo "To stop orientdb run:" echo "# service orientdb stop" echo echo "To get the orientdb status run:" echo "# service orientdb status" echo echo "To use the console run:" echo "# orientdb-console" echo echo "NOTE: OrientDB is free software. For more informations subscribe to the orientdb mailinglist" chown -R orientdb:orientdb /var/lib/orientdb /etc/orientdb/* /var/log/orientdb chown -R orientdb:orientdb /usr/share/orientdb/databases /usr/share/orientdb/config /usr/share/orientdb/log fi
mav-im/orientdb-debian-packager
debian/postinst.tpl.sh
Shell
apache-2.0
2,625
#!/bin/bash

sudo rm -rf var/cache/*
sudo rm -rf var/logs/*
sudo rm -rf var/sessions/*

mkdir -p var/cache var/logs var/sessions web/cache

HTTPDUSER=`ps aux | grep -E '[a]pache|[h]ttpd|[_]www|[w]ww-data|[n]ginx' | grep -v root | head -1 | cut -d\ -f1`

if [ "$1" ]; then ME=$1; else ME=`whoami`; fi

sudo setfacl -R -m u:"$HTTPDUSER":rwX -m u:"$ME":rwX var/cache var/logs var/sessions web/cache web/images/user
sudo setfacl -dR -m u:"$HTTPDUSER":rwX -m u:"$ME":rwX var/cache var/logs var/sessions web/cache web/images/user
calvera/admingenerator-template
symfony_facl.sh
Shell
apache-2.0
526
#!/bin/sh set -o errexit realpath() { echo "$(cd "$(dirname "$1")"; pwd)/$(basename "$1")" } DOCKER_DIR=$(dirname $(dirname $(realpath $0))) PROJECT_DIR=$(dirname $DOCKER_DIR ) SCRATCH_DIR="$HOME/.heron-docker" cleanup() { if [ -d $SCRATCH_DIR ]; then echo "Cleaning up scratch dir" rm -rf $SCRATCH_DIR fi } trap cleanup EXIT setup_scratch_dir() { if [ ! -f "$1" ]; then mkdir $1 mkdir $1/artifacts fi cp -r $DOCKER_DIR/* $1 } build_exec_image() { INPUT_TARGET_PLATFORM=$1 HERON_VERSION=$2 DOCKER_TAG_PREFIX=$3 OUTPUT_DIRECTORY=$(realpath $4) if [ "$INPUT_TARGET_PLATFORM" == "latest" ]; then TARGET_PLATFORM="ubuntu14.04" DOCKER_TAG="$DOCKER_TAG_PREFIX/heron:$HERON_VERSION" DOCKER_LATEST_TAG="$DOCKER_TAG_PREFIX/heron:latest" DOCKER_IMAGE_FILE="$OUTPUT_DIRECTORY/heron-$HERON_VERSION.tar" else TARGET_PLATFORM="$INPUT_TARGET_PLATFORM" DOCKER_TAG="$DOCKER_TAG_PREFIX/heron-$TARGET_PLATFORM:$HERON_VERSION" DOCKER_LATEST_TAG="$DOCKER_TAG_PREFIX/heron-$TARGET_PLATFORM:latest" DOCKER_IMAGE_FILE="$OUTPUT_DIRECTORY/heron-$TARGET_PLATFORM-$HERON_VERSION.tar" fi DOCKER_FILE="$SCRATCH_DIR/dist/Dockerfile.dist.$TARGET_PLATFORM" setup_scratch_dir $SCRATCH_DIR # need to copy artifacts locally TOOLS_FILE="$OUTPUT_DIRECTORY/heron-tools-install.sh" TOOLS_OUT_FILE="$SCRATCH_DIR/artifacts/heron-tools-install.sh" CORE_FILE="$OUTPUT_DIRECTORY/heron-core.tar.gz" CORE_OUT_FILE="$SCRATCH_DIR/artifacts/heron-core.tar.gz" cp $TOOLS_FILE $TOOLS_OUT_FILE cp $CORE_FILE $CORE_OUT_FILE export HERON_VERSION # build the image echo "Building docker image with tag:$DOCKER_TAG" if [ "$HERON_VERSION" == "nightly" ]; then docker build --build-arg heronVersion=$HERON_VERSION -t "$DOCKER_TAG" -f "$DOCKER_FILE" "$SCRATCH_DIR" else docker build --build-arg heronVersion=$HERON_VERSION -t "$DOCKER_TAG" -t "$DOCKER_LATEST_TAG" -f "$DOCKER_FILE" "$SCRATCH_DIR" fi # save the image as a tar file echo "Saving docker image to $DOCKER_IMAGE_FILE" docker save -o $DOCKER_IMAGE_FILE $DOCKER_TAG } publish_exec_image() { INPUT_TARGET_PLATFORM=$1 HERON_VERSION=$2 DOCKER_TAG_PREFIX=$3 INPUT_DIRECTORY=$(realpath $4) if [ "$INPUT_TARGET_PLATFORM" == "latest" ]; then TARGET_PLATFORM="ubuntu14.04" DOCKER_TAG="$DOCKER_TAG_PREFIX/heron:$HERON_VERSION" DOCKER_LATEST_TAG="$DOCKER_TAG_PREFIX/heron:latest" DOCKER_IMAGE_FILE="$INPUT_DIRECTORY/heron-$HERON_VERSION.tar" else TARGET_PLATFORM="$INPUT_TARGET_PLATFORM" DOCKER_TAG="$DOCKER_TAG_PREFIX/heron-$TARGET_PLATFORM:$HERON_VERSION" DOCKER_LATEST_TAG="$DOCKER_TAG_PREFIX/heron-$TARGET_PLATFORM:latest" DOCKER_IMAGE_FILE="$INPUT_DIRECTORY/heron-$TARGET_PLATFORM-$HERON_VERSION.tar" fi # publish the image to docker hub if [ "$HERON_VERSION" == "nightly" ]; then docker load -i $DOCKER_IMAGE_FILE docker push "$DOCKER_TAG" else docker load -i $DOCKER_IMAGE_FILE docker push "$DOCKER_TAG" docker push "$DOCKER_LATEST_TAG" fi } docker_image() { OPERATION=$1 if [ "$OPERATION" == "build" ]; then build_exec_image $2 $3 $4 $5 elif [ "$OPERATION" == "publish" ]; then publish_exec_image $2 $3 $4 $5 else echo "invalid operation" fi } case $# in 5) docker_image $1 $2 $3 $4 $5 ;; *) echo "Usage: $0 <operation> <platform> <version_string> <tag-prefix> <input-output-directory> " echo " " echo "Platforms Supported: latest ubuntu14.04, ubuntu15.10, ubuntu16.04 centos7" echo " " echo "Example:" echo " $0 build ubuntu14.04 0.12.0 heron ." echo " $0 publish ubuntu14.04 0.12.0 streamlio ~/ubuntu" echo " " exit 1 ;; esac
streamlio/heron
docker/scripts/ci-docker.sh
Shell
apache-2.0
3,729
#!/bin/bash

PWD=`pwd`
export LD_LIBRARY_PATH=$PWD/../../../../bin/libiio/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=$PWD/../../../../lib:$LD_LIBRARY_PATH
excess-project/monitoring-agent
src/plugins/c/acme/setenv.sh
Shell
apache-2.0
155
#!/bin/bash
#
# Downloads the eduGAIN metadata aggregate
#
# NB: a HEAD request gets a "HTTP/1.1 405 Method Not Allowed"
# Tomasz suggested http://mds.edugain.org/feed-sha256.xml
#
# Configuration options:
DIRECTORY='/home/astuart4/eduGAIN/'
eduGAIN='http://mds.edugain.org/feed-sha256.xml'
eduGAINtest='http://mds-test.edugain.org'
# End of configuration options

DATE=`/bin/date -u '+%Y-%m-%dT%H:%M:%SZ'`
FILE="eduGAIN.xml.$DATE"

echo "downloading $eduGAIN and storing in $FILE"
/usr/bin/curl $eduGAIN > ${DIRECTORY}/${FILE} 2>/dev/null
alexstuart/ukftools
geteduGAIN.sh
Shell
apache-2.0
538
#!/usr/bin/env bash # # Simple utility that uses ssh to check, run or kill the logger script # on every node of the cluster. # Automatically obtains the cluster nodes and writes them to a hostsfile. # NOTE: Runs in sequence not in paralell. # # # EXAMPLES: # # Runs 3 busybox containers per each node. # export TIMES=3; export MODE=1; ./manage_pods.sh -r 128 # # # Runs 5 standalone logger.sh processes logging forever # export TIMES=5; export MODE=2; ./manage_pods.sh -r 128 # # Both the above methods should log output to be picked up by the fluentd pods. # # # # Check for running pods. # export MODE=1; ./manage_pods.sh -c 1 # # # Run 5 pods in every node. # The argument to '-r' is the log line length. # This is the only arg that takes a value different than 1 # # export TIMES=5; export MODE=1; ./manage_pods.sh -r 250 # # Kill pods in every node. # export MODE=1; ./manage_pods.sh -k 1 # # Check pods. # export MODE=1; ./manage_pods.sh -c 1 # set -o pipefail if [[ `id -u` -ne 0 ]] then echo -e "Please run as root/sudo.\n" echo -e "Terminated." exit 1 fi SCRIPTNAME=$(basename ${0%.*}) SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" WORKDIR=$SCRIPTDIR HOSTSFILE=$WORKDIR/hostlist.txt declare -a NODELIST function cp_logger() { for host in ${NODELIST[@]} do scp $WORKDIR/logger.sh $host: done } function run_logger() { for host in ${NODELIST[@]} do echo -e "\n\n[+] Line length ${x}: $host" ssh -f $host "/root/logger.sh -r 60 -l ${x} -t ${TIMES} -m ${MODE}" done } function check_pods() { for host in ${NODELIST[@]} do ssh $host "echo $host; docker ps | grep busybox; echo" done } function kill_pods() { for host in ${NODELIST[@]} do echo -e "\n$host" ssh $host "docker kill \$(docker ps | grep busybox | awk '{print \$1}' ;echo)" done } function check_logger() { for host in ${NODELIST[@]} do echo -e "\n$host" ssh $host "ps -ef | grep [l]ogger.sh" done } function kill_logger() { for host in ${NODELIST[@]} do echo -e "\n$host" ssh $host "pkill -f logger.sh" done } function read_hosts() { hf=${1} while read line; do NODELIST+=($line); done < $hf } # MAIN if [[ -f $HOSTSFILE ]] then echo -e "Hosts file exists.\n" read_hosts $HOSTSFILE else echo "First run:" echo "Creating $HOSTSFILE ..." oc get nodes | awk '{print $1}' | grep -v 'NAME' > $HOSTSFILE [[ $? -eq 0 ]] && echo -e "Done.\n" || (echo 'Fatal: "oc get nodes failed."' ; exit 1) read_hosts $HOSTSFILE echo "Copying logger.sh to cluster nodes." cp_logger echo "Done." fi # for process mode if [[ ${MODE} -eq 2 ]]; then while getopts ":s:r:c:k:q:" option; do case "${option}" in s) x=${OPTARG} && [[ $x -eq 1 ]] && cp_logger ;; r) x=${OPTARG} if [[ $x -ne 0 ]]; then while [ ${TIMES} -ne 0 ] do run_logger $x ((TIMES--)) done fi ;; c) x=${OPTARG} && [[ $x -eq 1 ]] && check_logger ;; k) x=${OPTARG} && [[ $x -eq 1 ]] && kill_logger ;; q) x=${OPTARG} && [[ $x -eq 1 ]] && kill_logger ;; '*') echo -e "Invalid option / usage: ${option}\nExiting." exit 1 ;; esac done shift $((OPTIND-1)) fi # container mode if [[ ${MODE} -eq 1 ]]; then while getopts ":s:r:c:k:q:" option; do case "${option}" in s) x=${OPTARG} && [[ $x -eq 1 ]] && cp_logger ;; r) x=${OPTARG} && [[ $x -ne 0 ]] && run_logger $x ;; c) x=${OPTARG} && [[ $x -eq 1 ]] && check_pods ;; k) x=${OPTARG} && [[ $x -eq 1 ]] && kill_pods ;; q) x=${OPTARG} && [[ $x -eq 1 ]] && kill_logger ;; '*') echo -e "Invalid option / usage: ${option}\nExiting." exit 1 ;; esac done shift $((OPTIND-1)) fi echo -e "\nDone." exit 0
hroyrh/svt
logging_metrics_performance/enterprise_logging/test/manage_pods.sh
Shell
apache-2.0
3,905
#!/bin/bash -eu # Copyright 2016 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # It's not a good idea to link an MSYS dynamic library into a native Windows # JVM, so we need to build it with Visual Studio. However, Bazel doesn't # support multiple compilers in the same build yet, so we need to hack around # this limitation using a genrule. DLL="$1" shift 1 function fail() { echo >&2 "ERROR: $*" exit 1 } # Ensure the PATH is set up correctly. if ! which which >&/dev/null ; then PATH="/bin:/usr/bin:$PATH" which which >&/dev/null \ || fail "System PATH is not set up correctly, cannot run GNU bintools" fi # Create a temp directory. It will used for the batch file we generate soon and # as the temp directory for CL.EXE . VSTEMP=$(mktemp -d) trap "rm -fr \"$VSTEMP\"" EXIT VSVARS="" # Visual Studio or Visual C++ Build Tools might not be installed at default # location. Check BAZEL_VS and BAZEL_VC first. if [ -n "${BAZEL_VC+set}" ]; then VSVARS="${BAZEL_VC}/VCVARSALL.BAT" # Check if BAZEL_VC points to Visual C++ Build Tools 2017 if [ ! -f "${VSVARS}" ]; then VSVARS="${BAZEL_VC}/Auxiliary/Build/VCVARSALL.BAT" fi else # Find Visual Studio. We don't have any regular environment variables # available so this is the best we can do. if [ -z "${BAZEL_VS+set}" ]; then VSVERSION="$(ls "C:/Program Files (x86)" \ | grep -E "Microsoft Visual Studio [0-9]+" \ | sort --version-sort \ | tail -n 1)" BAZEL_VS="C:/Program Files (x86)/$VSVERSION" fi VSVARS="${BAZEL_VS}/VC/VCVARSALL.BAT" fi # Check if Visual Studio 2017 is installed. Look for it at the default # locations. if [ ! -f "${VSVARS}" ]; then VSVARS="C:/Program Files (x86)/Microsoft Visual Studio/2017/" VSEDITION="BuildTools" if [ -d "${VSVARS}Enterprise" ]; then VSEDITION="Enterprise" elif [ -d "${VSVARS}Professional" ]; then VSEDITION="Professional" elif [ -d "${VSVARS}Community" ]; then VSEDITION="Community" fi VSVARS+="$VSEDITION/VC/Auxiliary/Build/VCVARSALL.BAT" fi if [ ! -f "${VSVARS}" ]; then fail "VCVARSALL.bat not found, check your Visual Studio installation" fi JAVAINCLUDES="" if [ -n "${JAVA_HOME+set}" ]; then JAVAINCLUDES="$JAVA_HOME/include" else # Find Java. $(JAVA) in the BUILD file points to external/local_jdk/..., # which is not very useful for anything not MSYS-based. JAVA=$(ls "C:/Program Files/java" | grep -E "^jdk" | sort | tail -n 1) [[ -n "$JAVA" ]] || fail "JDK not found" JAVAINCLUDES="C:/Program Files/java/$JAVA/include" fi # Convert all compilation units to Windows paths. WINDOWS_SOURCES=() for i in $*; do if [[ "$i" =~ ^.*\.cc$ ]]; then WINDOWS_SOURCES+=("\"$(cygpath -a -w $i)\"") fi done # Copy jni headers to src/main/native folder # Mimic genrule //src/main/native:copy_link_jni_md_header and //src/main/native:copy_link_jni_header JNI_HEADERS_DIR="${VSTEMP}/src/main/native" mkdir -p "$JNI_HEADERS_DIR" cp -f "$JAVAINCLUDES/jni.h" "$JNI_HEADERS_DIR/" cp -f "$JAVAINCLUDES/win32/jni_md.h" "$JNI_HEADERS_DIR/" # CL.EXE needs a bunch of environment variables whose official location is a # batch file. 
We can't make that have an effect on a bash instance, so # generate a batch file that invokes it. # As for `abs_pwd` and `pwd_drive`: in cmd.exe, it's not enough to `cd` into a # directory. You must also change to its drive to truly set the cwd to that # directory. See https://github.com/bazelbuild/bazel/issues/3906 abs_pwd="$(cygpath -a -w "${PWD}")" pwd_drive="$(echo "$abs_pwd" | head -c2)" cat > "${VSTEMP}/windows_jni.bat" <<EOF @echo OFF @call "${VSVARS}" amd64 @$pwd_drive @cd "$abs_pwd" @set TMP=$(cygpath -a -w "${VSTEMP}") @CL /O2 /EHsc /LD /Fe:"$(cygpath -a -w ${DLL})" /I "%TMP%" /I . ${WINDOWS_SOURCES[*]} /link /DEFAULTLIB:advapi32.lib EOF # Invoke the file and hopefully generate the .DLL . chmod +x "${VSTEMP}/windows_jni.bat" exec "${VSTEMP}/windows_jni.bat"
meteorcloudy/bazel
src/main/native/windows/build_windows_jni.sh
Shell
apache-2.0
4,455
#set JAVA_HOME
echo "Exporting JAVA_HOME path after JAVA's JRE 1.7 was installed !"
export JAVA_HOME=/usr/lib/jvm/jre-1.7.0-openjdk.x86_64
sudo echo "export JAVA_HOME=/usr/lib/jvm/jre-1.7.0-openjdk.x86_64" >> /etc/environment
sudo echo "Saving JAVA_HOME variable so it doesn't get affected from system REBOOT !"
sudo echo ''\#\!/bin/sh'' > /etc/profile.d/jdk_home.sh
sudo echo "export JAVA_HOME=/usr/lib/jvm/jre-1.7.0-openjdk.x86_64" >> /etc/profile.d/jdk_home.sh
sudo echo ''export PATH=\$PATH:\$JAVA_HOME/bin'' >> /etc/profile.d/jdk_home.sh
sudo chmod 775 /etc/profile.d/jdk_home.sh
krakky/market
coopr_standalone/bin/centos/3_set_java_home.sh
Shell
apache-2.0
585
#!/bin/bash set -e export PATH='/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin' source /var/tmp/helpers/default.sh readonly DOCKER_FILES='/var/tmp/docker' readonly UBUNTU_RELEASE=$(detect_ubuntu_release) readonly UBUNTU_VERSION=$(detect_ubuntu_version) readonly AMAZON_EC2=$(detect_amazon_ec2 && echo 'true') [[ -d $DOCKER_FILES ]] || mkdir -p "$DOCKER_FILES" # Old package repository has been shut down, see: # https://www.docker.com/blog/changes-dockerproject-org-apt-yum-repositories/ cat <<EOF > /etc/apt/sources.list.d/docker.list $(if [[ $UBUNTU_VERSION == '12.04' ]]; then echo "deb [arch=amd64] https://ftp.yandex.ru/mirrors/docker ubuntu-${UBUNTU_RELEASE} main" else if [[ $UBUNTU_VERSION =~ ^(14|16|18).04$ ]]; then echo "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${UBUNTU_RELEASE} stable" else # Starting from 20.04, Docker no long provides packages from their repository. echo "deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable" fi fi) EOF chown root: /etc/apt/sources.list.d/docker.list chmod 644 /etc/apt/sources.list.d/docker.list if [[ $UBUNTU_VERSION == '12.04' ]]; then if [[ ! -f "${DOCKER_FILES}/12.04/docker.key" ]]; then # Download key directly from Docker project. wget -O "${DOCKER_FILES}/docker.key" \ "https://ftp.yandex.ru/mirrors/docker/gpg" else cp -f "${DOCKER_FILES}/12.04/docker.key" \ "${DOCKER_FILES}/docker.key" fi else if [[ ! -f "${DOCKER_FILES}/docker.key" ]]; then # Download key directly from Docker project. wget -O "${DOCKER_FILES}/docker.key" \ "https://download.docker.com/linux/ubuntu/gpg" fi fi apt-key add "${DOCKER_FILES}/docker.key" apt_get_update # Only refresh packages index from Docker's repository. apt-get --assume-yes update \ -o Dir::Etc::SourceList='/etc/apt/sources.list.d/docker.list' \ -o Dir::Etc::SourceParts='-' -o APT::Get::List-Cleanup='0' # Dependencies needed by Docker, etc. PACKAGES=( 'pciutils' 'procps' 'xfsprogs' 'git' ) if [[ $UBUNTU_VERSION =~ ^(12|14|16|18).04$ ]]; then PACKAGES+=( 'btrfs-tools' ) else # Starting from 20.04, btrfs-progs is no longer a virtual package. PACKAGES+=( 'btrfs-progs' ) fi DOCKER_PACKAGE='docker-ce' if [[ $UBUNTU_VERSION == '12.04' ]]; then DOCKER_PACKAGE='docker-engine' fi if [[ -n $DOCKER_VERSION ]]; then # The package name and version is now a little bit awkaward to work # which is why we rely on wildcard match for a given version of Docker, # for example: # - Old packages e.g., docker-engine_17.05.0~ce-0~ubuntu-trusty_amd64.deb; # - New packages e.g., docker-ce_17.12.0~ce-0~ubuntu_amd64.deb. PACKAGES+=( $(printf '%s=%s~ce*' "$DOCKER_PACKAGE" "$DOCKER_VERSION") ) else PACKAGES+=( "$DOCKER_PACKAGE" ) fi for package in "${PACKAGES[@]}"; do apt-get --assume-yes install "$package" done { if [[ ! $UBUNTU_VERSION =~ ^(12|14).04$ ]]; then systemctl stop docker else service docker stop fi } || true # Do not start Docker automatically when # running on Amazon EC2, as it might be # desirable to relocate the /var/lib/docker # on a separate mount point, etc. if [[ -n $AMAZON_EC2 ]]; then { if [[ ! $UBUNTU_VERSION =~ ^(12|14).04$ ]]; then systemctl disable docker else update-rc.d -f docker disable # Disable when using upstart. echo 'manual' | sudo tee /etc/init/docker.override fi } || true fi if ! getent group docker &>/dev/null; then groupadd --system docker fi for user in $(echo "root vagrant ubuntu ${USER}" | tr ' ' '\n' | sort -u); do if getent passwd "$user" &>/dev/null; then usermod -a -G docker "$user" fi done # Add Bash shell completion for Docker and Docker Compose. 
for file in docker docker-compose; do REPOSITORY='docker-ce' FILE_PATH='components/cli/contrib/completion/bash' if [[ $file =~ ^docker-compose$ ]]; then REPOSITORY='compose' FILE_PATH='contrib/completion/bash' fi if [[ ! -f "${DOCKER_FILES}/${file}" ]]; then wget -O "${DOCKER_FILES}/${file}" \ "https://raw.githubusercontent.com/docker/${REPOSITORY}/master/${FILE_PATH}/${file}" fi cp -f "${DOCKER_FILES}/${file}" \ "/etc/bash_completion.d/${file}" chown root: "/etc/bash_completion.d/${file}" chmod 644 "/etc/bash_completion.d/${file}" done sed -i -e \ 's/.*DOCKER_OPTS="\(.*\)"/DOCKER_OPTS="--config-file=\/etc\/docker\/daemon.json"/g' \ /etc/default/docker # Shouldn't the package create this? if [[ ! -d /etc/docker ]]; then mkdir -p /etc/docker chown root: /etc/docker chmod 755 /etc/docker fi # For now, the "userns-remap" option is disabled, # since it breaks almost everything at the moment. cat <<EOF > /etc/docker/daemon.json { "debug": false, $(if [[ $UBUNTU_VERSION == '12.04' ]]; then # No support for overlay2 file system in the # Linux kernel on older versions of Ubuntu. cat <<'EOS' "graph": "/var/lib/docker", "storage-driver": "aufs", EOS else cat <<'EOS' "data-root": "/var/lib/docker", "storage-driver": "overlay2", EOS fi) "ipv6": false, "dns": [ "1.1.1.1", "8.8.8.8", "4.2.2.2" ], "icc": false, "live-restore": true, "userland-proxy": false, "experimental": true } EOF chown root: /etc/docker/daemon.json chmod 644 /etc/docker/daemon.json # We can install the docker-compose pip, but it has to be done # under virtualenv as it has specific version requirements on # its dependencies, often causing other things to break. virtualenv /opt/docker-compose pushd /opt/docker-compose &>/dev/null # Make sure to switch into the virtualenv. . /opt/docker-compose/bin/activate # This is needed, as virtualenv by default will install # some really old version (e.g. 12.0.x, etc.), sadly. if [[ $UBUNTU_VERSION =~ '12.04' ]]; then pip install --upgrade setuptools==43.0.0 else pip install --upgrade setuptools fi # Resolve the "InsecurePlatformWarning" warning. pip install --upgrade ndg-httpsclient # The "--install-scripts" option is to make sure that binary # will be placed in the system-wide directory, rather than # inside the virtualenv environment only. if [[ -n $DOCKER_COMPOSE_VERSION ]]; then pip install \ --install-option='--install-scripts=/usr/local/bin' \ docker-compose=="${DOCKER_COMPOSE_VERSION}" else pip install \ --install-option='--install-scripts=/usr/local/bin' \ docker-compose fi deactivate popd &>/dev/null hash -r ln -sf /usr/local/bin/docker-compose \ /usr/bin/docker-compose if [[ -f /usr/local/bin/wsdump.py ]]; then ln -sf /usr/local/bin/wsdump.py \ /usr/local/bin/wsdump fi hash -r KERNEL_OPTIONS=( 'cgroup_enable=memory' 'swapaccount=1' ) # Support both grub and grub2 style configuration. if detect_grub2; then # Remove any repeated (de-duplicate) Kernel options. OPTIONS=$(sed -e \ "s/GRUB_CMDLINE_LINUX=\"\(.*\)\"/GRUB_CMDLINE_LINUX=\"\1 ${KERNEL_OPTIONS[*]}\"/" \ /etc/default/grub | \ grep -E '^GRUB_CMDLINE_LINUX=' | \ sed -e 's/GRUB_CMDLINE_LINUX=\"\(.*\)\"/\1/' | \ tr ' ' '\n' | sort -u | tr '\n' ' ' | xargs) sed -i -e \ "s/GRUB_CMDLINE_LINUX=\"\(.*\)\"/GRUB_CMDLINE_LINUX=\"${OPTIONS}\"/" \ /etc/default/grub else # Remove any repeated (de-duplicate) Kernel options. 
OPTIONS=$(sed -e \ "s/^#\sdefoptions=\(.*\)/# defoptions=\1 ${KERNEL_OPTIONS[*]}/" \ /boot/grub/menu.lst | \ grep -E '^#\sdefoptions=' | \ sed -e 's/.*defoptions=//' | \ tr ' ' '\n' | sort -u | tr '\n' ' ' | xargs) sed -i -e \ "s/^#\sdefoptions=.*/# defoptions=${OPTIONS}/" \ /boot/grub/menu.lst fi if [[ -f /etc/default/ufw ]]; then sed -i -e \ 's/DEFAULT_FORWARD_POLICY=.*/DEFAULT_FORWARD_POLICY="ACCEPT"/g' \ /etc/default/ufw fi grep 'docker' /proc/mounts | awk '{ print length, $2 }' | \ sort -g -r | cut -d' ' -f2- | xargs umount -l -f 2> /dev/null || true # This would normally be on a separate volume, # and most likely formatted to use "btrfs". for directory in /srv/docker /var/lib/docker; do [[ -d $directory ]] || mkdir -p "$directory" rm -Rf ${directory:?}/* chown root: "$directory" chmod 755 "$directory" done # A bind-mount for the Docker root directory. cat <<'EOS' | sed -e 's/\s\+/\t/g' >> /etc/fstab /srv/docker /var/lib/docker none bind 0 0 EOS rm -f ${DOCKER_FILES}/docker{.key,-compose}
kwilczynski/packer-templates
scripts/common/docker.sh
Shell
apache-2.0
8,760
#!/bin/bash

CUR_DIR=`pwd`
APP_DIR=`dirname $0`
HIVE_USER=$USER

. $APP_DIR/bee-env.sh

TARGET_ENV=$1

if [ "$TARGET_ENV" == "" ]; then
    . $APP_DIR/beepass-default.sh
else
    if [ -f $APP_DIR/beepass-$TARGET_ENV.sh ]; then
        . $APP_DIR/beepass-$TARGET_ENV.sh
    else
        echo "Create a 'beepass-<target>.sh' (use beepass-default.sh as template) file in the beewrap.sh directory and add the following:"
        echo "Omit HIVE_USER is you user id matches your HIVE User id"
        echo " HIVE_USER=<your hive user>"
        echo " HS2_PASSWORD=<your Hive Password>"
        echo " URL=jdbc:hive2://lnx21116.csxt.csx.com:10000"
        echo ""
        echo "chmod the file 700"
    fi
fi

beeline -u $URL -n $HIVE_USER -p $HS2_PASSWORD --hivevar USER=$HIVE_USER --hivevar EXEC_ENGINE=$EXEC_ENGINE -i $APP_DIR/beeline_init.sql "$@"
dstreev/hdp-mac-utils
bin/beewrap.sh
Shell
apache-2.0
847
#!/bin/bash
set -e

export PROJECT_NAME=async.single
codetojoy/gists
csharp/async.single/setvars.sh
Shell
apache-2.0
56
#!/bin/bash

BASE_DIR=$(dirname $0)
#source $BASE_DIR/../bin/setenv.sh
#$SPARK_HOME/bin/spark-shell --master yarn --deploy-mode client
spark-shell --master yarn --deploy-mode client
zhuwbigdata/hadoop-admin-utils
spark-utils/yarnClientSparkShell.sh
Shell
apache-2.0
183
sudo apt-get install -y --force-yes libirrlicht-dev #- High performance realtime 3D engine development library
sudo apt-get install -y --force-yes libirrlicht-doc #- High performance realtime 3D engine (API documentation)
sudo apt-get install -y --force-yes libirrlicht1.7a #- High performance realtime 3D engine
sudo apt-get install -y --force-yes bkchem #- Chemical structures editor
sudo apt-get install -y --force-yes mm3d #- OpenGL based 3D model editor
sudo apt-get install -y --force-yes python-editobj #- Python object editor
sudo apt-get install -y --force-yes qcake #- programming environment and scene editor for 3D games
sudo apt-get install -y --force-yes qcake-data #- programming environment and scene editor for 3D games #- data files
sudo apt-get install -y --force-yes whitedune #- graphical VRML97/X3D viewer, editor, 3D modeller and animation tool
bayvictor/distributed-polling-system
bin/install_3d_editor.sh
Shell
apache-2.0
887
set -e

while [ true ]
do
    echo .
    csmith > test.c;
    clang -I${CSMITH_PATH}/runtime -O3 -w test.c -o /dev/null;
    gcc -I${CSMITH_PATH}/runtime -O3 -w test.c -o /dev/null;
done
ChrisLidbury/CLSmith
scripts/driver0.sh
Shell
bsd-2-clause
178
#!/bin/bash

# - must have Cython installed
# - must have already run:
#   mkdir cbuild
#   (cd cbuild && cmake .. && make -j 4 )
# - torch is expected to be already activated, ie run:
#   source ~/torch/install/bin/torch_activate.sh
#   ... or similar
# - torch is expected to be at $HOME/torch

export TORCH_INSTALL=$(dirname $(dirname $(which luajit) 2>/dev/null) 2>/dev/null)

if [[ x${INCREMENTAL} == x ]]; then {
    rm -Rf build PyBuild.so dist *.egg-info cbuild ${TORCH_INSTALL}/lib/libPyTorch*
    pip uninstall -y PyTorch
} fi

mkdir -p cbuild

if [[ x${TORCH_INSTALL} == x ]]; then {
    echo
    echo Please run:
    echo
    echo ' source ~/torch/install/bin/torch-activate'
    echo
    echo ... then try again
    echo
    exit 1
} fi

if [[ $(uname -s) == 'Darwin' ]]; then {
    USE_LUAJIT=OFF;
} fi

if [[ x${USE_LUAJIT} == x ]]; then {
    USE_LUAJIT=ON;
} fi

if [[ x${CYTHON} != x ]]; then {
    JINJA2_ONLY=1 python setup.py || exit 1;
} fi

(cd cbuild; cmake .. -DCMAKE_BUILD_TYPE=Debug -DUSE_LUAJIT=${USE_LUAJIT} -DCMAKE_INSTALL_PREFIX=${TORCH_INSTALL} && make -j 4 install) || exit 1

if [[ x${VIRTUAL_ENV} != x ]]; then {
    # we are in a virtualenv
    python setup.py install || exit 1
} else {
    # not virtualenv
    python setup.py install --user || exit 1
} fi
hughperkins/pytorch
build.sh
Shell
bsd-2-clause
1,284
#!/bin/bash -xeE

TOOLS_BASE_PATH=$1

. ./env.sh

rm -Rf $DISTR_PATH
mkdir -p $DISTR_PATH

wget -P $DISTR_PATH $M4_URL
wget -P $DISTR_PATH $AUTOCONF_URL
wget -P $DISTR_PATH $AUTOMAKE_URL
wget -P $DISTR_PATH $LIBTOOL_URL

# Use as much processors
# as we can to speedup
NPROC=`nproc`
export MAKE_JOBS=$NPROC

. ./env.sh

rm -Rf $SRC_PATH $PREFIX
mkdir $SRC_PATH

export PATH="$PREFIX/bin/":$PATH
export LD_LIBRARY_PATH="$PREFIX/bin/":$LD_LIBRARY_PATH

tar -xjvf $DISTR_PATH/$M4_DISTR -C $SRC_PATH
cd $SRC_PATH/$M4_NAME
./configure --prefix=$PREFIX
make
make install

tar -xzvf $DISTR_PATH/$AUTOCONF_DISTR -C $SRC_PATH
cd $SRC_PATH/$AUTOCONF_NAME
./configure --prefix=$PREFIX
make
make install

tar -xzvf $DISTR_PATH/$AUTOMAKE_DISTR -C $SRC_PATH
cd $SRC_PATH/$AUTOMAKE_NAME
./configure --prefix=$PREFIX
make
make install

tar -xzvf $DISTR_PATH/$LIBTOOL_DISTR -C $SRC_PATH
cd $SRC_PATH/$LIBTOOL_NAME
./configure --prefix=$PREFIX
make
make install
artpol84/slurm-pmix-test
prepare_host/prepare_tools/prepare.sh
Shell
bsd-3-clause
946
#!/usr/bin/env bash
#
# Generate src/builtin_table.c
#

PROJECT_ROOT=$(dirname "$(dirname "$(realpath "$0")")")
cd "$PROJECT_ROOT" || exit

SRCDIR=$PROJECT_ROOT/src
GENERATE=$SRCDIR/generate-builtin-table

if [ ! -x "$GENERATE" ] ; then
    echo "please build $GENERATE first"
    exit 1
fi

cp "$SRCDIR"/builtin_table.c.in "$SRCDIR"/builtin_table.c

for TABLE in safe iso8859_1 unicode cp1252 ; do
    echo "process builtin $TABLE"
    $GENERATE "$PROJECT_ROOT"/table/$TABLE.tbl | sed -e"s/NEW/$TABLE/" >> "$SRCDIR"/builtin_table.c
done
dharple/detox
bin/generate-builtin.sh
Shell
bsd-3-clause
526
#!/bin/bash # # This script is designed to be as platform independent as possible. It does # final preparations to run the platform specific test cases of CernVM-FS and # invokes a platform dependent script to steer the actual test session # usage() { local error_msg=$1 echo "$error_msg" echo echo "Mandatory options:" echo "-r <test script> platform specific script (inside the cvmfs sources)" echo echo "Optional parameters:" echo "-p <platform path> custom search path for platform specific script" echo "-u <user name> user name to use for test run" exit 1 } export LC_ALL=C # static information (check also remote_setup.sh and run.sh) cvmfs_workspace="/tmp/cvmfs-test-gcov-workspace" cvmfs_source_directory="${cvmfs_workspace}/cvmfs-source" cvmfs_log_directory="${cvmfs_workspace}/logs" # parameterized variables platform_script="" platform_script_path="" test_username="sftnight" # from now on everything is logged to the logfile # Note: the only output of this script is the absolute path to the generated # log files RUN_LOGFILE="${cvmfs_log_directory}/run.log" sudo touch $RUN_LOGFILE sudo chmod a+w $RUN_LOGFILE sudo chown $test_username:$test_username $RUN_LOGFILE exec &> $RUN_LOGFILE # switch to working directory cd $cvmfs_workspace # read parameters while getopts "r:p:u:" option; do case $option in r) platform_script=$OPTARG ;; p) platform_script_path=$OPTARG ;; u) test_username=$OPTARG ;; ?) shift $(($OPTIND-2)) usage "Unrecognized option: $1" ;; esac done # check if we have all bits and pieces if [ x"$platform_script" = "x" ]; then usage "Missing parameter(s)" fi # find the platform specific script if [ x$platform_script_path = "x" ]; then platform_script_path=${cvmfs_source_directory}/test/cloud_testing/platforms fi platform_script_abs=${platform_script_path}/${platform_script} if [ ! -f $platform_script_abs ]; then echo "platform specific script $platform_script not found here:" echo $platform_script_abs exit 2 fi # run the platform specific script to perform CernVM-FS tests echo "running platform specific script $platform_script ..." sudo -H -E -u $test_username bash $platform_script_abs -t $cvmfs_source_directory \ -l $cvmfs_log_directory
cernvm/ci-scripts
cvmfs/cloud_testing/remote_gcov_run.sh
Shell
bsd-3-clause
2,463
#! /bin/bash -e

function sbt211() {
    sbt 'set scalaVersion := "2.11.0-RC3"' 'set scalaBinaryVersion := scalaVersion.value' $@
    return $?
}

die () {
    echo "$@"
    exit 1
}

CHECK=";clean;test;publishLocal"
RELEASE=";clean;test;publish"

VERSION=`gsed -rn 's/version :=.*"(.+).*"/\1/p' build.sbt`

[[ -n "$(git status --porcelain)" ]] && die "working directory is not clean!"

sbt $CHECK
sbt $RELEASE

sbt211 $CHECK
sbt211 $RELEASE

cat <<EOM
Released!

For non-snapshot releases:
 - tag: git tag -s -a v$VERSION -m "scala-java8-compat $VERSION"
 - push tag: git push origin v$VERSION
 - close and release the staging repository: https://oss.sonatype.org
 - change the version number in build.sbt to a suitable -SNAPSHOT version
EOM
retronym/scala-java8-compat
release.sh
Shell
bsd-3-clause
730
#!/bin/sh # # Copyright (c) 2017, Caoimhe Chaos <caoimhechaos@protonmail.com>. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Ancient Solutions nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. POSTGRESQL_VERSION="9.4" if [ x"$POSTGRESQL_MASTER" = x"" ] then echo "Error: POSTGRESQL_MASTER is not set." 1>&2 exit 1 fi if [ ! -f /secrets/pg_syncpw ] then echo "Error: No PostgreSQL sync password in /secrets/pg_syncpw" 1>&2 exit 1 fi pw="$(cat /secrets/pg_syncpw)" /usr/bin/pg_basebackup -D "/var/lib/postgresql/${POSTGRESQL_VERSION}/main" \ -RP -d "host=${POSTGRESQL_MASTER} port=5432 user=replicator password=${pw} sslmode=require" # Access /usr/bin/pg_conftool -- set listen_addresses '*' /usr/bin/pg_conftool -- set ssl_prefer_server_ciphers on /usr/bin/pg_conftool -- set ssl_ciphers 'ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH' /usr/bin/pg_conftool -- set ssl_renegotiation_limit 512MB /usr/bin/pg_conftool -- set password_encryption on /usr/bin/pg_conftool -- set db_user_namespace off # Tuning paramaters /usr/bin/pg_conftool -- set shared_buffers 480MB /usr/bin/pg_conftool -- set temp_buffers 8MB /usr/bin/pg_conftool -- set max_prepared_transactions 64 /usr/bin/pg_conftool -- set work_mem 10MB /usr/bin/pg_conftool -- set maintenance_work_mem 120MB /usr/bin/pg_conftool -- set max_stack_depth 2MB /usr/bin/pg_conftool -- set dynamic_shared_memory_type posix /usr/bin/pg_conftool -- set full_page_writes on /usr/bin/pg_conftool -- set wal_buffers 4MB /usr/bin/pg_conftool -- set wal_writer_delay 200ms # Query tuning /usr/bin/pg_conftool -- set enable_bitmapscan on /usr/bin/pg_conftool -- set enable_hashagg on /usr/bin/pg_conftool -- set enable_hashjoin on /usr/bin/pg_conftool -- set enable_material on /usr/bin/pg_conftool -- set enable_mergejoin on /usr/bin/pg_conftool -- set enable_nestloop on /usr/bin/pg_conftool -- set enable_seqscan on /usr/bin/pg_conftool -- set enable_sort on /usr/bin/pg_conftool -- set enable_tidscan on /usr/bin/pg_conftool -- set default_statistics_target 10 /usr/bin/pg_conftool -- set constraint_exclusion off /usr/bin/pg_conftool -- set cursor_tuple_fraction 0.1 /usr/bin/pg_conftool -- set from_collapse_limit 8 /usr/bin/pg_conftool -- 
set join_collapse_limit 8 /usr/bin/pg_conftool -- set geqo on /usr/bin/pg_conftool -- set geqo_threshold 12 /usr/bin/pg_conftool -- set geqo_effort 5 /usr/bin/pg_conftool -- set geqo_pool_size 0 /usr/bin/pg_conftool -- set geqo_generations 0 /usr/bin/pg_conftool -- set geqo_selection_bias 2.0 /usr/bin/pg_conftool -- set geqo_seed 0.0 # Vacuuming /usr/bin/pg_conftool -- set autovacuum on /usr/bin/pg_conftool -- set track_activities on /usr/bin/pg_conftool -- set track_counts on /usr/bin/pg_conftool -- set track_io_timing on /usr/bin/pg_conftool -- set track_functions none /usr/bin/pg_conftool -- set track_activity_query_size 1024 /usr/bin/pg_conftool -- set log_autovacuum_min_duration 120000 /usr/bin/pg_conftool -- set autovacuum_max_workers 3 /usr/bin/pg_conftool -- set autovacuum_naptime 1min /usr/bin/pg_conftool -- set autovacuum_vacuum_threshold 50 /usr/bin/pg_conftool -- set autovacuum_analyze_threshold 50 /usr/bin/pg_conftool -- set autovacuum_vacuum_scale_factor 0.2 /usr/bin/pg_conftool -- set autovacuum_analyze_scale_factor 0.1 /usr/bin/pg_conftool -- set autovacuum_freeze_max_age 200000000 /usr/bin/pg_conftool -- set autovacuum_multixact_freeze_max_age 400000000 /usr/bin/pg_conftool -- set autovacuum_vacuum_cost_delay 20ms /usr/bin/pg_conftool -- set autovacuum_vacuum_cost_limit -1 # Replication /usr/bin/pg_conftool -- set hot_standby on /usr/bin/pg_conftool -- set hot_standby_feedback on /usr/bin/pg_conftool -- set wal_level hot_standby /usr/bin/pg_conftool -- set max_wal_senders 5 /usr/bin/pg_conftool -- set wal_keep_segments 8 /usr/bin/pg_conftool -- set checkpoint_segments 8 /usr/bin/pg_conftool -- set checkpoint_completion_target 0.7 # Logging /usr/bin/pg_conftool -- set log_destination stderr /usr/bin/pg_conftool -- set log_parser_stats off /usr/bin/pg_conftool -- set log_planner_stats off /usr/bin/pg_conftool -- set log_executor_stats off /usr/bin/pg_conftool -- set log_statement_stats off /usr/bin/pg_conftool -- set update_process_title on # Locale /usr/bin/pg_conftool -- set lc_messages en_US.UTF-8 /usr/bin/pg_conftool -- set lc_monetary en_US.UTF-8 /usr/bin/pg_conftool -- set lc_numeric en_US.UTF-8 /usr/bin/pg_conftool -- set lc_time en_US.UTF-8 # Secret configuration # TODO(caoimhe): generate hba config from etcd on demand. /usr/bin/pg_conftool -- set ssl_cert_file /secrets/postgresql.crt /usr/bin/pg_conftool -- set ssl_key_file /secrets/postgresql.key /usr/bin/pg_conftool -- set hba_file /secrets/postgresql.hba.conf /usr/bin/pg_conftool -- set ident_file /secrets/postgresql.ident.conf exec "/usr/lib/postgresql/${POSTGRESQL_VERSION}/bin/postmaster" "-D" "/var/lib/postgresql/${POSTGRESQL_VERSION}/main" "-c" "config_file=/etc/postgresql/${POSTGRESQL_VERSION}/main/postgresql.conf"
tonnerre/dockerfiles
db/postgresql/pg_config.sh
Shell
bsd-3-clause
6,331
curl -s https://raw.github.com/netkiller/shell/master/centos6.sh | bash
curl -s https://raw.github.com/netkiller/shell/master/modules/ntp.sh | bash
curl -s https://raw.github.com/netkiller/shell/master/filesystem/btrfs.sh | bash
curl -s https://raw.github.com/netkiller/shell/master/nginx/nginx.sh | bash
curl -s https://raw.github.com/netkiller/shell/master/php/5.4.x.sh | bash
curl -s https://raw.github.com/netkiller/shell/master/php/redis.sh | bash
junun/shell
linux/node.nginx.sh
Shell
bsd-3-clause
453
node LicenseCheck.js
Ehryk/MNLicenseCheck
run.sh
Shell
bsd-3-clause
20
#!/bin/bash

root="${1%/}/"
n=1

leadingchas() {
    for ((i=0; i<$n; i++))
    do
        echo -n " "
    done
}

recursiverm() {
    ((n++))
    for d in *; do
        if [ -d $d ]; then
            (leadingchas)
            echo "<folder name='$d'>"
            (cd $d; recursiverm)
            (leadingchas)
            echo "</folder>"
        else
            x="`pwd`/$d"
            (leadingchas)
            echo -n "<file name='$d' url='"
            echo -n "/absexamples/"
            echo -n ${x#$root}
            echo "' />"
        fi
    done
    ((n--))
}

echo "<examples>"
echo "<exset id='set1'>"
echo "<folder name='collaboratory'>"
(cd $root/collaboratory; recursiverm)
echo "</folder>"
echo "</exset>"
echo "</examples>"
abstools/easyinterface
server/config/envisage/offlineabsexamples.sh
Shell
bsd-3-clause
652
#!/bin/bash
#input
set -xv

debug=1
USAGE='usage: sh $1 $2 ;'\
'$1: is value of [1,2] create file or directory '\
'$2: is number '

echo $USAGE
echo $0
echo $1
echo $2
echo $#

if [ $# -eq 2 ]
then
    : # argument count OK; the original script ends here unfinished
fi
1GHL/2013y
exercise/816/create_file.sh
Shell
bsd-3-clause
194
#!/bin/sh -x
set -e

f=/tmp/sedcmd_
echo -n "sed -i '\
s:__cdecl: /*__cdecl*/:;\
s:__stdcall: /*__stdcall*/:;\
s:__usercall: /*__usercall*/:;\
s:__userpurge: /*__userpurge*/:;\
s:__thiscall: /*__thiscall*/:;\
s:__fastcall: /*__fastcall*/:;\
s:\(<[^<> ]*>\):/*\1*/:g;\
' $1" > $f

. $f
notaz/ia32rtools
run_idac_adj.sh
Shell
bsd-3-clause
285
#!/bin/bash -e

PLUGIN=mtc.lv2
GIT_URI="https://github.com/BlokasLabs/${PLUGIN}"
TMP_DIR=/tmp/${PLUGIN}

rm -rf ${TMP_DIR}
git clone --depth 1 ${GIT_URI} ${TMP_DIR}

pushd ${TMP_DIR}

export CC=arm-linux-gnueabihf-gcc
export CXX=arm-linux-gnueabihf-g++
export LD=arm-linux-gnueabihf-gcc
export STRIP=arm-linux-gnueabihf-strip
export OPTIMIZATIONS="-fno-finite-math-only"
export MOD=1
export LV2DIR=${LV2_DIR}

make -j4
make install

popd

rm -rf ${TMP_DIR}
BlokasLabs/modep
stage5/17-mtc/01-run-chroot.sh
Shell
bsd-3-clause
459
#!/bin/bash
set -v
set -e

# This script is intended to be run INSIDE the docker container, and
# if all goes to plan, the _build/html folder should contain the contents
# of the build

# Turn on our conda environment
source activate docs

# Try to install dependencies on the fly, or rely on the existing environment
#conda install six numpy cython matplotlib requests jinja2 pyyaml

# Build/Install CoolProp and check
cd /coolprop/wrappers/Python
python setup.py bdist_wheel --dist-dir dist cmake=default,64
pip install -vvv --force-reinstall --ignore-installed --upgrade --no-index `ls dist/CoolProp*.whl`
rm -rf dist
cd /coolprop
python -c "import CoolProp; print(CoolProp.__gitrevision__)"
python -c "import CoolProp; print(CoolProp.__file__)"

# Run the slow stuff, if needed, or demanded
cd /coolprop/Web/scripts
python -u __init__.py $1

# Doxygen
cd /coolprop
doxygen --version && doxygen Doxyfile

# api documentation
cd /coolprop/Web
sphinx-apidoc -T -f -e -o apidoc ../wrappers/Python/CoolProp

# All the rest of the docs
cd /coolprop/Web
make html
henningjp/CoolProp
Web/docker/build_docs.sh
Shell
mit
1,061
#!/bin/bash

set -e

cd "$(dirname "$0")"

git clean -f -d -x app web

echo ">> Prepare parameters.yml"
touch app/config/parameters.yml

if [ ! -f composer.phar ]; then
    echo ">> Download composer"
    curl -s http://getcomposer.org/installer | php
fi

echo ">> Install dependencies"
php composer.phar install --optimize-autoloader --prefer-dist

echo ">> Dump assets"
php app/console assetic:dump --env=prod --no-debug

echo ">> Remove development elements"
rm -rf app/cache/*
rm -rf app/logs/*

echo ">> Compress"
if [ -f pack.tar.gz ]; then
    rm pack.tar.gz
fi

ln -s . gitonomy
tar -czf pack.tar.gz --exclude=.git \
    gitonomy/app \
    gitonomy/src/Gitonomy/Bundle \
    gitonomy/src/Gitonomy/Component \
    gitonomy/web \
    gitonomy/vendor \
    gitonomy/install.sh \
    gitonomy/LICENSE \
    gitonomy/README.md

echo ">> Clean"
rm gitonomy
MCHacker/git
pack.sh
Shell
mit
857
babel packages/core/src/ -d packages/core/lib/ &
babel packages/plugin-knex/src/ -d packages/plugin-knex/lib/ &
babel packages/plugin-scaffold/src/ -d packages/plugin-scaffold/lib/ &
wait
echo "Done"
aewing/mason
scripts/build.sh
Shell
mit
201
fails=0
n=0
sdate=$(date +"%s")

for t in test/*-test.js; do
    echo -e "\n[ Bop" $t "]\n"
    node $t || let fails++
    let n++
done

edate=$(date +"%s")
etime=$[ $edate-$sdate ]

echo -e "\n" $n "test files executed ("$etime"s):"
echo -e " tests passed:" $[ $n - $fails ] 'files.'
echo -e " tests failed:" $fails 'files.\n'

exit $fails
beni55/bop
test/run.sh
Shell
mit
331
#!/bin/bash
set -e

upstreamCommit="$1"
dayVersion="${2:-1}"

if [ -z "$upstreamCommit" ]; then
    echo >&2 "usage: $0 commit [day-version]"
    echo >&2 " ie: $0 8d849acb"
    echo >&2 " ie: $0 upstream # to tag the latest local upstream commit"
    echo >&2 " ie: $0 upstream 2 # to tag a second commit in the same day"
    echo >&2
    echo >&2 'DO NOT USE BRANCH NAMES OR TAG NAMES FROM UPSTREAM!'
    echo >&2 'ONLY COMMIT HASHES OR "upstream" WILL WORK PROPERLY!'
    echo >&2
    echo >&2 'Also, you _must_ update your upstream remote and branch first, because that'
    echo >&2 'is where these commits come from ultimately.'
    echo >&2 '(See also the "update-upstream-branch.sh" helper.)'
    exit 1
fi

dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"

"$dir/setup-upstream-remote.sh"

git fetch -qp --all || true

commit="$(git log -1 --date='short' --pretty='%h' "$upstreamCommit" --)"
unix="$(git log -1 --format='%at' "$commit" --)"
gitTime="$(TZ=UTC date --date="@$unix" +'%Y%m%d')"

version="0.0~git${gitTime}.${dayVersion}.${commit}"

echo
echo "commit $commit becomes version $version"
echo

tag="upstream/${version//'~'/_}"
( set -x; git tag -f "$tag" "$commit" )

echo
echo "local tag '$tag' created"
echo
echo 'use the following to push it:'
echo
echo " git push -f origin $tag:$tag"
echo
echo 'if this upstream version has not been merged into master yet, use:'
echo
echo " git merge $tag"
echo
tianon/debian-golang-pty
debian/helpers/create-upstream-tag.sh
Shell
mit
1,391
#!/bin/sh
python -m tensorflow.models.image.mnist.convolutional
cncgl/ML-sandbox
tensorflow-nmist.sh
Shell
mit
64
#!/usr/bin/env bash

set -e

mkdir -p $HOME/.ethash
# this will generate the DAG once, travis is configured to cache it and
# subsequent calls will not regenerate
geth makedag 0 $HOME/.ethash
tomaaron/raiden
.travis/make_dag.sh
Shell
mit
193
#!/bin/bash
FN="simpIntLists_1.20.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.9/data/experiment/src/contrib/simpIntLists_1.20.0.tar.gz"
  "https://bioarchive.galaxyproject.org/simpIntLists_1.20.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-simpintlists/bioconductor-simpintlists_1.20.0_src_all.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-simpintlists/bioconductor-simpintlists_1.20.0_src_all.tar.gz"
)
MD5="27879fea1d9ddd0404829b7de8eaae77"

# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
  curl $URL > $TARBALL
  [[ $? == 0 ]] || continue
  # Platform-specific md5sum checks.
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5  $TARBALL"; then
      SUCCESS=1
      break
    fi
  else if [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
  fi
done

if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi

# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
jerowe/bioconda-recipes
recipes/bioconductor-simpintlists/post-link.sh
Shell
mit
1,430
#! /bin/sh
# Copyright (C) 2010-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# Diagnose if the autoconf input is named configure.in.
# Diagnose if both configure.in and configure.ac are present, prefer
# configure.ac.

. test-init.sh

cat >configure.ac <<EOF
AC_INIT([$me], [1.0])
AM_INIT_AUTOMAKE
AC_CONFIG_FILES([Makefile])
EOF

cat >configure.in <<EOF
AC_INIT([$me], [1.0])
AM_INIT_AUTOMAKE([an-invalid-automake-option])
AC_CONFIG_FILES([Makefile])
EOF

: >Makefile.am

$ACLOCAL 2>stderr && { cat stderr >&2; exit 1; }
cat stderr >&2
grep 'configure\.ac.*configure\.in.*both present' stderr

$ACLOCAL -Wno-error 2>stderr || { cat stderr >&2; exit 1; }
cat stderr >&2
grep 'configure\.ac.*configure\.in.*both present' stderr
grep 'proceeding.*configure\.ac' stderr

# Ensure we really proceed with configure.ac.
AUTOMAKE_fails -Werror
grep 'configure\.ac.*configure\.in.*both present' stderr
grep 'proceeding.*configure\.ac' stderr

AUTOMAKE_run -Wno-error
grep 'configure\.ac.*configure\.in.*both present' stderr
grep 'proceeding.*configure\.ac' stderr

mv -f configure.ac configure.in
AUTOMAKE_fails
grep "autoconf input.*'configure.ac', not 'configure.in'" stderr

:
kuym/openocd
tools/automake-1.15/t/configure.sh
Shell
gpl-2.0
1,785
#!/bin/sh
# Copyright (C) 2007-2008 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

test_description='Test vgmerge operation'

. lib/test

aux prepare_pvs 4 64

# 'vgmerge succeeds with single linear LV in source VG'
vgcreate -c n $vg1 $dev1 $dev2
vgcreate -c n $vg2 $dev3 $dev4
lvcreate -l 4 -n $lv1 $vg1 $dev1
vgchange -an $vg1
check pvlv_counts $vg1 2 1 0
check pvlv_counts $vg2 2 0 0
vgmerge $vg2 $vg1
check pvlv_counts $vg2 4 1 0
vgremove -f $vg2

# 'vgmerge succeeds with single linear LV in source and destination VG'
vgcreate -c n $vg1 $dev1 $dev2
vgcreate -c n $vg2 $dev3 $dev4
lvcreate -l 4 -n $lv1 $vg1
lvcreate -l 4 -n $lv2 $vg2
vgchange -an $vg1
vgchange -an $vg2
check pvlv_counts $vg1 2 1 0
check pvlv_counts $vg2 2 1 0
vgmerge $vg2 $vg1
check pvlv_counts $vg2 4 2 0
vgremove -f $vg2

# 'vgmerge succeeds with linear LV + snapshots in source VG'
vgcreate -c n $vg1 $dev1 $dev2
vgcreate -c n $vg2 $dev3 $dev4
lvcreate -l 16 -n $lv1 $vg1
lvcreate -l 4 -s -n $lv2 $vg1/$lv1
vgchange -an $vg1
check pvlv_counts $vg1 2 2 1
check pvlv_counts $vg2 2 0 0
vgmerge $vg2 $vg1
check pvlv_counts $vg2 4 2 1
lvremove -f $vg2/$lv2
vgremove -f $vg2

# 'vgmerge succeeds with mirrored LV in source VG'
vgcreate -c n $vg1 $dev1 $dev2 $dev3
vgcreate -c n $vg2 $dev4
lvcreate -l 4 -n $lv1 -m1 $vg1
vgchange -an $vg1
check pvlv_counts $vg1 3 1 0
check pvlv_counts $vg2 1 0 0
vgmerge $vg2 $vg1
check pvlv_counts $vg2 4 1 0
lvremove -f $vg2/$lv1
vgremove -f $vg2

# 'vgmerge rejects LV name collision'
vgcreate -c n $vg1 $dev1 $dev2
vgcreate -c n $vg2 $dev3 $dev4
lvcreate -l 4 -n $lv1 $vg1
lvcreate -l 4 -n $lv1 $vg2
vgchange -an $vg1
check pvlv_counts $vg1 2 1 0
check pvlv_counts $vg2 2 1 0
not vgmerge $vg2 $vg1 2>err
grep "Duplicate logical volume name \"$lv1\" in \"$vg2\" and \"$vg1" err
check pvlv_counts $vg1 2 1 0
check pvlv_counts $vg2 2 1 0
vgremove -f $vg1
vgremove -f $vg2
michael42/dmcrypt-static-android
test/shell/vgmerge-operation.sh
Shell
gpl-2.0
2,299
#!/bin/bash
#
# Start the simulation
#
# Be sure to create a Makefile first and compile all Verilog and VHDL files.
#

vsim -t ps -voptargs=+acc MAX6682_tb_verilog_cfg -do "do wave-app.do ; run -all"
hansiglaser/chll
examples/wsn-soc/apps/max6682/sim-yosys/sim.sh
Shell
gpl-2.0
200
#!/bin/sh
# Copyright (C) 2010 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

SKIP_WITH_LVMPOLLD=1

. lib/inittest

aux prepare_vg 5 80
aux lvmconf 'allocation/maximise_cling = 0' \
            'allocation/mirror_logs_require_separate_pvs = 1'

# 2-way mirror with corelog, 2 PVs
lvcreate -aey -l2 --type mirror -m1 --mirrorlog core -n $lv1 $vg "$dev1" "$dev2"
check mirror_images_redundant $vg $lv1

# 2-way mirror with disklog, 3 PVs
# lvcreate --nosync is in 100% sync after creation (bz429342)
lvcreate -aey -l2 --type mirror -m1 --nosync -n $lv2 $vg "$dev1" "$dev2" "$dev3":0-1 2>&1 | tee out
grep "New mirror won't be synchronised." out
check lv_field $vg/$lv2 copy_percent "100.00"
check mirror_images_redundant $vg $lv2
check mirror_log_on $vg $lv2 "$dev3"

# 3-way mirror with disklog, 4 PVs
lvcreate -aey -l2 --type mirror -m2 --nosync --mirrorlog disk -n $lv3 $vg "$dev1" "$dev2" "$dev4" "$dev3":0-1
check mirror_images_redundant $vg $lv3
check mirror_log_on $vg $lv3 "$dev3"
lvremove -ff $vg

# creating 2-way mirror with disklog from 2 PVs fails
not lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2"

vgremove -ff $vg
shehbazj/DyRe
test/shell/lvcreate-mirror.sh
Shell
gpl-2.0
1,521
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2008-2017 Mike Shal <marfey@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

echo "Skip t5005 - not needed?"
exit 0
. ./tup.sh

echo 'this is a file' > file1
ln file1 file2
cat > Makefile << HERE
all: new-file1 new-file2

new-%: %
	tup link "cp \$< \$@" -i\$< -o\$@
HERE
tup touch file1 file2 Makefile
update
check_exist new-file1 new-file2

rm new-file1 new-file2
tup touch file1
update
check_exist new-file1 new-file2

eotup
fasterthanlime/tup-fuseless
test/t5005-hard-link.sh
Shell
gpl-2.0
1,110
#!/bin/sh

set -e
set -x

# TODO: This isn't ideal.
cd externals
git clone https://github.com/MerryMage/ext-boost
git clone https://github.com/yuzu-emu/unicorn
cd unicorn
UNICORN_ARCHS=aarch64 ./make.sh
cd ../..

mkdir -p $HOME/.local
curl -L https://cmake.org/files/v3.8/cmake-3.8.0-Linux-x86_64.tar.gz \
    | tar -xz -C $HOME/.local --strip-components=1
DaMan69/dynarmic
.travis/test-a64-on-x86_64-linux/deps.sh
Shell
gpl-2.0
357
#!/bin/sh

exec </dev/console >/dev/console 2>&1
set -x
export PATH=/sbin:/bin:/usr/sbin:/usr/bin
export TERM=linux
export PS1='nbdtest-server:\w\$ '
stty sane
echo "made it to the rootfs!"
echo server > /proc/sys/kernel/hostname

wait_for_if_link() {
    local cnt=0
    local li
    while [ $cnt -lt 600 ]; do
        li=$(ip -o link show dev $1 2>/dev/null)
        [ -n "$li" ] && return 0
        if [[ $2 ]]; then
            li=$(ip -o link show dev $2 2>/dev/null)
            [ -n "$li" ] && return 0
        fi
        sleep 0.1
        cnt=$(($cnt+1))
    done
    return 1
}

wait_for_if_up() {
    local cnt=0
    local li
    while [ $cnt -lt 200 ]; do
        li=$(ip -o link show up dev $1)
        [ -n "$li" ] && return 0
        sleep 0.1
        cnt=$(($cnt+1))
    done
    return 1
}

wait_for_route_ok() {
    local cnt=0
    while [ $cnt -lt 200 ]; do
        li=$(ip route show)
        [ -n "$li" ] && [ -z "${li##*$1*}" ] && return 0
        sleep 0.1
        cnt=$(($cnt+1))
    done
    return 1
}

linkup() {
    wait_for_if_link $1 2>/dev/null\
        && ip link set $1 up 2>/dev/null\
        && wait_for_if_up $1 2>/dev/null
}

wait_for_if_link eth0 ens3

ip addr add 127.0.0.1/8 dev lo
ip link set lo up
ip link set dev eth0 name ens3
ip addr add 192.168.50.1/24 dev ens3
linkup ens3

modprobe af_packet

nbd-server

>/var/lib/dhcpd/dhcpd.leases
chmod 777 /var/lib/dhcpd/dhcpd.leases
dhcpd -d -cf /etc/dhcpd.conf -lf /var/lib/dhcpd/dhcpd.leases &

echo "Serving NBD disks"

while :; do
    [ -n "$(jobs -rp)" ] && echo > /dev/watchdog
    sleep 10
done

mount -n -o remount,ro /
poweroff -f
yuwata/dracut
test/TEST-40-NBD/server-init.sh
Shell
gpl-2.0
1,587
# add the Linaro GCC 4.7 cross-toolchain to PATH and build the ARM kernel
export PATH=$PATH:/home/xpaum/android/gcc-linaro-4.7/bin
make ARCH=arm CROSS_COMPILE=arm-eabi-
xpaum/kernel_stock_g3815
kernel.sh
Shell
gpl-2.0
96
process_id=$$
x=`ps -ef|awk '{if($2!="'$process_id'")print}'|grep "cross_process_bidata"|grep -v grep|wc -l|awk '{print $1}'`
if [[ "$x" -gt 1 ]];then
  #echo "waiting for another process"
  #echo "$x"
  exit
fi

if [[ -f ~/.bash_profile ]];then
  . ~/.bash_profile
elif [[ -f ~/.profile ]];then
  . ~/.profile
fi

cd /home/oracle/dump

## Clear unfinished dump jobs with owner name uxinsight
unfinishedJobsList=unfinishedJobsList.lst
>$unfinishedJobsList
sqlplus -s '/ as sysdba' <<!
set lines 200
set pages 9999
set heading off
set feedback off
col job_name format a100
spool $unfinishedJobsList
select ''''||replace(job_name,'=','\=')||'''' JN from dba_datapump_jobs where owner_name='UXINSIGHT' and state != 'NOT RUNNING';
spool off
!
cat $unfinishedJobsList|awk '{if(NF>0)print $1}'|while read line ; do
  expdp uxinsight/oracle123 ATTACH=$line <<!
kill_job
yes
exit
!
done
rm -f $unfinishedJobsList

## Main
for table in WG__BIDATA_MASTER WG__BIDATA_PROPERTIES WG__BIDATA_USERFLOWS ; do
  ## Read the source table partitions
  sourceTablePartsList=${table}_parts.lst
  >$sourceTablePartsList
  sqlplus -s 'uxinsight/oracle123' <<!
set lines 100
set pages 9999
set heading off
set feedback off
col partition_name format a30
col high_value format a10
col partition_position format 99999
spool $sourceTablePartsList
select partition_name, high_value from user_tab_partitions where table_name='$table' order by partition_position;
spool off
!
  if [[ -f $sourceTablePartsList ]] ; then
    ## Get the source pointer
    tmpTable='POINTER_FOR_BIDATA_MASTER';
    if [[ "$table" == "WG__BIDATA_PROPERTIES" ]];then
      tmpTable='POINTER_FOR_BIDATA_PROPER';
    elif [[ "$table" == "WG__BIDATA_USERFLOWS" ]];then
      tmpTable='POINTER_FOR_BIDATA_USERFL';
    fi
    currPeriodID=`{ echo "set heading off"; echo "set feedback off"; echo "select period_id from $tmpTable ;"; }|sqlplus -s 'uxinsight/oracle123'|awk '{if(NF>0)print $1}'`
    echo ""
    echo "Current Period_ID=$currPeriodID"
    echo ""

    ## Get the prepaired source partitions
    prepairedPartsList=${sourceTablePartsList}.prepaired_parts
    >$prepairedPartsList
    line="";
    cat $sourceTablePartsList|awk '{if(NF>0)print}'|while read line;do
      part_name=`echo $line|awk '{print $1}'`
      high_value=`echo $line|awk '{print $2}'`
      if [[ $high_value -lt $currPeriodID && $high_value != 0 ]];then
        echo $part_name >> $prepairedPartsList
      fi
    done

    ## Remove the partitions which have been done
    alreadyDoneParts=${sourceTablePartsList}.already_done_parts.lst
    >$alreadyDoneParts
    sqlplus -s 'uxinsight/oracle123'@ls2 <<!
set lines 100
set pages 9999
set heading off
set feedback off
spool $alreadyDoneParts
select distinct partition_name from bidata_parts_status where table_name='$table' and (status = 'DUMPED' or status like 'DOWNLOAD%' or status like 'IMPORT%' or status like 'ANAL%' or status like 'CLEAR%' or status='DONE');
spool off
!
    line=""
    inList="false"
    cat $alreadyDoneParts|awk '{if(NF>0)print $1}'|while read line;do
      i=1
      inList="false"
      lineNo=0
      while [[ $i -le `cat $prepairedPartsList|wc -l|awk '{print $1}'` ]];do
        tmpline=`head -$i $prepairedPartsList|tail -1|awk '{print $1}'`
        if [[ "$line" == "$tmpline" ]];then
          inList="true"
          lineNo=$i
        fi
        i=`expr $i + 1`
      done
      if [[ "$inList" == "true" ]];then
        sed "$lineNo d" $prepairedPartsList > $prepairedPartsList.tmp && mv $prepairedPartsList.tmp $prepairedPartsList
      fi
    done
    rm -f $alreadyDoneParts
    cat $prepairedPartsList

    ## Process prepaired parts list line by line
    cat $prepairedPartsList|awk '{if(NF>0)print $1}'|while read part;do
      echo ""
      echo "Processing partition $part for table $table"
      echo ""
      exchangeTable="EX_MASTER_${part}"
      if [[ $table == "WG__BIDATA_PROPERTIES" ]];then
        exchangeTable="EX_PROPER_${part}"
      elif [[ $table == "WG__BIDATA_USERFLOWS" ]];then
        exchangeTable="EX_USERFL_${part}"
      fi
      high_value=`cat $sourceTablePartsList|awk '{if($1=="'$part'")print $2}'`
      now=`date +"%d%H"`
      dumpfile=${table}.${part}.${now}.$RANDOM.dmp
      sqlplus 'uxinsight/oracle123'@ls2 <<!
insert into bidata_parts_status (job_time,host_name,table_name, partition_name, dumpfile, exchange_table, high_value, status, start_time) values (sysdate,'$HOSTNAME','$table','$part','$dumpfile','$exchangeTable','$high_value','PROCESSING',sysdate);
commit;
exit;
!
      ## Create temp table for partition exchange on source database
      extCnt=`{ echo "select count(1) CNT from user_tables where table_name ='$exchangeTable';" ; } | sqlplus -s 'uxinsight/oracle123'|grep -v CNT|awk '{if(NF==1 && substr($1,1,1)!="-")print $1}'`
      ## Clear previously dirty data
      if [[ "$extCnt" != 0 ]];then
        sqlplus 'uxinsight/oracle123' <<!
drop table $exchangeTable ;
!
      fi
      ## Exchange partition to table
      sqlplus 'uxinsight/oracle123' <<!
create table $exchangeTable as select * from $table where rownum<1 ;
alter table $table exchange partition $part with table $exchangeTable ;
!
      ## Dump exchanged table out to directory expdump
      sqlplus 'uxinsight/oracle123'@ls2 <<!
update bidata_parts_status set status='DUMPING',start_exp_time=sysdate where host_name='$HOSTNAME' and table_name='$table' and partition_name='$part' and dumpfile='$dumpfile' and high_value='$high_value' and exchange_table='$exchangeTable' and status='PROCESSING';
commit;
exit;
!
      rm -f /home/oracle/dump/${dumpfile}*
      #expdp uxinsight/oracle123 tables=uxinsight.$exchangeTable directory=expdump dumpfile=$dumpfile logfile=$dumpfile.log compression=all parallel=6
      expdp uxinsight/oracle123 tables=uxinsight.$exchangeTable directory=expdump dumpfile=$dumpfile logfile=$dumpfile.log compression=all
      ## Inform LS2 to get the dumpfile and do the rest work
      sqlplus 'uxinsight/oracle123'@ls2 <<!
update bidata_parts_status set status='DUMPED',end_exp_time=sysdate where host_name='$HOSTNAME' and table_name='$table' and partition_name='$part' and high_value='$high_value' and exchange_table='$exchangeTable' and (status='DUMPING' or status='PROCESSING');
commit;
exit;
!
    done

    ## Clear work area
    rm -f $prepairedPartsList
    rm -f $sourceTablePartsList
  fi
done
lyalls/SmartMON
monitor/other_code/ruei_cross/cross_process_bidata_dumpdata.sh
Shell
gpl-3.0
6,185
#
# prepare stuff for Galaxy 7
#

COPY_AS_IS+=( "${COPY_AS_IS_GALAXY7[@]}" )
COPY_AS_IS_EXCLUDE+=( "${COPY_AS_IS_EXCLUDE_GALAXY7[@]}" )
PROGS+=( chgrp touch )

# include argument file if specified
if test "$GALAXY7_Q_ARGUMENTFILE" ; then
    COPY_AS_IS+=( "$GALAXY7_Q_ARGUMENTFILE" )
fi
gozora/rear
usr/share/rear/prep/GALAXY7/default/400_prep_galaxy.sh
Shell
gpl-3.0
285
#!/bin/sh

wget https://github.com/jacobalberty/root-ro/raw/master/root-ro
wget https://github.com/jacobalberty/root-ro/raw/master/raspi-gpio
chmod 0755 root-ro
chmod 0755 raspi-gpio
sudo mv root-ro /etc/initramfs-tools/scripts/init-bottom
sudo mv raspi-gpio /etc/initramfs-tools/hooks
echo overlay | sudo tee --append /etc/initramfs-tools/modules > /dev/null
sudo apt-get install -y raspi-gpio
sudo mkinitramfs -o /boot/initrd
sudo cat <<"EOF" | sudo tee --append /boot/config.txt > /dev/null
initramfs initrd followkernel
ramfsfile=initrd
ramfsaddr=-1
EOF
sudo sed -i -e '/rootwait/s/$/ root-ro-driver=overlay root-rw-pin=21/' /boot/cmdline.txt
tomeshnet/prototype-cjdns-pi
contrib/ramdisk-overlay/raspbian.sh
Shell
gpl-3.0
718
#!/bin/bash

# check whether the "uomo" directory exists, i.e. whether the answer
# to the governor's question is right or not
ok() {
	cat ../.settings/$GAME/testi_livello_3/ok
	cp ../.settings/$GAME/testi_livello_4/leggimi.txt ../livello_4/leggimi.txt

	if [ $GAME == "monkey_island" ] ; then
		PAROLA='banana'
		RISPOSTA='monkey'
	fi
	if [ $GAME == "matrix" ] ; then
		PAROLA='cella'
		RISPOSTA='vigilant'
	fi
	if [ $GAME == "star_wars" ] ; then
		PAROLA='jarjar'
		RISPOSTA='fener'
	fi

	touch ../livello_4/$PAROLA

	echo "| Dovrai ** creare una parola ** utilizzando le lettere di altre parole. |" >> ../livello_4/leggimi.txt
	echo "| Le parole che ti serviranno sono i nomi dei file contenuti |" >> ../livello_4/leggimi.txt
	echo "| nella cartella archivio |" >> ../livello_4/leggimi.txt
	echo "| visualizza i file all'interno di archivio con il comando |" >> ../livello_4/leggimi.txt
	echo "| ls -l archivio |" >> ../livello_4/leggimi.txt
	echo "| |" >> ../livello_4/leggimi.txt
	echo "| Esempio: $ ls -l archivio |" >> ../livello_4/leggimi.txt
	echo "|Permessi H.Link Utente gruppo Dim. Data +++Ore e minuti +++ Nome |" >> ../livello_4/leggimi.txt
	echo "| -rw-r--r-- 1 scout scout 2403 ago 27 20:10 leggimi.txt |" >> ../livello_4/leggimi.txt
	echo "| |" >> ../livello_4/leggimi.txt
	echo "| per decifrare la parola è importante fare attenzione all'ordine |" >> ../livello_4/leggimi.txt
	echo "| in cui sono disposte le ore, i minuti e i nomi |" >> ../livello_4/leggimi.txt
	echo "| |" >> ../livello_4/leggimi.txt
	echo "| Quando avrai scoperto la parola |" >> ../livello_4/leggimi.txt
	echo "| rinomina il file $PAROLA con il comando |" >> ../livello_4/leggimi.txt
	echo "| mv $PAROLA codice_trovato |" >> ../livello_4/leggimi.txt
	echo "| |" >> ../livello_4/leggimi.txt
	echo "| e passa al livello sucessivo con il comando |" >> ../livello_4/leggimi.txt
	echo "| source azione.sh |" >> ../livello_4/leggimi.txt
	echo "| |" >> ../livello_4/leggimi.txt
	echo "| |" >> ../livello_4/leggimi.txt
	echo "|___________________________________________________________________________|" >> ../livello_4/leggimi.txt

	rm -r uomo
	mkdir -p ../livello_4/archivio
	source .oggetti.sh
	#alias ls='function _ls() { ls -l $0| awk '{print $8 "\t" $9}'}; _ls'
	NUMERO_LIVELLO=4
	export LIVELLO=$LIVELLO_STRINGA$NUMERO_LIVELLO
	cd ../livello_4
}

sbagliato() {
	echo
	echo Mi spiace, ma la risposta è errata! Puoi riprovare.
	echo
}

if [ -d ./uomo ]; then
	ok
else
	sbagliato
fi
BitPrepared/Mayalinux
.game/livello_3/azione.sh
Shell
gpl-3.0
3,334
#!/bin/bash

for r in */
do
	if [[ -d "$r/dot_git" && -L "$r/.git" ]]
	then
		echo "unlink $r"
		rm "$r/.git"
	fi
done
mpdeimos/git-repo-zipper
test-data/unlink.sh
Shell
gpl-3.0
119
#!/bin/bash
# set -x

if [ ! -v tree ] ; then
  # you must set the location of the cMIPS root directory in the variable tree
  # tree=${HOME}/cMIPS
  # export tree="$(dirname "$(pwd)")"
  export tree="$(echo $PWD | sed -e 's:\(/.*/cMIPS\)/.*:\1:')"
fi

# path to cross-compiler and binutils must be set to your installation
WORK_PATH=/home/soft/linux/mips/cross/bin
HOME_PATH=/opt/cross/bin

if [ -x /opt/cross/bin/mips-gcc ] ; then
    export PATH=$PATH:$HOME_PATH
elif [ -x /home/soft/linux/mips/cross/bin/mips-gcc ] ; then
    export PATH=$PATH:$WORK_PATH
else
    echo -e "\n\n\tPANIC: cross-compiler not installed\n\n" ; exit 1;
fi

usage() {
cat << EOF
usage: $0 some_file_name.elf

creates ROM.mif from an ELF object file some_file_name.elf

OPTIONS:
   -h      Show this message
EOF
}

if [ $# = 0 ] ; then usage ; exit 1 ; fi

inp=${1%.elf}

if [ ${inp}.elf != $1 ] ; then
    usage ; echo "  invalid input: $1"; exit 1
fi

elf=$1

x_ROM_BASE=$(sed -n '/x_INST_BASE_ADDR/s/.*:= x"\(.*\)".*$/\1/p' $tree/vhdl/packageMemory.vhd)
ROM_BASE=$((16#$x_ROM_BASE))
x_ROM_SIZE=$(sed -n '/x_INST_MEM_SZ/s/.*:= x"\(.*\)".*$/\1/p' $tree/vhdl/packageMemory.vhd)
ROM_SZ=$((16#$x_ROM_SIZE))

mif=ROM.mif
tmp=ROM.tmp

mips-objdump -z -D -EL --section .text $elf |\
    sed -e '1,6d' -e '/^$/d' -e '/^ /!d' -e 's:\t: :g' \
        -e 's#^ *\([a-f0-9]*\): *\(........\) *\(.*\)$#\2;#' |\
    awk 'BEGIN{c='$ROM_BASE';} //{ printf "%d : %s\n",c,$1 ; c=c+1; }' > $tmp

echo -e "\n-- cMIPS code\n\nDEPTH=${ROM_SZ};\nWIDTH=32;\n" > $mif
echo -e "ADDRESS_RADIX=DEC;\nDATA_RADIX=HEX;\nCONTENT BEGIN" >> $mif
cat $tmp >> $mif
echo "END;" >> $mif

x_RAM_BASE=$(sed -n '/x_DATA_BASE_ADDR/s/.*:= x"\(.*\)".*$/\1/p' $tree/vhdl/packageMemory.vhd)
RAM_BASE=$((16#$x_RAM_BASE))
x_RAM_SIZE=$(sed -n '/x_DATA_MEM_SZ/s/.*:= x"\(.*\)".*$/\1/p' $tree/vhdl/packageMemory.vhd)
RAM_SZ=$((16#$x_RAM_SIZE))

mif=RAM.mif
tmp=RAM.tmp

mips-objdump -z -D -EL --section .data --section .rodata --section rodata1 --section .data1 --section .sdata --section .lit8 --section .lit4 --section .sbss --section .bss $elf |\
    sed -e '1,6d' -e '/^$/d' -e '/^ /!d' -e 's:\t: :g' \
        -e 's#^ *\([a-f0-9]*\): *\(........\) *\(.*\)$#\2;#' |\
    awk 'BEGIN{c='$RAM_BASE';} //{ printf "%d : %s\n",c,$1 ; c=c+1; }' > $tmp

echo -e "\n-- cMIPS data\n\nDEPTH=${RAM_SZ};\nWIDTH=32;\n" > $mif
echo -e "ADDRESS_RADIX=DEC;\nDATA_RADIX=HEX;\nCONTENT BEGIN" >> $mif
cat $tmp >> $mif
echo "END;" >> $mif

# rm -f {ROM,RAM}.tmp

exit 0
rhexsel/cmips
cMIPS/bin/elf2mif.sh
Shell
gpl-3.0
2,516
#!/bin/sh -e

# creates SLHA output files for all SLHA input files in the directory
# model_files/ (and sub-directories) that begin with LesHouches.in. .
# The names of the output files will begin with LesHouches.out. .
#
# Author: Alexander Voigt

# directory of this script
BASEDIR=$(dirname $0)
HOMEDIR=$(readlink -f "${BASEDIR}/../")
FSCONFIG="${HOMEDIR}/flexiblesusy-config"
model_file_dir="$BASEDIR/../model_files"
directory=.

#_____________________________________________________________________
help() {
cat <<EOF
Usage: ./`basename $0` [options]
Options:
  --directory=    Output directory (default: ${directory})
  --help,-h       Print this help message
EOF
}

if test $# -gt 0 ; then
    while test ! "x$1" = "x" ; do
        case "$1" in
            -*=*) optarg=`echo "$1" | sed 's/[-_a-zA-Z0-9]*=//'` ;;
            *) optarg= ;;
        esac

        case $1 in
            --directory=*)   directory=$optarg ;;
            --help|-h)       help; exit 0 ;;
            *)  echo "Invalid option '$1'. Try $0 --help" ; exit 1 ;;
        esac
        shift
    done
fi

default_input_files="\
models/CMSSM/LesHouches.in.CMSSM \
models/CMSSMSemiAnalytic/LesHouches.in.CMSSMSemiAnalytic \
models/MSSM/LesHouches.in.MSSM \
models/MSSMatMGUT/LesHouches.in.MSSMatMGUT \
models/MSSMNoFV/LesHouches.in.MSSMNoFV \
models/MSSMNoFVatMGUT/LesHouches.in.MSSMNoFVatMGUT \
models/CMSSMNoFV/LesHouches.in.CMSSMNoFV \
models/NUHMSSM/LesHouches.in.NUHMSSM \
models/lowMSSM/LesHouches.in.lowMSSM \
models/MSSMRHN/LesHouches.in.MSSMRHN \
models/NMSSM/LesHouches.in.NMSSM \
models/NUTNMSSM/LesHouches.in.NUTNMSSM \
models/NUTSMSSM/LesHouches.in.NUTSMSSM \
models/lowNMSSM/LesHouches.in.lowNMSSM \
models/lowNMSSMTanBetaAtMZ/LesHouches.in.lowNMSSMTanBetaAtMZ \
models/SMSSM/LesHouches.in.SMSSM \
models/UMSSM/LesHouches.in.UMSSM \
models/E6SSM/LesHouches.in.E6SSM \
models/MRSSM2/LesHouches.in.MRSSM2 \
models/TMSSM/LesHouches.in.TMSSM \
models/SM/LesHouches.in.SM \
models/HSSUSY/LesHouches.in.HSSUSY \
models/SplitMSSM/LesHouches.in.SplitMSSM \
models/THDMII/LesHouches.in.THDMII \
models/THDMIIMSSMBC/LesHouches.in.THDMIIMSSMBC \
models/HTHDMIIMSSMBC/LesHouches.in.HTHDMIIMSSMBC \
models/HGTHDMIIMSSMBC/LesHouches.in.HGTHDMIIMSSMBC \
models/MSSMEFTHiggs/LesHouches.in.MSSMEFTHiggs \
models/NMSSMEFTHiggs/LesHouches.in.NMSSMEFTHiggs \
models/E6SSMEFTHiggs/LesHouches.in.E6SSMEFTHiggs \
models/MRSSMEFTHiggs/LesHouches.in.MRSSMEFTHiggs \
models/CNMSSM/LesHouches.in.CNMSSM \
models/CE6SSM/LesHouches.in.CE6SSM \
models/MSSMNoFVatMGUTHimalaya/LesHouches.in.MSSMNoFVatMGUTHimalaya \
models/MSSMNoFVHimalaya/LesHouches.in.MSSMNoFVHimalaya \
models/NUHMSSMNoFVHimalaya/LesHouches.in.NUHMSSMNoFVHimalaya \
"

[ -z "${directory}" ] && directory=.
[ ! -d "${directory}" ] && mkdir -p "${directory}"

models=$("$FSCONFIG" --models)

echo "Configured models: $models"
echo

for model in ${models}; do
    # collect all input files that belong to the model
    input_files=

    for dif in ${default_input_files}; do
        case "${dif}" in
            ${model}/*) input_files="${input_files} ${HOMEDIR}/${dif}" ;;
        esac
    done

    for ifile in ${input_files}; do
        ofile=$(echo "${directory}/$(basename ${ifile})" | sed -e 's/\.in\./.out./')
        sg=$(echo "${model}" | awk -F / '{ print $NF }')
        exe="${HOMEDIR}/${model}/run_${sg}.x"
        cmd="${exe} --slha-input-file=${ifile} --slha-output-file=${ofile} > /dev/null 2>&1"
        printf "%s" "${cmd}"
        eval "${cmd}" || { printf " [FAIL]\n"; exit 1; }
        printf " [OK]\n"
    done
done
Expander/FlexibleSUSY
release/generate-slha-output.sh
Shell
gpl-3.0
3,593
#!/bin/bash

set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"

. pegasus-lite-common.sh

pegasus_lite_init

# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT

echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir

echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package

echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x wikiflow-sessioncompute_1-1.0

echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n wikiflow::sessioncompute_1:1.0 -N ID0000004 -R condorpool -L example_workflow -T 2017-01-18T13:31:45+00:00 ./wikiflow-sessioncompute_1-1.0
job_ec=$?
set -e
elainenaomi/sciwonc-dataflow-examples
dissertation2017/Experiment 2/logs/w-08_2/20170118T133145+0000/00/00/sessioncompute_1_ID0000004.sh
Shell
gpl-3.0
1,228
################################################################################
# Add user packages to RPM database after update
################################################################################

cernvm_start() {
  if ls /var/lib/cernvm-update/run/*.rpm >/dev/null 2>&1; then
    echo
    for PKG in /var/lib/cernvm-update/run/*.rpm; do
      echo -n "Re-registering ${PKG}... "
      rpm -i --justdb --replacefiles --oldpackage "$PKG"
      if [ $? -eq 0 ]; then
        rm -f "$PKG"
        echo "OK"
      fi
    done
  fi
}

cernvm_stop() {
  :
}
cernvm/cernvm-config
etc/cernvm/cernvm.d/S90reinstallpackages.sh
Shell
gpl-3.0
568
#!/bin/sh

mkdir /etc/ssl/private-copy; mv /etc/ssl/private/* /etc/ssl/private-copy/; rm -r /etc/ssl/private; mv /etc/ssl/private-copy /etc/ssl/private; chmod -R 0700 /etc/ssl/private; chown -R postgres /etc/ssl/private

sudo -u postgres /usr/lib/postgresql/9.4/bin/postgres -D /var/lib/postgresql/9.4/main -c config_file=/etc/postgresql/9.4/main/postgresql.conf
Terry-Weymouth/transmart-docker
1.2.4/embedded/start-postgres.sh
Shell
gpl-3.0
361
#!/bin/sh
# generates dbdriver.h

tmp=mk_dbdriver_h.tmp.$$

cat <<'EOT'> dbdriver.h
/* this file was automatically generated by ../mk_dbdriver_h.sh */
#ifndef DBDRIVER_H
#define DBDRIVER_H

#include <grass/dbstubs.h>

EOT

grep -h '^\( *int *\)\?db__driver' *.c | sed \
	-e 's/^\( *int *\)*/int /' \
	-e 's/ *(.*$/();/' > $tmp
cat $tmp >> dbdriver.h

cat <<'EOT' >> dbdriver.h

#define init_dbdriver() do{\
EOT

sed 's/^int *db__\([a-zA-Z_]*\).*$/db_\1 = db__\1;\\/' $tmp >> dbdriver.h

cat <<'EOT'>> dbdriver.h
}while(0)

#endif
EOT

rm $tmp
AsherBond/MondocosmOS
grass_trunk/db/drivers/mk_dbdriver_h.sh
Shell
agpl-3.0
541
#!/bin/bash -e

echo "${NODE}.${duration} sleep: ${duration} sec"
/bin/sleep $duration
alien4cloud/samples
org/alien4cloud/mock/jobs/scripts/operation.sh
Shell
apache-2.0
86