bitcoin/contrib/completions/fish/bitcoin-util.fish
# Disable files from being included in completions by default
complete --command bitcoin-util --no-files
# Extract options
function __fish_bitcoin_util_get_options
set --local cmd (commandline -opc)[1]
set --local options
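# Parse the `-help` output: keep only the option lines, strip the leading
# padding, and truncate any '=<value>' hint to a trailing '=' so that the
# user can still type the value (e.g. '-datadir=<dir>' becomes '-datadir=')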
set --append options ($cmd -help 2>&1 | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=')
for option in $options
echo $option
end
end
# Extract commands
function __fish_bitcoin_util_get_commands
set --local cmd (commandline -opc)[1]
set --local commands
set --append commands ($cmd -help | sed -e '1,/Commands:/d' -e 's/=/=\t/' -e 's/(=/=/' -e '/^ [a-z]/ p' -e d | string replace -r '\ \ ' '')
for command in $commands
echo $command
end
end
# Add options
complete \
--command bitcoin-util \
--condition "not __fish_seen_subcommand_from (__fish_bitcoin_util_get_commands)" \
--arguments "(__fish_bitcoin_util_get_options)"
# Add commands
complete \
--command bitcoin-util \
--condition "not __fish_seen_subcommand_from (__fish_bitcoin_util_get_commands)" \
--arguments "(__fish_bitcoin_util_get_commands)"

bitcoin/contrib/completions/fish/bitcoin-wallet.fish
# Disable files from being included in completions by default
complete --command bitcoin-wallet --no-files
# Extract options
function __fish_bitcoin_wallet_get_options
set --local cmd (commandline -opc)[1]
for option in ($cmd -help 2>&1 | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=')
echo $option
end
end
# Extract commands
function __fish_bitcoin_wallet_get_commands
set --local cmd (commandline -opc)[1]
for command in ($cmd -help | sed -e '1,/Commands:/d' -e 's/=/=\t/' -e 's/(=/=/' -e '/^ [a-z]/ p' -e d | string replace -r '\ \ ' '')
echo $command
end
end
# Add options
complete \
--command bitcoin-wallet \
--condition "not __fish_seen_subcommand_from (__fish_bitcoin_wallet_get_commands)" \
--arguments "(__fish_bitcoin_wallet_get_options)"
# Add commands
complete \
--command bitcoin-wallet \
--condition "not __fish_seen_subcommand_from (__fish_bitcoin_wallet_get_commands)" \
--arguments "(__fish_bitcoin_wallet_get_commands)"
# Add file completions for the dumpfile and datadir options
complete --command bitcoin-wallet \
--condition "string match -r -- '(dumpfile|datadir)=' (commandline -pt)" \
--force-files

bitcoin/contrib/completions/fish/bitcoin-qt.fish
# Disable files from being included in completions by default
complete --command bitcoin-qt --no-files
# Extract options
function __fish_bitcoinqt_get_options
argparse 'nofiles' -- $argv
set --local cmd (commandline -opc)[1]
set --local options
if set -q _flag_nofiles
set --append options ($cmd -help-debug | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=' | string match --invert -r '^.*=$')
else
set --append options ($cmd -help-debug | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=' | string match -r '^.*=$')
end
for option in $options
echo $option
end
end
# Add options with file completion
complete \
--command bitcoin-qt \
--arguments "(__fish_bitcoinqt_get_options)"
# Enable file completions only if the commandline now contains a `*=` style option
complete -c bitcoin-qt \
--condition 'string match --regex -- ".*=" (commandline -pt)' \
--force-files
# Add options without file completion
complete \
--command bitcoin-qt \
--arguments "(__fish_bitcoinqt_get_options --nofiles)"

bitcoin/contrib/completions/fish/bitcoind.fish
# Disable files from being included in completions by default
complete --command bitcoind --no-files
# Extract options
function __fish_bitcoind_get_options
argparse 'nofiles' -- $argv
set --local cmd (commandline -opc)[1]
set --local options
if set -q _flag_nofiles
set --append options ($cmd -help-debug | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=' | string match --invert -r '^.*=$')
else
set --append options ($cmd -help-debug | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=' | string match -r '^.*=$')
end
for option in $options
echo $option
end
end
# Add options with file completion
complete \
--command bitcoind \
--arguments "(__fish_bitcoind_get_options)"
# Enable file completions only if the commandline now contains a `*=` style option
complete --command bitcoind \
--condition 'string match --regex -- ".*=" (commandline -pt)' \
--force-files
# Add options without file completion
complete \
--command bitcoind \
--arguments "(__fish_bitcoind_get_options --nofiles)"

bitcoin/contrib/completions/fish/bitcoin-cli.fish
# Disable files from being included in completions by default
complete --command bitcoin-cli --no-files
function __fish_bitcoin_cli_get_commands_helper
set --local cmd (commandline -oc)
# Don't return commands if '-help' or '-?' is in the commandline
if string match --quiet --regex -- '^-help$|^-\?$' $cmd
return
end
# Strip help cmd from token to avoid duplication errors
set --local cmd (string match --invert --regex -- '^help$' $cmd)
# Strip -stdin* options to avoid waiting for input while we fetch completions
# TODO: this appears to be broken when run as tab completion (requires ctrl+c to exit)
set --local cmd (string match --invert --regex -- '^-stdin.*$' $cmd)
# Match, format and return commands
for command in ($cmd help 2>&1 | string match --invert -r '^\=\=.*' | string match --invert -r '^\\s*$')
echo $command
end
end
function __fish_bitcoin_cli_get_commands
argparse 'nohelp' 'commandsonly' -- $argv
set --local commands
# Exclude description, exclude help
if set -q _flag_nohelp; and set -q _flag_commandsonly
set --append commands (__fish_bitcoin_cli_get_commands_helper | string replace -r ' .*$' '' | string match --invert -r 'help')
# Include description, exclude help
else if set -q _flag_nohelp
set --append commands (__fish_bitcoin_cli_get_commands_helper | string replace ' ' \t | string match --invert -r 'help')
# Exclude description, include help
else if set -q _flag_commandsonly
set --append commands (__fish_bitcoin_cli_get_commands_helper | string replace -r ' .*$' '')
# Include description, include help
else
set --append commands (__fish_bitcoin_cli_get_commands_helper | string replace ' ' \t)
end
if string match -q -r '^.*error.*$' $commands[1]
# RPC offline or RPC wallet not loaded
return
else
for command in $commands
echo $command
end
end
end
function __fish_bitcoin_cli_get_options
argparse 'nofiles' -- $argv
set --local cmd (commandline -oc)
# Don't return options if '-help' or '-?' is in the commandline
if string match --quiet --regex -- '^-help$|^-\?$' $cmd
return
end
set --local options
if set -q _flag_nofiles
set --append options ($cmd -help 2>&1 | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=' | string match --invert -r '^.*=$')
else
set --append options ($cmd -help 2>&1 | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=' | string match -r '^.*=$')
end
for option in $options
echo $option
end
end
# Add options with file completion
# Don't offer after a command is given
complete \
--command bitcoin-cli \
--no-files \
--condition "not __fish_seen_subcommand_from (__fish_bitcoin_cli_get_commands --commandsonly)" \
--arguments "(__fish_bitcoin_cli_get_options)"
# Enable file completions only if the commandline now contains a `*=` style option
complete --command bitcoin-cli \
--condition 'string match --regex -- ".*=" (commandline -pt)' \
--force-files
# Add options without file completion
# Don't offer after a command is given
complete \
--command bitcoin-cli \
--no-files \
--condition "not __fish_seen_subcommand_from (__fish_bitcoin_cli_get_commands --commandsonly)" \
--arguments "(__fish_bitcoin_cli_get_options --nofiles)"
# Add commands
# Permit command completions after `bitcoin-cli help` but not after other commands
complete \
--command bitcoin-cli \
--no-files \
--condition "not __fish_seen_subcommand_from (__fish_bitcoin_cli_get_commands --commandsonly --nohelp)" \
--arguments "(__fish_bitcoin_cli_get_commands)"

bitcoin/contrib/completions/fish/bitcoin-tx.fish
# Disable files from being included in completions by default
complete --command bitcoin-tx --no-files
# Modified version of __fish_seen_subcommand_from
# Uses regex to detect cmd= syntax
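# e.g. for `bitcoin-tx -create in=txid:vout`, the token `in=txid:vout` is
# recognized as the command `in=`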
function __fish_bitcoin_seen_cmd
set -l cmd (commandline -oc)
set -e cmd[1]
for i in $cmd
for j in $argv
if string match --quiet --regex -- "^$j.*" $i
return 0
end
end
end
return 1
end
# Extract options
function __fish_bitcoin_tx_get_options
set --local cmd (commandline -oc)[1]
if string match --quiet --regex -- '^-help$|^-\?$' $cmd
return
end
for option in ($cmd -help 2>&1 | string match -r '^ -.*' | string replace -r ' -' '-' | string replace -r '=.*' '=')
echo $option
end
end
# Extract commands
function __fish_bitcoin_tx_get_commands
argparse 'commandsonly' -- $argv
set --local cmd (commandline -oc)[1]
set --local commands
if set -q _flag_commandsonly
set --append commands ($cmd -help | sed -e '1,/Commands:/d' -e 's/=/=\t/' -e 's/(=/=/' -e '/^ [a-z]/ p' -e d | string replace -r '\ \ ' '' | string replace -r '=.*' '')
else
set --append commands ($cmd -help | sed -e '1,/Commands:/d' -e 's/=/=\t/' -e 's/(=/=/' -e '/^ [a-z]/ p' -e d | string replace -r '\ \ ' '')
end
for command in $commands
echo $command
end
end
# Add options
complete \
--command bitcoin-tx \
--condition "not __fish_bitcoin_seen_cmd (__fish_bitcoin_tx_get_commands --commandsonly)" \
--arguments "(__fish_bitcoin_tx_get_options)" \
--no-files
# Add commands
complete \
--command bitcoin-tx \
--arguments "(__fish_bitcoin_tx_get_commands)" \
--no-files
# Add file completions for load and set commands
complete \
--command bitcoin-tx \
--condition 'string match --regex -- "(load|set)=" (commandline -pt)' \
--force-files

bitcoin/contrib/qos/README.md
### QoS (Quality of service) ###
This is a Linux bash script that sets up tc (traffic control) to limit the outgoing bandwidth of connections to the Bitcoin network. It limits outbound TCP traffic with a source or destination port of 8333, except when the destination IP is within the LAN.
This means one can have an always-on bitcoind instance running, and another local bitcoind/bitcoin-qt instance which connects to this node and receives blocks from it.
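
To apply the rules, adjust `IF`, `LINKCEIL`, `LIMIT` and the `LOCALNET_*` variables in `tc.sh` for your setup, then run the script as root. A typical session (a sketch; `eth0` is just the script's default interface name):

    $ sudo ./tc.sh
    $ tc class show dev eth0                 # verify the htb classes exist
    $ sudo iptables -t mangle -L OUTPUT -v   # verify the packet-marking rules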

bitcoin/contrib/qos/tc.sh
#!/usr/bin/env bash
#
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question
LINKCEIL="1gbit"
#limit outbound Bitcoin protocol traffic to this rate
LIMIT="160kbit"
#defines the IPv4 address space for which you wish to disable rate limiting
LOCALNET_V4="192.168.0.0/16"
#defines the IPv6 address space for which you wish to disable rate limiting
LOCALNET_V6="fe80::/10"
#delete existing rules ('Error: Cannot delete qdisc with handle of zero.' means there weren't any.)
tc qdisc del dev ${IF} root
#add root class
tc qdisc add dev ${IF} root handle 1: htb default 10
#add parent class
tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL}
#add our two classes. one unlimited, another limited
tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0
tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1
#add handles to our classes so packets marked with <x> go into the class with "... handle <x> fw ..."
tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11
if [ -n "${LOCALNET_V6}" ] ; then
# v6 cannot have the same priority value as v4
tc filter add dev ${IF} parent 1: protocol ipv6 prio 3 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ipv6 prio 4 handle 4 fw classid 1:11
fi
#delete any existing rules
#disable for now
#ret=0
#while [ $ret -eq 0 ]; do
# iptables -t mangle -D OUTPUT 1
# ret=$?
#done
#limit outgoing traffic to and from port 8333, but not when dealing with a host on the local network
# (defined by $LOCALNET_V4 and $LOCALNET_V6)
# --set-mark marks packets matching these criteria with the number "2" (v4)
# --set-mark marks packets matching these criteria with the number "4" (v6)
# these packets are filtered by the tc filters with handle 2 (v4) and handle 4 (v6)
# these filters send the packets into the 1:11 class, and that class is limited to ${LIMIT}
iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 8333 ! -d ${LOCALNET_V4} -j MARK --set-mark 0x2
iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 8333 ! -d ${LOCALNET_V4} -j MARK --set-mark 0x2
if [ -n "${LOCALNET_V6}" ] ; then
ip6tables -t mangle -A OUTPUT -p tcp -m tcp --dport 8333 ! -d ${LOCALNET_V6} -j MARK --set-mark 0x4
ip6tables -t mangle -A OUTPUT -p tcp -m tcp --sport 8333 ! -d ${LOCALNET_V6} -j MARK --set-mark 0x4
fi

bitcoin/contrib/windeploy/detached-sig-create.sh
#!/bin/sh
# Copyright (c) 2014-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
if [ -z "$OSSLSIGNCODE" ]; then
OSSLSIGNCODE=osslsigncode
fi
if [ -z "$1" ]; then
echo "usage: $0 <osslcodesign args>"
echo "example: $0 -key codesign.key"
exit 1
fi
OUT=signature-win.tar.gz
SRCDIR=unsigned
WORKDIR=./.tmp
OUTDIR="${WORKDIR}/out"
OUTSUBDIR="${OUTDIR}/win"
TIMESERVER=http://timestamp.comodoca.com
CERTFILE="win-codesign.cert"
mkdir -p "${OUTSUBDIR}"
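# For each unsigned executable: sign it with osslsigncode, extract the
# signature as a detached PEM file into ${OUTSUBDIR}, and discard the
# signed binary afterwards.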
# shellcheck disable=SC2046
basename -a $(ls -1 "${SRCDIR}"/*-unsigned.exe) | while read UNSIGNED; do
echo Signing "${UNSIGNED}"
"${OSSLSIGNCODE}" sign -certs "${CERTFILE}" -t "${TIMESERVER}" -h sha256 -in "${SRCDIR}/${UNSIGNED}" -out "${WORKDIR}/${UNSIGNED}" "$@"
"${OSSLSIGNCODE}" extract-signature -pem -in "${WORKDIR}/${UNSIGNED}" -out "${OUTSUBDIR}/${UNSIGNED}.pem" && rm "${WORKDIR}/${UNSIGNED}"
done
rm -f "${OUT}"
tar -C "${OUTDIR}" -czf "${OUT}" .
rm -rf "${WORKDIR}"
echo "Created ${OUT}"

bitcoin/contrib/windeploy/win-codesign.cert
-----BEGIN CERTIFICATE-----
MIIHfDCCBWSgAwIBAgIQCmVvdQal72U2QxbUTT3SRTANBgkqhkiG9w0BAQsFADBp
MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xQTA/BgNVBAMT
OERpZ2lDZXJ0IFRydXN0ZWQgRzQgQ29kZSBTaWduaW5nIFJTQTQwOTYgU0hBMzg0
IDIwMjEgQ0ExMB4XDTIyMDUyNDAwMDAwMFoXDTI0MDUyOTIzNTk1OVowgYAxCzAJ
BgNVBAYTAlVTMREwDwYDVQQIEwhEZWxhd2FyZTEOMAwGA1UEBxMFTGV3ZXMxJjAk
BgNVBAoTHUJpdGNvaW4gQ29yZSBDb2RlIFNpZ25pbmcgTExDMSYwJAYDVQQDEx1C
aXRjb2luIENvcmUgQ29kZSBTaWduaW5nIExMQzCCAiIwDQYJKoZIhvcNAQEBBQAD
ggIPADCCAgoCggIBALewxfjztuRTDNAGf7zkqqWNEt28CZmVJHoYltVRxtE1BP45
BfmptH5eM1JC/XosTPytHRFeOkO4YVAtiELxK9S/82OZlKA7Mx7PW6vv1184u8+m
P3WpTN/KAZTaW9fB0ELTSCuqsvXq2crM2T7NudJnSyWh2VBjLfPPCAcYwzyGKQbl
jQWjFEJDJWFK83t9mK/v0WQgA3jGJeaz+V6CYXMS7UgpdG8dUhg9o63gYJZAW5pY
RIsNRcJCM5LHhwEMW5329UsTmYCfP7/53emepbQ0n8ijVZjgaJ+LZ8NspBLSeCiF
9UPCKX82uWiQAUTbYHCfSi3I0f3wQidXL9ZY+PXmalM7BMuQ+c2xEcl97CnhrDzx
EBwZvvOC9wGoG+8+epV4TjUZWf+7QN1ZYeg1rai7c7c8u9ILogE8su2xVoz333TH
CDvScIgnQXmk+cbKMBtg9kM0F+aLWsN2xVf0uAj3U7sdXLrfJeW0DZIktWtTBQzX
O/OE4Ka+1WFnDg0HJIih0cTjl9YYvfe53L4pCGy+qGt/XGBRqCMfXp3g+H9FGR5r
pensVVcsrv3GbTfYdlpdmp9OHH5G57GTAZueobCZg7r7RKK0zPU9EiTLJxzyXuai
v/Ksd8eIhHRjewMaQuAtQM1tO+oKAbLF0v2M7v7/aVT76X32JllYAizm3zjvAgMB
AAGjggIGMIICAjAfBgNVHSMEGDAWgBRoN+Drtjv4XxGG+/5hewiIZfROQjAdBgNV
HQ4EFgQUvCpU58PIuofv0kHJ3Ty0YDKEy3cwDgYDVR0PAQH/BAQDAgeAMBMGA1Ud
JQQMMAoGCCsGAQUFBwMDMIG1BgNVHR8Ega0wgaowU6BRoE+GTWh0dHA6Ly9jcmwz
LmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydFRydXN0ZWRHNENvZGVTaWduaW5nUlNBNDA5
NlNIQTM4NDIwMjFDQTEuY3JsMFOgUaBPhk1odHRwOi8vY3JsNC5kaWdpY2VydC5j
b20vRGlnaUNlcnRUcnVzdGVkRzRDb2RlU2lnbmluZ1JTQTQwOTZTSEEzODQyMDIx
Q0ExLmNybDA+BgNVHSAENzA1MDMGBmeBDAEEATApMCcGCCsGAQUFBwIBFhtodHRw
Oi8vd3d3LmRpZ2ljZXJ0LmNvbS9DUFMwgZQGCCsGAQUFBwEBBIGHMIGEMCQGCCsG
AQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20wXAYIKwYBBQUHMAKGUGh0
dHA6Ly9jYWNlcnRzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2VydFRydXN0ZWRHNENvZGVT
aWduaW5nUlNBNDA5NlNIQTM4NDIwMjFDQTEuY3J0MAwGA1UdEwEB/wQCMAAwDQYJ
KoZIhvcNAQELBQADggIBABhpTZufRws1vrtI0xB1/UWrSEJxdPHivfpXE708dzum
Jh3TFzpsEUCQX5BJJet1l7x92sKNeAL7votA+8O8YvMD64Kim7VKA2BB8AOHKQbp
r1c2iZBwwofInviRYvsrvQta6KBy2KOe1L/l0KnpUazL9Tv4VKvuWAw/Qc0/eTQr
NZRsmADORxnZ1qW+SpF+/WbazIYjod/Oqb1U3on+PzyiGD3SjzNhsdFRptqzrIaY
UVV+2XHG4fN6A8wkyQL5NIVXGiK7rqS5VrRAv58Lf1ZZTghdAL+5SySE0OsR9t0K
W73ZB9pxbuZZ6Zfxjotjw+IilCEm3ADbc7Eb2ijI4x8mix0XWMUrhL34s7/jRyDi
P+30aSgjWp611tp/EYRW5kpIaFR8AesDdM0DSSCCRXOMwQG2Tq2+CnqItB5oLNPp
2XySwlIWvmjbzsREfIpE3yh3bxmHY+vFIc2R0nNkbWNIT6AGtaEQ7oWkgpK8YMkA
QCf4EUC4Qa7qHiH6YSmYJhjApBLC7UDwevgwxuDrwimWAj+tDkzdnENMcBp4SAy6
LwUuDi2IU6HRSXWdh2YEkDbc3FdwknnnEWaB4dlRL85YjHyLXN0KiE7SKTj1LfR4
dGeDqVUlDj9D5+X4a7F89wLP/um40/52HUQv5t5WcNr/47r9aVkx9DHs1b8oUnLg
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIGsDCCBJigAwIBAgIQCK1AsmDSnEyfXs2pvZOu2TANBgkqhkiG9w0BAQwFADBi
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg
RzQwHhcNMjEwNDI5MDAwMDAwWhcNMzYwNDI4MjM1OTU5WjBpMQswCQYDVQQGEwJV
UzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xQTA/BgNVBAMTOERpZ2lDZXJ0IFRy
dXN0ZWQgRzQgQ29kZSBTaWduaW5nIFJTQTQwOTYgU0hBMzg0IDIwMjEgQ0ExMIIC
IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1bQvQtAorXi3XdU5WRuxiEL1
M4zrPYGXcMW7xIUmMJ+kjmjYXPXrNCQH4UtP03hD9BfXHtr50tVnGlJPDqFX/IiZ
wZHMgQM+TXAkZLON4gh9NH1MgFcSa0OamfLFOx/y78tHWhOmTLMBICXzENOLsvsI
8IrgnQnAZaf6mIBJNYc9URnokCF4RS6hnyzhGMIazMXuk0lwQjKP+8bqHPNlaJGi
TUyCEUhSaN4QvRRXXegYE2XFf7JPhSxIpFaENdb5LpyqABXRN/4aBpTCfMjqGzLm
ysL0p6MDDnSlrzm2q2AS4+jWufcx4dyt5Big2MEjR0ezoQ9uo6ttmAaDG7dqZy3S
vUQakhCBj7A7CdfHmzJawv9qYFSLScGT7eG0XOBv6yb5jNWy+TgQ5urOkfW+0/tv
k2E0XLyTRSiDNipmKF+wc86LJiUGsoPUXPYVGUztYuBeM/Lo6OwKp7ADK5GyNnm+
960IHnWmZcy740hQ83eRGv7bUKJGyGFYmPV8AhY8gyitOYbs1LcNU9D4R+Z1MI3s
MJN2FKZbS110YU0/EpF23r9Yy3IQKUHw1cVtJnZoEUETWJrcJisB9IlNWdt4z4FK
PkBHX8mBUHOFECMhWWCKZFTBzCEa6DgZfGYczXg4RTCZT/9jT0y7qg0IU0F8WD1H
s/q27IwyCQLMbDwMVhECAwEAAaOCAVkwggFVMBIGA1UdEwEB/wQIMAYBAf8CAQAw
HQYDVR0OBBYEFGg34Ou2O/hfEYb7/mF7CIhl9E5CMB8GA1UdIwQYMBaAFOzX44LS
cV1kTN8uZz/nupiuHA9PMA4GA1UdDwEB/wQEAwIBhjATBgNVHSUEDDAKBggrBgEF
BQcDAzB3BggrBgEFBQcBAQRrMGkwJAYIKwYBBQUHMAGGGGh0dHA6Ly9vY3NwLmRp
Z2ljZXJ0LmNvbTBBBggrBgEFBQcwAoY1aHR0cDovL2NhY2VydHMuZGlnaWNlcnQu
Y29tL0RpZ2lDZXJ0VHJ1c3RlZFJvb3RHNC5jcnQwQwYDVR0fBDwwOjA4oDagNIYy
aHR0cDovL2NybDMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0VHJ1c3RlZFJvb3RHNC5j
cmwwHAYDVR0gBBUwEzAHBgVngQwBAzAIBgZngQwBBAEwDQYJKoZIhvcNAQEMBQAD
ggIBADojRD2NCHbuj7w6mdNW4AIapfhINPMstuZ0ZveUcrEAyq9sMCcTEp6QRJ9L
/Z6jfCbVN7w6XUhtldU/SfQnuxaBRVD9nL22heB2fjdxyyL3WqqQz/WTauPrINHV
UHmImoqKwba9oUgYftzYgBoRGRjNYZmBVvbJ43bnxOQbX0P4PpT/djk9ntSZz0rd
KOtfJqGVWEjVGv7XJz/9kNF2ht0csGBc8w2o7uCJob054ThO2m67Np375SFTWsPK
6Wrxoj7bQ7gzyE84FJKZ9d3OVG3ZXQIUH0AzfAPilbLCIXVzUstG2MQ0HKKlS43N
b3Y3LIU/Gs4m6Ri+kAewQ3+ViCCCcPDMyu/9KTVcH4k4Vfc3iosJocsL6TEa/y4Z
XDlx4b6cpwoG1iZnt5LmTl/eeqxJzy6kdJKt2zyknIYf48FWGysj/4+16oh7cGvm
oLr9Oj9FpsToFpFSi0HASIRLlk2rREDjjfAVKM7t8RhWByovEMQMCGQ8M4+uKIw8
y4+ICw2/O/TOHnuO77Xry7fwdxPm5yg/rBKupS8ibEH5glwVZsxsDsrFhsP2JjMM
B0ug0wcCampAMEhLNKhRILutG4UI4lkNbcoFUCvqShyepf2gpx8GdOfy1lKQ/a+F
SCH5Vzu0nAPthkX0tGFuv2jiJmCG6sivqf6UHedjGzqGVnhO
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg
RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV
UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y
ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If
xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV
ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO
DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ
jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/
CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi
EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM
fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY
uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK
chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t
9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2
SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd
+SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc
fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa
sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N
cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N
0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie
4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI
r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1
/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm
gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+
-----END CERTIFICATE-----

bitcoin/contrib/message-capture/message-capture-parser.py
#!/usr/bin/env python3
# Copyright (c) 2020-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Parse message capture binary files. To be used in conjunction with -capturemessages."""
import argparse
import os
import shutil
import sys
from io import BytesIO
import json
from pathlib import Path
from typing import Any, Optional
sys.path.append(os.path.join(os.path.dirname(__file__), '../../test/functional'))
from test_framework.messages import ser_uint256 # noqa: E402
from test_framework.p2p import MESSAGEMAP # noqa: E402
TIME_SIZE = 8
LENGTH_SIZE = 4
MSGTYPE_SIZE = 12
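# Each record in a capture file is laid out as: an 8-byte little-endian
# timestamp, a 12-byte zero-padded message type, a 4-byte little-endian
# payload length, and then the raw message payload itself.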
# The test framework classes store hashes as large ints in many cases.
# These are variables of type uint256 in core.
# There isn't a way to distinguish between a large int and a large int that is actually a blob of bytes.
# As such, they are itemized here.
# Any variables with these names that are of type int are actually uint256 variables.
# (These can be easily found by looking for calls to deser_uint256, deser_uint256_vector, and uint256_from_str in messages.py)
HASH_INTS = [
"blockhash",
"block_hash",
"hash",
"hashMerkleRoot",
"hashPrevBlock",
"hashstop",
"prev_header",
"sha256",
"stop_hash",
]
HASH_INT_VECTORS = [
"hashes",
"headers",
"vHave",
"vHash",
]
class ProgressBar:
def __init__(self, total: float):
self.total = total
self.running = 0
def set_progress(self, progress: float):
cols = shutil.get_terminal_size()[0]
if cols <= 12:
return
max_blocks = cols - 9
num_blocks = int(max_blocks * progress)
print('\r[ {}{} ] {:3.0f}%'
.format('#' * num_blocks,
' ' * (max_blocks - num_blocks),
progress * 100),
end ='')
def update(self, more: float):
self.running += more
self.set_progress(self.running / self.total)
def to_jsonable(obj: Any) -> Any:
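# Recursively convert a deserialized message object into something
# json.dumps can handle: objects become dicts (via __dict__ or __slots__),
# ints named in HASH_INTS / HASH_INT_VECTORS are re-serialized as uint256
# hex strings, bytes become hex strings, and lists are converted element-wise.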
if hasattr(obj, "__dict__"):
return obj.__dict__
elif hasattr(obj, "__slots__"):
ret = {} # type: Any
for slot in obj.__slots__:
val = getattr(obj, slot, None)
if slot in HASH_INTS and isinstance(val, int):
ret[slot] = ser_uint256(val).hex()
elif slot in HASH_INT_VECTORS and all(isinstance(a, int) for a in val):
ret[slot] = [ser_uint256(a).hex() for a in val]
else:
ret[slot] = to_jsonable(val)
return ret
elif isinstance(obj, list):
return [to_jsonable(a) for a in obj]
elif isinstance(obj, bytes):
return obj.hex()
else:
return obj
def process_file(path: str, messages: list[Any], recv: bool, progress_bar: Optional[ProgressBar]) -> None:
with open(path, 'rb') as f_in:
if progress_bar:
bytes_read = 0
while True:
if progress_bar:
# Update progress bar
diff = f_in.tell() - bytes_read - 1
progress_bar.update(diff)
bytes_read = f_in.tell() - 1
# Read the Header
tmp_header_raw = f_in.read(TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE)
if not tmp_header_raw:
break
tmp_header = BytesIO(tmp_header_raw)
time = int.from_bytes(tmp_header.read(TIME_SIZE), "little") # type: int
msgtype = tmp_header.read(MSGTYPE_SIZE).split(b'\x00', 1)[0] # type: bytes
length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") # type: int
# Start converting the message to a dictionary
msg_dict = {}
msg_dict["direction"] = "recv" if recv else "sent"
msg_dict["time"] = time
msg_dict["size"] = length # "size" is less readable here, but more readable in the output
msg_ser = BytesIO(f_in.read(length))
# Determine message type
if msgtype not in MESSAGEMAP:
# Unrecognized message type
try:
msgtype_tmp = msgtype.decode()
if not msgtype_tmp.isprintable():
raise UnicodeDecodeError("ascii", msgtype, 0, len(msgtype), "msgtype is not printable")
msg_dict["msgtype"] = msgtype_tmp
except UnicodeDecodeError:
msg_dict["msgtype"] = "UNREADABLE"
msg_dict["body"] = msg_ser.read().hex()
msg_dict["error"] = "Unrecognized message type."
messages.append(msg_dict)
print(f"WARNING - Unrecognized message type {msgtype} in {path}", file=sys.stderr)
continue
# Deserialize the message
msg = MESSAGEMAP[msgtype]()
msg_dict["msgtype"] = msgtype.decode()
try:
msg.deserialize(msg_ser)
except KeyboardInterrupt:
raise
except Exception:
# Unable to deserialize message body
msg_ser.seek(0, os.SEEK_SET)
msg_dict["body"] = msg_ser.read().hex()
msg_dict["error"] = "Unable to deserialize message."
messages.append(msg_dict)
print(f"WARNING - Unable to deserialize message in {path}", file=sys.stderr)
continue
# Convert body of message into a jsonable object
if length:
msg_dict["body"] = to_jsonable(msg)
messages.append(msg_dict)
if progress_bar:
# Update the progress bar to the end of the current file
# in case we exited the loop early
f_in.seek(0, os.SEEK_END) # Go to end of file
diff = f_in.tell() - bytes_read - 1
progress_bar.update(diff)
def main():
parser = argparse.ArgumentParser(
description=__doc__,
epilog="EXAMPLE \n\t{0} -o out.json <data-dir>/message_capture/**/*.dat".format(sys.argv[0]),
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"capturepaths",
nargs='+',
help="binary message capture files to parse.")
parser.add_argument(
"-o", "--output",
help="output file. If unset print to stdout")
parser.add_argument(
"-n", "--no-progress-bar",
action='store_true',
help="disable the progress bar. Automatically set if the output is not a terminal")
args = parser.parse_args()
capturepaths = [Path.cwd() / Path(capturepath) for capturepath in args.capturepaths]
output = Path.cwd() / Path(args.output) if args.output else False
use_progress_bar = (not args.no_progress_bar) and sys.stdout.isatty()
messages = [] # type: list[Any]
if use_progress_bar:
total_size = sum(capture.stat().st_size for capture in capturepaths)
progress_bar = ProgressBar(total_size)
else:
progress_bar = None
for capture in capturepaths:
process_file(str(capture), messages, "recv" in capture.stem, progress_bar)
messages.sort(key=lambda msg: msg['time'])
if use_progress_bar:
progress_bar.set_progress(1)
jsonrep = json.dumps(messages)
if output:
with open(str(output), 'w+', encoding="utf8") as f_out:
f_out.write(jsonrep)
else:
print(jsonrep)
if __name__ == "__main__":
main()

bitcoin/contrib/message-capture/message-capture-docs.md
# Per-Peer Message Capture
## Purpose
This feature allows for message capture on a per-peer basis. It answers the simple question: "Can I see what messages my node is sending and receiving?"
## Usage and Functionality
* Run `bitcoind` with the `-capturemessages` option.
* Look in the `message_capture` folder in your datadir.
* Typically this will be `~/.bitcoin/message_capture`.
* See that there are many folders inside, one for each peer, named with its IP address and port.
* Inside each peer's folder there are two `.dat` files: one is for received messages (`msgs_recv.dat`) and the other is for sent messages (`msgs_sent.dat`).
* Run `contrib/message-capture/message-capture-parser.py` with the proper arguments.
* See the `-h` option for help.
* To see all messages, both sent and received, for all peers use:
```
./contrib/message-capture/message-capture-parser.py -o out.json \
~/.bitcoin/message_capture/**/*.dat
```
* Note: The messages from all given `.dat` files will be interleaved in chronological order, so passing both received and sent files (as above with `*.dat`) produces a single, chronologically ordered stream of messages.
* If an output file is not provided (i.e. the `-o` option is not used), then the output prints to `stdout`.
* View the resulting output.
* The output file is `JSON` formatted.
* Suggestion: use `jq` to view the output, with `jq . out.json`
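* For example, one way to count the captured messages by type (a sketch using plain `jq`, assuming the `out.json` produced above):
```
jq '[.[] | .msgtype] | group_by(.) | map({(.[0]): length}) | add' out.json
```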

bitcoin/contrib/testgen/gen_key_io_test_vectors.py
#!/usr/bin/env python3
# Copyright (c) 2012-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Generate valid and invalid base58/bech32(m) address and private key test vectors.
'''
from itertools import islice
import os
import random
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../../test/functional'))
from test_framework.address import base58_to_byte, byte_to_base58, b58chars # noqa: E402
from test_framework.script import OP_0, OP_1, OP_2, OP_3, OP_16, OP_DUP, OP_EQUAL, OP_EQUALVERIFY, OP_HASH160, OP_CHECKSIG # noqa: E402
from test_framework.segwit_addr import bech32_encode, decode_segwit_address, convertbits, CHARSET, Encoding # noqa: E402
# key types
PUBKEY_ADDRESS = 0
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PUBKEY_ADDRESS_REGTEST = 111
SCRIPT_ADDRESS_REGTEST = 196
PRIVKEY = 128
PRIVKEY_TEST = 239
PRIVKEY_REGTEST = 239
# script
pubkey_prefix = (OP_DUP, OP_HASH160, 20)
pubkey_suffix = (OP_EQUALVERIFY, OP_CHECKSIG)
script_prefix = (OP_HASH160, 20)
script_suffix = (OP_EQUAL,)
p2wpkh_prefix = (OP_0, 20)
p2wsh_prefix = (OP_0, 32)
p2tr_prefix = (OP_1, 32)
metadata_keys = ['isPrivkey', 'chain', 'isCompressed', 'tryCaseFlip']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata, output_prefix, output_suffix
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, 'main', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS,), 20, (), (False, 'main', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'test', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'test', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'signet', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'signet', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), script_prefix, script_suffix),
((PRIVKEY,), 32, (), (True, 'main', False, None), (), ()),
((PRIVKEY,), 32, (1,), (True, 'main', True, None), (), ()),
((PRIVKEY_TEST,), 32, (), (True, 'test', False, None), (), ()),
((PRIVKEY_TEST,), 32, (1,), (True, 'test', True, None), (), ()),
((PRIVKEY_TEST,), 32, (), (True, 'signet', False, None), (), ()),
((PRIVKEY_TEST,), 32, (1,), (True, 'signet', True, None), (), ()),
((PRIVKEY_REGTEST,), 32, (), (True, 'regtest', False, None), (), ()),
((PRIVKEY_REGTEST,), 32, (1,), (True, 'regtest', True, None), (), ())
]
# templates for valid bech32 sequences
bech32_templates = [
# hrp, version, witprog_size, metadata, encoding, output_prefix
('bc', 0, 20, (False, 'main', None, True), Encoding.BECH32, p2wpkh_prefix),
('bc', 0, 32, (False, 'main', None, True), Encoding.BECH32, p2wsh_prefix),
('bc', 1, 32, (False, 'main', None, True), Encoding.BECH32M, p2tr_prefix),
('bc', 2, 2, (False, 'main', None, True), Encoding.BECH32M, (OP_2, 2)),
('tb', 0, 20, (False, 'test', None, True), Encoding.BECH32, p2wpkh_prefix),
('tb', 0, 32, (False, 'test', None, True), Encoding.BECH32, p2wsh_prefix),
('tb', 1, 32, (False, 'test', None, True), Encoding.BECH32M, p2tr_prefix),
('tb', 3, 16, (False, 'test', None, True), Encoding.BECH32M, (OP_3, 16)),
('tb', 0, 20, (False, 'signet', None, True), Encoding.BECH32, p2wpkh_prefix),
('tb', 0, 32, (False, 'signet', None, True), Encoding.BECH32, p2wsh_prefix),
('tb', 1, 32, (False, 'signet', None, True), Encoding.BECH32M, p2tr_prefix),
('tb', 3, 32, (False, 'signet', None, True), Encoding.BECH32M, (OP_3, 32)),
('bcrt', 0, 20, (False, 'regtest', None, True), Encoding.BECH32, p2wpkh_prefix),
('bcrt', 0, 32, (False, 'regtest', None, True), Encoding.BECH32, p2wsh_prefix),
('bcrt', 1, 32, (False, 'regtest', None, True), Encoding.BECH32M, p2tr_prefix),
('bcrt', 16, 40, (False, 'regtest', None, True), Encoding.BECH32M, (OP_16, 40))
]
# templates for invalid bech32 sequences
bech32_ng_templates = [
# hrp, version, witprog_size, encoding, invalid_bech32, invalid_checksum, invalid_char
('tc', 0, 20, Encoding.BECH32, False, False, False),
('bt', 1, 32, Encoding.BECH32M, False, False, False),
('tb', 17, 32, Encoding.BECH32M, False, False, False),
('bcrt', 3, 1, Encoding.BECH32M, False, False, False),
('bc', 15, 41, Encoding.BECH32M, False, False, False),
('tb', 0, 16, Encoding.BECH32, False, False, False),
('bcrt', 0, 32, Encoding.BECH32, True, False, False),
('bc', 0, 16, Encoding.BECH32, True, False, False),
('tb', 0, 32, Encoding.BECH32, False, True, False),
('bcrt', 0, 20, Encoding.BECH32, False, False, True),
('bc', 0, 20, Encoding.BECH32M, False, False, False),
('tb', 0, 32, Encoding.BECH32M, False, False, False),
('bcrt', 0, 20, Encoding.BECH32M, False, False, False),
('bc', 1, 32, Encoding.BECH32, False, False, False),
('tb', 2, 16, Encoding.BECH32, False, False, False),
('bcrt', 16, 20, Encoding.BECH32, False, False, False),
]
def is_valid(v):
'''Check vector v for validity'''
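# Any character outside the base58 alphabet means the vector cannot be
# base58 at all, so only the bech32(m) check applies.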
if len(set(v) - set(b58chars)) > 0:
return is_valid_bech32(v)
try:
payload, version = base58_to_byte(v)
result = bytes([version]) + payload
except ValueError: # thrown if checksum doesn't match
return is_valid_bech32(v)
for template in templates:
prefix = bytearray(template[0])
suffix = bytearray(template[2])
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return is_valid_bech32(v)
def is_valid_bech32(v):
'''Check vector v for bech32 validity'''
for hrp in ['bc', 'tb', 'bcrt']:
if decode_segwit_address(hrp, v) != (None, None):
return True
return False
def gen_valid_base58_vector(template):
'''Generate valid base58 vector'''
prefix = bytearray(template[0])
payload = rand_bytes(size=template[1])
suffix = bytearray(template[2])
dst_prefix = bytearray(template[4])
dst_suffix = bytearray(template[5])
assert len(prefix) == 1
rv = byte_to_base58(payload + suffix, prefix[0])
return rv, dst_prefix + payload + dst_suffix
def gen_valid_bech32_vector(template):
'''Generate valid bech32 vector'''
hrp = template[0]
witver = template[1]
witprog = rand_bytes(size=template[2])
encoding = template[4]
dst_prefix = bytearray(template[5])
rv = bech32_encode(encoding, hrp, [witver] + convertbits(witprog, 8, 5))
return rv, dst_prefix + witprog
def gen_valid_vectors():
'''Generate valid test vectors'''
glist = [gen_valid_base58_vector, gen_valid_bech32_vector]
tlist = [templates, bech32_templates]
while True:
for template, valid_vector_generator in [(t, g) for g, l in zip(glist, tlist) for t in l]:
rv, payload = valid_vector_generator(template)
assert is_valid(rv)
metadata = {x: y for x, y in zip(metadata_keys,template[3]) if y is not None}
hexrepr = payload.hex()
yield (rv, hexrepr, metadata)
def gen_invalid_base58_vector(template):
'''Generate possibly invalid vector'''
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
corrupt_prefix = randbool(0.2)
randomize_payload_size = randbool(0.2)
corrupt_suffix = randbool(0.2)
if corrupt_prefix:
prefix = rand_bytes(size=1)
else:
prefix = bytearray(template[0])
if randomize_payload_size:
payload = rand_bytes(size=max(int(random.expovariate(0.5)), 50))
else:
payload = rand_bytes(size=template[1])
if corrupt_suffix:
suffix = rand_bytes(size=len(template[2]))
else:
suffix = bytearray(template[2])
assert len(prefix) == 1
val = byte_to_base58(payload + suffix, prefix[0])
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
return val
def gen_invalid_bech32_vector(template):
'''Generate possibly invalid bech32 vector'''
no_data = randbool(0.1)
to_upper = randbool(0.1)
hrp = template[0]
witver = template[1]
witprog = rand_bytes(size=template[2])
encoding = template[3]
if no_data:
rv = bech32_encode(encoding, hrp, [])
else:
data = [witver] + convertbits(witprog, 8, 5)
if template[4] and not no_data:
if template[2] % 5 in {2, 4}:
data[-1] |= 1
else:
data.append(0)
rv = bech32_encode(encoding, hrp, data)
if template[5]:
i = len(rv) - random.randrange(1, 7)
rv = rv[:i] + random.choice(CHARSET.replace(rv[i], '')) + rv[i + 1:]
if template[6]:
i = len(hrp) + 1 + random.randrange(0, len(rv) - len(hrp) - 4)
rv = rv[:i] + rv[i:i + 4].upper() + rv[i + 4:]
if to_upper:
rv = rv.swapcase()
return rv
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def rand_bytes(*, size):
return bytearray(random.getrandbits(8) for _ in range(size))
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
glist = [gen_invalid_base58_vector, gen_invalid_bech32_vector]
tlist = [templates, bech32_ng_templates]
while True:
for template, invalid_vector_generator in [(t, g) for g, l in zip(glist, tlist) for t in l]:
val = invalid_vector_generator(template)
if not is_valid(val):
yield val,
if __name__ == '__main__':
import json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
random.seed(42)
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')

bitcoin/contrib/testgen/README.md
### TestGen ###
Utilities to generate test vectors for the data-driven Bitcoin tests.
To use inside a scripted-diff (or just execute directly):
./gen_key_io_test_vectors.py valid 70 > ../../src/test/data/key_io_valid.json
./gen_key_io_test_vectors.py invalid 70 > ../../src/test/data/key_io_invalid.json

bitcoin/contrib/linearize/linearize-hashes.py
#!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from http.client import HTTPConnection
import json
import re
import base64
import sys
import os
import os.path
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
authpair = authpair.encode('utf-8')
self.authhdr = b"Basic " + base64.b64encode(authpair)
self.conn = HTTPConnection(host, port=port, timeout=30)
def execute(self, obj):
try:
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
except ConnectionRefusedError:
print('RPC connection refused. Check RPC settings and the server status.',
file=sys.stderr)
return None
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read().decode('utf-8')
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
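# Fetch hashes in chunks: each iteration sends a single JSON-RPC batch of
# up to max_blocks_per_call getblockhash requests, then prints the replies
# in height order.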
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
sys.exit(1)
assert resp_obj['id'] == x # assume replies are in-sequence
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = bytes.fromhex(resp_obj['result'])[::-1].hex()
print(resp_obj['result'])
height += num_blocks
def get_rpc_cookie():
# Open the cookie file
with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r', encoding="ascii") as f:
combined = f.readline()
combined_split = combined.split(":")
settings['rpcuser'] = combined_split[0]
settings['rpcpassword'] = combined_split[1]
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
with open(sys.argv[1], encoding="utf8") as f:
for line in f:
# skip comment lines
m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
use_userpass = True
use_datadir = False
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
use_userpass = False
if 'datadir' in settings and not use_userpass:
use_datadir = True
if not use_userpass and not use_datadir:
print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
# Get the rpc user and pass from the cookie if the datadir is set
if use_datadir:
get_rpc_cookie()
get_block_hashes(settings)

bitcoin/contrib/linearize/example-linearize.cfg
# bitcoind RPC settings (linearize-hashes)
rpcuser=someuser
rpcpassword=somepassword
#datadir=~/.bitcoin
host=127.0.0.1
#mainnet default
port=8332
#testnet default
#port=18332
#regtest default
#port=18443
#signet default
#port=38332
# bootstrap.dat hashlist settings (linearize-hashes)
max_height=313000
# bootstrap.dat input/output settings (linearize-data)
# mainnet
netmagic=f9beb4d9
genesis=000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
input=/home/example/.bitcoin/blocks
# testnet
#netmagic=0b110907
#genesis=000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943
#input=/home/example/.bitcoin/testnet3/blocks
# regtest
#netmagic=fabfb5da
#genesis=0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206
#input=/home/example/.bitcoin/regtest/blocks
# signet
#netmagic=0a03cf40
#genesis=00000008819873e925422c1ff0f99f7cc9bbb232af63a077a480a3633bee1ef6
#input=/home/example/.bitcoin/signet/blocks
# "output" option causes blockchain files to be written to the given location,
# with "output_file" ignored. If not used, "output_file" is used instead.
# output=/home/example/blockchain_directory
output_file=/home/example/Downloads/bootstrap.dat
hashlist=hashlist.txt
# Maximum size in bytes of out-of-order blocks cache in memory
out_of_order_cache_sz = 100000000
# Do we want to reverse the hash bytes coming from getblockhash?
rev_hash_bytes = False
# On a new month, do we want to set the access and modify times of the new
# blockchain file?
file_timestamp = 0
# Do we want to split the blockchain files given a new month or specific height?
split_timestamp = 0
# Do we want debug printouts?
debug_output = False

bitcoin/contrib/linearize/linearize-data.py
#!/usr/bin/env python3
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import struct
import re
import os
import os.path
import sys
import hashlib
import datetime
import time
import glob
from collections import namedtuple
settings = {}
def calc_hash_str(blk_hdr):
blk_hdr_hash = hashlib.sha256(hashlib.sha256(blk_hdr).digest()).digest()
return blk_hdr_hash[::-1].hex()
def get_blk_dt(blk_hdr):
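# The timestamp (nTime) sits at byte offset 68 of the 80-byte block header
# (version 4 || prev-hash 32 || merkle-root 32 || time 4 || bits 4 || nonce 4).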
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
blkindex = []
with open(settings['hashlist'], "r", encoding="utf8") as f:
for line in f:
line = line.rstrip()
if settings['rev_hash_bytes'] == 'true':
line = bytes.fromhex(line)[::-1].hex()
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# This gets the first block file ID that exists from the input block
# file directory.
def getFirstBlockFileId(block_dir_path):
# First, this sets up a pattern to search for block files, for
# example 'blkNNNNN.dat'.
blkFilePattern = os.path.join(block_dir_path, "blk[0-9][0-9][0-9][0-9][0-9].dat")
# This search is done with glob
blkFnList = glob.glob(blkFilePattern)
if len(blkFnList) == 0:
print("blocks not pruned - starting at 0")
return 0
# We then get the lexicographic minimum, which should be the first
# block file name.
firstBlkFilePath = min(blkFnList)
firstBlkFn = os.path.basename(firstBlkFilePath)
# now, the string should be ['b','l','k','N','N','N','N','N','.','d','a','t']
# So get the ID by choosing: 3 4 5 6 7
# The ID is not necessarily 0 if this is a pruned node.
blkId = int(firstBlkFn[3:8])
return blkId
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
# Get first occurring block file id - for pruned nodes this
# will not necessarily be 0
self.inFn = getFirstBlockFileId(self.settings['input'])
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
self.lastDate = blkDate
if self.outF:
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
self.outFname = self.settings['output_file']
else:
self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + self.outFname)
self.outF = open(self.outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
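# Each record in a blk*.dat input file is: 4 bytes of network magic, a
# 4-byte little-endian total block size, then the serialized block itself
# (the 80-byte header followed by the transactions).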
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == 0)):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
# Seek backwards 7 bytes (skipping the first byte in the previous search)
# and continue searching from the new position if the magic bytes are not
# found.
self.inF.seek(-7, os.SEEK_CUR)
continue
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
self.hash_str = calc_hash_str(blk_hdr)
if self.hash_str not in self.blkmap:
# Because blocks can be written to files out-of-order as of 0.10, the script
# may encounter blocks it doesn't know about. Treat as debug output.
if settings['debug_output'] == 'true':
print("Skipping unknown block " + self.hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[self.hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
with open(sys.argv[1], encoding="utf8") as f:
for line in f:
# skip comment lines
m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
# Force hash byte format setting to be lowercase to make comparisons easier.
# Also place upfront in case any settings need to know about it.
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'genesis' not in settings:
settings['genesis'] = '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000 * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
if 'debug_output' not in settings:
settings['debug_output'] = 'false'
settings['max_out_sz'] = int(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = bytes.fromhex(settings['netmagic'])
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
settings['debug_output'] = settings['debug_output'].lower()
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
# Block hash map won't be byte-reversed. Neither should the genesis hash.
if not settings['genesis'] in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()

bitcoin/contrib/linearize/README.md
# Linearize
Construct a linear, no-fork, best version of the Bitcoin blockchain.
## Step 1: Download hash list
$ ./linearize-hashes.py linearize.cfg > hashlist.txt
Required configuration file settings for linearize-hashes:
* RPC: `datadir` (Required if `rpcuser` and `rpcpassword` are not specified)
* RPC: `rpcuser`, `rpcpassword` (Required if `datadir` is not specified)
Optional config file setting for linearize-hashes:
* RPC: `host` (Default: `127.0.0.1`)
* RPC: `port` (Default: `8332`)
* Blockchain: `min_height`, `max_height`
* `rev_hash_bytes`: If true, the written block hash list will be
byte-reversed. (In other words, the hash returned by getblockhash will have its
bytes reversed.) False by default. Intended for generation of
standalone hash lists but safe to use with linearize-data.py, which will output
the same data no matter which byte format is chosen.
The `linearize-hashes` script requires a connection, local or remote, to a
JSON-RPC server. Running `bitcoind` or `bitcoin-qt -server` will be sufficient.
## Step 2: Copy local block data
$ ./linearize-data.py linearize.cfg
Required configuration file settings:
* `output_file`: The file that will contain the final blockchain.
or
* `output`: Output directory for linearized `blocks/blkNNNNN.dat` output.
Optional config file setting for linearize-data:
* `debug_output`: Some printouts may not always be desired. If true, such output
will be printed.
* `file_timestamp`: Set each file's last-accessed and last-modified times,
respectively, to the current time and to the timestamp of the most recent block
written to the script's blockchain.
* `genesis`: The hash of the genesis block in the blockchain.
* `input`: bitcoind blocks/ directory containing blkNNNNN.dat
* `hashlist`: text file containing list of block hashes created by
linearize-hashes.py.
* `max_out_sz`: Maximum size for files created by the `output_file` option.
(Default: `1000*1000*1000 bytes`)
* `netmagic`: Network magic number.
* `out_of_order_cache_sz`: If out-of-order blocks are being read, the block can
be written to a cache so that the blockchain doesn't have to be sought again.
This option specifies the cache size. (Default: `100*1000*1000 bytes`)
* `rev_hash_bytes`: If true, the block hash list written by linearize-hashes.py
will be byte-reversed when read by linearize-data.py. See the linearize-hashes
entry for more information.
* `split_timestamp`: Split blockchain files when a new month is first seen, in
addition to reaching a maximum file size (`max_out_sz`).
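## Example configuration

A minimal `linearize.cfg` combining the settings above might look like the
following sketch (paths, credentials, and heights are illustrative; both
scripts read plain `key=value` lines):

    # linearize-hashes.py settings
    rpcuser=someuser
    rpcpassword=somepassword
    host=127.0.0.1
    port=8332
    min_height=0
    max_height=313000

    # linearize-data.py settings
    netmagic=f9beb4d9
    genesis=000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
    input=/home/example/.bitcoin/blocks
    hashlist=hashlist.txt
    output_file=/home/example/bootstrap.dat
    out_of_order_cache_sz=100000000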
| 0 |
bitcoin/contrib | bitcoin/contrib/verify-commits/gpg.sh | #!/bin/sh
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
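# This script is used as a gpg wrapper (git -c gpg.program=.../gpg.sh
# verify-commit ...) by verify-commits.py: it reads the signed payload on
# stdin, runs gpg over it, and forwards gpg's exit status to the caller.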
INPUT=$(cat /dev/stdin)
if [ "$BITCOIN_VERIFY_COMMITS_ALLOW_SHA1" = 1 ]; then
printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null
exit $?
else
# Note how we've disabled SHA1 with the --weak-digest option, disabling
# signatures - including selfsigs - that use SHA1. While you might think that
# collision attacks shouldn't be an issue as they'd be an attack on yourself,
# in fact because what's being signed is a commit object that's
# semi-deterministically generated by untrusted input (the pull-req) in theory
# an attacker could construct a pull-req that results in a commit object that
# they've created a collision for. Not the most likely attack, but preventing
# it is pretty easy so we do so as a "belt-and-suspenders" measure.
    # Only the first line of `gpg --version` carries the version string
    # (e.g. "gpg (GnuPG) 2.4.0"); iterating over the word-split output could
    # never match the space-containing patterns below.
    case "$(gpg --version | head -n 1)" in
    "gpg (GnuPG) 1.4.1"*|"gpg (GnuPG) 2.0."*)
        echo "Please upgrade to at least gpg 2.1.10 to check for weak signatures" > /dev/stderr
        printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null
        exit $?
        ;;
    # We assume if you're running 2.1+, you're probably running 2.1.10+
    # gpg will fail otherwise
    # We assume if you're running 1.X, it is either 1.4.1X or 1.4.20+
    # gpg will fail otherwise
    esac
printf '%s\n' "$INPUT" | gpg --trust-model always --weak-digest sha1 "$@" 2>/dev/null
exit $?
fi
| 0 |
bitcoin/contrib | bitcoin/contrib/verify-commits/verify-commits.py | #!/usr/bin/env python3
# Copyright (c) 2018-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Verify commits against a trusted keys list."""
import argparse
import hashlib
import logging
import os
import subprocess
import sys
import time
GIT = os.getenv('GIT', 'git')
def tree_sha512sum(commit='HEAD'):
"""Calculate the Tree-sha512 for the commit.
This is copied from github-merge.py. See https://github.com/bitcoin-core/bitcoin-maintainer-tools."""
# request metadata for entire tree, recursively
files = []
blob_by_name = {}
for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
name_sep = line.index(b'\t')
metadata = line[:name_sep].split() # perms, 'blob', blobid
assert metadata[1] == b'blob'
name = line[name_sep + 1:]
files.append(name)
blob_by_name[name] = metadata[2]
files.sort()
# open connection to git-cat-file in batch mode to request data for all blobs
# this is much faster than launching it per file
p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
overall = hashlib.sha512()
for f in files:
blob = blob_by_name[f]
# request blob
p.stdin.write(blob + b'\n')
p.stdin.flush()
# read header: blob, "blob", size
reply = p.stdout.readline().split()
assert reply[0] == blob and reply[1] == b'blob'
size = int(reply[2])
# hash the blob data
intern = hashlib.sha512()
ptr = 0
while ptr < size:
bs = min(65536, size - ptr)
piece = p.stdout.read(bs)
if len(piece) == bs:
intern.update(piece)
else:
raise IOError('Premature EOF reading git cat-file output')
ptr += bs
dig = intern.hexdigest()
assert p.stdout.read(1) == b'\n' # ignore LF that follows blob data
# update overall hash with file hash
overall.update(dig.encode("utf-8"))
overall.update(" ".encode("utf-8"))
overall.update(f)
overall.update("\n".encode("utf-8"))
p.stdin.close()
if p.wait():
raise IOError('Non-zero return value executing git cat-file')
return overall.hexdigest()
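# The hex digest returned above is compared against a "Tree-SHA512: <128 hex
# chars>" trailer line in each commit message (see the check in main() below).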
def main():
# Enable debug logging if running in CI
if 'CI' in os.environ and os.environ['CI'].lower() == "true":
logging.getLogger().setLevel(logging.DEBUG)
# Parse arguments
parser = argparse.ArgumentParser(usage='%(prog)s [options] [commit id]')
parser.add_argument('--disable-tree-check', action='store_false', dest='verify_tree', help='disable SHA-512 tree check')
parser.add_argument('--clean-merge', type=float, dest='clean_merge', default=float('inf'), help='Only check clean merge after <NUMBER> days ago (default: %(default)s)', metavar='NUMBER')
parser.add_argument('commit', nargs='?', default='HEAD', help='Check clean merge up to commit <commit>')
args = parser.parse_args()
# get directory of this program and read data files
dirname = os.path.dirname(os.path.abspath(__file__))
print("Using verify-commits data from " + dirname)
with open(dirname + "/trusted-git-root", "r", encoding="utf8") as f:
verified_root = f.read().splitlines()[0]
with open(dirname + "/trusted-sha512-root-commit", "r", encoding="utf8") as f:
verified_sha512_root = f.read().splitlines()[0]
with open(dirname + "/allow-revsig-commits", "r", encoding="utf8") as f:
revsig_allowed = f.read().splitlines()
with open(dirname + "/allow-unclean-merge-commits", "r", encoding="utf8") as f:
unclean_merge_allowed = f.read().splitlines()
with open(dirname + "/allow-incorrect-sha512-commits", "r", encoding="utf8") as f:
incorrect_sha512_allowed = f.read().splitlines()
with open(dirname + "/trusted-keys", "r", encoding="utf8") as f:
trusted_keys = f.read().splitlines()
# Set commit and variables
current_commit = args.commit
if ' ' in current_commit:
print("Commit must not contain spaces", file=sys.stderr)
sys.exit(1)
verify_tree = args.verify_tree
no_sha1 = True
prev_commit = ""
initial_commit = current_commit
# Iterate through commits
while True:
        # Log a message to prevent the CI job from timing out
logging.debug("verify-commits: [in-progress] processing commit {}".format(current_commit[:8]))
if current_commit == verified_root:
print('There is a valid path from "{}" to {} where all commits are signed!'.format(initial_commit, verified_root))
sys.exit(0)
else:
# Make sure this commit isn't older than trusted roots
check_root_older_res = subprocess.run([GIT, "merge-base", "--is-ancestor", verified_root, current_commit])
if check_root_older_res.returncode != 0:
print(f"\"{current_commit}\" predates the trusted root, stopping!")
sys.exit(0)
if verify_tree:
if current_commit == verified_sha512_root:
print("All Tree-SHA512s matched up to {}".format(verified_sha512_root), file=sys.stderr)
verify_tree = False
no_sha1 = False
else:
# Skip the tree check if we are older than the trusted root
check_root_older_res = subprocess.run([GIT, "merge-base", "--is-ancestor", verified_sha512_root, current_commit])
if check_root_older_res.returncode != 0:
print(f"\"{current_commit}\" predates the trusted SHA512 root, disabling tree verification.")
verify_tree = False
no_sha1 = False
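        # This variable is read by gpg.sh (the gpg.program wrapper used below):
        # when set to "1" it drops the --weak-digest sha1 hardening, so that
        # SHA1-signed commits predating the trusted SHA512 root still verify.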
os.environ['BITCOIN_VERIFY_COMMITS_ALLOW_SHA1'] = "0" if no_sha1 else "1"
allow_revsig = current_commit in revsig_allowed
# Check that the commit (and parents) was signed with a trusted key
valid_sig = False
verify_res = subprocess.run([GIT, '-c', 'gpg.program={}/gpg.sh'.format(dirname), 'verify-commit', "--raw", current_commit], capture_output=True)
for line in verify_res.stderr.decode().splitlines():
if line.startswith("[GNUPG:] VALIDSIG "):
key = line.split(" ")[-1]
valid_sig = key in trusted_keys
elif (line.startswith("[GNUPG:] REVKEYSIG ") or line.startswith("[GNUPG:] EXPKEYSIG ")) and not allow_revsig:
valid_sig = False
break
if not valid_sig:
if prev_commit != "":
print("No parent of {} was signed with a trusted key!".format(prev_commit), file=sys.stderr)
print("Parents are:", file=sys.stderr)
parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', prev_commit]).decode('utf8').splitlines()[0].split(' ')
for parent in parents:
subprocess.call([GIT, 'show', '-s', parent], stdout=sys.stderr)
else:
print("{} was not signed with a trusted key!".format(current_commit), file=sys.stderr)
sys.exit(1)
# Check the Tree-SHA512
if (verify_tree or prev_commit == "") and current_commit not in incorrect_sha512_allowed:
tree_hash = tree_sha512sum(current_commit)
if ("Tree-SHA512: {}".format(tree_hash)) not in subprocess.check_output([GIT, 'show', '-s', '--format=format:%B', current_commit]).decode('utf8').splitlines():
print("Tree-SHA512 did not match for commit " + current_commit, file=sys.stderr)
sys.exit(1)
# Merge commits should only have two parents
parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', current_commit]).decode('utf8').splitlines()[0].split(' ')
if len(parents) > 2:
print("Commit {} is an octopus merge".format(current_commit), file=sys.stderr)
sys.exit(1)
# Check that the merge commit is clean
commit_time = int(subprocess.check_output([GIT, 'show', '-s', '--format=format:%ct', current_commit]).decode('utf8').splitlines()[0])
check_merge = commit_time > time.time() - args.clean_merge * 24 * 60 * 60 # Only check commits in clean_merge days
allow_unclean = current_commit in unclean_merge_allowed
if len(parents) == 2 and check_merge and not allow_unclean:
current_tree = subprocess.check_output([GIT, 'show', '--format=%T', current_commit]).decode('utf8').splitlines()[0]
# This merge-tree functionality requires git >= 2.38. The
# --write-tree option was added in order to opt-in to the new
# behavior. Older versions of git will not recognize the option and
# will instead exit with code 128.
try:
recreated_tree = subprocess.check_output([GIT, "merge-tree", "--write-tree", parents[0], parents[1]]).decode('utf8').splitlines()[0]
except subprocess.CalledProcessError as e:
if e.returncode == 128:
print("git v2.38+ is required for this functionality.", file=sys.stderr)
sys.exit(1)
else:
raise e
if current_tree != recreated_tree:
print("Merge commit {} is not clean".format(current_commit), file=sys.stderr)
subprocess.call([GIT, 'diff', recreated_tree, current_tree])
sys.exit(1)
prev_commit = current_commit
current_commit = parents[0]
if __name__ == '__main__':
main()
| 0 |
bitcoin/contrib | bitcoin/contrib/verify-commits/pre-push-hook.sh | #!/usr/bin/env bash
# Copyright (c) 2014-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
if ! [[ "$2" =~ ^(git@)?(www.)?github.com(:|/)bitcoin/bitcoin(.git)?$ ]]; then
exit 0
fi
while read LINE; do
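    # Each stdin line from git has the form
    #   <local ref> <local sha1> <remote ref> <remote sha1>
    # and the (intentional) word-splitting below places those fields in
    # $2..$5, with $1 as a placeholder.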
    set -- A $LINE
if [ "$4" != "refs/heads/master" ]; then
continue
fi
if ! ./contrib/verify-commits/verify-commits.py "$3" > /dev/null 2>&1; then
echo "ERROR: A commit is not signed, can't push"
./contrib/verify-commits/verify-commits.py
exit 1
fi
done < /dev/stdin
| 0 |
bitcoin/contrib | bitcoin/contrib/verify-commits/trusted-sha512-root-commit | 309bf16257b2395ce502017be627186b749ee749
| 0 |
bitcoin/contrib | bitcoin/contrib/verify-commits/README.md | Tooling for verification of PGP signed commits
----------------------------------------------
This is an incomplete work in progress, but currently includes a pre-push hook
script (`pre-push-hook.sh`) for maintainers to ensure that their own commits
are PGP signed (nearly always merge commits), as well as a Python 3 script to verify
commits against a trusted keys list.
Using verify-commits.py safely
------------------------------
Remember that you can't use an untrusted script to verify itself. This means
that checking out code, then running `verify-commits.py` against `HEAD` is
_not_ safe, because the version of `verify-commits.py` that you just ran could
be backdoored. Instead, you need to use a trusted version of verify-commits
prior to checkout to make sure you're checking out only code signed by trusted
keys:
```sh
git fetch origin && \
./contrib/verify-commits/verify-commits.py origin/master && \
git checkout origin/master
```
Note that the above isn't a good UI/UX yet, and needs significant improvements
to make it more convenient and reduce the chance of errors; pull-reqs
improving this process would be much appreciated.
Unless `--clean-merge 0` is specified, `verify-commits.py` will attempt to verify that
each merge commit applies cleanly (with some exceptions). This requires using at least
git v2.38.0.
Configuration files
-------------------
* `trusted-git-root`: This file should contain a single git commit hash which is the first unsigned git commit (hence it is the "root of trust").
* `trusted-sha512-root-commit`: This file should contain a single git commit hash which is the first commit without a SHA512 root commitment.
* `trusted-keys`: This file should contain a \n-delimited list of all PGP fingerprints of authorized commit signers (primary, not subkeys).
* `allow-revsig-commits`: This file should contain a \n-delimited list of git commit hashes. See next section for more info.
Import trusted keys
-------------------
In order to check the commit signatures, you must add the trusted PGP keys to your machine. [GnuPG](https://gnupg.org/) may be used to import the trusted keys by running the following command:
```sh
gpg --keyserver hkps://keys.openpgp.org --recv-keys $(<contrib/verify-commits/trusted-keys)
```
Key expiry/revocation
---------------------
When a key (or subkey) which has signed old commits expires or is revoked,
verify-commits will start failing to verify all commits which were signed by
said key. In order to avoid bumping the root-of-trust `trusted-git-root`
file, individual commits which were signed by such a key can be added to the
`allow-revsig-commits` file. That way, the PGP signatures are still verified
but no new commits can be signed by any expired/revoked key. To build the list
of commits that need this exception, verify-commits.py can be edited to test
each commit with BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG set to both 1 and 0, and
to print those commits that verify only when it is set to 1.
| 0 |
bitcoin/contrib | bitcoin/contrib/verify-commits/trusted-keys | E777299FC265DD04793070EB944D35F9AC3DB76A
D1DBF2C4B96F2DEBF4C16654410108112E7EA81F
152812300785C96444D3334D17565732E08E5E41
6B002C6EA3F91B1B0DF0C9BC8F617F1200A6D25C
4D1B3D5ECBA1A7E05371EEBE46800E30FC748A66
| 0 |
bitcoin/contrib | bitcoin/contrib/verify-commits/trusted-git-root | 437dfe1c26e752c280014a30f809e62c684ad99e
| 0 |
bitcoin/contrib | bitcoin/contrib/seeds/nodes_main.txt | 1.65.195.98:8333 # AS4760
2.59.236.56:8333 # AS24904
2.83.114.20:8333 # AS8657
2.248.194.16:8333 # AS3301
5.2.154.6:8333 # AS8708
5.101.140.30:8333 # AS42831
5.128.87.126:8333 # AS31200
5.144.21.49:8333 # AS15600
5.172.132.104:8333 # AS15600
5.188.62.18:8333 # AS34665
5.200.2.180:8333 # AS49544
8.129.184.255:8333 # AS37963
8.209.105.138:8333 # AS45102
12.34.98.148:8333 # AS7018
14.199.102.151:8333 # AS9269
18.27.79.17:8333 # AS3
18.27.124.231:8333 # AS3
18.216.249.151:8333 # AS16509
23.88.155.58:8333 # AS10242
23.93.101.158:8333 # AS46375
23.109.156.76:8333 # AS7979
23.175.0.220:8333 # AS395502
23.175.0.222:8333 # AS395502
24.232.36.225:8333 # AS7303
27.124.108.19:8333 # AS58511
27.148.206.140:8333 # AS4134
31.7.70.195:8333 # AS49666
31.25.98.16:8333 # AS48635
31.41.23.249:8333 # AS31287
31.47.102.92:8333 # AS8251
31.47.202.112:8333 # AS34385
31.165.78.146:8333 # AS6730
31.165.228.138:8333 # AS6730
34.64.101.4:8333 # AS139070
34.105.19.97:8333 # AS15169
34.126.107.179:8333 # AS396982
34.126.115.35:8333 # AS396982
35.245.186.117:8333 # AS15169
37.15.60.144:8333 # AS12479
37.16.105.63:8333 # AS20904
37.120.155.34:8333 # AS9009
37.120.179.29:8333 # AS47147
37.139.102.73:8333 # AS35816
37.193.227.16:8333 # AS31200
37.220.135.151:8333 # AS41206
38.53.129.67:8333 # AS40237
38.54.14.89:8333 # AS138915
38.141.134.140:8333 # AS174
38.145.151.150:8333 # AS40545
41.72.154.66:8333 # AS37153
43.143.203.198:8333 # AS45090
45.15.124.117:8333 # AS35913
45.43.97.103:8333 # AS26827
45.44.213.116:8333 # AS54198
45.58.187.101:8333 # AS46844
45.79.192.236:8333 # AS63949
45.81.241.97:8333 # AS30823
45.83.220.102:8333 # AS39351
45.83.241.46:8333 # AS206238
45.87.106.57:8333 # AS39238
45.129.38.5:8333 # AS49666
45.130.20.177:8333 # AS3214
45.134.142.40:8333 # AS60068
45.135.4.143:8333 # AS25596
45.135.92.127:8333 # AS12555
45.145.188.112:8333 # AS206805
46.23.87.218:8333 # AS51088
46.32.50.98:8333 # AS39642
46.32.78.17:8333 # AS48416
46.59.40.91:8333 # AS8473
46.138.246.77:8333 # AS8359
46.166.142.2:8333 # AS43350
46.166.162.59:8333 # AS16125
46.175.178.3:8333 # AS28725
46.188.15.6:8333 # AS39153
46.188.30.118:8333 # AS39153
46.223.223.216:8333 # AS51185
46.226.18.135:8333 # AS52176
47.88.86.79:8333 # AS45102
47.148.7.69:8333 # AS5650
47.198.223.60:8333 # AS5650
50.2.13.164:8333 # AS62904
50.4.135.84:8333 # AS12083
50.53.39.237:8333 # AS20055
50.53.250.162:8333 # AS20055
50.68.121.44:8333 # AS6327
50.117.132.178:8333 # AS577
51.154.62.103:8333 # AS15796
51.158.150.155:8333 # AS12876
51.250.46.215:8333 # AS200350
54.176.63.16:8333 # AS16509
58.158.0.86:8333 # AS2519
60.205.205.119:8333 # AS37963
61.74.99.193:8333 # AS4766
61.92.59.104:8333 # AS9269
62.122.173.171:8333 # AS50245
62.171.129.32:8333 # AS51167
62.178.27.239:8333 # AS8412
62.209.210.3:8333 # AS6855
62.215.127.73:8333 # AS21050
62.238.148.104:8333 # AS15435
62.245.153.8:8333 # AS8767
64.146.136.45:8333 # AS16713
65.21.134.184:8333 # AS24940
66.18.13.146:8333 # AS13767
66.23.233.43:8333 # AS19318
66.27.98.216:8333 # AS20001
66.29.129.218:8333 # AS22612
66.38.94.13:8333 # AS11979
66.45.141.46:8333 # AS11232
66.58.243.215:8333 # AS8047
66.114.33.49:8333 # AS23175
66.198.211.167:8333 # AS10835
66.208.64.128:8333 # AS10352
66.219.196.170:8333 # AS29933
67.210.228.203:8333 # AS7819
68.183.75.251:8333 # AS14061
68.194.125.140:8333 # AS6128
68.199.120.17:8333 # AS6128
69.4.94.226:8333 # AS36352
69.8.175.201:8333 # AS21766
69.59.18.22:8333 # AS397444
69.196.152.33:8333 # AS5645
69.228.219.124:8333 # AS7018
70.64.27.12:8333 # AS6327
70.160.240.132:8333 # AS22773
71.79.109.128:8333 # AS7843
71.184.193.75:8333 # AS701
72.15.59.173:8333 # AS21949
72.48.253.168:8333 # AS7459
72.207.171.210:8333 # AS22773
73.117.132.138:8333 # AS7922
73.212.226.59:8333 # AS7922
74.76.151.110:8333 # AS7843
74.91.115.229:8333 # AS14586
74.118.137.119:8333 # AS20326
74.213.175.108:8333 # AS21949
74.213.251.239:8333 # AS14978
74.220.255.190:8333 # AS23175
74.221.189.109:8333 # AS26827
75.83.203.225:8333 # AS20001
75.172.52.186:8333 # AS209
76.24.143.22:8333 # AS1351
76.69.202.247:8333 # AS577
76.73.198.242:8333 # AS12083
76.119.248.240:8333 # AS1351
77.20.48.144:8333 # AS3209
77.22.152.239:8333 # AS204028
77.37.224.222:8333 # AS42610
77.48.196.234:8333 # AS16019
77.70.16.245:8333 # AS8717
77.162.190.90:8333 # AS1136
78.20.227.249:8333 # AS6848
78.21.167.8:8333 # AS6848
78.35.147.203:8333 # AS8422
78.108.108.25:8333 # AS8251
78.154.237.60:8333 # AS9155
79.11.31.76:8333 # AS3269
79.87.88.235:8333 # AS15557
79.101.1.25:8333 # AS8400
79.124.7.241:8333 # AS203380
79.124.7.253:8333 # AS203380
79.150.68.42:8333 # AS3352
79.249.10.53:8333 # AS3320
80.82.21.77:8333 # AS42927
80.82.76.59:8333 # AS202425
80.88.172.227:8333 # AS31263
80.93.213.246:8333 # AS42910
80.111.142.213:8333 # AS6830
80.208.227.134:8333 # AS62282
80.208.228.9:8333 # AS62282
80.209.64.86:8333 # AS31027
80.229.28.60:8333 # AS2856
81.7.16.182:8333 # AS35366
81.19.10.2:8333 # AS24641
81.162.196.43:8333 # AS34955
81.171.22.143:8333 # AS60781
81.172.221.4:8333 # AS12430
81.224.44.164:8333 # AS3301
81.245.96.36:8333 # AS5432
82.1.68.54:8333 # AS5089
82.66.10.11:8333 # AS12322
82.66.211.31:8333 # AS12322
82.71.4.154:8333 # AS13037
82.96.96.40:8333 # AS29686
82.116.50.101:8333 # AS30936
82.136.98.249:8333 # AS8821
82.195.237.253:8333 # AS1836
83.137.41.10:8333 # AS31394
83.171.175.5:8333 # AS8767
83.208.193.242:8333 # AS5610
83.233.76.165:8333 # AS29518
83.240.89.196:8333 # AS31246
84.38.3.249:8333 # AS196691
84.54.23.48:8333 # AS35913
84.126.216.77:8333 # AS12430
84.211.187.211:8333 # AS41164
84.246.200.122:8333 # AS42455
84.255.244.61:8333 # AS34779
85.165.42.115:8333 # AS2119
85.194.238.134:8333 # AS47605
85.208.69.21:8333 # AS25091
85.208.71.36:8333 # AS42275
85.209.240.91:8333 # AS205581
85.214.118.71:8333 # AS6724
85.214.161.252:8333 # AS6724
85.236.190.252:8333 # AS35032
85.243.115.136:8333 # AS8657
86.22.20.13:8333 # AS5089
86.49.34.92:8333 # AS16019
86.95.8.249:8333 # AS1136
86.104.228.10:8333 # AS31638
86.104.228.23:8333 # AS31638
87.79.94.221:8333 # AS8422
88.10.89.23:8333 # AS3352
88.84.223.30:8333 # AS21453
88.86.125.50:8333 # AS39392
88.90.77.100:8333 # AS2119
88.97.40.50:8333 # AS13037
88.137.109.62:8333 # AS15557
88.147.244.250:8333 # AS12389
88.208.115.70:8333 # AS29208
88.212.53.246:8333 # AS42841
89.35.142.168:8333 # AS34977
89.78.111.197:8333 # AS6830
89.117.59.129:8333 # AS1239
89.147.108.200:8333 # AS44735
89.163.132.180:8333 # AS24961
89.165.232.242:8333 # AS48161
89.216.21.96:8333 # AS31042
90.50.172.182:8333 # AS3215
90.146.130.214:8333 # AS12605
90.146.208.162:8333 # AS12605
90.156.26.148:8333 # AS12741
90.163.172.139:8333 # AS12479
90.177.163.77:8333 # AS5610
91.67.145.110:8333 # AS3209
91.93.194.154:8333 # AS34984
91.123.182.164:8333 # AS51648
91.123.183.219:8333 # AS51792
91.135.0.187:8333 # AS12496
91.147.232.98:8333 # AS5483
91.184.168.249:8333 # AS9063
91.193.237.116:8333 # AS42916
91.199.41.45:8333 # AS6866
91.204.149.5:8333 # AS42765
91.215.91.254:8333 # AS48078
91.219.25.232:8333 # AS50448
91.237.88.218:8333 # AS56813
92.27.150.46:8333 # AS13285
92.27.150.47:8333 # AS13285
92.221.20.232:8333 # AS29695
92.221.126.65:8333 # AS29695
93.33.192.204:8333 # AS12874
93.41.237.78:8333 # AS12874
93.95.88.13:8333 # AS35434
93.95.227.125:8333 # AS44735
93.103.13.1:8333 # AS34779
93.115.86.239:8333 # AS3223
93.123.180.164:8333 # AS35539
93.186.201.173:8333 # AS24961
93.190.117.26:8333 # AS196881
94.19.7.55:8333 # AS35807
94.23.21.80:8333 # AS16276
94.23.205.110:8333 # AS16276
94.131.0.73:8333 # AS29632
94.142.237.4:8333 # AS48926
94.154.159.99:8333 # AS62240
94.202.50.200:8333 # AS15802
94.231.253.18:8333 # AS35224
95.42.140.142:8333 # AS8866
95.67.18.100:8333 # AS34867
95.70.238.176:8333 # AS12735
95.83.73.31:8333 # AS8359
95.90.128.3:8333 # AS204028
95.110.234.93:8333 # AS31034
95.161.12.45:8333 # AS39598
95.172.62.167:8333 # AS201826
95.179.128.87:8333 # AS20473
95.191.130.100:8333 # AS12389
95.214.53.154:8333 # AS201814
96.3.53.254:8333 # AS11232
97.75.145.12:8333 # AS22709
97.81.198.180:8333 # AS20115
97.87.216.110:8333 # AS20115
99.229.210.111:8333 # AS812
99.246.87.2:8333 # AS812
101.43.124.195:8333 # AS45090
102.132.192.141:8333 # AS37680
103.21.3.89:8333 # AS38195
103.35.121.72:8333 # AS9498
103.99.168.100:8333 # AS6939
103.99.168.140:8333 # AS6939
103.99.170.210:8333 # AS54415
103.99.170.220:8333 # AS54415
103.105.202.50:8333 # AS137764
104.238.220.199:8333 # AS23470
104.243.33.165:8333 # AS23470
104.244.73.6:8333 # AS53667
108.26.125.214:8333 # AS701
109.86.60.33:8333 # AS13188
109.99.63.159:8333 # AS9050
109.120.194.136:8333 # AS34569
109.123.233.138:8333 # AS15685
109.123.240.53:8333 # AS15685
109.153.94.35:8333 # AS2856
109.173.126.157:8333 # AS42610
109.193.76.200:8333 # AS51185
109.221.229.197:8333 # AS3215
109.236.90.117:8333 # AS49981
109.248.206.13:8333 # AS203493
111.90.140.23:8333 # AS45839
111.90.140.46:8333 # AS45839
111.90.145.37:8333 # AS18106
114.173.159.209:8333 # AS4713
116.58.171.67:8333 # AS2514
119.31.179.202:8333 # AS17408
119.42.55.203:8333 # AS133159
122.222.160.190:8333 # AS2519
123.60.213.192:8333 # AS55990
124.197.54.113:8333 # AS9790
125.168.140.108:8333 # AS4826
128.0.190.26:8333 # AS30764
128.65.194.136:8333 # AS29222
129.13.189.212:8333 # AS34878
129.13.189.215:8333 # AS34878
129.226.216.148:8333 # AS132203
131.188.40.191:8333 # AS680
134.65.9.63:8333 # AS19653
134.122.200.160:8333 # AS64050
134.195.185.52:8333 # AS13536
135.19.253.101:8333 # AS5769
136.29.109.58:8333 # AS19165
136.32.238.6:8333 # AS16591
136.49.201.24:8333 # AS16591
137.226.34.46:8333 # AS680
138.207.211.189:8333 # AS11776
139.130.41.82:8333 # AS1221
140.238.220.99:8333 # AS31898
142.54.181.218:8333 # AS32097
142.166.19.23:8333 # AS855
142.254.87.115:8333 # AS46375
143.177.229.149:8333 # AS50266
144.2.101.21:8333 # AS3303
144.24.236.64:8333 # AS31898
145.40.51.52:8333 # AS49808
146.71.69.103:8333 # AS7782
146.120.241.173:8333 # AS208515
147.50.238.53:8333 # AS45265
148.103.101.132:8333 # AS28118
149.75.48.92:8333 # AS6079
152.44.137.83:8333 # AS11404
154.0.3.194:8333 # AS37680
154.26.137.105:8333 # AS174
154.26.154.73:8333 # AS1299
154.57.5.11:8333 # AS200736
155.4.55.21:8333 # AS8473
156.146.137.142:8333 # AS1448
156.146.177.221:8333 # AS1448
157.22.72.175:8333 # AS397379
157.97.0.118:8333 # AS43571
158.140.141.69:8333 # AS132132
158.181.132.84:8333 # AS41750
159.2.215.98:8333 # AS855
159.196.3.239:8333 # AS4764
159.224.189.250:8333 # AS13188
160.80.12.16:8333 # AS137
161.230.38.160:8333 # AS12353
161.246.11.230:8333 # AS9486
162.0.210.152:8333 # AS22612
162.62.18.226:8333 # AS132203
162.254.118.20:8333 # AS6130
163.158.168.181:8333 # AS15435
165.173.19.33:8333 # AS132132
165.228.174.117:8333 # AS1221
165.255.241.184:8333 # AS327693
167.88.11.203:8333 # AS20278
167.179.147.155:8333 # AS4764
170.17.151.235:8333 # AS3303
170.64.174.230:8333 # AS15108
172.92.102.115:8333 # AS11404
172.105.21.216:8333 # AS63949
172.111.176.244:8333 # AS46562
172.255.98.108:8333 # AS7979
173.82.5.202:8333 # AS35916
173.181.35.50:8333 # AS395570
173.212.253.137:8333 # AS51167
173.235.73.87:8333 # AS11272
174.30.29.85:8333 # AS209
174.141.209.40:8333 # AS6461
176.9.17.121:8333 # AS24940
176.12.16.135:8333 # AS8717
176.74.136.237:8333 # AS35613
176.74.139.120:8333 # AS35613
176.122.122.134:8333 # AS50581
176.126.167.10:8333 # AS8449
176.151.244.130:8333 # AS5410
176.186.19.106:8333 # AS5410
176.212.185.153:8333 # AS9049
177.142.146.193:8333 # AS4230
178.21.118.178:8333 # AS49544
178.61.141.198:8333 # AS21050
178.124.162.209:8333 # AS6697
178.143.25.194:8333 # AS15962
178.154.233.197:8333 # AS200350
178.159.98.133:8333 # AS202390
178.232.186.191:8333 # AS41164
178.236.137.63:8333 # AS44843
179.60.149.4:8333 # AS395839
184.160.110.104:8333 # AS5769
184.174.37.139:8333 # AS1239
185.8.104.179:8333 # AS16125
185.14.30.25:8333 # AS21100
185.25.48.184:8333 # AS61272
185.52.93.45:8333 # AS39449
185.64.116.15:8333 # AS31736
185.69.105.117:8333 # AS6855
185.98.54.20:8333 # AS39572
185.107.83.55:8333 # AS43350
185.132.109.122:8333 # AS38919
185.135.81.50:8333 # AS57494
185.140.253.169:8333 # AS200735
185.148.3.227:8333 # AS47605
185.154.2.3:8333 # AS29119
185.162.92.36:8333 # AS41722
185.163.44.36:8333 # AS39798
185.165.170.19:8333 # AS3223
185.167.113.59:8333 # AS207054
185.185.59.12:8333 # AS48614
185.203.41.148:8333 # AS9009
185.209.12.76:8333 # AS212323
185.209.70.17:8333 # AS204568
185.210.125.33:8333 # AS205671
185.233.189.210:8333 # AS61303
185.238.131.19:8333 # AS206238
185.239.220.210:8333 # AS61282
185.239.221.5:8333 # AS61282
185.250.90.246:8333 # AS61955
186.249.217.25:8333 # AS7195
186.250.95.132:8333 # AS262967
188.35.167.14:8333 # AS34123
188.68.53.44:8333 # AS47147
188.120.255.115:8333 # AS29182
189.6.195.111:8333 # AS28573
190.2.130.44:8333 # AS49981
190.13.122.89:8333 # AS33576
190.123.27.11:8333 # AS52468
190.145.127.254:8333 # AS14080
191.220.156.64:8333 # AS8167
192.31.136.90:8333 # AS54098
192.69.53.43:8333 # AS11142
192.146.137.44:8333 # AS25376
192.174.121.33:8333 # AS11492
192.222.147.175:8333 # AS1403
193.198.34.24:8333 # AS2108
193.222.130.14:8333 # AS29208
194.35.185.167:8333 # AS9063
194.54.83.234:8333 # AS41018
194.233.84.100:8333 # AS141995
195.2.73.88:8333 # AS48282
195.48.12.8:8333 # AS1836
195.154.200.157:8333 # AS12876
197.211.133.15:8333 # AS51265
198.84.146.8:8333 # AS5645
198.98.55.86:8333 # AS53667
199.247.7.208:8333 # AS20473
200.116.154.131:8333 # AS13489
201.191.6.103:8333 # AS11830
201.221.234.200:8333 # AS27928
202.47.225.242:8333 # AS9931
202.107.219.130:8333 # AS4134
202.108.211.135:8333 # AS4837
202.138.13.122:8333 # AS4826
203.86.195.32:8333 # AS23655
203.184.52.247:8333 # AS9790
204.111.163.114:8333 # AS4922
205.178.41.124:8333 # AS11039
206.192.203.0:8333 # AS7029
207.229.46.80:8333 # AS852
207.244.248.81:8333 # AS40021
207.255.193.47:8333 # AS11776
208.59.133.63:8333 # AS11039
209.58.145.157:8333 # AS394380
209.97.189.249:8333 # AS14061
209.177.138.245:8333 # AS7832
209.237.133.54:8333 # AS53859
210.54.37.190:8333 # AS4648
210.54.39.238:8333 # AS4648
212.34.225.118:8333 # AS44395
212.41.9.30:8333 # AS49505
212.51.132.176:8333 # AS13030
212.69.60.77:8333 # AS12496
212.86.32.106:8333 # AS15366
213.47.64.105:8333 # AS8412
213.141.154.201:8333 # AS12714
213.142.148.169:8333 # AS6762
213.184.244.24:8333 # AS60280
213.227.147.244:8333 # AS60781
213.250.21.112:8333 # AS5603
216.146.251.8:8333 # AS54579
216.232.157.104:8333 # AS395570
217.15.178.11:8333 # AS25534
217.26.32.10:8333 # AS197312
217.64.47.200:8333 # AS39324
217.76.51.25:8333 # AS39597
217.92.55.246:8333 # AS3320
217.170.124.170:8333 # AS35401
217.180.221.162:8333 # AS30600
217.180.238.137:8333 # AS30600
220.84.232.46:8333 # AS4766
220.133.39.61:8333 # AS3462
220.233.91.182:8333 # AS38195
[2001:19f0:1000:1db3:5400:4ff:fe56:5a8d]:8333 # AS20473
[2001:19f0:5:24da:3eec:efff:feb9:f36e]:8333 # AS20473
[2001:19f0:5:24da::]:8333 # AS20473
[2001:19f0:5:4535:3eec:efff:feb9:87e4]:8333 # AS20473
[2001:19f0:5:4535::]:8333 # AS20473
[2001:1bc0:c1::2000]:8333 # AS29686
[2001:1c04:4008:6300:8a5f:2678:114b:a660]:8333 # AS6830
[2001:41d0:203:3739::]:8333 # AS16276
[2001:41d0:203:8f49::]:8333 # AS16276
[2001:41d0:203:bb0a::]:8333 # AS16276
[2001:41d0:2:bf8f::]:8333 # AS16276
[2001:41d0:303:de8b::]:8333 # AS16276
[2001:41d0:403:3d61::]:8333 # AS16276
[2001:41d0:405:9600::]:8333 # AS16276
[2001:41d0:8:ed7f::1]:8333 # AS16276
[2001:41d0:a:69a2::1]:8333 # AS16276
[2001:41f0::62:6974:636f:696e]:8333 # AS6830
[2001:470:1b62::]:8333 # AS6939
[2001:470:1f05:43b:2831:8530:7179:5864]:8333 # AS6939
[2001:470:1f09:b14::11]:8333 # AS6939
[2001:470:1f15:106:e2d5:5eff:fe42:7ae5]:8333 # AS6939
[2001:470:1f1b:365:aa20:66ff:fe3f:1909]:8333 # AS6939
[2001:470:1f1b:5a6:216:3eff:fe24:1162]:8333 # AS6939
[2001:470:6a7c::]:8333 # AS6939
[2001:470:75e9:1::10]:8333 # AS6939
[2001:470:8ca0:2:4e72:b9ff:fe56:f8b8]:8333 # AS6939
[2001:470:dbc7:0:1010::100]:8333 # AS6939
[2001:4ba0:cafe:14cc::1]:8333 # AS24961
[2001:4ba0:ffff:24::1]:8333 # AS24961
[2001:4dd0:3564:0:30b7:1d7b:6fec:4c5c]:8333 # AS8422
[2001:4dd0:3564:0:88e:b4ff:2ad0:699b]:8333 # AS8422
[2001:4dd0:3564:0:9c1c:cc31:9fe8:5505]:8333 # AS8422
[2001:4dd0:3564:0:a0c4:d41f:4c4:1bb0]:8333 # AS8422
[2001:4dd0:3564:1::7676:8090]:8333 # AS8422
[2001:4dd0:3564:1:b977:bd71:4612:8e40]:8333 # AS8422
[2001:4dd0:af0e:3564::69:1]:8333 # AS8422
[2001:4dd0:af0e:3564::69:90]:8333 # AS8422
[2001:560:441f:1::4]:8333 # AS18530
[2001:638:a000:4140::ffff:191]:8333 # AS680
[2001:67c:25dc:91::2]:8333 # AS41018
[2001:67c:26b4:ff00::44]:8333 # AS25376
[2001:67c:2db8:6::36]:8333 # AS39798
[2001:7c0:2310:0:f816:3eff:fe6c:4f58]:8333 # AS34878
[2001:861:3242:8420::40]:8333 # AS5410
[2001:8b0:1301:1000::60]:8333 # AS20712
[2001:b030:2422::208d]:8333 # AS3462
[2001:b07:2ef:6e4a:3d:974e:784a:684b]:8333 # AS12874
[2001:b07:5d32:b142:8f77:3c7d:a2fd:ed2e]:8333 # AS12874
[2001:b07:6461:7811:489:d2da:e07:1af7]:8333 # AS12874
[2001:b07:646b:8074:32e8:9243:a337:e60a]:8333 # AS12874
[2001:b07:646b:8074:4cc6:79a5:3af7:7132]:8333 # AS12874
[2001:b07:ad4:ca4b:7dd5:8471:50c3:5363]:8333 # AS12874
[2001:bc8:1201:71a:2e59:e5ff:fe42:52f4]:8333 # AS12876
[2001:bc8:1600:0:208:a2ff:fe0c:8a2e]:8333 # AS12876
[2001:bc8:323c:ff:a634:384f:1849:f4bc]:8333 # AS12876
[2001:bc8:323c:ff:d217:c2ff:fe07:2cd9]:8333 # AS12876
[2001:bc8:700:2b14::1]:8333 # AS12876
[2001:bc8:700:8d16::1]:8333 # AS12876
[2001:e68:5400:58d0:bd15:ea8c:5b20:7523]:8333 # AS4788
[2400:2411:a3e1:4900:7298:f550:67e7:b99b]:8333 # AS17676
[2400:8901::f03c:93ff:fe2b:5c0b]:8333 # AS63949
[2400:8901::f03c:93ff:fe5a:685c]:8333 # AS63949
[2401:b140:1::100:210]:8333 # AS54415
[2401:b140:1::100:220]:8333 # AS54415
[2401:d002:3902:700:d72c:5e22:4e95:389d]:8333 # AS38195
[2404:4408:63a4:a01::250]:8333 # AS9790
[2406:3400:216:8b00:211:32ff:feca:336b]:8333 # AS10143
[2406:8c00:0:3422:133:18:228:108]:8333 # AS24282
[2406:da11:169:b03:32b5:f901:9f7c:3e4b]:8333 # AS16509
[2406:da18:9f1:f301:7d2e:c256:c112:f2be]:8333 # AS16509
[2406:da18:9f1:f303:c1c9:c569:b799:2057]:8333 # AS16509
[2406:da1e:a4e:8a00:20db:dd8d:3670:28f0]:8333 # AS16509
[2406:da1e:a4e:8a03:2aad:496b:768d:e497]:8333 # AS16509
[2407:3640:2107:1278::1]:8333 # AS141995
[2407:3640:3010:4012::1]:8333 # AS141995
[2407:8800:bc61:2202:d63d:7eff:fe6c:dc36]:8333 # AS7545
[2600:1700:5c5b:b0:aaa1:59ff:fe5f:615a]:8333 # AS7018
[2600:1700:ec7b:5730::48]:8333 # AS7018
[2600:1900:4000:4cc4:0:1::]:8333 # AS15169
[2600:1900:4000:4cc4:0:2::]:8333 # AS15169
[2600:1900:4000:4cc4:0:3::]:8333 # AS15169
[2600:1900:4000:4cc4::]:8333 # AS15169
[2600:1900:4030:a25e::]:8333 # AS15169
[2600:1f14:40e:e301:afdd:ad00:e568:d220]:8333 # AS16509
[2600:1f1c:2d3:2400:f15e:2f2a:760d:a33d]:8333 # AS16509
[2600:2104:1003:c5ab:dc5e:90ff:fe18:1d08]:8333 # AS11404
[2600:3c00::f03c:92ff:fe92:2745]:8333 # AS63949
[2600:3c00::f03c:92ff:fecf:61b6]:8333 # AS63949
[2600:3c00:e002:2e32::1:14]:8333 # AS63949
[2600:3c01::f03c:93ff:fe2a:5266]:8333 # AS63949
[2600:3c01::f03c:93ff:fe74:5f59]:8333 # AS63949
[2600:3c01::f03c:93ff:fee6:2146]:8333 # AS63949
[2600:3c02::f03c:92ff:fe5d:9fb]:8333 # AS63949
[2600:4040:2004:3201:459f:8fe8:444d:baf1]:8333 # AS13786
[2600:4040:4541:4900:4e1:b58a:8438:450e]:8333 # AS13786
[2600:6c54:7100:1ad1:c92e:36d:651:bd18]:8333 # AS20115
[2600:8801:2f80:477::141c]:8333 # AS22773
[2600:8801:8d00:3eb0:20c:29ff:fec3:d799]:8333 # AS22773
[2600:8805:2400:14e:12dd:b1ff:fef2:3013]:8333 # AS22773
[2601:184:300:156c:ba4c:30:9da:6c06]:8333 # AS7922
[2601:346:d7f:fff7:18c6:4856:ef75:744c]:8333 # AS7922
[2601:405:4a00:876:c8d3:f081:2ce8:ba8e]:8333 # AS7922
[2602:24c:b8f:cd90::7840]:8333 # AS46375
[2602:fec3:0:1::69]:8333 # AS62563
[2602:ff16:1:0:1:412:0:1]:8333 # AS29802
[2603:3001:2618:c000:2ec1:df1f:a463:9119]:8333 # AS7922
[2603:3003:11b:e100:20c:29ff:fe38:bbc0]:8333 # AS7922
[2603:3004:6a1:3800:851f:584d:7aba:affb]:8333 # AS7922
[2603:3004:6a1:3800::7bba]:8333 # AS7922
[2603:3004:6a1:3800::f667]:8333 # AS7922
[2603:3024:1606:1400::29ec]:8333 # AS7922
[2603:3024:18ee:8000:20e:c4ff:fed1:ef15]:8333 # AS7922
[2603:6000:a400:9300::2000]:8333 # AS7843
[2603:6010:7001:4830::2:1]:8333 # AS7843
[2603:8080:1f07:6fdd:7de2:d969:78c9:b7ea]:8333 # AS7843
[2603:8080:d600:1800:7ce1:74a2:6a8a:4643]:8333 # AS7843
[2603:8081:6c00:306e:215:5dff:fe02:150a]:8333 # AS7843
[2604:3d09:7182:8700:bba9:cde6:5b37:a8df]:8333 # AS6327
[2604:4080:1036:80b1::3be]:8333 # AS11404
[2604:a00:3:1223:216:3eff:fe27:76e0]:8333 # AS19318
[2604:a880:400:d0::261f:6001]:8333 # AS14061
[2604:a880:4:1d0::13e:f000]:8333 # AS14061
[2604:a880:4:1d0::17a:7000]:8333 # AS14061
[2604:a880:4:1d0::c1:3000]:8333 # AS14061
[2604:a880:4:1d0::e5:b000]:8333 # AS14061
[2605:4a80:a302:7940:7254:1ed4:90d7:4f39]:8333 # AS11232
[2605:4a80:a302:7940::2]:8333 # AS11232
[2605:6400:30:f220::]:8333 # AS53667
[2605:a140:3010:4014::1]:8333 # AS40021
[2605:ae00:203::203]:8333 # AS7819
[2605:b40:14d0:5b00:7988:eb8:6bb6:66e2]:8333 # AS174
[2605:c000:2a0a:1::102]:8333 # AS7393
[2607:5300:61:854::1]:8333 # AS16276
[2607:9280:b:73b:250:56ff:fe14:25b5]:8333 # AS395502
[2607:9280:b:73b:250:56ff:fe21:9c2f]:8333 # AS395502
[2607:9280:b:73b:250:56ff:fe21:bf32]:8333 # AS395502
[2607:9280:b:73b:250:56ff:fe33:4d1b]:8333 # AS395502
[2607:9280:b:73b:250:56ff:fe3d:401]:8333 # AS395502
[2620:6e:a000:1:42:42:42:42]:8333 # AS397444
[2620:a6:2000:1:1:0:5:1601]:8333 # AS27566
[2620:a6:2000:1:2:0:9:900b]:8333 # AS27566
[2620:a6:2000:1:2:0:b:300e]:8333 # AS27566
[2800:150:11d:d2f:bdac:7807:2f5:4aa0]:8333 # AS22047
[2803:9800:a007:82ba:650b:82b8:8377:d0]:8333 # AS19037
[2804:14c:155:45e0:1e86:15a3:efd9:7287]:8333 # AS28573
[2804:14c:657d:4030:28b4:eff:fe9b:8894]:8333 # AS28573
[2804:14d:1087:9434::1002]:8333 # AS4230
[2804:954:24:2:b390:d83b:358a:db53]:8333 # AS263073
[2804:d57:554d:de00:3e7c:3fff:fe7b:80aa]:8333 # AS8167
[2a00:1028:838c:563a:fd25:87b6:5a54:811]:8333 # AS5610
[2a00:1298:8001::6542]:8333 # AS5578
[2a00:1398:4:2a03:215:5dff:fed6:1033]:8333 # AS34878
[2a00:1398:4:2a03::bc03]:8333 # AS34878
[2a00:1768:2001:27::ef6a]:8333 # AS43350
[2a00:1f40:5001:108:5d17:7703:b0f5:4133]:8333 # AS42864
[2a00:23c5:fe80:7301:d6ae:52ff:fed5:56a5]:8333 # AS2856
[2a00:6020:13dc:bc00:5559:258:27d:b52b]:8333 # AS60294
[2a00:6020:4503:3700:20c:29ff:fe61:4a4c]:8333 # AS60294
[2a00:6020:b434:eb00:dea6:32ff:fe0d:a5c0]:8333 # AS60294
[2a00:6020:b489:2000:5054:ff:fefc:5ed8]:8333 # AS60294
[2a00:7c80:0:10c::2]:8333 # AS49981
[2a00:7c80:0:25::e37a]:8333 # AS49981
[2a00:8a60:e012:a00::21]:8333 # AS680
[2a00:bbe0:cc:0:5a11:22ff:feb4:8f5c]:8333 # AS47605
[2a00:bbe0:cc:0:62a4:4cff:fe23:7510]:8333 # AS47605
[2a00:ca8:a15:9a5b:8b42:a886:7d48:7a21]:8333 # AS30764
[2a00:ca8:a1f:f9b7:cb55:5766:524b:acaa]:8333 # AS30764
[2a00:d4e0:ff:fc02:5e55:4a7c:b83b:e5a1]:8333 # AS15600
[2a00:d520:9:9300:420b:544e:8019:6d3a]:8333 # AS15600
[2a00:d880:5:c2::d329]:8333 # AS198203
[2a00:ee2:1200:1900:8d3:d2ff:feb1:bc58]:8333 # AS5603
[2a01:4f8:173:230a::2]:8333 # AS24940
[2a01:4f8:200:7222::2]:8333 # AS24940
[2a01:4f8:202:3e6::2]:8333 # AS24940
[2a01:4f8:221:44d7::2]:8333 # AS24940
[2a01:4f8:231:915::2]:8333 # AS24940
[2a01:4f8:261:2bcd::2]:8333 # AS24940
[2a01:4f8:261:3cae::2]:8333 # AS24940
[2a01:4f8:261:420c::2]:8333 # AS24940
[2a01:4f9:2b:29a::2]:8333 # AS24940
[2a01:4f9:3a:2dd2::2]:8333 # AS24940
[2a01:7a7:2:2804:ae1f:6bff:fe9d:6c94]:8333 # AS20773
[2a01:7c8:aac2:180:5054:ff:fe56:8d10]:8333 # AS20857
[2a01:7c8:aac9:c9:5054:ff:fedf:ff95]:8333 # AS20857
[2a01:7e01::f03c:93ff:fe49:2f5b]:8333 # AS63949
[2a01:8740:1:753::e5cb]:8333 # AS57344
[2a01:8740:1:ffc5::8c6a]:8333 # AS57344
[2a01:cb00:b63:c000:227:eff:fe28:c565]:8333 # AS3215
[2a01:cb19:688:e900:aa60:b6ff:fe29:bbae]:8333 # AS3215
[2a01:e0a:163:c0b0:9da5:1690:a12b:bede]:8333 # AS12322
[2a01:e0a:282:67b0:b4f4:aaff:fe7c:44a6]:8333 # AS12322
[2a01:e0a:301:7010:b87d:e14b:cea9:b998]:8333 # AS12322
[2a01:e0a:320:39a0:325a:3aff:fe02:3180]:8333 # AS12322
[2a01:e0a:351:9fb0:6bf2:95d6:b7bd:b846]:8333 # AS12322
[2a01:e0a:5fa:a0a0:ca1f:66ff:fece:b8a2]:8333 # AS12322
[2a01:e0a:83d:dd30:3676:5d8e:8a6f:115a]:8333 # AS12322
[2a01:e0a:9e9:c240:7b44:f32a:6ec0:a8af]:8333 # AS12322
[2a01:e0a:b5:7f50:c257:a55b:4846:97e1]:8333 # AS12322
[2a01:e11:100c:70:cbc8:9e31:4b77:1626]:8333 # AS12322
[2a02:1210:2cdf:4600:2bc:e03e:43e8:4718]:8333 # AS3303
[2a02:1210:86bf:f100:a9ac:d041:1f8e:6925]:8333 # AS3303
[2a02:1210:94c3:3400:d8c3:743c:90f6:a48a]:8333 # AS3303
[2a02:168:2000:96::12]:8333 # AS13030
[2a02:168:420b:a::20]:8333 # AS13030
[2a02:168:676e:0:e65f:1ff:fe09:3591]:8333 # AS13030
[2a02:1748:f39f:5872:216:3eff:fe21:266]:8333 # AS51184
[2a02:180:1:1::517:10b6]:8333 # AS35366
[2a02:2780:9000:70::7]:8333 # AS35434
[2a02:2780:9000:70::f]:8333 # AS35434
[2a02:2780::e01a]:8333 # AS35434
[2a02:2f05:6008:ce00::1]:8333 # AS48571
[2a02:390:9000:0:aaa1:59ff:fe43:b57b]:8333 # AS12496
[2a02:578:85ce:1600:1e1b:dff:fee3:774b]:8333 # AS9031
[2a02:768:f92b:db46:5e46:772b:71d:29b7]:8333 # AS44489
[2a02:7a01::91:228:45:130]:8333 # AS16019
[2a02:7b40:50d0:e386::1]:8333 # AS62282
[2a02:7b40:50d1:e35b::1]:8333 # AS62282
[2a02:7b40:5928:89::1]:8333 # AS62282
[2a02:7b40:b945:344d::1]:8333 # AS62282
[2a02:7b40:d418:6d9a::1]:8333 # AS62282
[2a02:8070:b84:6ae0:f9c6:fbb9:1c41:81aa]:8333 # AS51185
[2a02:8070:f186:38e0::d5a6]:8333 # AS51185
[2a02:8084:103:6810:1e69:7aff:fea2:1acc]:8333 # AS6830
[2a02:8308:8081:f300:3b8:7ec0:2837:1b57]:8333 # AS16019
[2a02:8388:e302:7980:6f85:a0b3:4b4d:8b0f]:8333 # AS8412
[2a02:8388:e5c3:4a80:201:2eff:fe82:b3cc]:8333 # AS8412
[2a02:842a:1df:8a01:1e1b:dff:fe0b:236d]:8333 # AS15557
[2a02:a210:28be:5f80::111]:8333 # AS6830
[2a02:a44b:5cf9:1:b62e:99ff:fe49:d492]:8333 # AS1136
[2a02:a44d:14d6:1:2c0:8ff:fe8f:b3b2]:8333 # AS1136
[2a02:a45a:94cd:f00d::1]:8333 # AS1136
[2a02:a45f:3b9d:31::199]:8333 # AS1136
[2a02:a464:3d6b::1:2]:8333 # AS1136
[2a02:a46c:7f8e:1:35bf:3aeb:137c:1d35]:8333 # AS1136
[2a02:a46d:36f:1:20d:b9ff:fe4e:6398]:8333 # AS1136
[2a02:c205:2021:4216::1]:8333 # AS51167
[2a02:c206:2044:9826::1]:8333 # AS51167
[2a02:c206:2075:3351::1]:8333 # AS51167
[2a02:c207:0:3829::1]:8333 # AS51167
[2a02:c207:2014:4199::1]:8333 # AS51167
[2a02:c207:2014:8757::1]:8333 # AS51167
[2a02:c207:2026:6682::1]:8333 # AS51167
[2a02:c207:2034:7358::1]:8333 # AS51167
[2a02:c207:3002:7468::1]:8333 # AS51167
[2a02:c207:3008:4592::1]:8333 # AS51167
[2a02:cb43:4000::178]:8333 # AS33891
[2a02:e5e:1:10::27]:8333 # AS25057
[2a02:e98:20:1504::1]:8333 # AS24641
[2a03:4000:28:68:7411:53ff:fe4c:21d]:8333 # AS47147
[2a03:4000:65:fdc:3462:66ff:fe05:ec5c]:8333 # AS47147
[2a03:6000:870:0:46:23:87:218]:8333 # AS51088
[2a03:94e0:ffff:185:243:218:0:19]:8333 # AS56655
[2a03:b0c0:1:e0::397:6001]:8333 # AS14061
[2a03:b0c0:1:e0::794:9001]:8333 # AS14061
[2a03:b0c0:2:f0::288:c001]:8333 # AS14061
[2a03:b0c0:2:f0::30c:1]:8333 # AS14061
[2a03:b0c0:3:d0::e3b:5001]:8333 # AS14061
[2a03:cfc0:8000:7::5fd6:3557]:8333 # AS201814
[2a04:2180:dc05:2::3b]:8333 # AS61272
[2a04:2180:ffff:fffe::d]:8333 # AS61272
[2a04:52c0:103:c455::1]:8333 # AS60404
[2a04:bc40:1dc3:8d::2:1001]:8333 # AS35277
[2a05:3580:dc0b:1600:def4:5a62:de42:324a]:8333 # AS20764
[2a05:d014:a55:4000:8dde:69f:4ac7:b26]:8333 # AS16509
[2a05:d016:98f:5201:6be0:a4de:80c7:32d5]:8333 # AS16509
[2a05:d018:a75:6c03:75b:2c73:8caa:414b]:8333 # AS16509
[2a05:f480:1800:697:5400:2ff:feb6:c36d]:8333 # AS20473
[2a06:e040:7603:2918:c6ef:464e:9fe5:73ec]:8333 # AS198507
[2a07:abc4::89:234:180:194]:8333 # AS62000
[2a07:d884::127e]:8333 # AS6762
[2a09:2681:1010:10::5]:8333 # AS61282
[2a09:2681:102::210]:8333 # AS61282
[2a0b:f300:2:6::2]:8333 # AS62240
[2a0d:8340:24::2]:8333 # AS50113
[2a0e:8f02:21d1:144::101]:8333 # AS20473
[2a0e:b780::55d1:f05b]:8333 # AS205581
[2a10:3781:2c19::1]:8333 # AS206238
[2a10:d200:1:33:a6bf:1ff:fe6a:46a9]:8333 # AS212323
[2a12:8e40:5668:e40a::1]:8333 # AS34465
[2a12:8e40:5668:e40b::1]:8333 # AS34465
[2a12:8e40:5668:e40c::1]:8333 # AS34465
[2a12:8e40:5668:e40d::1]:8333 # AS34465
[2a12:8e40:5668:e40e::1]:8333 # AS34465
[2a12:8e40:5668:e40f::1]:8333 # AS34465
[2a12:8e40:5668:e410::1]:8333 # AS34465
[2a12:8e40:5668:e411::1]:8333 # AS34465
[2a12:8e40:5668:e412::1]:8333 # AS34465
[2a12:8e40:5668:e417::1]:8333 # AS34465
[2c0f:f8f0:da51:0:3a45:fc57:5e30:2593]:8333 # AS30844
# manually updated 2023-04 for minimal torv3 bootstrap support
2bqghnldu6mcug4pikzprwhtjjnsyederctvci6klcwzepnjd46ikjyd.onion:8333
4lr3w2iyyl5u5l6tosizclykf5v3smqroqdn2i4h3kq6pfbbjb2xytad.onion:8333
5g72ppm3krkorsfopcm2bi7wlv4ohhs4u4mlseymasn7g7zhdcyjpfid.onion:8333
5sbmcl4m5api5tqafi4gcckrn3y52sz5mskxf3t6iw4bp7erwiptrgqd.onion:8333
776aegl7tfhg6oiqqy76jnwrwbvcytsx2qegcgh2mjqujll4376ohlid.onion:8333
77mdte42srl42shdh2mhtjr7nf7dmedqrw6bkcdekhdvmnld6ojyyiad.onion:8333
azbpsh4arqlm6442wfimy7qr65bmha2zhgjg7wbaji6vvaug53hur2qd.onion:8333
b64xcbleqmwgq2u46bh4hegnlrzzvxntyzbmucn3zt7cssm7y4ubv3id.onion:8333
bsqbtcparrfihlwolt4xgjbf4cgqckvrvsfyvy6vhiqrnh4w6ghixoid.onion:8333
bsqbtctulf2g4jtjsdfgl2ed7qs6zz5wqx27qnyiik7laockryvszqqd.onion:8333
cwi3ekrwhig47dhhzfenr5hbvckj7fzaojygvazi2lucsenwbzwoyiqd.onion:8333
devinbtcmwkuitvxl3tfi5of4zau46ymeannkjv6fpnylkgf3q5fa3id.onion:8333
devinbtctu7uctl7hly2juu3thbgeivfnvw3ckj3phy6nyvpnx66yeyd.onion:8333
devinbtcyk643iruzfpaxw3on2jket7rbjmwygm42dmdyub3ietrbmid.onion:8333
dtql5vci4iaml4anmueftqr7bfgzqlauzfy4rc2tfgulldd3ekyijjyd.onion:8333
emzybtc25oddoa2prol2znpz2axnrg6k77xwgirmhv7igoiucddsxiad.onion:8333
emzybtc3ewh7zihpkdvuwlgxrhzcxy2p5fvjggp7ngjbxcytxvt4rjid.onion:8333
emzybtc454ewbviqnmgtgx3rgublsgkk23r4onbhidcv36wremue4kqd.onion:8333
emzybtc5bnpb2o6gh54oquiox54o4r7yn4a2wiiwzrjonlouaibm2zid.onion:8333
fpz6r5ppsakkwypjcglz6gcnwt7ytfhxskkfhzu62tnylcknh3eq6pad.onion:8333
hanvo3hzqbhcqm5vahhi5a3czxxdwc7vt56p5gr7bifcvelaqurv6iid.onion:8333
hz7oqntvj4adrwtqappcgaxfribg5u4rvfkpwlo3xup5fcuyvylkxlqd.onion:8333
ityrxhidvjnjnf6imzyuqqnkkwridjnebkbokx25so3suq3fzezmksid.onion:8333
jto2jfbsxhb6yvhcrrjddrgbakte6tgsy3c3z3prss64gndgvovvosyd.onion:8333
k7nb3r7hxi5exvr4xmvnilhfw6hei7sw4rwz2t6onh4py6wbora6tuyd.onion:8333
kpgvmscirrdqpekbqjsvw5teanhatztpp2gl6eee4zkowvwfxwenqaid.onion:8333
l7kw3vjs4cf5mnuejjgqcxrw6wwsjmabllq3h3amy4f5q33d6cgo2kyd.onion:8333
m7cbpjolo662uel7rpaid46as2otcj44vvwg3gccodnvaeuwbm3anbyd.onion:8333
mowb2qwpjgs2a6q3yj3xa7nxklfssul4w7ynonyycw3uyopfu3x6ujad.onion:8333
mwmfluek4au6mxxpw6fy7sjhkm65bdfc7izc7lpz3trewfdghyrzsbid.onion:8333
rfqmn3qe36uaptkxhdvi74p4hyrzhir6vhmzb2hqryxodig4gue2zbyd.onion:8333
rsgwtnousfc7zyg4qsm3gvczjx7cihh2njyjbjl3qvcj3xg7wmvhddqd.onion:8333
s2d52bbttuwcl3pdrwzhxpmhtxn3jg23havjqg5eygwhtiw6lgyelpqd.onion:8333
upvthy74hgvgbqi6w3zd2mlchoi5tvvw7b5hpmmhcddd5fnnwrixneid.onion:8333
who3qs4eqlqzoxhqqgan4mg54ua5uz3mk4lj33ag53ei4orvnznrjbad.onion:8333
wizbit5555bsslwv4ctronnsgk5vh2w2pdx7v7eyuivlyuoteejk7lid.onion:8333
yrmedr35tt4wqfnwgilltxh5bnukeukxjpgg3jzmmsyld5lgsn5amvyd.onion:8333
# manually updated 2023-04 for minimal i2p bootstrap support
255fhcp6ajvftnyo7bwz3an3t4a4brhopm3bamyh2iu5r3gnr2rq.b32.i2p:0
27yrtht5b5bzom2w5ajb27najuqvuydtzb7bavlak25wkufec5mq.b32.i2p:0
3gocb7wc4zvbmmebktet7gujccuux4ifk3kqilnxnj5wpdpqx2hq.b32.i2p:0
4fcc23wt3hyjk3csfzcdyjz5pcwg5dzhdqgma6bch2qyiakcbboa.b32.i2p:0
4osyqeknhx5qf3a73jeimexwclmt42cju6xdp7icja4ixxguu2hq.b32.i2p:0
4umsi4nlmgyp4rckosg4vegd2ysljvid47zu7pqsollkaszcbpqq.b32.i2p:0
6j2ezegd3e2e2x3o3pox335f5vxfthrrigkdrbgfbdjchm5h4awa.b32.i2p:0
6n36ljyr55szci5ygidmxqer64qr24f4qmnymnbvgehz7qinxnla.b32.i2p:0
72yjs6mvlby3ky6mgpvvlemmwq5pfcznrzd34jkhclgrishqdxva.b32.i2p:0
a5qsnv3maw77mlmmzlcglu6twje6ttctd3fhpbfwcbpmewx6fczq.b32.i2p:0
aovep2pco7v2k4rheofrgytbgk23eg22dczpsjqgqtxcqqvmxk6a.b32.i2p:0
bitcoi656nll5hu6u7ddzrmzysdtwtnzcnrjd4rfdqbeey7dmn5a.b32.i2p:0
brifkruhlkgrj65hffybrjrjqcgdgqs2r7siizb5b2232nruik3a.b32.i2p:0
c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p:0
day3hgxyrtwjslt54sikevbhxxs4qzo7d6vi72ipmscqtq3qmijq.b32.i2p:0
du5kydummi23bjfp6bd7owsvrijgt7zhvxmz5h5f5spcioeoetwq.b32.i2p:0
e55k6wu46rzp4pg5pk5npgbr3zz45bc3ihtzu2xcye5vwnzdy7pq.b32.i2p:0
eciohu5nq7vsvwjjc52epskuk75d24iccgzmhbzrwonw6lx4gdva.b32.i2p:0
ejlnngarmhqvune74ko7kk55xtgbz5i5ncs4vmnvjpy3l7y63xaa.b32.i2p:0
fhzlp3xroabohnmjonu5iqazwhlbbwh5cpujvw2azcu3srqdceja.b32.i2p:0
fx6np3oheacr3t7gluftrqo2qxldbbatgw4hepp7ulb4j5ry57ca.b32.i2p:0
gehtac45oaghz54ypyopim64mql7oad2bqclla74l6tfeolzmodq.b32.i2p:0
hhfi4yqkg2twqiwezrfksftjjofbyx3ojkmlnfmcwntgnrjjhkya.b32.i2p:0
jz3s4eurm5vzjresf4mwo7oni4bk36daolwxh4iqtewakylgkxmq.b32.i2p:0
liu75cvktv4icbctg72w7nxbk4eibt7wamizfdii4omz7gcke5vq.b32.i2p:0
lrah7acdsgopybg43shadwwiv6igezaw64i6jb5muqdg7dmhj3la.b32.i2p:0
lzuu6mjtu7vd55d2biphicihufipoa7vyym6xfnkmmlra3tiziia.b32.i2p:0
m6bpynxkv2ktwxkg6p2gyudjfhdupb6kuzabeqdnckkdkf4kxjla.b32.i2p:0
m6v454xd6p3bt5swujgmveklsp7lzbkqlqqfc2p36cjlwv5dbucq.b32.i2p:0
mlgeizrroynuhpxbzeosajt5u4ddcvynxfmcbm6kwjpaufilxigq.b32.i2p:0
o6t4fr5ayfadzieutstgwcllvwxeuzjlxmzsmpj3hpkvefhzfaea.b32.i2p:0
ofubxr2ir7u2guzjwyrvujicivzmvinwa36nuzlrg7tnsmebal7a.b32.i2p:0
oz2ia3flpm3du2tyusulrn7h7e2eo3juzkrmn34bvnrlcrugv7ia.b32.i2p:0
pohfcrfc7prn4bvn4xstw6nt3e7hjmb7kuj4djtsfqsskwhmhnna.b32.i2p:0
qd6jlsevsexww3wefpqs7iglxb3f63y4e6ydulfzrvwflpicmdqa.b32.i2p:0
rfjkzdzv4cwpxo6hzuncicvuyui76wxqx3a23lynq72ktwqs7aja.b32.i2p:0
rizfinyses2r3or4iubs5wx66gdy6mpf73w7uobfacm2l5cral3q.b32.i2p:0
sedndhv5vpcgdmykyi5st4yqhdxl3hpdtglta4do435wupahhx6q.b32.i2p:0
tugq6wa2ls2bv27pr2iy3da3k5ow3fzefbcvjcr22uc7w5vmevja.b32.i2p:0
usztavbib756k5vqggzgkyswoj6mttihjvp3c2pa642t2mb4pvsa.b32.i2p:0
vgu6llqbyjphml25umd5ztvyxrxuplz2g74fzbx75g3kkaetoyiq.b32.i2p:0
wjrul5jwwb4vqdmkkrjbmly7osj6amecdpsac5xvaoqrti4nb3ha.b32.i2p:0
wwbw7nqr3ahkqv62cuqfwgtneekvvpnuc4i4f6yo7tpoqjswvcwa.b32.i2p:0
xfkarmvk43vfkfvhkehy7ioj2b6wtfdlezvmlakblz3q4r7mccfq.b32.i2p:0
yc4xwin5ujenvcr6ynwkz7lnmmq3nmzxvfguele6ovqqpxgjvonq.b32.i2p:0
zdoabsg7ugzothyawodjhq54nvlofa746rxfkxpnjzj6nukmha6a.b32.i2p:0
zsxwyo6qcn3chqzwxnseusqgsnuw3maqnztkiypyfxtya4snkoka.b32.i2p:0
zysrlpii5ftrzivfcyhdrwpeyyqddbrdefnfu5q6otk5gtugmh2a.b32.i2p:0
# manually updated 2023-04 for minimal cjdns bootstrap support
[fc32:17ea:e415:c3bf:9808:149d:b5a2:c9aa]:8333
[fcc7:be49:ccd1:dc91:3125:f0da:457d:8ce]:8333
[fcdc:73ae:b1a9:1bf8:d4c2:811:a4c7:c34e]:8333
| 0 |
bitcoin/contrib | bitcoin/contrib/seeds/generate-seeds.py | #!/usr/bin/env python3
# Copyright (c) 2014-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for kernel/chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>:<port>
[<ipv6>]:<port>
<onion>.onion:<port>
<i2p>.b32.i2p:<port>
The output will be two data structures with the peers in binary format:
static const uint8_t chainparams_seed_{main,test}[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
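# Typical invocation (a sketch; run from the repository root, regenerating the
# header from the seed lists in contrib/seeds/):
#   python3 contrib/seeds/generate-seeds.py contrib/seeds/ > src/chainparamsseeds.h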
from base64 import b32decode
from enum import Enum
import struct
import sys
import os
import re
class BIP155Network(Enum):
IPV4 = 1
IPV6 = 2
TORV2 = 3 # no longer supported
TORV3 = 4
I2P = 5
CJDNS = 6
def name_to_bip155(addr):
'''Convert address string to BIP155 (networkID, addr) tuple.'''
if addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) == 35:
assert vchAddr[34] == 3
return (BIP155Network.TORV3, vchAddr[:32])
elif len(vchAddr) == 10:
return (BIP155Network.TORV2, vchAddr)
else:
raise ValueError('Invalid onion %s' % vchAddr)
elif addr.endswith('.b32.i2p'):
vchAddr = b32decode(addr[0:-8] + '====', True)
if len(vchAddr) == 32:
return (BIP155Network.I2P, vchAddr)
else:
raise ValueError(f'Invalid I2P {vchAddr}')
elif '.' in addr: # IPv4
return (BIP155Network.IPV4, bytes((int(x) for x in addr.split('.'))))
elif ':' in addr: # IPv6 or CJDNS
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert x < 2
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert (x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)
addr_bytes = bytes(sub[0] + ([0] * nullbytes) + sub[1])
if addr_bytes[0] == 0xfc:
# Assume that seeds with fc00::/8 addresses belong to CJDNS,
# not to the publicly unroutable "Unique Local Unicast" network, see
# RFC4193: https://datatracker.ietf.org/doc/html/rfc4193#section-8
return (BIP155Network.CJDNS, addr_bytes)
else:
return (BIP155Network.IPV6, addr_bytes)
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s):
'''Convert endpoint string to BIP155 (networkID, addr, port) tuple.'''
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = 0
else:
port = int(port)
host = name_to_bip155(host)
if host[0] == BIP155Network.TORV2:
return None # TORV2 is no longer supported, so we ignore it
else:
return host + (port, )
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def bip155_serialize(spec):
'''
Serialize (networkID, addr, port) tuple to BIP155 binary format.
'''
r = b""
r += struct.pack('B', spec[0].value)
r += ser_compact_size(len(spec[1]))
r += spec[1]
r += struct.pack('>H', spec[2])
return r
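# Worked example, derived from the functions above: the first mainnet seed
# "1.65.195.98:8333" parses to (BIP155Network.IPV4, b'\x01\x41\xc3\x62', 8333)
# and serializes to the eight bytes 01 04 01 41 c3 62 20 8d:
#   0x01                   network ID (IPV4)
#   0x04                   compact-size length of the address
#   0x01 0x41 0xc3 0x62    the four address bytes of 1.65.195.98
#   0x20 0x8d              port 8333, big-endian
# which process_nodes() below emits as "0x01,0x04,0x01,0x41,0xc3,0x62,0x20,0x8d,".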
def process_nodes(g, f, structname):
g.write('static const uint8_t %s[] = {\n' % structname)
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
spec = parse_spec(line)
if spec is None: # ignore this entry (e.g. no longer supported addresses like TORV2)
continue
blob = bip155_serialize(spec)
hoststr = ','.join(('0x%02x' % b) for b in blob)
g.write(f' {hoststr},\n')
g.write('};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a BIP155 serialized (networkID, addr, port) tuple.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'chainparams_seed_main')
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'chainparams_seed_test')
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 0 |
bitcoin/contrib | bitcoin/contrib/seeds/nodes_main_manual.txt |
# manually updated 2023-04 for minimal torv3 bootstrap support
2bqghnldu6mcug4pikzprwhtjjnsyederctvci6klcwzepnjd46ikjyd.onion:8333
4lr3w2iyyl5u5l6tosizclykf5v3smqroqdn2i4h3kq6pfbbjb2xytad.onion:8333
5g72ppm3krkorsfopcm2bi7wlv4ohhs4u4mlseymasn7g7zhdcyjpfid.onion:8333
5sbmcl4m5api5tqafi4gcckrn3y52sz5mskxf3t6iw4bp7erwiptrgqd.onion:8333
776aegl7tfhg6oiqqy76jnwrwbvcytsx2qegcgh2mjqujll4376ohlid.onion:8333
77mdte42srl42shdh2mhtjr7nf7dmedqrw6bkcdekhdvmnld6ojyyiad.onion:8333
azbpsh4arqlm6442wfimy7qr65bmha2zhgjg7wbaji6vvaug53hur2qd.onion:8333
b64xcbleqmwgq2u46bh4hegnlrzzvxntyzbmucn3zt7cssm7y4ubv3id.onion:8333
bsqbtcparrfihlwolt4xgjbf4cgqckvrvsfyvy6vhiqrnh4w6ghixoid.onion:8333
bsqbtctulf2g4jtjsdfgl2ed7qs6zz5wqx27qnyiik7laockryvszqqd.onion:8333
cwi3ekrwhig47dhhzfenr5hbvckj7fzaojygvazi2lucsenwbzwoyiqd.onion:8333
devinbtcmwkuitvxl3tfi5of4zau46ymeannkjv6fpnylkgf3q5fa3id.onion:8333
devinbtctu7uctl7hly2juu3thbgeivfnvw3ckj3phy6nyvpnx66yeyd.onion:8333
devinbtcyk643iruzfpaxw3on2jket7rbjmwygm42dmdyub3ietrbmid.onion:8333
dtql5vci4iaml4anmueftqr7bfgzqlauzfy4rc2tfgulldd3ekyijjyd.onion:8333
emzybtc25oddoa2prol2znpz2axnrg6k77xwgirmhv7igoiucddsxiad.onion:8333
emzybtc3ewh7zihpkdvuwlgxrhzcxy2p5fvjggp7ngjbxcytxvt4rjid.onion:8333
emzybtc454ewbviqnmgtgx3rgublsgkk23r4onbhidcv36wremue4kqd.onion:8333
emzybtc5bnpb2o6gh54oquiox54o4r7yn4a2wiiwzrjonlouaibm2zid.onion:8333
fpz6r5ppsakkwypjcglz6gcnwt7ytfhxskkfhzu62tnylcknh3eq6pad.onion:8333
hanvo3hzqbhcqm5vahhi5a3czxxdwc7vt56p5gr7bifcvelaqurv6iid.onion:8333
hz7oqntvj4adrwtqappcgaxfribg5u4rvfkpwlo3xup5fcuyvylkxlqd.onion:8333
ityrxhidvjnjnf6imzyuqqnkkwridjnebkbokx25so3suq3fzezmksid.onion:8333
jto2jfbsxhb6yvhcrrjddrgbakte6tgsy3c3z3prss64gndgvovvosyd.onion:8333
k7nb3r7hxi5exvr4xmvnilhfw6hei7sw4rwz2t6onh4py6wbora6tuyd.onion:8333
kpgvmscirrdqpekbqjsvw5teanhatztpp2gl6eee4zkowvwfxwenqaid.onion:8333
l7kw3vjs4cf5mnuejjgqcxrw6wwsjmabllq3h3amy4f5q33d6cgo2kyd.onion:8333
m7cbpjolo662uel7rpaid46as2otcj44vvwg3gccodnvaeuwbm3anbyd.onion:8333
mowb2qwpjgs2a6q3yj3xa7nxklfssul4w7ynonyycw3uyopfu3x6ujad.onion:8333
mwmfluek4au6mxxpw6fy7sjhkm65bdfc7izc7lpz3trewfdghyrzsbid.onion:8333
rfqmn3qe36uaptkxhdvi74p4hyrzhir6vhmzb2hqryxodig4gue2zbyd.onion:8333
rsgwtnousfc7zyg4qsm3gvczjx7cihh2njyjbjl3qvcj3xg7wmvhddqd.onion:8333
s2d52bbttuwcl3pdrwzhxpmhtxn3jg23havjqg5eygwhtiw6lgyelpqd.onion:8333
upvthy74hgvgbqi6w3zd2mlchoi5tvvw7b5hpmmhcddd5fnnwrixneid.onion:8333
who3qs4eqlqzoxhqqgan4mg54ua5uz3mk4lj33ag53ei4orvnznrjbad.onion:8333
wizbit5555bsslwv4ctronnsgk5vh2w2pdx7v7eyuivlyuoteejk7lid.onion:8333
yrmedr35tt4wqfnwgilltxh5bnukeukxjpgg3jzmmsyld5lgsn5amvyd.onion:8333
# manually updated 2023-04 for minimal i2p bootstrap support
255fhcp6ajvftnyo7bwz3an3t4a4brhopm3bamyh2iu5r3gnr2rq.b32.i2p:0
27yrtht5b5bzom2w5ajb27najuqvuydtzb7bavlak25wkufec5mq.b32.i2p:0
3gocb7wc4zvbmmebktet7gujccuux4ifk3kqilnxnj5wpdpqx2hq.b32.i2p:0
4fcc23wt3hyjk3csfzcdyjz5pcwg5dzhdqgma6bch2qyiakcbboa.b32.i2p:0
4osyqeknhx5qf3a73jeimexwclmt42cju6xdp7icja4ixxguu2hq.b32.i2p:0
4umsi4nlmgyp4rckosg4vegd2ysljvid47zu7pqsollkaszcbpqq.b32.i2p:0
6j2ezegd3e2e2x3o3pox335f5vxfthrrigkdrbgfbdjchm5h4awa.b32.i2p:0
6n36ljyr55szci5ygidmxqer64qr24f4qmnymnbvgehz7qinxnla.b32.i2p:0
72yjs6mvlby3ky6mgpvvlemmwq5pfcznrzd34jkhclgrishqdxva.b32.i2p:0
a5qsnv3maw77mlmmzlcglu6twje6ttctd3fhpbfwcbpmewx6fczq.b32.i2p:0
aovep2pco7v2k4rheofrgytbgk23eg22dczpsjqgqtxcqqvmxk6a.b32.i2p:0
bitcoi656nll5hu6u7ddzrmzysdtwtnzcnrjd4rfdqbeey7dmn5a.b32.i2p:0
brifkruhlkgrj65hffybrjrjqcgdgqs2r7siizb5b2232nruik3a.b32.i2p:0
c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p:0
day3hgxyrtwjslt54sikevbhxxs4qzo7d6vi72ipmscqtq3qmijq.b32.i2p:0
du5kydummi23bjfp6bd7owsvrijgt7zhvxmz5h5f5spcioeoetwq.b32.i2p:0
e55k6wu46rzp4pg5pk5npgbr3zz45bc3ihtzu2xcye5vwnzdy7pq.b32.i2p:0
eciohu5nq7vsvwjjc52epskuk75d24iccgzmhbzrwonw6lx4gdva.b32.i2p:0
ejlnngarmhqvune74ko7kk55xtgbz5i5ncs4vmnvjpy3l7y63xaa.b32.i2p:0
fhzlp3xroabohnmjonu5iqazwhlbbwh5cpujvw2azcu3srqdceja.b32.i2p:0
fx6np3oheacr3t7gluftrqo2qxldbbatgw4hepp7ulb4j5ry57ca.b32.i2p:0
gehtac45oaghz54ypyopim64mql7oad2bqclla74l6tfeolzmodq.b32.i2p:0
hhfi4yqkg2twqiwezrfksftjjofbyx3ojkmlnfmcwntgnrjjhkya.b32.i2p:0
jz3s4eurm5vzjresf4mwo7oni4bk36daolwxh4iqtewakylgkxmq.b32.i2p:0
liu75cvktv4icbctg72w7nxbk4eibt7wamizfdii4omz7gcke5vq.b32.i2p:0
lrah7acdsgopybg43shadwwiv6igezaw64i6jb5muqdg7dmhj3la.b32.i2p:0
lzuu6mjtu7vd55d2biphicihufipoa7vyym6xfnkmmlra3tiziia.b32.i2p:0
m6bpynxkv2ktwxkg6p2gyudjfhdupb6kuzabeqdnckkdkf4kxjla.b32.i2p:0
m6v454xd6p3bt5swujgmveklsp7lzbkqlqqfc2p36cjlwv5dbucq.b32.i2p:0
mlgeizrroynuhpxbzeosajt5u4ddcvynxfmcbm6kwjpaufilxigq.b32.i2p:0
o6t4fr5ayfadzieutstgwcllvwxeuzjlxmzsmpj3hpkvefhzfaea.b32.i2p:0
ofubxr2ir7u2guzjwyrvujicivzmvinwa36nuzlrg7tnsmebal7a.b32.i2p:0
oz2ia3flpm3du2tyusulrn7h7e2eo3juzkrmn34bvnrlcrugv7ia.b32.i2p:0
pohfcrfc7prn4bvn4xstw6nt3e7hjmb7kuj4djtsfqsskwhmhnna.b32.i2p:0
qd6jlsevsexww3wefpqs7iglxb3f63y4e6ydulfzrvwflpicmdqa.b32.i2p:0
rfjkzdzv4cwpxo6hzuncicvuyui76wxqx3a23lynq72ktwqs7aja.b32.i2p:0
rizfinyses2r3or4iubs5wx66gdy6mpf73w7uobfacm2l5cral3q.b32.i2p:0
sedndhv5vpcgdmykyi5st4yqhdxl3hpdtglta4do435wupahhx6q.b32.i2p:0
tugq6wa2ls2bv27pr2iy3da3k5ow3fzefbcvjcr22uc7w5vmevja.b32.i2p:0
usztavbib756k5vqggzgkyswoj6mttihjvp3c2pa642t2mb4pvsa.b32.i2p:0
vgu6llqbyjphml25umd5ztvyxrxuplz2g74fzbx75g3kkaetoyiq.b32.i2p:0
wjrul5jwwb4vqdmkkrjbmly7osj6amecdpsac5xvaoqrti4nb3ha.b32.i2p:0
wwbw7nqr3ahkqv62cuqfwgtneekvvpnuc4i4f6yo7tpoqjswvcwa.b32.i2p:0
xfkarmvk43vfkfvhkehy7ioj2b6wtfdlezvmlakblz3q4r7mccfq.b32.i2p:0
yc4xwin5ujenvcr6ynwkz7lnmmq3nmzxvfguele6ovqqpxgjvonq.b32.i2p:0
zdoabsg7ugzothyawodjhq54nvlofa746rxfkxpnjzj6nukmha6a.b32.i2p:0
zsxwyo6qcn3chqzwxnseusqgsnuw3maqnztkiypyfxtya4snkoka.b32.i2p:0
zysrlpii5ftrzivfcyhdrwpeyyqddbrdefnfu5q6otk5gtugmh2a.b32.i2p:0
# manually updated 2023-04 for minimal cjdns bootstrap support
[fc32:17ea:e415:c3bf:9808:149d:b5a2:c9aa]:8333
[fcc7:be49:ccd1:dc91:3125:f0da:457d:8ce]:8333
[fcdc:73ae:b1a9:1bf8:d4c2:811:a4c7:c34e]:8333
| 0 |
bitcoin/contrib | bitcoin/contrib/seeds/makeseeds.py | #!/usr/bin/env python3
# Copyright (c) 2013-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import argparse
import collections
import ipaddress
import re
import sys
from typing import Union
from asmap import ASMap, net_to_prefix
NSEEDS=512
MAX_SEEDS_PER_ASN = {
'ipv4': 2,
'ipv6': 10,
}
MIN_BLOCKS = 730000
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([a-z2-7]{56}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(
r"^/Satoshi:("
r"0.14.(0|1|2|3|99)|"
r"0.15.(0|1|2|99)|"
r"0.16.(0|1|2|3|99)|"
r"0.17.(0|0.1|1|2|99)|"
r"0.18.(0|1|99)|"
r"0.19.(0|1|2|99)|"
r"0.20.(0|1|2|99)|"
r"0.21.(0|1|2|99)|"
r"22.(0|1|99)|"
r"23.(0|1|99)|"
r"24.(0|1|99)|"
r"25.99"
r")")
def parseline(line: str) -> Union[dict, None]:
""" Parses a line from `seeds_main.txt` into a dictionary of details for that line.
or `None`, if the line could not be parsed.
"""
if line.startswith('#'):
# Ignore comment lines.
return None
sline = line.split()
if len(sline) < 12:
# line too short to be valid (fields up to index 11 are used below), skip it.
return None
# Skip bad results.
if int(sline[1]) == 0:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def dedup(ips: list[dict]) -> list[dict]:
""" Remove duplicates from `ips` where multiple ips share address and port. """
d = {}
for ip in ips:
d[ip['ip'],ip['port']] = ip
return list(d.values())
def filtermultiport(ips: list[dict]) -> list[dict]:
""" Filter out hosts with more nodes per IP"""
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(asmap: ASMap, ips: list[dict], max_per_asn: dict, max_per_net: int) -> list[dict]:
""" Prunes `ips` by
(a) trimming ips to have at most `max_per_net` ips from each net (e.g. ipv4, ipv6); and
(b) trimming ips to have at most `max_per_asn` ips from each asn in each net.
"""
# Sift out ips by type
ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv46 by ASN, and limit to max_per_net per network
result = []
net_count: dict[str, int] = collections.defaultdict(int)
asn_count: dict[int, int] = collections.defaultdict(int)
for i, ip in enumerate(ips_ipv46):
if net_count[ip['net']] == max_per_net:
# do not add this ip as we already have too many
# ips from this network
continue
asn = asmap.lookup(net_to_prefix(ipaddress.ip_network(ip['ip'])))
if not asn or asn_count[ip['net'], asn] == max_per_asn[ip['net']]:
# do not add this ip as we already have too many
# ips from this ASN on this network
continue
asn_count[ip['net'], asn] += 1
net_count[ip['net']] += 1
ip['asn'] = asn
result.append(ip)
# Add back Onions (up to max_per_net)
result.extend(ips_onion[0:max_per_net])
return result
def ip_stats(ips: list[dict]) -> str:
""" Format and return pretty string from `ips`. """
hist: dict[str, int] = collections.defaultdict(int)
for ip in ips:
if ip is not None:
hist[ip['net']] += 1
return f"{hist['ipv4']:6d} {hist['ipv6']:6d} {hist['onion']:6d}"
def parse_args():
argparser = argparse.ArgumentParser(description='Generate a list of bitcoin node seed ip addresses.')
argparser.add_argument("-a","--asmap", help='the location of the asmap asn database file (required)', required=True)
argparser.add_argument("-s","--seeds", help='the location of the DNS seeds file (required)', required=True)
return argparser.parse_args()
def main():
args = parse_args()
print(f'Loading asmap database "{args.asmap}"…', end='', file=sys.stderr, flush=True)
with open(args.asmap, 'rb') as f:
asmap = ASMap.from_binary(f.read())
print('Done.', file=sys.stderr)
print('Loading and parsing DNS seeds…', end='', file=sys.stderr, flush=True)
with open(args.seeds, 'r', encoding='utf8') as f:
lines = f.readlines()
ips = [parseline(line) for line in lines]
print('Done.', file=sys.stderr)
print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr)
print(f'{ip_stats(ips):s} Initial', file=sys.stderr)
# Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
print(f'{ip_stats(ips):s} Skip entries with invalid address', file=sys.stderr)
# Skip duplicates (in case multiple seeds files were concatenated)
ips = dedup(ips)
print(f'{ip_stats(ips):s} After removing duplicates', file=sys.stderr)
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
print(f'{ip_stats(ips):s} Enforce minimal number of blocks', file=sys.stderr)
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
print(f'{ip_stats(ips):s} Require service bit 1', file=sys.stderr)
# Require at least 50% 30-day uptime for clearnet, 10% for onion.
req_uptime = {
'ipv4': 50,
'ipv6': 50,
'onion': 10,
}
ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]]
print(f'{ip_stats(ips):s} Require minimum uptime', file=sys.stderr)
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
print(f'{ip_stats(ips):s} Require a known and recent user agent', file=sys.stderr)
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
print(f'{ip_stats(ips):s} Filter out hosts with multiple bitcoin ports', file=sys.stderr)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(asmap, ips, MAX_SEEDS_PER_ASN, NSEEDS)
print(f'{ip_stats(ips):s} Look up ASNs and limit results per ASN and per net', file=sys.stderr)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print(f"[{ip['ip']}]:{ip['port']}", end="")
else:
print(f"{ip['ip']}:{ip['port']}", end="")
if 'asn' in ip:
print(f" # AS{ip['asn']}", end="")
print()
if __name__ == '__main__':
main()
| 0 |
bitcoin/contrib | bitcoin/contrib/seeds/asmap.py | # Copyright (c) 2022 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""
This module provides the ASNEntry and ASMap classes.
"""
import copy
import ipaddress
import random
import unittest
from collections.abc import Callable, Iterable
from enum import Enum
from functools import total_ordering
from typing import Optional, Union, overload
def net_to_prefix(net: Union[ipaddress.IPv4Network,ipaddress.IPv6Network]) -> list[bool]:
"""
Convert an IPv4 or IPv6 network to a prefix represented as a list of bits.
IPv4 ranges are remapped to their IPv4-mapped IPv6 range (::ffff:0:0/96).
"""
num_bits = net.prefixlen
netrange = int.from_bytes(net.network_address.packed, 'big')
# Map an IPv4 prefix into IPv6 space.
if isinstance(net, ipaddress.IPv4Network):
num_bits += 96
netrange += 0xffff00000000
# Strip unused bottom bits.
assert (netrange & ((1 << (128 - num_bits)) - 1)) == 0
return [((netrange >> (127 - i)) & 1) != 0 for i in range(num_bits)]
def prefix_to_net(prefix: list[bool]) -> Union[ipaddress.IPv4Network,ipaddress.IPv6Network]:
"""The reverse operation of net_to_prefix."""
# Convert to number
netrange = sum(b << (127 - i) for i, b in enumerate(prefix))
num_bits = len(prefix)
assert num_bits <= 128
# Return IPv4 range if in ::ffff:0:0/96
if num_bits >= 96 and (netrange >> 32) == 0xffff:
return ipaddress.IPv4Network((netrange & 0xffffffff, num_bits - 96), True)
# Return IPv6 range otherwise.
return ipaddress.IPv6Network((netrange, num_bits), True)
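# A small sanity sketch for the two helpers above (the values follow directly
# from the IPv4-mapped remapping): 1.2.0.0/16 becomes a 112-bit prefix
# (16 + 96 bits), and prefix_to_net(net_to_prefix(net)) returns the original
# network unchanged for any valid IPv4 or IPv6 network.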
# Shortcut for (prefix, ASN) entries.
ASNEntry = tuple[list[bool], int]
# Shortcut for (prefix, old ASN, new ASN) entries.
ASNDiff = tuple[list[bool], int, int]
class _VarLenCoder:
"""
A class representing a custom variable-length binary encoder/decoder for
integers. Each object represents a different coder, with different parameters
minval and clsbits.
The encoding is easiest to describe using an example. Let's say minval=100 and
clsbits=[4,2,2,3]. In that case:
- x in [100..115]: encoded as [0] + [4-bit BE encoding of (x-100)].
- x in [116..119]: encoded as [1,0] + [2-bit BE encoding of (x-116)].
- x in [120..123]: encoded as [1,1,0] + [2-bit BE encoding of (x-120)].
- x in [124..131]: encoded as [1,1,1] + [3-bit BE encoding of (x-124)].
In general, every number is encoded as:
- First, k "1"-bits, where k is the class the number falls in (there is one class
per element of clsbits).
- Then, a "0"-bit, unless k is the highest class, in which case there is nothing.
- Lastly, clsbits[k] bits encoding in big endian the position in its class that
number falls into.
- Every class k consists of 2^clsbits[k] consecutive integers. k=0 starts at minval,
other classes start one past the last element of the class before it.
"""
def __init__(self, minval: int, clsbits: list[int]):
"""Construct a new _VarLenCoder."""
self._minval = minval
self._clsbits = clsbits
self._maxval = minval + sum(1 << b for b in clsbits) - 1
def can_encode(self, val: int) -> bool:
"""Check whether value val is in the range this coder supports."""
return self._minval <= val <= self._maxval
def encode(self, val: int, ret: list[int]) -> None:
"""Append encoding of val onto integer list ret."""
assert self._minval <= val <= self._maxval
val -= self._minval
bits = 0
for k, bits in enumerate(self._clsbits):
if val >> bits:
# If the value will not fit in class k, subtract its range from v,
# emit a "1" bit and continue with the next class.
val -= 1 << bits
ret.append(1)
else:
if k + 1 < len(self._clsbits):
# Unless we're in the last class, emit a "0" bit.
ret.append(0)
break
# And then encode v (now the position within the class) in big endian.
ret.extend((val >> (bits - 1 - b)) & 1 for b in range(bits))
def encode_size(self, val: int) -> int:
"""Compute how many bits are needed to encode val."""
assert self._minval <= val <= self._maxval
val -= self._minval
ret = 0
bits = 0
for k, bits in enumerate(self._clsbits):
if val >> bits:
val -= 1 << bits
ret += 1
else:
ret += k + 1 < len(self._clsbits)
break
return ret + bits
def decode(self, stream, bitpos) -> tuple[int,int]:
"""Decode a number starting at bitpos in stream, returning value and new bitpos."""
val = self._minval
bits = 0
for k, bits in enumerate(self._clsbits):
bit = 0
if k + 1 < len(self._clsbits):
bit = stream[bitpos]
bitpos += 1
if not bit:
break
val += 1 << bits
for i in range(bits):
bit = stream[bitpos]
bitpos += 1
val += bit << (bits - 1 - i)
return val, bitpos
# Variable-length encoders used in the binary asmap format.
_CODER_INS = _VarLenCoder(0, [0, 0, 1])
_CODER_ASN = _VarLenCoder(1, list(range(15, 25)))
_CODER_MATCH = _VarLenCoder(2, list(range(1, 9)))
_CODER_JUMP = _VarLenCoder(17, list(range(5, 31)))
class _Instruction(Enum):
"""One instruction in the binary asmap format."""
# A return instruction, encoded as [0], returns a constant ASN. It is followed by
# an integer using the ASN encoding.
RETURN = 0
# A jump instruction, encoded as [1,0] inspects the next unused bit in the input
# and either continues execution (if 0), or skips a specified number of bits (if 1).
# It is followed by an integer, and then two subprograms. The integer uses jump encoding
# and corresponds to the length of the first subprogram (so it can be skipped).
JUMP = 1
# A match instruction, encoded as [1,1,0] inspects 1 or more of the next unused bits
# in the input with its argument. If they all match, execution continues. If they do
# not, failure is returned. If a default instruction has been executed before, instead
# of failure the default instruction's argument is returned. It is followed by an
# integer in match encoding, and a subprogram. That value is at least 2 bits and at
# most 9 bits. An n-bit value signifies matching (n-1) bits in the input with the lower
# (n-1) bits in the match value.
MATCH = 2
# A default instruction, encoded as [1,1,1] sets the default variable to its argument,
# and continues execution. It is followed by an integer in ASN encoding, and a subprogram.
DEFAULT = 3
# Not an actual instruction, but a way to encode the empty program that fails. In the
# encoder, it is used more generally to represent the failure case inside MATCH instructions,
# which may (if used inside the context of a DEFAULT instruction) actually correspond to
# a successful return. In this usage, they're always converted to an actual MATCH or RETURN
# before the top level is reached (see make_default below).
END = 4
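# For instance (derived from the coders defined above): a program consisting of
# a single RETURN of ASN 1 encodes as one "0" bit for the instruction, a "0"
# bit selecting the smallest ASN class, and fifteen "0" bits for position 0 in
# that class -- 17 bits total, which to_binary below pads with zero bits to 3 bytes.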
class _BinNode:
"""A class representing a (node of) the parsed binary asmap format."""
@overload
def __init__(self, ins: _Instruction): ...
@overload
def __init__(self, ins: _Instruction, arg1: int): ...
@overload
def __init__(self, ins: _Instruction, arg1: "_BinNode", arg2: "_BinNode"): ...
@overload
def __init__(self, ins: _Instruction, arg1: int, arg2: "_BinNode"): ...
def __init__(self, ins: _Instruction, arg1=None, arg2=None):
"""
Construct a new asmap node. Possibilities are:
- _BinNode(_Instruction.RETURN, asn)
- _BinNode(_Instruction.JUMP, node_0, node_1)
- _BinNode(_Instruction.MATCH, val, node)
- _BinNode(_Instruction.DEFAULT, asn, node)
- _BinNode(_Instruction.END)
"""
self.ins = ins
self.arg1 = arg1
self.arg2 = arg2
if ins == _Instruction.RETURN:
assert isinstance(arg1, int)
assert arg2 is None
self.size = _CODER_INS.encode_size(ins.value) + _CODER_ASN.encode_size(arg1)
elif ins == _Instruction.JUMP:
assert isinstance(arg1, _BinNode)
assert isinstance(arg2, _BinNode)
self.size = (_CODER_INS.encode_size(ins.value) + _CODER_JUMP.encode_size(arg1.size) +
arg1.size + arg2.size)
elif ins == _Instruction.DEFAULT:
assert isinstance(arg1, int)
assert isinstance(arg2, _BinNode)
self.size = _CODER_INS.encode_size(ins.value) + _CODER_ASN.encode_size(arg1) + arg2.size
elif ins == _Instruction.MATCH:
assert isinstance(arg1, int)
assert isinstance(arg2, _BinNode)
self.size = (_CODER_INS.encode_size(ins.value) + _CODER_MATCH.encode_size(arg1)
+ arg2.size)
elif ins == _Instruction.END:
assert arg1 is None
assert arg2 is None
self.size = 0
else:
assert False
@staticmethod
def make_end() -> "_BinNode":
"""Constructor for a _BinNode with just an END instruction."""
return _BinNode(_Instruction.END)
@staticmethod
def make_leaf(val: int) -> "_BinNode":
"""Constructor for a _BinNode of just a RETURN instruction."""
assert val is not None and val > 0
return _BinNode(_Instruction.RETURN, val)
@staticmethod
def make_branch(node0: "_BinNode", node1: "_BinNode") -> "_BinNode":
"""
Construct a _BinNode corresponding to running either the node0 or node1 subprogram,
based on the next input bit. It exploits shortcuts that are possible in the encoding,
and uses either a JUMP, MATCH, or END instruction.
"""
if node0.ins == _Instruction.END and node1.ins == _Instruction.END:
return node0
if node0.ins == _Instruction.END:
if node1.ins == _Instruction.MATCH and node1.arg1 <= 0xFF:
return _BinNode(node1.ins, node1.arg1 + (1 << node1.arg1.bit_length()), node1.arg2)
return _BinNode(_Instruction.MATCH, 3, node1)
if node1.ins == _Instruction.END:
if node0.ins == _Instruction.MATCH and node0.arg1 <= 0xFF:
return _BinNode(node0.ins, node0.arg1 + (1 << (node0.arg1.bit_length() - 1)),
node0.arg2)
return _BinNode(_Instruction.MATCH, 2, node0)
return _BinNode(_Instruction.JUMP, node0, node1)
@staticmethod
def make_default(val: int, sub: "_BinNode") -> "_BinNode":
"""
Construct a _BinNode that corresponds to the specified subprogram, with the specified
default value. It exploits shortcuts that are possible in the encoding, and will use
either a DEFAULT or a RETURN instruction."""
assert val is not None and val > 0
if sub.ins == _Instruction.END:
return _BinNode(_Instruction.RETURN, val)
if sub.ins in (_Instruction.RETURN, _Instruction.DEFAULT):
return sub
return _BinNode(_Instruction.DEFAULT, val, sub)
@total_ordering
class ASMap:
"""
A class whose objects represent a mapping from subnets to ASNs.
Internally the mapping is stored as a binary trie, but can be converted
from/to a list of ASNEntry objects, and from/to the binary asmap file format.
In the trie representation, nodes are represented as bare lists for efficiency
and ease of manipulation:
- [0] means an unassigned subnet (no ASN mapping for it is present)
- [int] means a subnet mapped entirely to the specified ASN.
- [node,node] means a subnet whose lower half and upper half have different
mappings, represented by new trie nodes.
"""
def update(self, prefix: list[bool], asn: int) -> None:
"""Update this ASMap object to map prefix to the specified asn."""
assert asn == 0 or _CODER_ASN.can_encode(asn)
def recurse(node: list, offset: int) -> None:
if offset == len(prefix):
# Reached the end of prefix; overwrite this node.
node.clear()
node.append(asn)
return
if len(node) == 1:
# Need to descend into a leaf node; split it up.
oldasn = node[0]
node.clear()
node.append([oldasn])
node.append([oldasn])
# Descend into the node.
recurse(node[prefix[offset]], offset + 1)
# If the result is two identical leaf children, merge them.
if len(node[0]) == 1 and len(node[1]) == 1 and node[0] == node[1]:
oldasn = node[0][0]
node.clear()
node.append(oldasn)
recurse(self._trie, 0)
def update_multi(self, entries: list[tuple[list[bool], int]]) -> None:
"""Apply multiple update operations, where longer prefixes take precedence."""
entries.sort(key=lambda entry: len(entry[0]))
for prefix, asn in entries:
self.update(prefix, asn)
def _set_trie(self, trie) -> None:
"""Set trie directly. Internal use only."""
def recurse(node: list) -> None:
if len(node) < 2:
return
recurse(node[0])
recurse(node[1])
if len(node[0]) == 2:
return
if node[0] == node[1]:
if len(node[0]) == 0:
node.clear()
else:
asn = node[0][0]
node.clear()
node.append(asn)
recurse(trie)
self._trie = trie
def __init__(self, entries: Optional[Iterable[ASNEntry]] = None) -> None:
"""Construct an ASMap object from an optional list of entries."""
self._trie = [0]
if entries is not None:
def entry_key(entry):
"""Sort function that places shorter prefixes first."""
prefix, asn = entry
return len(prefix), prefix, asn
for prefix, asn in sorted(entries, key=entry_key):
self.update(prefix, asn)
def lookup(self, prefix: list[bool]) -> Optional[int]:
"""Look up a prefix. Returns ASN, or 0 if unassigned, or None if indeterminate."""
node = self._trie
for bit in prefix:
if len(node) == 1:
break
node = node[bit]
if len(node) == 1:
return node[0]
return None
def _to_entries_flat(self, fill: bool = False) -> list[ASNEntry]:
"""Convert an ASMap object to a list of non-overlapping (prefix, asn) objects."""
prefix : list[bool] = []
def recurse(node: list) -> list[ASNEntry]:
ret = []
if len(node) == 1:
if node[0] > 0:
ret = [(list(prefix), node[0])]
elif len(node) == 2:
prefix.append(False)
ret = recurse(node[0])
prefix[-1] = True
ret += recurse(node[1])
prefix.pop()
if fill and len(ret) > 1:
asns = set(x[1] for x in ret)
if len(asns) == 1:
ret = [(list(prefix), list(asns)[0])]
return ret
return recurse(self._trie)
def _to_entries_minimal(self, fill: bool = False) -> list[ASNEntry]:
"""Convert a trie to a minimal list of ASNEntry objects, exploiting overlap."""
prefix : list[bool] = []
def recurse(node: list) -> (tuple[dict[Optional[int], list[ASNEntry]], bool]):
if len(node) == 1 and node[0] == 0:
return {None if fill else 0: []}, True
if len(node) == 1:
return {node[0]: [], None: [(list(prefix), node[0])]}, False
ret: dict[Optional[int], list[ASNEntry]] = {}
prefix.append(False)
left, lhole = recurse(node[0])
prefix[-1] = True
right, rhole = recurse(node[1])
prefix.pop()
hole = not fill and (lhole or rhole)
def candidate(ctx: Optional[int], res0: Optional[list[ASNEntry]],
res1: Optional[list[ASNEntry]]):
if res0 is not None and res1 is not None:
if ctx not in ret or len(res0) + len(res1) < len(ret[ctx]):
ret[ctx] = res0 + res1
for ctx in set(left) | set(right):
candidate(ctx, left.get(ctx), right.get(ctx))
candidate(ctx, left.get(None), right.get(ctx))
candidate(ctx, left.get(ctx), right.get(None))
if not hole:
for ctx in list(ret):
if ctx is not None:
candidate(None, [(list(prefix), ctx)], ret[ctx])
if None in ret:
ret = {ctx:entries for ctx, entries in ret.items()
if ctx is None or len(entries) < len(ret[None])}
if hole:
ret = {ctx:entries for ctx, entries in ret.items() if ctx is None or ctx == 0}
return ret, hole
res, _ = recurse(self._trie)
return res[0] if 0 in res else res[None]
def __str__(self) -> str:
"""Convert this ASMap object to a string containing Python code constructing it."""
return f"ASMap({self._trie})"
def to_entries(self, overlapping: bool = True, fill: bool = False) -> list[ASNEntry]:
"""
Convert the mappings in this ASMap object to a list of ASNEntry objects.
Arguments:
overlapping: Permit the subnets in the resulting ASNEntry to overlap.
Setting this can result in a shorter list.
fill: Permit the resulting ASNEntry objects to cover subnets that
are unassigned in this ASMap object. Setting this can
result in a shorter list.
"""
if overlapping:
return self._to_entries_minimal(fill)
return self._to_entries_flat(fill)
@staticmethod
def from_random(num_leaves: int = 10, max_asn: int = 6,
unassigned_prob: float = 0.5) -> "ASMap":
"""
Construct a random ASMap object, with specified:
- Number of leaves in its trie (at least 1)
- Maximum ASN value (at least 1)
- Probability for leaf nodes to be unassigned
The number of leaves in the resulting object may be less than what is
requested. This method is mostly intended for testing.
"""
assert num_leaves >= 1
assert max_asn >= 1 or unassigned_prob == 1
assert _CODER_ASN.can_encode(max_asn)
assert 0.0 <= unassigned_prob <= 1.0
trie: list = []
leaves = [trie]
ret = ASMap()
for i in range(1, num_leaves):
idx = random.randrange(i)
leaf = leaves[idx]
lastleaf = leaves.pop()
if idx + 1 < i:
leaves[idx] = lastleaf
leaf.append([])
leaf.append([])
leaves.append(leaf[0])
leaves.append(leaf[1])
for leaf in leaves:
if random.random() >= unassigned_prob:
leaf.append(random.randrange(1, max_asn + 1))
else:
leaf.append(0)
#pylint: disable=protected-access
ret._set_trie(trie)
return ret
def _to_binnode(self, fill: bool = False) -> _BinNode:
"""Convert a trie to a _BinNode object."""
def recurse(node: list) -> tuple[dict[Optional[int], _BinNode], bool]:
if len(node) == 1 and node[0] == 0:
return {(None if fill else 0): _BinNode.make_end()}, True
if len(node) == 1:
return {None: _BinNode.make_leaf(node[0]), node[0]: _BinNode.make_end()}, False
ret: dict[Optional[int], _BinNode] = {}
left, lhole = recurse(node[0])
right, rhole = recurse(node[1])
hole = (lhole or rhole) and not fill
def candidate(ctx: Optional[int], arg1, arg2, func: Callable):
if arg1 is not None and arg2 is not None:
cand = func(arg1, arg2)
if ctx not in ret or cand.size < ret[ctx].size:
ret[ctx] = cand
for ctx in set(left) | set(right):
candidate(ctx, left.get(ctx), right.get(ctx), _BinNode.make_branch)
candidate(ctx, left.get(None), right.get(ctx), _BinNode.make_branch)
candidate(ctx, left.get(ctx), right.get(None), _BinNode.make_branch)
if not hole:
for ctx in set(ret) - set([None]):
candidate(None, ctx, ret[ctx], _BinNode.make_default)
if None in ret:
ret = {ctx:enc for ctx, enc in ret.items()
if ctx is None or enc.size < ret[None].size}
if hole:
ret = {ctx:enc for ctx, enc in ret.items() if ctx is None or ctx == 0}
return ret, hole
res, _ = recurse(self._trie)
return res[0] if 0 in res else res[None]
@staticmethod
def _from_binnode(binnode: _BinNode) -> "ASMap":
"""Construct an ASMap object from a _BinNode. Internal use only."""
def recurse(node: _BinNode, default: int) -> list:
if node.ins == _Instruction.RETURN:
return [node.arg1]
if node.ins == _Instruction.JUMP:
return [recurse(node.arg1, default), recurse(node.arg2, default)]
if node.ins == _Instruction.MATCH:
val = node.arg1
sub = recurse(node.arg2, default)
while val >= 2:
bit = val & 1
val >>= 1
if bit:
sub = [[default], sub]
else:
sub = [sub, [default]]
return sub
assert node.ins == _Instruction.DEFAULT
return recurse(node.arg2, node.arg1)
ret = ASMap()
if binnode.ins != _Instruction.END:
#pylint: disable=protected-access
ret._set_trie(recurse(binnode, 0))
return ret
def to_binary(self, fill: bool = False) -> bytes:
"""
Convert this ASMap object to binary.
Argument:
fill: permit the resulting binary encoder to contain mappers for
unassigned subnets in this ASMap object. Doing so may
reduce the size of the encoding.
Returns:
A bytes object with the encoding of this ASMap object.
"""
bits: list[int] = []
def recurse(node: _BinNode) -> None:
_CODER_INS.encode(node.ins.value, bits)
if node.ins == _Instruction.RETURN:
_CODER_ASN.encode(node.arg1, bits)
elif node.ins == _Instruction.JUMP:
_CODER_JUMP.encode(node.arg1.size, bits)
recurse(node.arg1)
recurse(node.arg2)
elif node.ins == _Instruction.DEFAULT:
_CODER_ASN.encode(node.arg1, bits)
recurse(node.arg2)
else:
assert node.ins == _Instruction.MATCH
_CODER_MATCH.encode(node.arg1, bits)
recurse(node.arg2)
binnode = self._to_binnode(fill)
if binnode.ins != _Instruction.END:
recurse(binnode)
val = 0
nbits = 0
ret = []
for bit in bits:
val += (bit << nbits)
nbits += 1
if nbits == 8:
ret.append(val)
val = 0
nbits = 0
if nbits:
ret.append(val)
return bytes(ret)
@staticmethod
def from_binary(bindata: bytes) -> Optional["ASMap"]:
"""Decode an ASMap object from the provided binary encoding."""
bits: list[int] = []
for byte in bindata:
bits.extend((byte >> i) & 1 for i in range(8))
def recurse(bitpos: int) -> tuple[_BinNode, int]:
insval, bitpos = _CODER_INS.decode(bits, bitpos)
ins = _Instruction(insval)
if ins == _Instruction.RETURN:
asn, bitpos = _CODER_ASN.decode(bits, bitpos)
return _BinNode(ins, asn), bitpos
if ins == _Instruction.JUMP:
jump, bitpos = _CODER_JUMP.decode(bits, bitpos)
left, bitpos1 = recurse(bitpos)
if bitpos1 != bitpos + jump:
raise ValueError("Inconsistent jump")
right, bitpos = recurse(bitpos1)
return _BinNode(ins, left, right), bitpos
if ins == _Instruction.MATCH:
match, bitpos = _CODER_MATCH.decode(bits, bitpos)
sub, bitpos = recurse(bitpos)
return _BinNode(ins, match, sub), bitpos
assert ins == _Instruction.DEFAULT
asn, bitpos = _CODER_ASN.decode(bits, bitpos)
sub, bitpos = recurse(bitpos)
return _BinNode(ins, asn, sub), bitpos
if len(bits) == 0:
binnode = _BinNode(_Instruction.END)
else:
try:
binnode, bitpos = recurse(0)
except (ValueError, IndexError):
return None
if bitpos < len(bits) - 7:
return None
if not all(bit == 0 for bit in bits[bitpos:]):
return None
return ASMap._from_binnode(binnode)
def __lt__(self, other: "ASMap") -> bool:
return self._trie < other._trie
def __eq__(self, other: object) -> bool:
if isinstance(other, ASMap):
return self._trie == other._trie
return False
def extends(self, req: "ASMap") -> bool:
"""Determine whether this matches req for all subranges where req is assigned."""
def recurse(actual: list, require: list) -> bool:
if len(require) == 1 and require[0] == 0:
return True
if len(require) == 1:
if len(actual) == 1:
return bool(require[0] == actual[0])
return recurse(actual[0], require) and recurse(actual[1], require)
if len(actual) == 2:
return recurse(actual[0], require[0]) and recurse(actual[1], require[1])
return recurse(actual, require[0]) and recurse(actual, require[1])
assert isinstance(req, ASMap)
#pylint: disable=protected-access
return recurse(self._trie, req._trie)
def diff(self, other: "ASMap") -> list[ASNDiff]:
"""Compute the diff from self to other."""
prefix: list[bool] = []
ret: list[ASNDiff] = []
def recurse(old_node: list, new_node: list):
if len(old_node) == 1 and len(new_node) == 1:
if old_node[0] != new_node[0]:
ret.append((list(prefix), old_node[0], new_node[0]))
else:
old_left: list = old_node if len(old_node) == 1 else old_node[0]
old_right: list = old_node if len(old_node) == 1 else old_node[1]
new_left: list = new_node if len(new_node) == 1 else new_node[0]
new_right: list = new_node if len(new_node) == 1 else new_node[1]
prefix.append(False)
recurse(old_left, new_left)
prefix[-1] = True
recurse(old_right, new_right)
prefix.pop()
assert isinstance(other, ASMap)
#pylint: disable=protected-access
recurse(self._trie, other._trie)
return ret
def __copy__(self) -> "ASMap":
"""Construct a copy of this ASMap object. Its state will not be shared."""
ret = ASMap()
#pylint: disable=protected-access
ret._set_trie(copy.deepcopy(self._trie))
return ret
def __deepcopy__(self, _) -> "ASMap":
# ASMap objects do not allow sharing of the _trie member, so we don't need the memoization.
return self.__copy__()
class TestASMap(unittest.TestCase):
"""Unit tests for this module."""
def test_ipv6_prefix_roundtrips(self) -> None:
"""Test that random IPv6 network ranges roundtrip through prefix encoding."""
for _ in range(20):
net_bits = random.getrandbits(128)
for prefix_len in range(0, 129):
masked_bits = (net_bits >> (128 - prefix_len)) << (128 - prefix_len)
net = ipaddress.IPv6Network((masked_bits.to_bytes(16, 'big'), prefix_len))
prefix = net_to_prefix(net)
self.assertTrue(len(prefix) <= 128)
net2 = prefix_to_net(prefix)
self.assertEqual(net, net2)
def test_ipv4_prefix_roundtrips(self) -> None:
"""Test that random IPv4 network ranges roundtrip through prefix encoding."""
for _ in range(100):
net_bits = random.getrandbits(32)
for prefix_len in range(0, 33):
masked_bits = (net_bits >> (32 - prefix_len)) << (32 - prefix_len)
net = ipaddress.IPv4Network((masked_bits.to_bytes(4, 'big'), prefix_len))
prefix = net_to_prefix(net)
self.assertTrue(32 <= len(prefix) <= 128)
net2 = prefix_to_net(prefix)
self.assertEqual(net, net2)
def test_asmap_roundtrips(self) -> None:
"""Test case that verifies random ASMap objects roundtrip to/from entries/binary."""
# Iterate over the number of leaves the random test ASMap objects have.
for leaves in range(1, 20):
# Iterate over the number of bits in the AS numbers used.
for asnbits in range(0, 24):
# Iterate over the probability that leaves are unassigned.
for pct in range(101):
# Construct a random ASMap object according to the above parameters.
asmap = ASMap.from_random(num_leaves=leaves, max_asn=1 + (1 << asnbits),
unassigned_prob=0.01 * pct)
# Run tests for to_entries and construction from those entries, both
# for overlapping and non-overlapping ones.
for overlapping in [False, True]:
entries = asmap.to_entries(overlapping=overlapping, fill=False)
random.shuffle(entries)
asmap2 = ASMap(entries)
assert asmap2 is not None
self.assertEqual(asmap2, asmap)
entries = asmap.to_entries(overlapping=overlapping, fill=True)
random.shuffle(entries)
asmap2 = ASMap(entries)
assert asmap2 is not None
self.assertTrue(asmap2.extends(asmap))
# Run tests for to_binary and construction from binary.
enc = asmap.to_binary(fill=False)
asmap3 = ASMap.from_binary(enc)
assert asmap3 is not None
self.assertEqual(asmap3, asmap)
enc = asmap.to_binary(fill=True)
asmap3 = ASMap.from_binary(enc)
assert asmap3 is not None
self.assertTrue(asmap3.extends(asmap))
def test_patching(self) -> None:
"""Test behavior of update, lookup, extends, and diff."""
#pylint: disable=too-many-locals,too-many-nested-blocks
# Iterate over the number of leaves the random test ASMap objects have.
for leaves in range(1, 20):
# Iterate over the number of bits in the AS numbers used.
for asnbits in range(0, 10):
# Iterate over the probability that leaves are unassigned.
for pct in range(0, 101):
# Construct a random ASMap object according to the above parameters.
asmap = ASMap.from_random(num_leaves=leaves, max_asn=1 + (1 << asnbits),
unassigned_prob=0.01 * pct)
# Make a copy of that asmap object to which patches will be applied.
# It starts off being equal to asmap.
patched = copy.copy(asmap)
# Keep a list of patches performed.
patches: list[ASNEntry] = []
# Initially there cannot be any difference.
self.assertEqual(asmap.diff(patched), [])
# Make 5 patches, each building on top of the previous ones.
for _ in range(0, 5):
# Construct a random path and new ASN to assign it to, apply it to patched,
# and remember it in patches.
pathlen = random.randrange(5)
path = [random.getrandbits(1) != 0 for _ in range(pathlen)]
newasn = random.randrange(1 + (1 << asnbits))
patched.update(path, newasn)
patches = [(path, newasn)] + patches
# Compute the diff, and whether asmap extends patched, and the other way
# around.
diff = asmap.diff(patched)
self.assertEqual(asmap == patched, len(diff) == 0)
extends = asmap.extends(patched)
back_extends = patched.extends(asmap)
# Determine whether those extends results are consistent with the diff
# result.
self.assertEqual(extends, all(d[2] == 0 for d in diff))
self.assertEqual(back_extends, all(d[1] == 0 for d in diff))
# For every diff found:
for path, old_asn, new_asn in diff:
# Verify asmap and patched actually differ there.
self.assertTrue(old_asn != new_asn)
self.assertEqual(asmap.lookup(path), old_asn)
self.assertEqual(patched.lookup(path), new_asn)
for _ in range(2):
# Extend the path far enough that it's smaller than any mapped
# range, and check the lookup holds there too.
spec_path = list(path)
while len(spec_path) < 32:
spec_path.append(random.getrandbits(1) != 0)
self.assertEqual(asmap.lookup(spec_path), old_asn)
self.assertEqual(patched.lookup(spec_path), new_asn)
# Search through the list of performed patches to find the last one
# applying to the extended path (note that patches is in reverse
# order, so the first match should work).
found = False
for patch_path, patch_asn in patches:
if spec_path[:len(patch_path)] == patch_path:
# When found, it must match whatever the result was patched
# to.
self.assertEqual(new_asn, patch_asn)
found = True
break
# And such a patch must exist.
self.assertTrue(found)
if __name__ == '__main__':
unittest.main()
| 0 |
bitcoin/contrib | bitcoin/contrib/seeds/README.md | # Seeds
Utility to generate the seeds.txt list that is compiled into the client
(see [src/chainparamsseeds.h](/src/chainparamsseeds.h) and other utilities in [contrib/seeds](/contrib/seeds)).
Be sure to update `PATTERN_AGENT` in `makeseeds.py` to include the current version,
and remove old versions as necessary (at a minimum whenever `GetDesirableServiceFlags`
changes its default return value, since those are the service flags with which seeds
are added to addrman).
The seeds compiled into the release are created from sipa's DNS seed and AS map
data. Run the following commands from the `/contrib/seeds` directory:
```
curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt
curl https://bitcoin.sipa.be/asmap-filled.dat > asmap-filled.dat
python3 makeseeds.py -a asmap-filled.dat -s seeds_main.txt > nodes_main.txt
cat nodes_main_manual.txt >> nodes_main.txt
python3 generate-seeds.py . > ../../src/chainparamsseeds.h
```
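The `asmap.py` module in this directory bundles its own unit tests; as a quick
sanity check (assuming a recent Python 3, standard library only), you can run it
directly:

```
python3 asmap.py
```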
| 0 |
bitcoin/contrib | bitcoin/contrib/seeds/nodes_test.txt | # List of fixed seed nodes for testnet
# Onion nodes, last verified 2022-08 for minimal torv3 bootstrap support
24j74ahq6ed4wmfrghdwroyfzimlkhnrb7zh4zw3vl2allzxbjrhaqid.onion:18333
2fy74te65gm3c3gv3u5mhwdudvbdfh6k5fdz4gduimrltjjrxftbxrqd.onion:18333
2lsncqdflwk272dhydrxf7ikfy23ppnmm54dnynyxiym6lqf3wowrmqd.onion:18333
33o6qaidta7s2pmltet6vynd337vamgcifhh44rehwwxqpflcjt2njid.onion:18333
3oo6bsc5mvf6a6ypmoaikilta6ka7mbdhdwhrnqhuhjlbaxyedvfvaqd.onion:18333
3pe3fyklipy4sppkkgnhc22kcxtt57uler5kv72t676bbrwmcseo5qad.onion:18333
4u4mcz2sfvxs7pwcwncswgmmcdzqtzjx7ztfo332jv4pqucb22ikdhad.onion:18333
5v3i2kfqiqwp75gznjoptss7qgrcgseceqxpzpqkd34qeqzrg726i7id.onion:18333
5zlrxk6q24t4vz5k4ie7gtuasdjavhoelhinzimxbfhc77u7vafipsid.onion:18333
67s3af64ehw7xnxv422axm7tns4d6kutrftc6bjq375n74q3kj4pp7ad.onion:18333
6a4ony53julvnufo632ktgmwvhupz63wbdwx7n7qudjy32qyq6gm3bqd.onion:18333
6ftyg3nhc6tn2hyzls6zfdsfbroczhkxtdqumqb5q4yafhy5rdpapbid.onion:18333
7554uw5djruh34j5ddx3iprzgqgzypcjtptwoldymfbgoywqcw2wiwyd.onion:18333
766lozlabxaqjpbqsvt6sn3c65n6gkwwhoxyvggj7nfwnmw4cpaoccad.onion:18333
7blv5abnytdf47yvbhxmykprmvjryqob65i2jmdwq3rrajcn2iiysbqd.onion:18333
7v2ja4igx4v5y2jr6jrr6gaxohjhlzhvgwe4avlraxchozf7ea3kruqd.onion:18333
7zgbmtzxow2oevd5aaqtsormw7ujv4zprl3oi2355immhq4gk7cyw5ad.onion:18333
adstabjz7ec2y3jt4w2dvummowzv7g6m2f3kajeejffuaz7ojwj6epqd.onion:18333
aesy6tfufadkut6flu2bsqgnw2422ur2ynjalguxlzuzuktg3zehttqd.onion:18333
alxo32b5edi3bn2e224qrgytgxxpic4knyipvpdvctfsrvcaiq5lgeyd.onion:18333
aoeart34umoonvd2kbqr3bc4sweu6a4msh2gp4skyqvei3shzcxbgmyd.onion:18333
aprzvj7hgctsde4mkj3ewq35gvykspjvkqiygg7bpnw5tkvse2n7rhid.onion:18333
awpk6z3xghx6ozouhodcydaqtr6uzzbnw4creuix7mkupxoxlmhhspad.onion:18333
ayynqazucyh2jd5rehcfggmhunqpdwzlbhzbqgy6lj4ctz2ocj7chpid.onion:18333
b2ika53aqckv4gs7wmog3byrea2vfzm5p7ye33digcsmvvnpbyqmzoyd.onion:18333
be7zx3hh6dlahorlvsrrgqm4oahfrgqm2tbwnbd4u53ntu5f765n6hyd.onion:18333
bluk62wj24bsvdwh47muo54hhwsatkftiqxevt5kba7hstjoex6ueeyd.onion:18333
bubm6fiopfzkxqrfx6vqpioe5ahlhyubz57ogsqqy4ha5pnngiqlh6id.onion:18333
d3czabzjj57lgrsr5gawkjd7v3gznrqa7zyizqmk4lryascavmipnyad.onion:18333
ddj4cuvb32ve5chtp6jattcdnnmxmpoofjthzi7thgxxht7yqoetj3yd.onion:18333
dqhhlssfwmh3g6zhwxpcfbw64xz5rfikcglinbhoxv5ajv4qzicjyeid.onion:18333
drthcyb4x4rdfekw5g7xjogxi7aqoluilgulbgwvsme3nw3oibvchbad.onion:18333
dwb47cmqa2tjpmvjaear7gdcars2lez6niefhi4qf22qehtyta6577qd.onion:18333
e7tkrf54ng3q5vcn5gn77zwjwm74lkfav4mwdux3pvon6yvqg3tf46qd.onion:18333
etuymy47s3quepvdaoo72i5e5mc7uovrzu5m4jf5q6mwlwizoxy4xgid.onion:18333
fbimesnyhzubbzqc3uaufzkbyfmnkxvypoxaveaub7rzpzh2foxrn2yd.onion:18333
fzbrwmgwmko7quelrhfuskt3ijabac76zx7g52dfrevmhdkj6ivh7qyd.onion:18333
gy6nih4pmp5esyvvnhlj6qvk7zkbjuoswkxffyiip3dbkvsfxwz5zcqd.onion:18333
ha62ziqzqdogd75zg7lfh4fqrg3bim3cpqzyupo43w5pw4fen6nr2pyd.onion:18333
hacjjgj2mbqqrthzimmi6anvin7dljjhfl3ik6ebg3w3nmgsvr3ymmqd.onion:18333
hbkp5xwpqo4qm75kpglfrclyiuuvdgv7mtiqfys7oqks4dmpqgpeoeid.onion:18333
hqgoy62hoqjmz37brdfvoeov3cix5fixbqjoert4ydr6herg5oc3iwyd.onion:18333
hvbmmzvqrpgps2x5u4ip4ksf3e5m2fneac754gtnhjn2rsevni6cz3ad.onion:18333
hw3vzp32w4h6giplue6ix445oi6wt7gmeksrznb7tdfwhkgit7gnbbad.onion:18333
iddr66ewkhenivapgianudjkwqcp6dxtssg7ixrdot5az6uh7m5tmjqd.onion:18333
imya36iexiiiqrkwuxxcehnv4kg5shtirwd2vg4cnjy6lfjlph3fusqd.onion:18333
iuhhuocns7entrzlxsxktyz2ibs7hqgiggv6sauzqkzka6laslwz7oqd.onion:18333
ji5wmshokuc63eiulzlwj2zdvnligvrwfvvc76bice3tu43wfzvpmkyd.onion:18333
jjfuyj7krgzkmpxvn3b2j2hwlzkmze3ezy3ifwk7dnswwawgmzqhjrqd.onion:18333
jn2p4sgfphkxpow7kjrubrbqat77kkibzqkvuwhxyalcrazwmcqeaqyd.onion:18333
jrveyz4us6sog6e6czsvr5mvvhgzjgv4idbe4idrolmqeudvt5a2dgid.onion:18333
jsc4frvvnl2d3bhzyofsc72xpztgm23nl4fnb4dwkzsxr6fhij2q5iyd.onion:18333
klymxdvje7kccv3tznabo3udopsftkmjemkbi2urqxjm4hefaudejjyd.onion:18333
kwjxlauwjtecjfsiwopbl5pvn5n6z5rz76uk6osmlurd3uyuymcw7aid.onion:18333
lc7upz2srw2yhpcvwg4afy64ylcoo6mfwlttqj5ovuglqnhnohpi5iqd.onion:18333
lf3mpxfyjuovcqdvinl52pvdmmda6xqyfeiarlfamdjpgy3ouzmmlbyd.onion:18333
mc7k47ndjvvhcgs54wmjzxvate4rtuybbjoryikdssjhcxlx27psbyqd.onion:18333
mjbg3ggeuelmc7ixty3zjccyo2urg2uyherfqe7ytkm2ejkwlec7h6ad.onion:18333
nkyqozv6kdwi423s7s2mezzguf5bafot2a3hv4ed2dbvtblisdmad4qd.onion:18333
nvvqo4xxiwgb3y246jmcbuuveurfdq2zs3a5y7veqkeqv5jfhang7gyd.onion:18333
o6vfovqxz3oxszfppczpjejwouobztjrgvfojc3emvhan3bkyskzhuad.onion:18333
oaiw2lnhzgp5ry7ivzneuufmh7lfploquu2rjv5rozmlbefedsnxe5qd.onion:18333
oln7ybci53wk4g5n42nipyixvyjxbludsbrfsmhnirb6tk7ovlikd5id.onion:18333
otmfnhc6wrrbf2tpdy6zkisqc3r3urnsuowsnmatoto6yixaocnkseid.onion:18333
ovc6sajbqfcbwv3wrq7ylklu6q6prvisz4jr4lyycn4kgukzjfe4mjad.onion:18333
pm57didyzg5ljuvn5ufr5uun2iencuk3af2gzqc5zvgfh452c3rxtjyd.onion:18333
pmismhpwug34gnqzbutranvx2wjwbshyqj4un2dyzyuvak2eh55psfyd.onion:18333
polarisultijjhaku6z6u7jyboho5epdsg44ttebfaxmgau2z5sqolad.onion:18333
qe2jbe447he6panfvpyqhyntf7346gmuf55bxrmdzggmgwyjsyknhxyd.onion:18333
qz6yd5lsgdajcteoareeptwnipxsezyx5kks6ukpk5tvqinffzunqmyd.onion:18333
rp6pn3b3oesyr2giolbysbjhqeugxntsu7crnkth4y33ok4zvcl7yrqd.onion:18333
ujdchuw3hz5gkbouiv4p6pwbfdn7v4k6gluwvd4wiukqc7y7ow754uad.onion:18333
vctlwaqgmu53eutz2hewuakcipfgtyljsd7czut4dd62xr3rp6fqezad.onion:18333
vf5ur53tzmdtotvkndcgochklnuav7quqjvkc6mctqfvef6wnmn26mid.onion:18333
wnxgjgjgplv5iu4mssyuunycvku4qnqr5t4q6cfdt47k7uwrfifuirad.onion:18333
wpkbkdr7clw7zk3jkwiult6bf422j54u77ml4rgig2xq7icogyrcspid.onion:18333
wzpdt24tdark26eugredddorik3tqwcj5ialtt2yim4ceiuiq7phkyqd.onion:18333
xgapnikkbldoggjh5ewxkyauhuwnvf3xkspxroe3ojvfrk4lswkyx5yd.onion:18333
xkvzdhcirontixbq6pjhru57bf4sgtqylvphk25csfrsy5p5ay3oc3yd.onion:18333
xnipauenw5wnjb2zbx6v6umgvbb3g6xhf5kjo7pnyn5tdzvzaxtzicid.onion:18333
yda7kwpii33j2qpq32ftf6lp22znknswipjwaccvsqj7l337jvfesnid.onion:18333
z3j5foswuhpmtrg3kb56stkzmuoaesvd5jz3eztq46c4cidapglcyuad.onion:18333
zcep44k7unwjm2wxty4ijh2e4fv5zgbrvwlctzyaqnrqhltjfzrtodad.onion:18333
zmvizz7fd5hdue6wt3lwqumd6qwt4ijymmmotfzh75curq3mzjm53hyd.onion:18333
zoaa3x7quyuijggii5zl4uyeioodudsgtr2uyv2qtdsslac5ukiwlxid.onion:18333
zovauxlorl5eswumbsoxv2m5y3sm3qlk7657dcpr2uld7xf35en46sqd.onion:18333
| 0 |
bitcoin/contrib | bitcoin/contrib/devtools/gen-bitcoin-conf.sh | #!/usr/bin/env bash
# Copyright (c) 2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
TOPDIR=${TOPDIR:-$(git rev-parse --show-toplevel)}
BUILDDIR=${BUILDDIR:-$TOPDIR}
BINDIR=${BINDIR:-$BUILDDIR/src}
BITCOIND=${BITCOIND:-$BINDIR/bitcoind}
SHARE_EXAMPLES_DIR=${SHARE_EXAMPLES_DIR:-$TOPDIR/share/examples}
EXAMPLE_CONF_FILE=${EXAMPLE_CONF_FILE:-$SHARE_EXAMPLES_DIR/bitcoin.conf}
[ ! -x "$BITCOIND" ] && echo "$BITCOIND not found or not executable." && exit 1
DIRTY=""
VERSION_OUTPUT=$($BITCOIND --version)
if [[ $VERSION_OUTPUT == *"dirty"* ]]; then
DIRTY="${DIRTY}${BITCOIND}\n"
fi
if [ -n "$DIRTY" ]
then
echo -e "WARNING: $BITCOIND was built from a dirty tree.\n"
echo -e "To safely generate a bitcoin.conf file, please commit your changes to $BITCOIND, rebuild, then run this script again.\n"
fi
echo 'Generating example bitcoin.conf file in share/examples/'
# create the directory, if it doesn't exist
mkdir -p "${SHARE_EXAMPLES_DIR}"
# create the header text
cat > "${EXAMPLE_CONF_FILE}" << 'EOF'
##
## bitcoin.conf configuration file.
## Generated by contrib/devtools/gen-bitcoin-conf.sh.
##
## Lines beginning with # are comments.
## All possible configuration options are provided. To use, copy this file
## to your data directory (default or specified by -datadir), uncomment
## options you would like to change, and save the file.
##
### Options
EOF
# parse the output from bitcoind --help
# adding newlines is a bit funky to ensure portability for BSD
# see here for more details: https://stackoverflow.com/a/24575385
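# Roughly, the stages below (a best-effort reading, not authoritative docs) do the
# following: drop everything up to and including the "-help" entry, turn each
# "  -option" line into a commented "#option" line, re-indent help text as "# "
# comments, append "=1" to bare flags that take no value, use awk to move each
# option line below its help text, and promote "Section:" headings to "### Section".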
${BITCOIND} --help \
| sed '1,/Print this help message and exit/d' \
| sed -E 's/^[[:space:]]{2}\-/#/' \
| sed -E 's/^[[:space:]]{7}/# /' \
| sed -E '/[=[:space:]]/!s/#.*$/&=1/' \
| awk '/^#[a-z]/{x=$0;next}{if (NF==0) print x"\n",x="";else print}' \
| sed 's,\(^[[:upper:]].*\)\:$,\
### \1,' \
| sed 's/[[:space:]]*$//' >> "${EXAMPLE_CONF_FILE}"
# create the footer text
cat >> "${EXAMPLE_CONF_FILE}" << 'EOF'
# [Sections]
# Most options will apply to all networks. To confine an option to a specific
# network, add it under the relevant section below.
#
# Note: If not specified under a network section, the options addnode, connect,
# port, bind, rpcport, rpcbind, and wallet will only apply to mainnet.
# Options for mainnet
[main]
# Options for testnet
[test]
# Options for signet
[signet]
# Options for regtest
[regtest]
EOF
| 0 |
bitcoin/contrib | bitcoin/contrib/devtools/split-debug.sh.in | #!/bin/sh
set -e
if [ $# -ne 3 ]; then
    echo "usage: $0 <input> <stripped-binary> <debug-binary>"
    exit 1
fi
@OBJCOPY@ --enable-deterministic-archives -p --only-keep-debug "$1" "$3"
@OBJCOPY@ --enable-deterministic-archives -p --strip-debug "$1" "$2"
@STRIP@ --enable-deterministic-archives -p -s "$2"
@OBJCOPY@ --enable-deterministic-archives -p --add-gnu-debuglink="$3" "$2"
| 0 |
bitcoin/contrib | bitcoin/contrib/devtools/test-symbol-check.py | #!/usr/bin/env python3
# Copyright (c) 2020-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Test script for symbol-check.py
'''
import os
import subprocess
import unittest
from utils import determine_wellknown_cmd
def call_symbol_check(cc: list[str], source, executable, options):
# This should behave the same as AC_TRY_LINK, so arrange well-known flags
# in the same order as autoconf would.
#
# See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for
# reference.
env_flags: list[str] = []
for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']:
env_flags += filter(None, os.environ.get(var, '').split(' '))
subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True)
p = subprocess.run([os.path.join(os.path.dirname(__file__), 'symbol-check.py'), executable], stdout=subprocess.PIPE, text=True)
os.remove(source)
os.remove(executable)
return (p.returncode, p.stdout.rstrip())
def get_machine(cc: list[str]):
p = subprocess.run([*cc,'-dumpmachine'], stdout=subprocess.PIPE, text=True)
return p.stdout.rstrip()
class TestSymbolChecks(unittest.TestCase):
def test_ELF(self):
source = 'test1.c'
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'gcc')
# -lutil is part of the libc6 package, so it's a safe bet that it's installed
# it's also out of context enough that it's unlikely to ever become a real dependency
source = 'test2.c'
executable = 'test2'
with open(source, 'w', encoding="utf8") as f:
f.write('''
#include <utmp.h>
int main()
{
login(0);
return 0;
}
''')
self.assertEqual(call_symbol_check(cc, source, executable, ['-lutil']),
(1, executable + ': libutil.so.1 is not in ALLOWED_LIBRARIES!\n' +
executable + ': failed LIBRARY_DEPENDENCIES'))
# finally, check a simple conforming binary
source = 'test3.c'
executable = 'test3'
with open(source, 'w', encoding="utf8") as f:
f.write('''
#include <stdio.h>
int main()
{
printf("42");
return 0;
}
''')
self.assertEqual(call_symbol_check(cc, source, executable, []),
(0, ''))
def test_MACHO(self):
source = 'test1.c'
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'clang')
with open(source, 'w', encoding="utf8") as f:
f.write('''
#include <expat.h>
int main()
{
XML_ExpatVersion();
return 0;
}
''')
self.assertEqual(call_symbol_check(cc, source, executable, ['-lexpat', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']),
(1, 'libexpat.1.dylib is not in ALLOWED_LIBRARIES!\n' +
f'{executable}: failed DYNAMIC_LIBRARIES MIN_OS SDK'))
source = 'test2.c'
executable = 'test2'
with open(source, 'w', encoding="utf8") as f:
f.write('''
#include <CoreGraphics/CoreGraphics.h>
int main()
{
CGMainDisplayID();
return 0;
}
''')
self.assertEqual(call_symbol_check(cc, source, executable, ['-framework', 'CoreGraphics', '-Wl,-platform_version','-Wl,macos', '-Wl,11.4', '-Wl,11.4']),
(1, f'{executable}: failed MIN_OS SDK'))
source = 'test3.c'
executable = 'test3'
with open(source, 'w', encoding="utf8") as f:
f.write('''
int main()
{
return 0;
}
''')
self.assertEqual(call_symbol_check(cc, source, executable, ['-Wl,-platform_version','-Wl,macos', '-Wl,11.0', '-Wl,11.4']),
(1, f'{executable}: failed SDK'))
def test_PE(self):
source = 'test1.c'
executable = 'test1.exe'
cc = determine_wellknown_cmd('CC', 'x86_64-w64-mingw32-gcc')
with open(source, 'w', encoding="utf8") as f:
f.write('''
#include <pdh.h>
int main()
{
PdhConnectMachineA(NULL);
return 0;
}
''')
self.assertEqual(call_symbol_check(cc, source, executable, ['-lpdh', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']),
(1, 'pdh.dll is not in ALLOWED_LIBRARIES!\n' +
executable + ': failed DYNAMIC_LIBRARIES'))
source = 'test2.c'
executable = 'test2.exe'
with open(source, 'w', encoding="utf8") as f:
f.write('''
int main()
{
return 0;
}
''')
self.assertEqual(call_symbol_check(cc, source, executable, ['-Wl,--major-subsystem-version', '-Wl,9', '-Wl,--minor-subsystem-version', '-Wl,9']),
(1, executable + ': failed SUBSYSTEM_VERSION'))
source = 'test3.c'
executable = 'test3.exe'
with open(source, 'w', encoding="utf8") as f:
f.write('''
#include <combaseapi.h>
int main()
{
CoFreeUnusedLibrariesEx(0,0);
return 0;
}
''')
self.assertEqual(call_symbol_check(cc, source, executable, ['-lole32', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']),
(0, ''))
if __name__ == '__main__':
unittest.main()
| 0 |
bitcoin/contrib | bitcoin/contrib/devtools/copyright_header.py | #!/usr/bin/env python3
# Copyright (c) 2016-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
EXCLUDE = [
# auto generated:
'src/qt/bitcoinstrings.cpp',
'src/chainparamsseeds.h',
# other external copyrights:
'src/reverse_iterator.h',
'src/test/fuzz/FuzzedDataProvider.h',
'src/tinyformat.h',
'src/bench/nanobench.h',
'test/functional/test_framework/bignum.py',
# python init:
'*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))
EXCLUDE_DIRS = [
# git subtrees
"src/crypto/ctaes/",
"src/leveldb/",
"src/minisketch",
"src/secp256k1/",
"src/crc32c/",
]
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.mm', '*.py', '*.sh', '*.bash-completion']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))
def applies_to_file(filename):
for excluded_dir in EXCLUDE_DIRS:
if filename.startswith(excluded_dir):
return False
return ((EXCLUDE_COMPILED.match(filename) is None) and
(INCLUDE_COMPILED.match(filename) is not None))
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files --full-name'.split(' ')
GIT_TOPLEVEL_CMD = 'git rev-parse --show-toplevel'.split(' ')
def call_git_ls(base_directory):
out = subprocess.check_output([*GIT_LS_CMD, base_directory])
return [f for f in out.decode("utf-8").split('\n') if f != '']
def call_git_toplevel():
"Returns the absolute path to the project root"
return subprocess.check_output(GIT_TOPLEVEL_CMD).strip().decode("utf-8")
def get_filenames_to_examine(base_directory):
"Returns an array of absolute paths to any project files in the base_directory that pass the include/exclude filters"
root = call_git_toplevel()
filenames = call_git_ls(base_directory)
return sorted([os.path.join(root, filename) for filename in filenames if
applies_to_file(filename)])
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
COPYRIGHT_WITH_C = r'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
ANY_YEAR_STYLE))
ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
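# For example, both "Copyright (c) 2009-2022" and "Copyright 2015, 2016" are
# counted by ANY_COPYRIGHT_COMPILED, since it accepts either copyright style
# combined with either a year range or a comma-separated year list.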
def compile_copyright_regex(copyright_style, year_style, name):
return re.compile(r'%s %s,? %s( +\*)?\n' % (copyright_style, year_style, name))
EXPECTED_HOLDER_NAMES = [
r"Satoshi Nakamoto",
r"The Bitcoin Core developers",
r"BitPay Inc\.",
r"University of Illinois at Urbana-Champaign\.",
r"Pieter Wuille",
r"Wladimir J\. van der Laan",
r"Jeff Garzik",
r"Jan-Klaas Kollhof",
r"ArtForz -- public domain half-a-node",
r"Intel Corporation ?",
r"The Zcash developers",
r"Jeremy Rubin",
]
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
DOMINANT_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
YEAR_LIST_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
WITHOUT_C_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
return len(ANY_COPYRIGHT_COMPILED.findall(contents))
def file_has_dominant_style_copyright_for_holder(contents, holder_name):
match = DOMINANT_STYLE_COMPILED[holder_name].search(contents)
return match is not None
def file_has_year_list_style_copyright_for_holder(contents, holder_name):
match = YEAR_LIST_STYLE_COMPILED[holder_name].search(contents)
return match is not None
def file_has_without_c_style_copyright_for_holder(contents, holder_name):
match = WITHOUT_C_STYLE_COMPILED[holder_name].search(contents)
return match is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
    with open(filename, 'r', encoding="utf8") as f:
        return f.read()
def gather_file_info(filename):
info = {}
info['filename'] = filename
c = read_file(filename)
info['contents'] = c
info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
info['classified_copyrights'] = 0
info['dominant_style'] = {}
info['year_list_style'] = {}
info['without_c_style'] = {}
for holder_name in EXPECTED_HOLDER_NAMES:
has_dominant_style = (
file_has_dominant_style_copyright_for_holder(c, holder_name))
has_year_list_style = (
file_has_year_list_style_copyright_for_holder(c, holder_name))
has_without_c_style = (
file_has_without_c_style_copyright_for_holder(c, holder_name))
info['dominant_style'][holder_name] = has_dominant_style
info['year_list_style'][holder_name] = has_year_list_style
info['without_c_style'][holder_name] = has_without_c_style
if has_dominant_style or has_year_list_style or has_without_c_style:
info['classified_copyrights'] = info['classified_copyrights'] + 1
return info
################################################################################
# report execution
################################################################################
SEPARATOR = '-' * 79
def print_filenames(filenames, verbose):
if not verbose:
return
for filename in filenames:
print("\t%s" % filename)
def print_report(file_infos, verbose):
print(SEPARATOR)
examined = [i['filename'] for i in file_infos]
print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
len(examined))
print_filenames(examined, verbose)
print(SEPARATOR)
print('')
zero_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 0]
print("%4d with zero copyrights" % len(zero_copyrights))
print_filenames(zero_copyrights, verbose)
one_copyright = [i['filename'] for i in file_infos if
i['all_copyrights'] == 1]
print("%4d with one copyright" % len(one_copyright))
print_filenames(one_copyright, verbose)
two_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 2]
print("%4d with two copyrights" % len(two_copyrights))
print_filenames(two_copyrights, verbose)
three_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 3]
print("%4d with three copyrights" % len(three_copyrights))
print_filenames(three_copyrights, verbose)
four_or_more_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] >= 4]
print("%4d with four or more copyrights" % len(four_or_more_copyrights))
print_filenames(four_or_more_copyrights, verbose)
print('')
print(SEPARATOR)
print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
'"<year>" or "<startYear>-<endYear>":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
dominant_style = [i['filename'] for i in file_infos if
i['dominant_style'][holder_name]]
if len(dominant_style) > 0:
print("%4d with '%s'" % (len(dominant_style),
holder_name.replace('\n', '\\n')))
print_filenames(dominant_style, verbose)
print('')
print(SEPARATOR)
print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
'"<year1>, <year2>, ...":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
year_list_style = [i['filename'] for i in file_infos if
i['year_list_style'][holder_name]]
if len(year_list_style) > 0:
print("%4d with '%s'" % (len(year_list_style),
holder_name.replace('\n', '\\n')))
print_filenames(year_list_style, verbose)
print('')
print(SEPARATOR)
print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
'"<startYear>-<endYear>":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
without_c_style = [i['filename'] for i in file_infos if
i['without_c_style'][holder_name]]
if len(without_c_style) > 0:
print("%4d with '%s'" % (len(without_c_style),
holder_name.replace('\n', '\\n')))
print_filenames(without_c_style, verbose)
print('')
print(SEPARATOR)
unclassified_copyrights = [i['filename'] for i in file_infos if
i['classified_copyrights'] < i['all_copyrights']]
print("%d with unexpected copyright holder names" %
len(unclassified_copyrights))
print_filenames(unclassified_copyrights, verbose)
print(SEPARATOR)
def exec_report(base_directory, verbose):
filenames = get_filenames_to_examine(base_directory)
file_infos = [gather_file_info(f) for f in filenames]
print_report(file_infos, verbose)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
$ ./copyright_header.py report <base_directory> [verbose]
Arguments:
<base_directory> - The base directory of a bitcoin source code repository.
[verbose] - Includes a list of every file of each subcategory in the report.
"""
def report_cmd(argv):
if len(argv) == 2:
sys.exit(REPORT_USAGE)
base_directory = argv[2]
if not os.path.exists(base_directory):
sys.exit("*** bad <base_directory>: %s" % base_directory)
if len(argv) == 3:
verbose = False
elif argv[3] == 'verbose':
verbose = True
else:
        sys.exit("*** unknown argument: %s" % argv[3])
exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"
def call_git_log(filename):
out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
return out.decode("utf-8").split('\n')
def get_git_change_years(filename):
    git_log_lines = call_git_log(filename)
    if git_log_lines == ['']:
        # No git history for the file; fall back to the current year.
        return [str(datetime.date.today().year)]
    # timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
    return [line.split(' ')[0].split('-')[0] for line in git_log_lines]
def get_most_recent_git_change_year(filename):
return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
with open(filename, 'r', encoding="utf8") as f:
file_lines = f.readlines()
return file_lines
def write_file_lines(filename, file_lines):
with open(filename, 'w', encoding="utf8") as f:
f.write(''.join(file_lines))
################################################################################
# update header years execution
################################################################################
COPYRIGHT = r'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Bitcoin Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
def get_updatable_copyright_line(file_lines):
    for index, line in enumerate(file_lines):
        if UPDATEABLE_LINE_COMPILED.search(line) is not None:
            return index, line
    return None, None
def parse_year_range(year_range):
year_split = year_range.split('-')
start_year = year_split[0]
if len(year_split) == 1:
return start_year, start_year
return start_year, year_split[1]
def year_range_to_str(start_year, end_year):
if start_year == end_year:
return start_year
return "%s-%s" % (start_year, end_year)
def create_updated_copyright_line(line, last_git_change_year):
copyright_splitter = 'Copyright (c) '
copyright_split = line.split(copyright_splitter)
# Preserve characters on line that are ahead of the start of the copyright
# notice - they are part of the comment block and vary from file-to-file.
before_copyright = copyright_split[0]
after_copyright = copyright_split[1]
space_split = after_copyright.split(' ')
year_range = space_split[0]
start_year, end_year = parse_year_range(year_range)
if end_year >= last_git_change_year:
return line
return (before_copyright + copyright_splitter +
year_range_to_str(start_year, last_git_change_year) + ' ' +
' '.join(space_split[1:]))
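# Example (illustrative): with last_git_change_year "2023", the line
#   "// Copyright (c) 2009-2021 The Bitcoin Core developers\n"
# is rewritten to
#   "// Copyright (c) 2009-2023 The Bitcoin Core developers\n"
# while a line whose end year is already >= "2023" is returned unchanged.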
def update_updatable_copyright(filename):
file_lines = read_file_lines(filename)
index, line = get_updatable_copyright_line(file_lines)
if not line:
print_file_action_message(filename, "No updatable copyright.")
return
last_git_change_year = get_most_recent_git_change_year(filename)
new_line = create_updated_copyright_line(line, last_git_change_year)
if line == new_line:
print_file_action_message(filename, "Copyright up-to-date.")
return
file_lines[index] = new_line
write_file_lines(filename, file_lines)
print_file_action_message(filename,
"Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
for filename in get_filenames_to_examine(base_directory):
update_updatable_copyright(filename)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Bitcoin Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Bitcoin Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a bitcoin source code repository.
"""
def print_file_action_message(filename, action):
print("%-52s %s" % (filename, action))
def update_cmd(argv):
if len(argv) != 3:
sys.exit(UPDATE_USAGE)
base_directory = argv[2]
if not os.path.exists(base_directory):
sys.exit("*** bad base_directory: %s" % base_directory)
exec_update_header_year(base_directory)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
lines = header.split('\n')[1:-1]
lines[0] = lines[0] % year_range_to_str(start_year, end_year)
return [line + '\n' for line in lines]
CPP_HEADER = '''
// Copyright (c) %s The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_cpp_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(CPP_HEADER, start_year, end_year))
SCRIPT_HEADER = '''
# Copyright (c) %s The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_script_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(SCRIPT_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
years = get_git_change_years(filename)
return min(years), max(years)
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
index, _ = get_updatable_copyright_line(file_lines)
return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
if len(file_lines) < 1:
return False
if len(file_lines[0]) <= 2:
return False
return file_lines[0][:2] == '#!'
def insert_script_header(filename, file_lines, start_year, end_year):
if file_has_hashbang(file_lines):
insert_idx = 1
else:
insert_idx = 0
header_lines = get_script_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(insert_idx, line)
write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
file_lines.insert(0, '\n')
header_lines = get_cpp_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(0, line)
write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
file_lines = read_file_lines(filename)
if file_already_has_core_copyright(file_lines):
sys.exit('*** %s already has a copyright by The Bitcoin Core developers'
% (filename))
start_year, end_year = get_git_change_year_range(filename)
if style in ['python', 'shell']:
insert_script_header(filename, file_lines, start_year, end_year)
else:
insert_cpp_header(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Bitcoin Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Bitcoin Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the bitcoin repository.
"""
def insert_cmd(argv):
if len(argv) != 3:
sys.exit(INSERT_USAGE)
filename = argv[2]
if not os.path.isfile(filename):
sys.exit("*** bad filename: %s" % filename)
_, extension = os.path.splitext(filename)
if extension not in ['.h', '.cpp', '.cc', '.c', '.py', '.sh']:
sys.exit("*** cannot insert for file extension %s" % extension)
if extension == '.py':
style = 'python'
elif extension == '.sh':
style = 'shell'
else:
style = 'cpp'
exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Bitcoin
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""
SUBCOMMANDS = ['report', 'update', 'insert']
if __name__ == "__main__":
if len(sys.argv) == 1:
sys.exit(USAGE)
subcommand = sys.argv[1]
if subcommand not in SUBCOMMANDS:
sys.exit(USAGE)
if subcommand == 'report':
report_cmd(sys.argv)
elif subcommand == 'update':
update_cmd(sys.argv)
elif subcommand == 'insert':
insert_cmd(sys.argv)
bitcoin/contrib/devtools/clang-format-diff.py
#!/usr/bin/env python3
#
#===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License.
#
# ============================================================
#
# University of Illinois/NCSA
# Open Source License
#
# Copyright (c) 2007-2015 University of Illinois at Urbana-Champaign.
# All rights reserved.
#
# Developed by:
#
# LLVM Team
#
# University of Illinois at Urbana-Champaign
#
# http://llvm.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal with
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the names of the LLVM Team, University of Illinois at
# Urbana-Champaign, nor the names of its contributors may be used to
# endorse or promote products derived from this Software without specific
# prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
# SOFTWARE.
#
# ============================================================
#
#===------------------------------------------------------------------------===#
r"""
ClangFormat Diff Reformatter
============================
This script reads input from a unified diff and reformats all the changed
lines. This is useful to reformat all the lines touched by a specific patch.
Example usage for git/svn users:
git diff -U0 HEAD^ | clang-format-diff.py -p1 -i
svn diff --diff-cmd=diff -x-U0 | clang-format-diff.py -i
"""
import argparse
import difflib
import io
import re
import subprocess
import sys
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
def main():
parser = argparse.ArgumentParser(description=
'Reformat changed lines in diff. Without -i '
'option just output the diff that would be '
'introduced.')
parser.add_argument('-i', action='store_true', default=False,
help='apply edits to files instead of displaying a diff')
parser.add_argument('-p', metavar='NUM', default=0,
help='strip the smallest prefix containing P slashes')
parser.add_argument('-regex', metavar='PATTERN', default=None,
help='custom pattern selecting file paths to reformat '
'(case sensitive, overrides -iregex)')
parser.add_argument('-iregex', metavar='PATTERN', default=
r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc|js|ts|proto'
r'|protodevel|java)',
help='custom pattern selecting file paths to reformat '
'(case insensitive, overridden by -regex)')
parser.add_argument('-sort-includes', action='store_true', default=False,
help='let clang-format sort include blocks')
parser.add_argument('-v', '--verbose', action='store_true',
help='be more verbose, ineffective without -i')
args = parser.parse_args()
# Extract changed lines for each file.
filename = None
lines_by_file = {}
for line in sys.stdin:
match = re.search(r'^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line)
if match:
filename = match.group(2)
if filename is None:
continue
if args.regex is not None:
if not re.match('^%s$' % args.regex, filename):
continue
else:
if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
continue
match = re.search(r'^@@.*\+(\d+)(,(\d+))?', line)
if match:
start_line = int(match.group(1))
line_count = 1
if match.group(3):
line_count = int(match.group(3))
if line_count == 0:
continue
end_line = start_line + line_count - 1
lines_by_file.setdefault(filename, []).extend(
['-lines', str(start_line) + ':' + str(end_line)])
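  # Example (illustrative): the hunk header "@@ -10,0 +12,3 @@" marks lines
  # 12..14 of the post-image as changed, so the loop above records
  # ['-lines', '12:14'] for the current file, later handed to clang-format.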
# Reformat files containing changes in place.
for filename, lines in lines_by_file.items():
if args.i and args.verbose:
print('Formatting {}'.format(filename))
command = [binary, filename]
if args.i:
command.append('-i')
if args.sort_includes:
command.append('-sort-includes')
command.extend(lines)
command.extend(['-style=file', '-fallback-style=none'])
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=None,
stdin=subprocess.PIPE,
text=True)
stdout, stderr = p.communicate()
if p.returncode != 0:
sys.exit(p.returncode)
if not args.i:
with open(filename, encoding="utf8") as f:
code = f.readlines()
formatted_code = io.StringIO(stdout).readlines()
diff = difflib.unified_diff(code, formatted_code,
filename, filename,
'(before formatting)', '(after formatting)')
diff_string = ''.join(diff)
if len(diff_string) > 0:
sys.stdout.write(diff_string)
if __name__ == '__main__':
main()
bitcoin/contrib/devtools/symbol-check.py
#!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that release executables only contain certain symbols
and are only linked against allowed libraries.
Example usage:
find ../path/to/binaries -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import sys
import lief
# Debian 10 (Buster) EOL: 2024. https://wiki.debian.org/LTS
#
# - libgcc version 8.3.0 (https://packages.debian.org/search?suite=buster&arch=any&searchon=names&keywords=libgcc1)
# - libc version 2.28 (https://packages.debian.org/search?suite=buster&arch=any&searchon=names&keywords=libc6)
#
# Ubuntu 18.04 (Bionic) EOL: 2028. https://wiki.ubuntu.com/ReleaseTeam
#
# - libgcc version 8.4.0 (https://packages.ubuntu.com/bionic/libgcc1)
# - libc version 2.27 (https://packages.ubuntu.com/bionic/libc6)
#
# CentOS Stream 8 EOL: 2024. https://wiki.centos.org/About/Product
#
# - libgcc version 8.5.0 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/)
# - libc version 2.28 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/)
#
# See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html for more info.
MAX_VERSIONS = {
'GCC': (4,3,0),
'GLIBC': {
lief.ELF.ARCH.x86_64: (2,27),
lief.ELF.ARCH.ARM: (2,27),
lief.ELF.ARCH.AARCH64:(2,27),
lief.ELF.ARCH.PPC64: (2,27),
lief.ELF.ARCH.RISCV: (2,27),
},
'LIBATOMIC': (1,0),
'V': (0,5,0), # xkb (bitcoin-qt only)
}
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'environ', '_environ', '__environ', '_fini', '_init', 'stdin',
'stdout', 'stderr',
}
# Expected linker-loader names can be found here:
# https://sourceware.org/glibc/wiki/ABIList?action=recall&rev=16
ELF_INTERPRETER_NAMES: dict[lief.ELF.ARCH, dict[lief.ENDIANNESS, str]] = {
lief.ELF.ARCH.x86_64: {
lief.ENDIANNESS.LITTLE: "/lib64/ld-linux-x86-64.so.2",
},
lief.ELF.ARCH.ARM: {
lief.ENDIANNESS.LITTLE: "/lib/ld-linux-armhf.so.3",
},
lief.ELF.ARCH.AARCH64: {
lief.ENDIANNESS.LITTLE: "/lib/ld-linux-aarch64.so.1",
},
lief.ELF.ARCH.PPC64: {
lief.ENDIANNESS.BIG: "/lib64/ld64.so.1",
lief.ENDIANNESS.LITTLE: "/lib64/ld64.so.2",
},
lief.ELF.ARCH.RISCV: {
lief.ENDIANNESS.LITTLE: "/lib/ld-linux-riscv64-lp64d.so.1",
},
}
ELF_ABIS: dict[lief.ELF.ARCH, dict[lief.ENDIANNESS, list[int]]] = {
lief.ELF.ARCH.x86_64: {
lief.ENDIANNESS.LITTLE: [3,2,0],
},
lief.ELF.ARCH.ARM: {
lief.ENDIANNESS.LITTLE: [3,2,0],
},
lief.ELF.ARCH.AARCH64: {
lief.ENDIANNESS.LITTLE: [3,7,0],
},
lief.ELF.ARCH.PPC64: {
lief.ENDIANNESS.LITTLE: [3,10,0],
lief.ENDIANNESS.BIG: [3,2,0],
},
lief.ELF.ARCH.RISCV: {
lief.ENDIANNESS.LITTLE: [4,15,0],
},
}
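# The triplets above are the minimum Linux kernel versions recorded in each
# binary's NT_GNU_ABI_TAG note; check_ELF_ABI below compares the note against
# these expected values.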
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
'libpthread.so.0', # threading
'libm.so.6', # math library
'libatomic.so.1',
'ld-linux-x86-64.so.2', # 64-bit dynamic linker
'ld-linux.so.2', # 32-bit dynamic linker
'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker
'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker
'ld64.so.1', # POWER64 ABIv1 dynamic linker
'ld64.so.2', # POWER64 ABIv2 dynamic linker
'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker
# bitcoin-qt only
'libxcb.so.1', # part of X11
'libxkbcommon.so.0', # keyboard keymapping
'libxkbcommon-x11.so.0', # keyboard keymapping
'libfontconfig.so.1', # font support
'libfreetype.so.6', # font parsing
'libdl.so.2', # programming interface to dynamic linker
'libxcb-icccm.so.4',
'libxcb-image.so.0',
'libxcb-shm.so.0',
'libxcb-keysyms.so.1',
'libxcb-randr.so.0',
'libxcb-render-util.so.0',
'libxcb-render.so.0',
'libxcb-shape.so.0',
'libxcb-sync.so.1',
'libxcb-xfixes.so.0',
'libxcb-xinerama.so.0',
'libxcb-xkb.so.1',
}
MACHO_ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
'libc++.1.dylib', # C++ Standard Library
'libSystem.B.dylib', # libc, libm, libpthread, libinfo
# bitcoin-qt only
'AppKit', # user interface
'ApplicationServices', # common application tasks.
'Carbon', # deprecated c back-compat API
'ColorSync',
'CoreFoundation', # low level func, data types
'CoreGraphics', # 2D rendering
'CoreServices', # operating system services
'CoreText', # interface for laying out text and handling fonts.
'CoreVideo', # video processing
'Foundation', # base layer functionality for apps/frameworks
'ImageIO', # read and write image file formats.
'IOKit', # user-space access to hardware devices and drivers.
'IOSurface', # cross process image/drawing buffers
'libobjc.A.dylib', # Objective-C runtime library
'Metal', # 3D graphics
'Security', # access control and authentication
'QuartzCore', # animation
}
PE_ALLOWED_LIBRARIES = {
'ADVAPI32.dll', # security & registry
'IPHLPAPI.DLL', # IP helper API
'KERNEL32.dll', # win32 base APIs
'msvcrt.dll', # C standard library for MSVC
'SHELL32.dll', # shell API
'WS2_32.dll', # sockets
# bitcoin-qt only
'dwmapi.dll', # desktop window manager
'GDI32.dll', # graphics device interface
'IMM32.dll', # input method editor
'NETAPI32.dll', # network management
'ole32.dll', # component object model
'OLEAUT32.dll', # OLE Automation API
'SHLWAPI.dll', # light weight shell API
'USER32.dll', # user interface
'USERENV.dll', # user management
'UxTheme.dll', # visual style
'VERSION.dll', # version checking
'WINMM.dll', # WinMM audio API
'WTSAPI32.dll', # Remote Desktop
}
def check_version(max_versions, version, arch) -> bool:
(lib, _, ver) = version.rpartition('_')
ver = tuple([int(x) for x in ver.split('.')])
    if lib not in max_versions:
return False
if isinstance(max_versions[lib], tuple):
return ver <= max_versions[lib]
else:
return ver <= max_versions[lib][arch]
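# Example (illustrative): an imported symbol versioned "GLIBC_2.30" is split by
# rpartition into lib 'GLIBC' and ver (2, 30); on x86_64 that exceeds the
# (2, 27) entry in MAX_VERSIONS above, so check_version returns False.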
def check_imported_symbols(binary) -> bool:
ok: bool = True
for symbol in binary.imported_symbols:
if not symbol.imported:
continue
version = symbol.symbol_version if symbol.has_version else None
if version:
aux_version = version.symbol_version_auxiliary.name if version.has_auxiliary_version else None
if aux_version and not check_version(MAX_VERSIONS, aux_version, binary.header.machine_type):
print(f'{filename}: symbol {symbol.name} from unsupported version {version}')
ok = False
return ok
def check_exported_symbols(binary) -> bool:
ok: bool = True
for symbol in binary.dynamic_symbols:
if not symbol.exported:
continue
name = symbol.name
if binary.header.machine_type == lief.ELF.ARCH.RISCV or name in IGNORE_EXPORTS:
continue
print(f'{binary.name}: export of symbol {name} not allowed!')
ok = False
return ok
def check_ELF_libraries(binary) -> bool:
ok: bool = True
for library in binary.libraries:
if library not in ELF_ALLOWED_LIBRARIES:
print(f'{filename}: {library} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
def check_MACHO_libraries(binary) -> bool:
ok: bool = True
for dylib in binary.libraries:
split = dylib.name.split('/')
if split[-1] not in MACHO_ALLOWED_LIBRARIES:
print(f'{split[-1]} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
def check_MACHO_min_os(binary) -> bool:
if binary.build_version.minos == [11,0,0]:
return True
return False
def check_MACHO_sdk(binary) -> bool:
if binary.build_version.sdk == [14, 0, 0]:
return True
return False
def check_MACHO_ld64(binary) -> bool:
if binary.build_version.tools[0].version == [711, 0, 0]:
return True
return False
def check_PE_libraries(binary) -> bool:
ok: bool = True
for dylib in binary.libraries:
if dylib not in PE_ALLOWED_LIBRARIES:
print(f'{dylib} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
def check_PE_subsystem_version(binary) -> bool:
major: int = binary.optional_header.major_subsystem_version
minor: int = binary.optional_header.minor_subsystem_version
if major == 6 and minor == 1:
return True
return False
def check_ELF_interpreter(binary) -> bool:
expected_interpreter = ELF_INTERPRETER_NAMES[binary.header.machine_type][binary.abstract.header.endianness]
return binary.concrete.interpreter == expected_interpreter
def check_ELF_ABI(binary) -> bool:
expected_abi = ELF_ABIS[binary.header.machine_type][binary.abstract.header.endianness]
note = binary.concrete.get(lief.ELF.NOTE_TYPES.ABI_TAG)
assert note.details.abi == lief.ELF.NOTE_ABIS.LINUX
return note.details.version == expected_abi
CHECKS = {
lief.EXE_FORMATS.ELF: [
('IMPORTED_SYMBOLS', check_imported_symbols),
('EXPORTED_SYMBOLS', check_exported_symbols),
('LIBRARY_DEPENDENCIES', check_ELF_libraries),
('INTERPRETER_NAME', check_ELF_interpreter),
('ABI', check_ELF_ABI),
],
lief.EXE_FORMATS.MACHO: [
('DYNAMIC_LIBRARIES', check_MACHO_libraries),
('MIN_OS', check_MACHO_min_os),
('SDK', check_MACHO_sdk),
('LD64', check_MACHO_ld64),
],
lief.EXE_FORMATS.PE: [
('DYNAMIC_LIBRARIES', check_PE_libraries),
('SUBSYSTEM_VERSION', check_PE_subsystem_version),
]
}
if __name__ == '__main__':
retval: int = 0
for filename in sys.argv[1:]:
try:
binary = lief.parse(filename)
etype = binary.format
if etype == lief.EXE_FORMATS.UNKNOWN:
print(f'{filename}: unknown executable format')
retval = 1
continue
failed: list[str] = []
for (name, func) in CHECKS[etype]:
if not func(binary):
failed.append(name)
if failed:
print(f'{filename}: failed {" ".join(failed)}')
retval = 1
except IOError:
print(f'{filename}: cannot open')
retval = 1
sys.exit(retval)
bitcoin/contrib/devtools/test_utxo_snapshots.sh
#!/usr/bin/env bash
# Demonstrate the creation and usage of UTXO snapshots.
#
# A server node starts up, IBDs up to a certain height, then generates a UTXO
# snapshot at that point.
#
# The server then downloads more blocks (to create a diff from the snapshot).
#
# We bring a client up, load the UTXO snapshot, and we show the client sync to
# the "network tip" and then start a background validation of the snapshot it
# loaded. We see the background validation chainstate removed after validation
# completes.
#
# The shellcheck rule SC2086 (quoted variables) disablements are necessary
# since this rule needs to be violated in order to get bitcoind to pick up on
# $EARLY_IBD_FLAGS for the script to work.
export LC_ALL=C
set -e
BASE_HEIGHT=${1:-30000}
INCREMENTAL_HEIGHT=20000
FINAL_HEIGHT=$((BASE_HEIGHT + INCREMENTAL_HEIGHT))
SERVER_DATADIR="$(pwd)/utxodemo-data-server-$BASE_HEIGHT"
CLIENT_DATADIR="$(pwd)/utxodemo-data-client-$BASE_HEIGHT"
UTXO_DAT_FILE="$(pwd)/utxo.$BASE_HEIGHT.dat"
# Chosen to try to not interfere with any running bitcoind processes.
SERVER_PORT=8633
SERVER_RPC_PORT=8632
CLIENT_PORT=8733
CLIENT_RPC_PORT=8732
SERVER_PORTS="-port=${SERVER_PORT} -rpcport=${SERVER_RPC_PORT}"
CLIENT_PORTS="-port=${CLIENT_PORT} -rpcport=${CLIENT_RPC_PORT}"
# Ensure the client exercises all indexes to test that snapshot use works
# properly with indexes.
ALL_INDEXES="-txindex -coinstatsindex -blockfilterindex=1"
if ! command -v jq >/dev/null ; then
echo "This script requires jq to parse JSON RPC output. Please install it."
echo "(e.g. sudo apt install jq)"
exit 1
fi
DUMP_OUTPUT="dumptxoutset-output-$BASE_HEIGHT.json"
finish() {
echo
echo "Killing server and client PIDs ($SERVER_PID, $CLIENT_PID) and cleaning up datadirs"
echo
rm -f "$UTXO_DAT_FILE" "$DUMP_OUTPUT"
rm -rf "$SERVER_DATADIR" "$CLIENT_DATADIR"
kill -9 "$SERVER_PID" "$CLIENT_PID"
}
trap finish EXIT
# Need to specify these to trick client into accepting server as a peer
# it can IBD from, otherwise the default values prevent IBD from the server node.
EARLY_IBD_FLAGS="-maxtipage=9223372036854775207 -minimumchainwork=0x00"
server_rpc() {
./src/bitcoin-cli -rpcport=$SERVER_RPC_PORT -datadir="$SERVER_DATADIR" "$@"
}
client_rpc() {
./src/bitcoin-cli -rpcport=$CLIENT_RPC_PORT -datadir="$CLIENT_DATADIR" "$@"
}
server_sleep_til_boot() {
while ! server_rpc ping >/dev/null 2>&1; do sleep 0.1; done
}
client_sleep_til_boot() {
while ! client_rpc ping >/dev/null 2>&1; do sleep 0.1; done
}
server_sleep_til_shutdown() {
while server_rpc ping >/dev/null 2>&1; do sleep 0.1; done
}
mkdir -p "$SERVER_DATADIR" "$CLIENT_DATADIR"
echo "Hi, welcome to the assumeutxo demo/test"
echo
echo "We're going to"
echo
echo " - start up a 'server' node, sync it via mainnet IBD to height ${BASE_HEIGHT}"
echo " - create a UTXO snapshot at that height"
echo " - IBD ${INCREMENTAL_HEIGHT} more blocks on top of that"
echo
echo "then we'll demonstrate assumeutxo by "
echo
echo " - starting another node (the 'client') and loading the snapshot in"
echo " * first you'll have to modify the code slightly (chainparams) and recompile"
echo " * don't worry, we'll make it easy"
echo " - observing the client sync ${INCREMENTAL_HEIGHT} blocks on top of the snapshot from the server"
echo " - observing the client validate the snapshot chain via background IBD"
echo
read -p "Press [enter] to continue" _
echo
echo "-- Starting the demo. You might want to run the two following commands in"
echo " separate terminal windows:"
echo
echo " watch -n0.1 tail -n 30 $SERVER_DATADIR/debug.log"
echo " watch -n0.1 tail -n 30 $CLIENT_DATADIR/debug.log"
echo
read -p "Press [enter] to continue" _
echo
echo "-- IBDing the blocks (height=$BASE_HEIGHT) required to the server node..."
# shellcheck disable=SC2086
./src/bitcoind -logthreadnames=1 $SERVER_PORTS \
-datadir="$SERVER_DATADIR" $EARLY_IBD_FLAGS -stopatheight="$BASE_HEIGHT" >/dev/null
echo
echo "-- Creating snapshot at ~ height $BASE_HEIGHT ($UTXO_DAT_FILE)..."
server_sleep_til_shutdown # wait for stopatheight to be hit
# shellcheck disable=SC2086
./src/bitcoind -logthreadnames=1 $SERVER_PORTS \
-datadir="$SERVER_DATADIR" $EARLY_IBD_FLAGS -connect=0 -listen=0 >/dev/null &
SERVER_PID="$!"
server_sleep_til_boot
server_rpc dumptxoutset "$UTXO_DAT_FILE" > "$DUMP_OUTPUT"
cat "$DUMP_OUTPUT"
kill -9 "$SERVER_PID"
RPC_BASE_HEIGHT=$(jq -r .base_height < "$DUMP_OUTPUT")
RPC_AU=$(jq -r .txoutset_hash < "$DUMP_OUTPUT")
RPC_NCHAINTX=$(jq -r .nchaintx < "$DUMP_OUTPUT")
RPC_BLOCKHASH=$(jq -r .base_hash < "$DUMP_OUTPUT")
server_sleep_til_shutdown
echo
echo "-- Now: add the following to CMainParams::m_assumeutxo_data"
echo " in src/kernel/chainparams.cpp, and recompile:"
echo
echo " {${RPC_BASE_HEIGHT}, AssumeutxoHash{uint256S(\"0x${RPC_AU}\")}, ${RPC_NCHAINTX}, uint256S(\"0x${RPC_BLOCKHASH}\")},"
echo
echo
echo "-- IBDing more blocks to the server node (height=$FINAL_HEIGHT) so there is a diff between snapshot and tip..."
# shellcheck disable=SC2086
./src/bitcoind $SERVER_PORTS -logthreadnames=1 -datadir="$SERVER_DATADIR" \
$EARLY_IBD_FLAGS -stopatheight="$FINAL_HEIGHT" >/dev/null
echo
echo "-- Starting the server node to provide blocks to the client node..."
# shellcheck disable=SC2086
./src/bitcoind $SERVER_PORTS -logthreadnames=1 -debug=net -datadir="$SERVER_DATADIR" \
$EARLY_IBD_FLAGS -connect=0 -listen=1 >/dev/null &
SERVER_PID="$!"
server_sleep_til_boot
echo
echo "-- Okay, what you're about to see is the client starting up and activating the snapshot."
echo " I'm going to display the top 14 log lines from the client on top of an RPC called"
echo " getchainstates, which is like getblockchaininfo but for both the snapshot and "
echo " background validation chainstates."
echo
echo " You're going to first see the snapshot chainstate sync to the server's tip, then"
echo " the background IBD chain kicks in to validate up to the base of the snapshot."
echo
echo " Once validation of the snapshot is done, you should see log lines indicating"
echo " that we've deleted the background validation chainstate."
echo
echo " Once everything completes, exit the watch command with CTRL+C."
echo
read -p "When you're ready for all this, hit [enter]" _
echo
echo "-- Starting the client node to get headers from the server, then load the snapshot..."
# shellcheck disable=SC2086
./src/bitcoind $CLIENT_PORTS $ALL_INDEXES -logthreadnames=1 -datadir="$CLIENT_DATADIR" \
-connect=0 -addnode=127.0.0.1:$SERVER_PORT -debug=net $EARLY_IBD_FLAGS >/dev/null &
CLIENT_PID="$!"
client_sleep_til_boot
echo
echo "-- Initial state of the client:"
client_rpc getchainstates
echo
echo "-- Loading UTXO snapshot into client..."
client_rpc loadtxoutset "$UTXO_DAT_FILE"
watch -n 0.3 "( tail -n 14 $CLIENT_DATADIR/debug.log ; echo ; ./src/bitcoin-cli -rpcport=$CLIENT_RPC_PORT -datadir=$CLIENT_DATADIR getchainstates) | cat"
echo
echo "-- Okay, now I'm going to restart the client to make sure that the snapshot chain reloads "
echo " as the main chain properly..."
echo
echo " Press CTRL+C after you're satisfied to exit the demo"
echo
read -p "Press [enter] to continue" _
client_sleep_til_boot
# shellcheck disable=SC2086
./src/bitcoind $CLIENT_PORTS $ALL_INDEXES -logthreadnames=1 -datadir="$CLIENT_DATADIR" -connect=0 \
    -addnode=127.0.0.1:$SERVER_PORT $EARLY_IBD_FLAGS >/dev/null &
CLIENT_PID="$!"
client_sleep_til_boot
watch -n 0.3 "( tail -n 14 $CLIENT_DATADIR/debug.log ; echo ; ./src/bitcoin-cli -rpcport=$CLIENT_RPC_PORT -datadir=$CLIENT_DATADIR getchainstates) | cat"
echo
echo "-- Done!"
bitcoin/contrib/devtools/circular-dependencies.py
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import sys
import re
MAPPING = {
'core_read.cpp': 'core_io.cpp',
'core_write.cpp': 'core_io.cpp',
}
# Directories with header-based modules, where the assumption that .cpp files
# define functions and variables declared in corresponding .h files is
# incorrect.
HEADER_MODULE_PATHS = [
'interfaces/'
]
def module_name(path):
if path in MAPPING:
path = MAPPING[path]
if any(path.startswith(dirpath) for dirpath in HEADER_MODULE_PATHS):
return path
if path.endswith(".h"):
return path[:-2]
if path.endswith(".c"):
return path[:-2]
if path.endswith(".cpp"):
return path[:-4]
return None
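# Examples (illustrative):
#   module_name('core_read.cpp')      -> 'core_io'            (via MAPPING)
#   module_name('interfaces/chain.h') -> 'interfaces/chain.h' (header-based module)
#   module_name('validation.cpp')     -> 'validation'
#   module_name('Makefile.am')        -> None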
files = dict()
deps: dict[str, set[str]] = dict()
RE = re.compile("^#include <(.*)>")
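# Example (illustrative): for the line '#include <consensus/validation.h>' the
# regex captures 'consensus/validation.h'; quoted includes ("...") do not match.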
# Iterate over files, and create list of modules
for arg in sys.argv[1:]:
module = module_name(arg)
if module is None:
print("Ignoring file %s (does not constitute module)\n" % arg)
else:
files[arg] = module
deps[module] = set()
# Iterate again, and build list of direct dependencies for each module
# TODO: implement support for multiple include directories
for arg in sorted(files.keys()):
module = files[arg]
with open(arg, 'r', encoding="utf8") as f:
for line in f:
match = RE.match(line)
if match:
include = match.group(1)
included_module = module_name(include)
if included_module is not None and included_module in deps and included_module != module:
deps[module].add(included_module)
# Loop to find the shortest (remaining) circular dependency
have_cycle: bool = False
while True:
shortest_cycle = None
for module in sorted(deps.keys()):
# Build the transitive closure of dependencies of module
closure: dict[str, list[str]] = dict()
for dep in deps[module]:
closure[dep] = []
while True:
old_size = len(closure)
old_closure_keys = sorted(closure.keys())
for src in old_closure_keys:
for dep in deps[src]:
if dep not in closure:
closure[dep] = closure[src] + [src]
if len(closure) == old_size:
break
# If module is in its own transitive closure, it's a circular dependency; check if it is the shortest
if module in closure and (shortest_cycle is None or len(closure[module]) + 1 < len(shortest_cycle)):
shortest_cycle = [module] + closure[module]
if shortest_cycle is None:
break
# We have the shortest circular dependency; report it
module = shortest_cycle[0]
print("Circular dependency: %s" % (" -> ".join(shortest_cycle + [module])))
# And then break the dependency to avoid repeating in other cycles
deps[shortest_cycle[-1]] = deps[shortest_cycle[-1]] - set([module])
have_cycle = True
sys.exit(1 if have_cycle else 0)
bitcoin/contrib/devtools/test_deterministic_coverage.sh
#!/usr/bin/env bash
#
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test for deterministic coverage across unit test runs.
export LC_ALL=C
# Use GCOV_EXECUTABLE="gcov" if compiling with gcc.
# Use GCOV_EXECUTABLE="llvm-cov gcov" if compiling with clang.
GCOV_EXECUTABLE="gcov"
# Disable tests known to cause non-deterministic behaviour and document the source or point of non-determinism.
NON_DETERMINISTIC_TESTS=(
"blockfilter_index_tests/blockfilter_index_initial_sync" # src/checkqueue.h: In CCheckQueue::Loop(): while (queue.empty()) { ... }
"coinselector_tests/knapsack_solver_test" # coinselector_tests.cpp: if (equal_sets(setCoinsRet, setCoinsRet2))
"fs_tests/fsbridge_fstream" # deterministic test failure?
"miner_tests/CreateNewBlock_validity" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"scheduler_tests/manythreads" # scheduler.cpp: CScheduler::serviceQueue()
"scheduler_tests/singlethreadedscheduler_ordered" # scheduler.cpp: CScheduler::serviceQueue()
"txvalidationcache_tests/checkinputs_test" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"txvalidationcache_tests/tx_mempool_block_doublespend" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"txindex_tests/txindex_initial_sync" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"txvalidation_tests/tx_mempool_reject_coinbase" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"validation_block_tests/processnewblock_signals_ordering" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"wallet_tests/coin_mark_dirty_immature_credit" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"wallet_tests/dummy_input_size_test" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"wallet_tests/importmulti_rescan" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"wallet_tests/importwallet_rescan" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"wallet_tests/ListCoins" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"wallet_tests/scan_for_wallet_transactions" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
"wallet_tests/wallet_disableprivkeys" # validation.cpp: if (GetMainSignals().CallbacksPending() > 10)
)
TEST_BITCOIN_BINARY="src/test/test_bitcoin"
print_usage() {
echo "Usage: $0 [custom test filter (default: all but known non-deterministic tests)] [number of test runs (default: 2)]"
}
N_TEST_RUNS=2
BOOST_TEST_RUN_FILTERS=""
if [[ $# != 0 ]]; then
if [[ $1 == "--help" ]]; then
print_usage
exit
fi
PARSED_ARGUMENTS=0
if [[ $1 =~ [a-z] ]]; then
BOOST_TEST_RUN_FILTERS=$1
PARSED_ARGUMENTS=$((PARSED_ARGUMENTS + 1))
shift
fi
if [[ $1 =~ ^[0-9]+$ ]]; then
N_TEST_RUNS=$1
PARSED_ARGUMENTS=$((PARSED_ARGUMENTS + 1))
shift
fi
if [[ ${PARSED_ARGUMENTS} == 0 || $# -gt 2 || ${N_TEST_RUNS} -lt 2 ]]; then
print_usage
exit
fi
fi
if [[ ${BOOST_TEST_RUN_FILTERS} == "" ]]; then
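    # Build an exclusion filter: join the array entries with ':' and prefix
    # each test name with '!', e.g. "a/b" and "c/d" become "!a/b:!c/d".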
BOOST_TEST_RUN_FILTERS="$(IFS=":"; echo "!${NON_DETERMINISTIC_TESTS[*]}" | sed 's/:/:!/g')"
else
echo "Using Boost test filter: ${BOOST_TEST_RUN_FILTERS}"
echo
fi
if ! command -v gcov > /dev/null; then
echo "Error: gcov not installed. Exiting."
exit 1
fi
if ! command -v gcovr > /dev/null; then
echo "Error: gcovr not installed. Exiting."
exit 1
fi
if [[ ! -e ${TEST_BITCOIN_BINARY} ]]; then
echo "Error: Executable ${TEST_BITCOIN_BINARY} not found. Run \"./configure --enable-lcov\" and compile."
exit 1
fi
get_file_suffix_count() {
find src/ -type f -name "*.$1" | wc -l
}
if [[ $(get_file_suffix_count gcno) == 0 ]]; then
echo "Error: Could not find any *.gcno files. The *.gcno files are generated by the compiler. Run \"./configure --enable-lcov\" and re-compile."
exit 1
fi
get_covr_filename() {
echo "gcovr.run-$1.txt"
}
TEST_RUN_ID=0
while [[ ${TEST_RUN_ID} -lt ${N_TEST_RUNS} ]]; do
TEST_RUN_ID=$((TEST_RUN_ID + 1))
echo "[$(date +"%Y-%m-%d %H:%M:%S")] Measuring coverage, run #${TEST_RUN_ID} of ${N_TEST_RUNS}"
find src/ -type f -name "*.gcda" -exec rm {} \;
if [[ $(get_file_suffix_count gcda) != 0 ]]; then
echo "Error: Stale *.gcda files found. Exiting."
exit 1
fi
TEST_OUTPUT_TEMPFILE=$(mktemp)
if ! BOOST_TEST_RUN_FILTERS="${BOOST_TEST_RUN_FILTERS}" ${TEST_BITCOIN_BINARY} > "${TEST_OUTPUT_TEMPFILE}" 2>&1; then
cat "${TEST_OUTPUT_TEMPFILE}"
rm "${TEST_OUTPUT_TEMPFILE}"
exit 1
fi
rm "${TEST_OUTPUT_TEMPFILE}"
if [[ $(get_file_suffix_count gcda) == 0 ]]; then
echo "Error: Running the test suite did not create any *.gcda files. The gcda files are generated when the instrumented test programs are executed. Run \"./configure --enable-lcov\" and re-compile."
exit 1
fi
GCOVR_TEMPFILE=$(mktemp)
if ! gcovr --gcov-executable "${GCOV_EXECUTABLE}" -r src/ > "${GCOVR_TEMPFILE}"; then
echo "Error: gcovr failed. Output written to ${GCOVR_TEMPFILE}. Exiting."
exit 1
fi
GCOVR_FILENAME=$(get_covr_filename ${TEST_RUN_ID})
mv "${GCOVR_TEMPFILE}" "${GCOVR_FILENAME}"
if grep -E "^TOTAL *0 *0 " "${GCOVR_FILENAME}"; then
echo "Error: Spurious gcovr output. Make sure the correct GCOV_EXECUTABLE variable is set in $0 (\"gcov\" for gcc, \"llvm-cov gcov\" for clang)."
exit 1
fi
if [[ ${TEST_RUN_ID} != 1 ]]; then
COVERAGE_DIFF=$(diff -u "$(get_covr_filename 1)" "${GCOVR_FILENAME}")
if [[ ${COVERAGE_DIFF} != "" ]]; then
echo
echo "The line coverage is non-deterministic between runs. Exiting."
echo
echo "The test suite must be deterministic in the sense that the set of lines executed at least"
echo "once must be identical between runs. This is a necessary condition for meaningful"
echo "coverage measuring."
echo
echo "${COVERAGE_DIFF}"
exit 1
fi
rm "${GCOVR_FILENAME}"
fi
done
echo
echo "Coverage test passed: Deterministic coverage across ${N_TEST_RUNS} runs."
exit
bitcoin/contrib/devtools/README.md
Contents
========
This directory contains tools for developers working on this repository.
clang-format-diff.py
===================
A script to format unified git diffs according to [.clang-format](../../src/.clang-format).
Requires `clang-format`, installed e.g. via `brew install clang-format` on macOS,
or `sudo apt install clang-format` on Debian/Ubuntu.
For instance, to format the last commit with 0 lines of context,
the script should be called from the git root folder as follows.
```
git diff -U0 HEAD~1.. | ./contrib/devtools/clang-format-diff.py -p1 -i -v
```
copyright\_header.py
====================
Provides utilities for managing copyright headers of `The Bitcoin Core
developers` in repository source files. It has three subcommands:
```
$ ./copyright_header.py report <base_directory> [verbose]
$ ./copyright_header.py update <base_directory>
$ ./copyright_header.py insert <file>
```
Running these subcommands without arguments displays a usage string.
copyright\_header.py report \<base\_directory\> [verbose]
---------------------------------------------------------
Produces a report of all copyright header notices found inside the source files
of a repository. Useful to quickly visualize the state of the headers.
Specifying `verbose` will list the full filenames of files of each category.
copyright\_header.py update \<base\_directory\>
-----------------------------------------------
Updates all the copyright headers of `The Bitcoin Core developers` which were
changed in a year more recent than is listed. For example:
```
// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
```
will be updated to:
```
// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
```
where `<lastModifiedYear>` is obtained from the `git log` history.
This subcommand also handles copyright headers that have only a single year. In
those cases:
```
// Copyright (c) <year> The Bitcoin Core developers
```
will be updated to:
```
// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
```
where the update is appropriate.
copyright\_header.py insert \<file\>
------------------------------------
Inserts a copyright header for `The Bitcoin Core developers` at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has `#!` starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be `<year_introduced>-<current_year>` where
`<year_introduced>` is according to the `git log` history. If
`<year_introduced>` is equal to `<current_year>`, it will be set as a single
year rather than two hyphenated years.
If the file already has a copyright for `The Bitcoin Core developers`, the
script will exit.
gen-manpages.py
===============
A small script to automatically create manpages in ../../doc/man by running the
release binaries through help2man, which queries each binary's `--help` and
`--version` output.
This requires help2man which can be found at: https://www.gnu.org/software/help2man/
With in-tree builds this tool can be run from any directory within the
repository. To use this tool with out-of-tree builds set `BUILDDIR`. For
example:
```bash
BUILDDIR=$PWD/build contrib/devtools/gen-manpages.py
```
headerssync-params.py
=====================
A script to generate optimal parameters for the headerssync module (src/headerssync.cpp). It takes no command-line
options, as all its configuration is set at the top of the file. It runs many times faster inside PyPy. Invocation:
```bash
pypy3 contrib/devtools/headerssync-params.py
```
gen-bitcoin-conf.sh
===================
Generates a bitcoin.conf file in `share/examples/` by parsing the output from `bitcoind --help`. This script is run during the
release process to include a bitcoin.conf with the release binaries and can also be run by users to generate a file locally.
When generating a file as part of the release process, make sure to commit the changes after running the script.
With in-tree builds this tool can be run from any directory within the
repository. To use this tool with out-of-tree builds set `BUILDDIR`. For
example:
```bash
BUILDDIR=$PWD/build contrib/devtools/gen-bitcoin-conf.sh
```
security-check.py and test-security-check.py
============================================
Perform basic security checks on a series of executables.
symbol-check.py
===============
A script to check that release executables only contain
certain symbols and are only linked against allowed libraries.
For Linux this means checking for allowed gcc, glibc and libstdc++ version symbols.
This makes sure they are still compatible with the minimum supported distribution versions.
For macOS and Windows we check that the executables are only linked against libraries we allow.
Example usage:
find ../path/to/executables -type f -executable | xargs python3 contrib/devtools/symbol-check.py
If no errors occur the return value will be 0 and the output will be empty.
If there are any errors the return value will be 1 and output like this will be printed:
.../64/test_bitcoin: symbol memcpy from unsupported version GLIBC_2.14
.../64/test_bitcoin: symbol __fdelt_chk from unsupported version GLIBC_2.15
.../64/test_bitcoin: symbol std::out_of_range::~out_of_range() from unsupported version GLIBCXX_3.4.15
.../64/test_bitcoin: symbol _ZNSt8__detail15_List_nod from unsupported version GLIBCXX_3.4.15
circular-dependencies.py
========================
Run this script from the root of the source tree (`src/`) to find circular dependencies in the source code.
This looks only at which files include other files, treating each `.cpp` file and its corresponding `.h` file as one unit.
Example usage:
cd .../src
../contrib/devtools/circular-dependencies.py {*,*/*,*/*/*}.{h,cpp}
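If one or more cycles exist, each is printed before the script exits with a
nonzero status. Output takes the following form (illustrative example, not
output from a real run):

    Circular dependency: validation -> txmempool -> validation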
bitcoin/contrib/devtools/utils.py
#!/usr/bin/env python3
# Copyright (c) 2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Common utility functions
'''
import shutil
import sys
import os
def determine_wellknown_cmd(envvar, progname) -> list[str]:
maybe_env = os.getenv(envvar)
maybe_which = shutil.which(progname)
if maybe_env:
return maybe_env.split(' ') # Well-known vars are often meant to be word-split
elif maybe_which:
return [ maybe_which ]
else:
sys.exit(f"{progname} not found")
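# Example (illustrative):
#   determine_wellknown_cmd('GIT', 'git') returns os.environ['GIT'].split(' ')
#   if GIT is set (e.g. 'git -C /tmp' -> ['git', '-C', '/tmp']), otherwise the
#   shutil.which() result, e.g. ['/usr/bin/git'].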
bitcoin/contrib/devtools/gen-manpages.py
#!/usr/bin/env python3
# Copyright (c) 2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
import subprocess
import sys
import tempfile
BINARIES = [
'src/bitcoind',
'src/bitcoin-cli',
'src/bitcoin-tx',
'src/bitcoin-wallet',
'src/bitcoin-util',
'src/qt/bitcoin-qt',
]
# Paths to external utilities.
git = os.getenv('GIT', 'git')
help2man = os.getenv('HELP2MAN', 'help2man')
# If not otherwise specified, get top directory from git.
topdir = os.getenv('TOPDIR')
if not topdir:
r = subprocess.run([git, 'rev-parse', '--show-toplevel'], stdout=subprocess.PIPE, check=True, text=True)
topdir = r.stdout.rstrip()
# Get input and output directories.
builddir = os.getenv('BUILDDIR', topdir)
mandir = os.getenv('MANDIR', os.path.join(topdir, 'doc/man'))
# Verify that all the required binaries are usable, and extract copyright
# message in a first pass.
versions = []
for relpath in BINARIES:
abspath = os.path.join(builddir, relpath)
try:
r = subprocess.run([abspath, "--version"], stdout=subprocess.PIPE, check=True, text=True)
except IOError:
print(f'{abspath} not found or not an executable', file=sys.stderr)
sys.exit(1)
# take first line (which must contain version)
verstr = r.stdout.splitlines()[0]
# last word of line is the actual version e.g. v22.99.0-5c6b3d5b3508
verstr = verstr.split()[-1]
assert verstr.startswith('v')
# remaining lines are copyright
copyright = r.stdout.split('\n')[1:]
assert copyright[0].startswith('Copyright (C)')
versions.append((abspath, verstr, copyright))
if any(verstr.endswith('-dirty') for (_, verstr, _) in versions):
print("WARNING: Binaries were built from a dirty tree.")
print('man pages generated from dirty binaries should NOT be committed.')
print('To properly generate man pages, please commit your changes (or discard them), rebuild, then run this script again.')
print()
with tempfile.NamedTemporaryFile('w', suffix='.h2m') as footer:
# Create copyright footer, and write it to a temporary include file.
# Copyright is the same for all binaries, so just use the first.
footer.write('[COPYRIGHT]\n')
footer.write('\n'.join(versions[0][2]).strip())
footer.flush()
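    # help2man reads extra man-page sections from a --include file; the
    # "[COPYRIGHT]" header above names the section being supplied.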
# Call the binaries through help2man to produce a manual page for each of them.
for (abspath, verstr, _) in versions:
outname = os.path.join(mandir, os.path.basename(abspath) + '.1')
print(f'Generating {outname}…')
subprocess.run([help2man, '-N', '--version-string=' + verstr, '--include=' + footer.name, '-o', outname, abspath], check=True)
bitcoin/contrib/devtools/headerssync-params.py
#!/usr/bin/env python3
# Copyright (c) 2022 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Script to find the optimal parameters for the headerssync module through simulation."""
from math import log, exp, sqrt
from datetime import datetime, timedelta
import random
# Parameters:
# Aim for still working fine at some point in the future. [datetime]
TIME = datetime(2026, 10, 5)
# Expected block interval. [timedelta]
BLOCK_INTERVAL = timedelta(seconds=600)
# The number of headers corresponding to the minchainwork parameter. [headers]
MINCHAINWORK_HEADERS = 804000
# Combined processing bandwidth from all attackers to one victim. [bit/s]
# 6 Gbit/s is approximately the speed at which a single thread of a Ryzen 5950X CPU can hash
# headers. In practice, the victim's network bandwidth and network processing overheads probably
# impose a far lower number, but it's a useful upper bound.
ATTACK_BANDWIDTH = 6000000000
# How much additional permanent memory usage are attackers (jointly) allowed to cause in the victim,
# expressed as fraction of the normal memory usage due to mainchain growth, for the duration the
# attack is sustained. [unitless]
# 0.2 means that attackers, while they keep up the attack, can cause permanent memory usage due to
# headers storage to grow at 1.2 header per BLOCK_INTERVAL.
ATTACK_FRACTION = 0.2
# When this is set, the mapping from period size to memory usage (at optimal buffer size for that
# period) is assumed to be convex. This greatly speeds up the computation, and does not appear
# to influence the outcome. Set to False for a stronger guarantee to get the optimal result.
ASSUME_CONVEX = True
# Explanation:
#
# The headerssync module implements a DoS protection against low-difficulty header spam which does
# not rely on checkpoints. In short it works as follows:
#
# - (initial) header synchronization is split into two phases:
# - A commitment phase, in which headers are downloaded from the peer, and a very compact
# commitment to them is remembered in per-peer memory. The commitment phase ends when the
# received chain's combined work reaches a predetermined threshold.
# - A redownload phase, during which the headers are downloaded a second time from the same peer,
# and compared against the commitment constructed in the first phase. If there is a match, the
# redownloaded headers are fed to validation and accepted into permanent storage.
#
# This separation guarantees that no headers are accepted into permanent storage without
# requiring the peer to first prove the chain actually has sufficient work.
#
# - To actually implement this commitment mechanism, the following approach is used:
# - Keep a *1 bit* commitment (constructed using a salted hash function), for every block whose
# height is a multiple of {period} plus an offset value. If RANDOMIZE_OFFSET, the offset,
# like the salt, is chosen randomly when the synchronization starts and kept fixed afterwards.
# - When redownloading, headers are fed through a per-peer queue that holds {bufsize} headers,
# before passing them to validation. All the headers in this queue are verified against the
# commitment bits created in the first phase before any header is released from it. This means
# {bufsize/period} bits are checked "on top of" each header before actually processing it,
# which results in a commitment structure with roughly {bufsize/period} bits of security, as
# once a header is modified, due to the prevhash inclusion, all future headers necessarily
# change as well.
#
# The question is what these {period} and {bufsize} parameters need to be set to. This program
# exhaustively tests a range of values to find the optimal choice, taking into account:
#
# - Minimizing the (maximum of) two scenarios that trigger per-peer memory usage:
#
# - When downloading a (likely honest) chain that reaches the chainwork threshold after {n}
# blocks, and then redownloads them, we will consume per-peer memory that is sufficient to
# store {n/period} commitment bits and {bufsize} headers. We only consider attackers without
# sufficient hashpower (as otherwise they are from a PoW perspective not attackers), which
# means {n} is restricted to the honest chain's length before reaching minchainwork.
#
# - When downloading a (likely false) chain of {n} headers that never reaches the chainwork
# threshold, we will consume per-peer memory that is sufficient to store {n/period}
# commitment bits. Such a chain may be very long, by exploiting the timewarp bug to avoid
# ramping up difficulty. There is however an absolute limit on how long such a chain can be: 6
# blocks per second since genesis, due to the increasing MTP consensus rule.
#
# - Not gratuitously preventing synchronizing any valid chain, however difficult such a chain may
# be to construct. In particular, the above scenario with an enormous timewarp-exploiting chain
# cannot simply be ignored, as it is legal that the honest main chain is like that. We however
# do not bother minimizing the memory usage in that case (because a billion-header long honest
# chain will inevitably use far larger amounts of memory than designed for).
#
# - Keep the rate at which attackers can get low-difficulty headers accepted to the block index
# negligible. Specifically, the possibility exists for an attacker to send the honest main
# chain's headers during the commitment phase, but then start deviating at an attacker-chosen
# point by sending novel low-difficulty headers instead. Depending on how high we set the
# {bufsize/period} ratio, we can make the probability that such a header makes it in
# arbitrarily small, but at the cost of higher memory during the redownload phase. It turns out,
# some rate of memory usage growth is expected anyway due to chain growth, so permitting the
# attacker to increase that rate by a small factor isn't concerning. The attacker may start
# somewhat later than genesis, as long as the difficulty doesn't get too high. This reduces
# the attacker bandwidth required at the cost of higher PoW needed for constructing the
# alternate chain. This trade-off is ignored here, as it results in at most a small constant
# factor in attack rate.
# System properties:
# Headers in the redownload buffer are stored without prevhash. [bits]
COMPACT_HEADER_SIZE = 48 * 8
# How many bits a header uses in P2P protocol. [bits]
NET_HEADER_SIZE = 81 * 8
# How many headers are sent at once. [headers]
HEADER_BATCH_COUNT = 2000
# Whether or not the offset of which block heights get checksummed is randomized.
RANDOMIZE_OFFSET = True
# Timestamp of the genesis block
GENESIS_TIME = datetime(2009, 1, 3)
# Derived values:
# What rate of headers worth of RAM attackers are allowed to cause in the victim. [headers/s]
LIMIT_HEADERRATE = ATTACK_FRACTION / BLOCK_INTERVAL.total_seconds()
# How many headers can attackers (jointly) send a victim per second. [headers/s]
NET_HEADERRATE = ATTACK_BANDWIDTH / NET_HEADER_SIZE
# What fraction of headers sent by attackers can at most be accepted by a victim [unitless]
LIMIT_FRACTION = LIMIT_HEADERRATE / NET_HEADERRATE
# How many headers we permit attackers to get accepted per attack. [headers/attack]
ATTACK_HEADERS = LIMIT_FRACTION * MINCHAINWORK_HEADERS
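# Illustrative magnitudes for the parameters above (approximate, not used by the code):
#   LIMIT_HEADERRATE ~ 0.2 / 600        ~ 3.3e-4 headers/s
#   NET_HEADERRATE   ~ 6e9 / 648        ~ 9.3e6 headers/s
#   LIMIT_FRACTION   ~ 3.3e-4 / 9.3e6   ~ 3.6e-11
#   ATTACK_HEADERS   ~ 3.6e-11 * 804000 ~ 2.9e-5 headers/attack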
def find_max_headers(when):
"""Compute the maximum number of headers a valid Bitcoin chain can have at given time."""
# When exploiting the timewarp attack, this can be up to 6 per second since genesis.
return 6 * ((when - GENESIS_TIME) // timedelta(seconds=1))
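# For example (approximate): with TIME = datetime(2026, 10, 5), about 5.6e8 seconds have
# passed since genesis, so find_max_headers(TIME) is roughly 6 * 5.6e8 ~ 3.4e9 headers.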
def lambert_w(value):
"""Solve the equation x*exp(x)=value (x > 0, value > 0)."""
# Initial approximation.
approx = max(log(value), 0.0)
for _ in range(10):
# Newton-Raphson iteration steps.
approx += (value * exp(-approx) - approx) / (approx + 1.0)
return approx
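# Sanity check (illustrative): since 1 * exp(1) == e, lambert_w(exp(1)) should converge
# to approximately 1.0.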
def attack_rate(period, bufsize, limit=None):
"""Compute maximal accepted headers per attack in (period, bufsize) configuration.
If limit is provided, the computation is stopped early when the result is known to exceed the
value in limit.
"""
max_rate = None
max_honest = None
# Let batch 0, the one currently being received, be the first one in which the attacker starts lying.
# They will only ever start doing so right after a commitment block, but where that is can be
# in a number of places. Let honest be the number of honest headers in this current batch,
# preceding the forged ones.
for honest in range(HEADER_BATCH_COUNT):
# The number of headers the attack under consideration will on average get accepted.
# This is the number being computed.
rate = 0
# Iterate over the possible alignments of commitments w.r.t. the first batch. In case
# the alignments are randomized, try all values. If not, the attacker can know/choose
# the alignment, and will always start forging right after a commitment.
if RANDOMIZE_OFFSET:
align_choices = list(range(period))
else:
align_choices = [(honest - 1) % period]
# Now loop over those possible alignment values, computing the average attack rate
# over them by dividing each contribution by len(align_choices).
for align in align_choices:
# These state variables capture the situation after receiving the first batch.
# - The number of headers received after the last commitment for an honest block:
after_good_commit = HEADER_BATCH_COUNT - honest + ((honest - align - 1) % period)
# - The number of forged headers in the redownload buffer:
forged_in_buf = HEADER_BATCH_COUNT - honest
# Now iterate over the next batches of headers received, adding contributions to the
# rate variable.
while True:
# Process the first HEADER_BATCH_COUNT headers in the buffer:
accept_forged_headers = max(forged_in_buf - bufsize, 0)
forged_in_buf -= accept_forged_headers
if accept_forged_headers:
# The probability the attack has not been detected yet at this point:
prob = 0.5 ** (after_good_commit // period)
# Update attack rate, divided by align_choices to average over the alignments.
rate += accept_forged_headers * prob / len(align_choices)
# If this means we exceed limit, bail out early (performance optimization).
if limit is not None and rate >= limit:
return rate, None
# If the maximal term being added is negligible compared to rate, stop
# iterating.
if HEADER_BATCH_COUNT * prob < 1.0e-16 * rate * len(align_choices):
break
# Update state from a new incoming batch (which is all forged)
after_good_commit += HEADER_BATCH_COUNT
forged_in_buf += HEADER_BATCH_COUNT
if max_rate is None or rate > max_rate:
max_rate = rate
max_honest = honest
return max_rate, max_honest
def memory_usage(period, bufsize, when):
"""How much memory (max,mainchain,timewarp) does the (period,bufsize) configuration need?"""
# Per-peer memory usage for a timewarp chain that never meets minchainwork
mem_timewarp = find_max_headers(when) // period
# Per-peer memory usage for being fed the main chain
mem_mainchain = (MINCHAINWORK_HEADERS // period) + bufsize * COMPACT_HEADER_SIZE
# Maximum per-peer memory usage
max_mem = max(mem_timewarp, mem_mainchain)
return max_mem, mem_mainchain, mem_timewarp
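# Worked example with hypothetical inputs (not the script's output): for period=600 and
# bufsize=15000, mem_mainchain = 804000//600 + 15000*384 = 5761340 bits (~703 KiB), while
# mem_timewarp for TIME=2026-10-05 is about 3.4e9//600 ~ 5.6e6 bits (~684 KiB).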
def find_bufsize(period, attack_headers, when, max_mem=None, min_bufsize=1):
"""Determine how big bufsize needs to be given a specific period length.
Given a period, find the smallest value of bufsize such that the attack rate against the
(period, bufsize) configuration is below attack_headers. If max_mem is provided, and no
such bufsize exists that needs less than max_mem bits of memory, None is returned.
min_bufsize is the minimal result to be considered."""
if max_mem is None:
succ_buf = min_bufsize - 1
fail_buf = min_bufsize
# First double iteratively until an upper bound for failure is found.
while True:
if attack_rate(period, fail_buf, attack_headers)[0] < attack_headers:
break
succ_buf, fail_buf = fail_buf, 3 * fail_buf - 2 * succ_buf
else:
# If a long low-work header chain exists that exceeds max_mem already, give up.
if find_max_headers(when) // period > max_mem:
return None
# Otherwise, verify that the maximal buffer size that permits a mainchain sync with less
# than max_mem memory is sufficient to get the attack rate below attack_headers. If not,
# also give up.
max_buf = (max_mem - (MINCHAINWORK_HEADERS // period)) // COMPACT_HEADER_SIZE
if max_buf < min_bufsize:
return None
if attack_rate(period, max_buf, attack_headers)[0] >= attack_headers:
return None
# If it is sufficient, that's an upper bound to start our search.
succ_buf = min_bufsize - 1
fail_buf = max_buf
# Then perform a bisection search to narrow it down.
while fail_buf > succ_buf + 1:
try_buf = (succ_buf + fail_buf) // 2
if attack_rate(period, try_buf, attack_headers)[0] >= attack_headers:
succ_buf = try_buf
else:
fail_buf = try_buf
return fail_buf
def optimize(when):
"""Find the best (period, bufsize) configuration."""
# When period*bufsize = memory_scale, the per-peer memory for a mainchain sync and a maximally
# long low-difficulty header sync are equal.
memory_scale = (find_max_headers(when) - MINCHAINWORK_HEADERS) / COMPACT_HEADER_SIZE
# Compute approximation for {bufsize/period}, using a formula for a simplified problem.
approx_ratio = lambert_w(log(4) * memory_scale / ATTACK_HEADERS**2) / log(4)
# Use those for a first attempt.
print("Searching configurations:")
period = int(sqrt(memory_scale / approx_ratio) + 0.5)
bufsize = find_bufsize(period, ATTACK_HEADERS, when)
mem = memory_usage(period, bufsize, when)
best = (period, bufsize, mem)
maps = [(period, bufsize), (MINCHAINWORK_HEADERS + 1, None)]
print(f"- Initial: period={period}, buffer={bufsize}, mem={mem[0] / 8192:.3f} KiB")
# Consider all period values between 1 and MINCHAINWORK_HEADERS, except the one just tried.
periods = [iv for iv in range(1, MINCHAINWORK_HEADERS + 1) if iv != period]
# Iterate, picking a random element from periods, computing its corresponding bufsize, and
# then using the result to shrink the period.
while True:
# Remove all periods whose memory usage for low-work long chain sync exceeds the best
# memory usage we've found so far.
periods = [p for p in periods if find_max_headers(when) // p < best[2][0]]
# Stop if there is nothing left to try.
if len(periods) == 0:
break
# Pick a random remaining option for period size, and compute corresponding bufsize.
period = periods.pop(random.randrange(len(periods)))
# The buffer size (at a given attack level) cannot shrink as the period grows. Find the
# largest period smaller than the selected one we know the buffer size for, and use that
# as a lower bound to find_bufsize.
min_bufsize = max([(p, b) for p, b in maps if p < period] + [(0,0)])[1]
bufsize = find_bufsize(period, ATTACK_HEADERS, when, best[2][0], min_bufsize)
if bufsize is not None:
# We found a (period, bufsize) configuration with better memory usage than our best
# so far. Remember it for future lower bounds.
maps.append((period, bufsize))
mem = memory_usage(period, bufsize, when)
assert mem[0] <= best[2][0]
if ASSUME_CONVEX:
# Remove all periods that lie on the opposite side of the former best from the
# new best.
periods = [p for p in periods if (p < best[0]) == (period < best[0])]
best = (period, bufsize, mem)
print(f"- New best: period={period}, buffer={bufsize}, mem={mem[0] / 8192:.3f} KiB")
else:
# The (period, bufsize) configuration we found is worse than what we already had.
if ASSUME_CONVEX:
# Remove all periods that lie on the opposite side of the tried configuration
# from the best one.
periods = [p for p in periods if (p < period) == (best[0] < period)]
# Return the result.
period, bufsize, _ = best
return period, bufsize
def analyze(when):
"""Find the best configuration and print it out."""
period, bufsize = optimize(when)
# Compute accurate statistics for the best found configuration.
_, mem_mainchain, mem_timewarp = memory_usage(period, bufsize, when)
headers_per_attack, _ = attack_rate(period, bufsize)
attack_volume = NET_HEADER_SIZE * MINCHAINWORK_HEADERS
# And report them.
print()
print("Optimal configuration:")
print()
print("//! Store one header commitment per HEADER_COMMITMENT_PERIOD blocks.")
print(f"constexpr size_t HEADER_COMMITMENT_PERIOD{{{period}}};")
print()
print("//! Only feed headers to validation once this many headers on top have been")
print("//! received and validated against commitments.")
print(f"constexpr size_t REDOWNLOAD_BUFFER_SIZE{{{bufsize}}};"
f" // {bufsize}/{period} = ~{bufsize/period:.1f} commitments")
print()
print("Properties:")
print(f"- Per-peer memory for mainchain sync: {mem_mainchain / 8192:.3f} KiB")
print(f"- Per-peer memory for timewarp attack: {mem_timewarp / 8192:.3f} KiB")
print(f"- Attack rate: {1/headers_per_attack:.1f} attacks for 1 header of memory growth")
print(f" (where each attack costs {attack_volume / 8388608:.3f} MiB bandwidth)")
analyze(TIME)
| 0 |
bitcoin/contrib | bitcoin/contrib/devtools/utxo_snapshot.sh | #!/usr/bin/env bash
#
# Copyright (c) 2019-2023 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
export LC_ALL=C
set -ueo pipefail
NETWORK_DISABLED=false
if (( $# < 3 )); then
echo 'Usage: utxo_snapshot.sh <generate-at-height> <snapshot-out-path> <bitcoin-cli-call ...>'
echo
echo " if <snapshot-out-path> is '-', don't produce a snapshot file but instead print the "
echo " expected assumeutxo hash"
echo
echo 'Examples:'
echo
echo " ./contrib/devtools/utxo_snapshot.sh 570000 utxo.dat ./src/bitcoin-cli -datadir=\$(pwd)/testdata"
echo ' ./contrib/devtools/utxo_snapshot.sh 570000 - ./src/bitcoin-cli'
exit 1
fi
GENERATE_AT_HEIGHT="${1}"; shift;
OUTPUT_PATH="${1}"; shift;
# Most of the calls we make take a while to run, so pad with a lengthy timeout.
BITCOIN_CLI_CALL="${*} -rpcclienttimeout=9999999"
# Check if the node is pruned and get the pruned block height (empty, treated as 0, if unpruned)
PRUNED=$( ${BITCOIN_CLI_CALL} getblockchaininfo | awk '/pruneheight/ {print $2}' | tr -d ',' )
if (( GENERATE_AT_HEIGHT < PRUNED )); then
echo "Error: The requested snapshot height (${GENERATE_AT_HEIGHT}) should be greater than the pruned block height (${PRUNED})."
exit 1
fi
# Early exit if file at OUTPUT_PATH already exists
if [[ -e "$OUTPUT_PATH" ]]; then
(>&2 echo "Error: $OUTPUT_PATH already exists or is not a valid path.")
exit 1
fi
# Validate that the path is correct
if [[ "${OUTPUT_PATH}" != "-" && ! -d "$(dirname "${OUTPUT_PATH}")" ]]; then
(>&2 echo "Error: The directory $(dirname "${OUTPUT_PATH}") does not exist.")
exit 1
fi
function cleanup {
(>&2 echo "Restoring chain to original height; this may take a while")
${BITCOIN_CLI_CALL} reconsiderblock "${PIVOT_BLOCKHASH}"
if $NETWORK_DISABLED; then
(>&2 echo "Restoring network activity")
${BITCOIN_CLI_CALL} setnetworkactive true
fi
}
function early_exit {
(>&2 echo "Exiting due to Ctrl-C")
cleanup
exit 1
}
# Prompt the user to disable network activity
read -p "Do you want to disable network activity (setnetworkactive false) before running invalidateblock? (Y/n): " -r
if [[ "$REPLY" =~ ^[Yy]*$ || -z "$REPLY" ]]; then
# User input is "Y", "y", or Enter key, proceed with the action
NETWORK_DISABLED=true
(>&2 echo "Disabling network activity")
${BITCOIN_CLI_CALL} setnetworkactive false
else
(>&2 echo "Network activity remains enabled")
fi
# Block we'll invalidate/reconsider to rewind/fast-forward the chain.
PIVOT_BLOCKHASH=$($BITCOIN_CLI_CALL getblockhash $(( GENERATE_AT_HEIGHT + 1 )) )
# Trap for normal exit and Ctrl-C
trap cleanup EXIT
trap early_exit INT
(>&2 echo "Rewinding chain back to height ${GENERATE_AT_HEIGHT} (by invalidating ${PIVOT_BLOCKHASH}); this may take a while")
${BITCOIN_CLI_CALL} invalidateblock "${PIVOT_BLOCKHASH}"
if [[ "${OUTPUT_PATH}" = "-" ]]; then
(>&2 echo "Generating txoutset info...")
${BITCOIN_CLI_CALL} gettxoutsetinfo | grep hash_serialized_3 | sed 's/^.*: "\(.\+\)",/\1/g'
else
(>&2 echo "Generating UTXO snapshot...")
${BITCOIN_CLI_CALL} dumptxoutset "${OUTPUT_PATH}"
fi
| 0 |
bitcoin/contrib | bitcoin/contrib/devtools/security-check.py | #!/usr/bin/env python3
# Copyright (c) 2015-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Perform basic security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
'''
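# Example invocation (illustrative; binary paths depend on your build tree):
#   contrib/devtools/security-check.py src/bitcoind src/bitcoin-cli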
import sys
import lief
def check_ELF_RELRO(binary) -> bool:
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for segment in binary.segments:
# Note: not checking p_flags == PF_R here, as linkers set the permission differently.
# This does not affect security: the permission flags of the GNU_RELRO program
# header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker needs to write to this area, so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also https://marc.info/?l=binutils&m=1498883354122353
if segment.type == lief.ELF.SEGMENT_TYPES.GNU_RELRO:
have_gnu_relro = True
have_bindnow = False
try:
flags = binary.get(lief.ELF.DYNAMIC_TAGS.FLAGS)
if flags.value & lief.ELF.DYNAMIC_FLAGS.BIND_NOW:
have_bindnow = True
except Exception:
have_bindnow = False
return have_gnu_relro and have_bindnow
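# The same properties can be inspected manually, e.g. (illustrative):
#   readelf -lW <binary> | grep GNU_RELRO
#   readelf -dW <binary> | grep BIND_NOW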
def check_ELF_Canary(binary) -> bool:
'''
Check for use of stack canary
'''
return binary.has_symbol('__stack_chk_fail')
def check_ELF_separate_code(binary) -> bool:
'''
Check that sections are appropriately separated in virtual memory,
based on their permissions. This checks for missing -Wl,-z,separate-code
and potentially other problems.
'''
R = lief.ELF.SEGMENT_FLAGS.R
W = lief.ELF.SEGMENT_FLAGS.W
E = lief.ELF.SEGMENT_FLAGS.X
EXPECTED_FLAGS = {
# Read + execute
'.init': R | E,
'.plt': R | E,
'.plt.got': R | E,
'.plt.sec': R | E,
'.text': R | E,
'.fini': R | E,
# Read-only data
'.interp': R,
'.note.gnu.property': R,
'.note.gnu.build-id': R,
'.note.ABI-tag': R,
'.gnu.hash': R,
'.dynsym': R,
'.dynstr': R,
'.gnu.version': R,
'.gnu.version_r': R,
'.rela.dyn': R,
'.rela.plt': R,
'.rodata': R,
'.eh_frame_hdr': R,
'.eh_frame': R,
'.qtmetadata': R,
'.gcc_except_table': R,
'.stapsdt.base': R,
# Writable data
'.init_array': R | W,
'.fini_array': R | W,
'.dynamic': R | W,
'.got': R | W,
'.data': R | W,
'.bss': R | W,
}
if binary.header.machine_type == lief.ELF.ARCH.PPC64:
# .plt is RW on ppc64 even with separate-code
EXPECTED_FLAGS['.plt'] = R | W
# For all LOAD program headers get mapping to the list of sections,
# and for each section, remember the flags of the associated program header.
flags_per_section = {}
for segment in binary.segments:
if segment.type == lief.ELF.SEGMENT_TYPES.LOAD:
for section in segment.sections:
flags_per_section[section.name] = segment.flags
# Spot-check ELF LOAD program header flags per section
# If these sections exist, check them against the expected R/W/E flags
for (section, flags) in flags_per_section.items():
if section in EXPECTED_FLAGS:
if int(EXPECTED_FLAGS[section]) != int(flags):
return False
return True
def check_ELF_control_flow(binary) -> bool:
'''
Check for control flow instrumentation
'''
main = binary.get_function_address('main')
content = binary.get_content_from_virtual_address(main, 4, lief.Binary.VA_TYPES.AUTO)
if content.tolist() == [243, 15, 30, 250]: # endbr64
return True
return False
def check_PE_DYNAMIC_BASE(binary) -> bool:
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
return lief.PE.DLL_CHARACTERISTICS.DYNAMIC_BASE in binary.optional_header.dll_characteristics_lists
# Must support high-entropy 64-bit address space layout randomization
# in addition to DYNAMIC_BASE to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(binary) -> bool:
'''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
return lief.PE.DLL_CHARACTERISTICS.HIGH_ENTROPY_VA in binary.optional_header.dll_characteristics_lists
def check_PE_RELOC_SECTION(binary) -> bool:
'''Check for a reloc section. This is required for functional ASLR.'''
return binary.has_relocations
def check_PE_control_flow(binary) -> bool:
'''
Check for control flow instrumentation
'''
main = binary.get_symbol('main').value
section_addr = binary.section_from_rva(main).virtual_address
virtual_address = binary.optional_header.imagebase + section_addr + main
content = binary.get_content_from_virtual_address(virtual_address, 4, lief.Binary.VA_TYPES.VA)
if content.tolist() == [243, 15, 30, 250]: # endbr64
return True
return False
def check_PE_Canary(binary) -> bool:
'''
Check for use of stack canary
'''
return binary.has_symbol('__stack_chk_fail')
def check_MACHO_NOUNDEFS(binary) -> bool:
'''
Check for no undefined references.
'''
return binary.header.has(lief.MachO.HEADER_FLAGS.NOUNDEFS)
def check_MACHO_FIXUP_CHAINS(binary) -> bool:
'''
Check for use of chained fixups.
'''
return binary.has_dyld_chained_fixups
def check_MACHO_Canary(binary) -> bool:
'''
Check for use of stack canary
'''
return binary.has_symbol('___stack_chk_fail')
def check_PIE(binary) -> bool:
'''
Check for position independent executable (PIE),
allowing for address space randomization.
'''
return binary.is_pie
def check_NX(binary) -> bool:
'''
Check for no stack execution
'''
return binary.has_nx
def check_MACHO_control_flow(binary) -> bool:
'''
Check for control flow instrumentation
'''
content = binary.get_content_from_virtual_address(binary.entrypoint, 4, lief.Binary.VA_TYPES.AUTO)
if content.tolist() == [243, 15, 30, 250]: # endbr64
return True
return False
BASE_ELF = [
('PIE', check_PIE),
('NX', check_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary),
('separate_code', check_ELF_separate_code),
]
BASE_PE = [
('PIE', check_PIE),
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
('NX', check_NX),
('RELOC_SECTION', check_PE_RELOC_SECTION),
('CONTROL_FLOW', check_PE_control_flow),
('Canary', check_PE_Canary),
]
BASE_MACHO = [
('NOUNDEFS', check_MACHO_NOUNDEFS),
('Canary', check_MACHO_Canary),
('FIXUP_CHAINS', check_MACHO_FIXUP_CHAINS),
]
CHECKS = {
lief.EXE_FORMATS.ELF: {
lief.ARCHITECTURES.X86: BASE_ELF + [('CONTROL_FLOW', check_ELF_control_flow)],
lief.ARCHITECTURES.ARM: BASE_ELF,
lief.ARCHITECTURES.ARM64: BASE_ELF,
lief.ARCHITECTURES.PPC: BASE_ELF,
lief.ARCHITECTURES.RISCV: BASE_ELF,
},
lief.EXE_FORMATS.PE: {
lief.ARCHITECTURES.X86: BASE_PE,
},
lief.EXE_FORMATS.MACHO: {
lief.ARCHITECTURES.X86: BASE_MACHO + [('PIE', check_PIE),
('NX', check_NX),
('CONTROL_FLOW', check_MACHO_control_flow)],
lief.ARCHITECTURES.ARM64: BASE_MACHO,
}
}
if __name__ == '__main__':
retval: int = 0
for filename in sys.argv[1:]:
try:
binary = lief.parse(filename)
etype = binary.format
arch = binary.abstract.header.architecture
binary.concrete
if etype == lief.EXE_FORMATS.UNKNOWN:
print(f'{filename}: unknown executable format')
retval = 1
continue
if arch == lief.ARCHITECTURES.NONE:
print(f'{filename}: unknown architecture')
retval = 1
continue
failed: list[str] = []
for (name, func) in CHECKS[etype][arch]:
if not func(binary):
failed.append(name)
if failed:
print(f'{filename}: failed {" ".join(failed)}')
retval = 1
except IOError:
print(f'{filename}: cannot open')
retval = 1
sys.exit(retval)
| 0 |
bitcoin/contrib | bitcoin/contrib/devtools/test-security-check.py | #!/usr/bin/env python3
# Copyright (c) 2015-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Test script for security-check.py
'''
import lief
import os
import subprocess
import unittest
from utils import determine_wellknown_cmd
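# Run directly (illustrative): python3 contrib/devtools/test-security-check.py
# (requires the toolchains exercised by the tests, e.g. gcc, mingw-w64, or clang, in PATH)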
def write_testcode(filename):
with open(filename, 'w', encoding="utf8") as f:
f.write('''
#include <stdio.h>
int main()
{
printf("the quick brown fox jumps over the lazy god\\n");
return 0;
}
''')
def clean_files(source, executable):
os.remove(source)
os.remove(executable)
def call_security_check(cc: str, source: str, executable: str, options) -> tuple:
# This should behave the same as AC_TRY_LINK, so arrange well-known flags
# in the same order as autoconf would.
#
# See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for
# reference.
env_flags: list[str] = []
for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']:
env_flags += filter(None, os.environ.get(var, '').split(' '))
subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True)
p = subprocess.run([os.path.join(os.path.dirname(__file__), 'security-check.py'), executable], stdout=subprocess.PIPE, text=True)
return (p.returncode, p.stdout.rstrip())
def get_arch(cc, source, executable):
subprocess.run([*cc, source, '-o', executable], check=True)
binary = lief.parse(executable)
arch = binary.abstract.header.architecture
os.remove(executable)
return arch
class TestSecurityChecks(unittest.TestCase):
def test_ELF(self):
source = 'test1.c'
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'gcc')
write_testcode(source)
arch = get_arch(cc, source, executable)
if arch == lief.ARCHITECTURES.X86:
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE NX RELRO Canary CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO Canary CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
(1, executable+': failed RELRO CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
(1, executable+': failed separate_code CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
(1, executable+': failed CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code', '-fcf-protection=full']),
(0, ''))
else:
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE NX RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
(1, executable+': failed RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
(1, executable+': failed separate_code'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
(0, ''))
clean_files(source, executable)
def test_PE(self):
source = 'test1.c'
executable = 'test1.exe'
cc = determine_wellknown_cmd('CC', 'x86_64-w64-mingw32-gcc')
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--disable-nxcompat','-Wl,--disable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE','-fno-stack-protector']),
(1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA NX RELOC_SECTION CONTROL_FLOW Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--disable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE','-fstack-protector-all', '-lssp']),
(1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA RELOC_SECTION CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-no-pie','-fno-PIE','-fstack-protector-all', '-lssp']),
(1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--disable-dynamicbase','-Wl,--disable-high-entropy-va','-pie','-fPIE','-fstack-protector-all', '-lssp']),
(1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA CONTROL_FLOW')) # -pie -fPIE does nothing unless --dynamicbase is also supplied
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--disable-high-entropy-va','-pie','-fPIE','-fstack-protector-all', '-lssp']),
(1, executable+': failed HIGH_ENTROPY_VA CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE','-fstack-protector-all', '-lssp']),
(1, executable+': failed CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE', '-fcf-protection=full','-fstack-protector-all', '-lssp']),
(0, ''))
clean_files(source, executable)
def test_MACHO(self):
source = 'test1.c'
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'clang')
write_testcode(source)
arch = get_arch(cc, source, executable)
if arch == lief.ARCHITECTURES.X86:
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector', '-Wl,-no_fixup_chains']),
(1, executable+': failed NOUNDEFS Canary FIXUP_CHAINS PIE NX CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector', '-Wl,-fixup_chains']),
(1, executable+': failed NOUNDEFS Canary PIE NX CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all', '-Wl,-fixup_chains']),
(1, executable+': failed NOUNDEFS PIE NX CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all', '-Wl,-fixup_chains']),
(1, executable+': failed NOUNDEFS PIE CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all', '-Wl,-fixup_chains']),
(1, executable+': failed PIE CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all', '-fcf-protection=full', '-Wl,-fixup_chains']),
(1, executable+': failed PIE'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-fstack-protector-all', '-fcf-protection=full', '-Wl,-fixup_chains']),
(0, ''))
else:
# arm64 darwin doesn't support non-PIE binaries, control flow or executable stacks
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector', '-Wl,-no_fixup_chains']),
(1, executable+': failed NOUNDEFS Canary FIXUP_CHAINS'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector', '-Wl,-fixup_chains']),
(1, executable+': failed NOUNDEFS Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fstack-protector-all', '-Wl,-fixup_chains']),
(1, executable+': failed NOUNDEFS'))
self.assertEqual(call_security_check(cc, source, executable, ['-fstack-protector-all', '-Wl,-fixup_chains']),
(0, ''))
clean_files(source, executable)
if __name__ == '__main__':
unittest.main()
| 0 |
bitcoin/contrib/devtools | bitcoin/contrib/devtools/iwyu/bitcoin.core.imp | # Fixups / upstreamed changes
[
{ include: [ "<bits/termios-c_lflag.h>", private, "<termios.h>", public ] },
{ include: [ "<bits/termios-struct.h>", private, "<termios.h>", public ] },
{ include: [ "<bits/termios-tcflow.h>", private, "<termios.h>", public ] },
{ include: [ "<bits/chrono.h>", private, "<chrono>", public ] },
]
| 0 |
bitcoin/contrib/devtools | bitcoin/contrib/devtools/bitcoin-tidy/CMakeLists.txt | cmake_minimum_required(VERSION 3.9)
project(bitcoin-tidy VERSION 1.0.0 DESCRIPTION "clang-tidy checks for Bitcoin Core")
include(GNUInstallDirs)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED True)
set(CMAKE_CXX_EXTENSIONS False)
# TODO: Figure out how to avoid the terminfo check
find_package(LLVM REQUIRED CONFIG)
find_program(CLANG_TIDY_EXE NAMES "clang-tidy-${LLVM_VERSION_MAJOR}" "clang-tidy" HINTS ${LLVM_TOOLS_BINARY_DIR})
message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}")
message(STATUS "Found clang-tidy: ${CLANG_TIDY_EXE}")
add_library(bitcoin-tidy MODULE bitcoin-tidy.cpp logprintf.cpp)
target_include_directories(bitcoin-tidy SYSTEM PRIVATE ${LLVM_INCLUDE_DIRS})
# Disable RTTI and exceptions as necessary
if (MSVC)
target_compile_options(bitcoin-tidy PRIVATE /GR-)
else()
target_compile_options(bitcoin-tidy PRIVATE -fno-rtti)
target_compile_options(bitcoin-tidy PRIVATE -fno-exceptions)
endif()
if(CMAKE_HOST_APPLE)
# ld64 expects no undefined symbols by default
target_link_options(bitcoin-tidy PRIVATE -Wl,-flat_namespace)
target_link_options(bitcoin-tidy PRIVATE -Wl,-undefined -Wl,suppress)
endif()
# Add warnings
if (MSVC)
target_compile_options(bitcoin-tidy PRIVATE /W4)
else()
target_compile_options(bitcoin-tidy PRIVATE -Wall)
target_compile_options(bitcoin-tidy PRIVATE -Wextra)
endif()
if(CMAKE_VERSION VERSION_LESS 3.27)
set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "--load=${CMAKE_BINARY_DIR}/${CMAKE_SHARED_MODULE_PREFIX}bitcoin-tidy${CMAKE_SHARED_MODULE_SUFFIX}" "-checks=-*,bitcoin-*")
else()
# CLANG_TIDY_COMMAND supports generator expressions as of 3.27
set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "--load=$<TARGET_FILE:bitcoin-tidy>" "-checks=-*,bitcoin-*")
endif()
# Create a dummy library that runs clang-tidy tests as a side-effect of building
add_library(bitcoin-tidy-tests OBJECT EXCLUDE_FROM_ALL example_logprintf.cpp)
add_dependencies(bitcoin-tidy-tests bitcoin-tidy)
set_target_properties(bitcoin-tidy-tests PROPERTIES CXX_CLANG_TIDY "${CLANG_TIDY_COMMAND}")
install(TARGETS bitcoin-tidy LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
| 0 |
bitcoin/contrib/devtools | bitcoin/contrib/devtools/bitcoin-tidy/bitcoin-tidy.cpp | // Copyright (c) 2023 Bitcoin Developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "logprintf.h"
#include <clang-tidy/ClangTidyModule.h>
#include <clang-tidy/ClangTidyModuleRegistry.h>
class BitcoinModule final : public clang::tidy::ClangTidyModule
{
public:
void addCheckFactories(clang::tidy::ClangTidyCheckFactories& CheckFactories) override
{
CheckFactories.registerCheck<bitcoin::LogPrintfCheck>("bitcoin-unterminated-logprintf");
}
};
static clang::tidy::ClangTidyModuleRegistry::Add<BitcoinModule>
X("bitcoin-module", "Adds bitcoin checks.");
volatile int BitcoinModuleAnchorSource = 0;
| 0 |
bitcoin/contrib/devtools | bitcoin/contrib/devtools/bitcoin-tidy/example_logprintf.cpp | // Copyright (c) 2023 Bitcoin Developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <string>
// Test for bitcoin-unterminated-logprintf
enum LogFlags {
NONE
};
enum Level {
None
};
template <typename... Args>
static inline void LogPrintf_(const std::string& logging_function, const std::string& source_file, const int source_line, const LogFlags flag, const Level level, const char* fmt, const Args&... args)
{
}
#define LogPrintLevel_(category, level, ...) LogPrintf_(__func__, __FILE__, __LINE__, category, level, __VA_ARGS__)
#define LogPrintf(...) LogPrintLevel_(LogFlags::NONE, Level::None, __VA_ARGS__)
#define LogPrint(category, ...) \
do { \
LogPrintf(__VA_ARGS__); \
} while (0)
class CWallet
{
std::string GetDisplayName() const
{
return "default wallet";
}
public:
template <typename... Params>
void WalletLogPrintf(const char* fmt, Params... parameters) const
{
LogPrintf(("%s " + std::string{fmt}).c_str(), GetDisplayName(), parameters...);
};
};
struct ScriptPubKeyMan
{
std::string GetDisplayName() const
{
return "default wallet";
}
template <typename... Params>
void WalletLogPrintf(const char* fmt, Params... parameters) const
{
LogPrintf(("%s " + std::string{fmt}).c_str(), GetDisplayName(), parameters...);
};
};
void good_func()
{
LogPrintf("hello world!\n");
}
void good_func2()
{
CWallet wallet;
wallet.WalletLogPrintf("hi\n");
ScriptPubKeyMan spkm;
spkm.WalletLogPrintf("hi\n");
const CWallet& walletref = wallet;
walletref.WalletLogPrintf("hi\n");
auto* walletptr = new CWallet();
walletptr->WalletLogPrintf("hi\n");
delete walletptr;
}
void bad_func()
{
LogPrintf("hello world!");
}
void bad_func2()
{
LogPrintf("");
}
void bad_func3()
{
// Ending in "..." has no special meaning.
LogPrintf("hello world!...");
}
void bad_func4_ignored()
{
LogPrintf("hello world!"); // NOLINT(bitcoin-unterminated-logprintf)
}
void bad_func5()
{
CWallet wallet;
wallet.WalletLogPrintf("hi");
ScriptPubKeyMan spkm;
spkm.WalletLogPrintf("hi");
const CWallet& walletref = wallet;
walletref.WalletLogPrintf("hi");
auto* walletptr = new CWallet();
walletptr->WalletLogPrintf("hi");
delete walletptr;
}
| 0 |
bitcoin/contrib/devtools | bitcoin/contrib/devtools/bitcoin-tidy/README | # Bitcoin Tidy
Example Usage:
```bash
cmake -S . -B build -DLLVM_DIR=$(llvm-config --cmakedir) -DCMAKE_BUILD_TYPE=Release
cmake --build build -j$(nproc)
cmake --build build --target bitcoin-tidy-tests -j$(nproc)
```
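Assuming a successful build, the plugin can also be loaded into a bare
`clang-tidy` invocation (illustrative; the module's file name and extension are
platform-dependent):
```bash
clang-tidy --load=build/libbitcoin-tidy.so --list-checks -checks='-*,bitcoin-*'
```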
| 0 |
bitcoin/contrib/devtools | bitcoin/contrib/devtools/bitcoin-tidy/logprintf.h | // Copyright (c) 2023 Bitcoin Developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef LOGPRINTF_CHECK_H
#define LOGPRINTF_CHECK_H
#include <clang-tidy/ClangTidyCheck.h>
namespace bitcoin {
// Warn about any use of LogPrintf that does not end with a newline.
class LogPrintfCheck final : public clang::tidy::ClangTidyCheck
{
public:
LogPrintfCheck(clang::StringRef Name, clang::tidy::ClangTidyContext* Context)
: clang::tidy::ClangTidyCheck(Name, Context) {}
bool isLanguageVersionSupported(const clang::LangOptions& LangOpts) const override
{
return LangOpts.CPlusPlus;
}
void registerMatchers(clang::ast_matchers::MatchFinder* Finder) override;
void check(const clang::ast_matchers::MatchFinder::MatchResult& Result) override;
};
} // namespace bitcoin
#endif // LOGPRINTF_CHECK_H
| 0 |
bitcoin/contrib/devtools | bitcoin/contrib/devtools/bitcoin-tidy/logprintf.cpp | // Copyright (c) 2023 Bitcoin Developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "logprintf.h"
#include <clang/AST/ASTContext.h>
#include <clang/ASTMatchers/ASTMatchFinder.h>
namespace {
AST_MATCHER(clang::StringLiteral, unterminated)
{
size_t len = Node.getLength();
if (len > 0 && Node.getCodeUnit(len - 1) == '\n') {
return false;
}
return true;
}
} // namespace
namespace bitcoin {
void LogPrintfCheck::registerMatchers(clang::ast_matchers::MatchFinder* finder)
{
using namespace clang::ast_matchers;
/*
  LogPrintf_(..., ..., ..., ..., ..., "foo", ...)
  (the format string is argument index 5)
*/
finder->addMatcher(
callExpr(
callee(functionDecl(hasName("LogPrintf_"))),
hasArgument(5, stringLiteral(unterminated()).bind("logstring"))),
this);
/*
auto walletptr = &wallet;
wallet.WalletLogPrintf("foo");
wallet->WalletLogPrintf("foo");
*/
finder->addMatcher(
cxxMemberCallExpr(
callee(cxxMethodDecl(hasName("WalletLogPrintf"))),
hasArgument(0, stringLiteral(unterminated()).bind("logstring"))),
this);
}
void LogPrintfCheck::check(const clang::ast_matchers::MatchFinder::MatchResult& Result)
{
if (const clang::StringLiteral* lit = Result.Nodes.getNodeAs<clang::StringLiteral>("logstring")) {
const clang::ASTContext& ctx = *Result.Context;
const auto user_diag = diag(lit->getEndLoc(), "Unterminated format string used with LogPrintf");
const auto& loc = lit->getLocationOfByte(lit->getByteLength(), *Result.SourceManager, ctx.getLangOpts(), ctx.getTargetInfo());
user_diag << clang::FixItHint::CreateInsertion(loc, "\\n");
}
}
} // namespace bitcoin
| 0 |
bitcoin/contrib | bitcoin/contrib/guix/INSTALL.md | # Guix Installation and Setup
This only needs to be done once per machine. If you have already completed the
installation and setup, please proceed to [perform a build](./README.md).
Otherwise, you may choose from one of the following options to install Guix:
1. Using the official **shell installer script** [⤓ skip to section][install-script]
- Maintained by Guix developers
- Easiest (automatically performs *most* setup)
- Works on nearly all Linux distributions
- Only installs latest release
- Binary installation only, requires high level of trust
- Note: The script needs to be run as root, so it should be inspected before it's run
2. Using the official **binary tarball** [⤓ skip to section][install-bin-tarball]
- Maintained by Guix developers
- Normal difficulty (full manual setup required)
- Works on nearly all Linux distributions
- Installs any release
- Binary installation only, requires high level of trust
3. Using fanquake's **Docker image** [↗︎ external instructions][install-fanquake-docker]
- Maintained by fanquake
- Easy (automatically performs *some* setup)
- Works wherever Docker images work
- Installs any release
- Binary installation only, requires high level of trust
4. Using a **distribution-maintained package** [⤓ skip to section][install-distro-pkg]
- Maintained by distribution's Guix package maintainer
- Normal difficulty (manual setup required)
- Works only on distributions with Guix packaged, see: https://repology.org/project/guix/versions
- Installs a release decided on by package maintainer
- Source or binary installation depending on the distribution
5. Building **from source** [⤓ skip to section][install-source]
- Maintained by you
- Hard, but rewarding
- Can be made to work on most Linux distributions
- Installs any commit (more granular)
- Source installation, requires lower level of trust
## Options 1 and 2: Using the official shell installer script or binary tarball
The installation instructions for both the official shell installer script and
the binary tarballs can be found in the GNU Guix Manual's [Binary Installation
section](https://guix.gnu.org/manual/en/html_node/Binary-Installation.html).
Note that running through the binary tarball installation steps is largely
equivalent to manually performing what the shell installer script does.
Note that at the time of writing (July 5th, 2021), the shell installer script
automatically creates an `/etc/profile.d` entry which the binary tarball
installation instructions do not ask you to create. However, you will likely
need this entry for better desktop integration. Please see [this
section](#add-an-etcprofiled-entry) for instructions on how to add a
`/etc/profile.d/guix.sh` entry.
Regardless of which installation option you chose, the changes to
`/etc/profile.d` will not take effect until the next shell or desktop session,
so you should log out and log back in.
## Option 3: Using fanquake's Docker image
Please refer to fanquake's instructions
[here](https://github.com/fanquake/core-review/tree/master/guix).
Note that the `Dockerfile` is largely equivalent to running through the binary
tarball installation steps.
## Option 4: Using a distribution-maintained package
Note that this section is based on the distro packaging situation at the time of
writing (July 2021). Guix is expected to be more widely packaged over time. For
an up-to-date view on Guix's package status/version across distros, please see:
https://repology.org/project/guix/versions
### Debian / Ubuntu
Guix v1.2.0 is available as a distribution package starting in [Debian
11](https://packages.debian.org/bullseye/guix) and [Ubuntu
21.04](https://packages.ubuntu.com/search?keywords=guix).
Note that if you intend on using Guix without using any substitutes (more
details [here][security-model]), v1.2.0 has a known problem when building GnuTLS
from source. Solutions and workarounds are documented
[here](#gnutls-test-suite-fail-status-request-revoked).
To install:
```sh
sudo apt install guix
```
For up-to-date information on Debian and Ubuntu's release history:
- [Debian release history](https://www.debian.org/releases/)
- [Ubuntu release history](https://ubuntu.com/about/release-cycle)
### Arch Linux
Guix is available in the AUR as
[`guix`](https://aur.archlinux.org/packages/guix/), please follow the
installation instructions in the Arch Linux Wiki ([live
link](https://wiki.archlinux.org/index.php/Guix#AUR_Package_Installation),
[2021/03/30
permalink](https://wiki.archlinux.org/index.php?title=Guix&oldid=637559#AUR_Package_Installation))
to install Guix.
At the time of writing (2021/03/30), the `check` phase will fail if the path to
guix's build directory is longer than 36 characters due to an anachronistic
character limit on the shebang line. Since the `check` phase happens after the
`build` phase, which may take quite a long time, it is recommended that users
either:
1. Skip the `check` phase
- For `makepkg`: `makepkg --nocheck ...`
- For `yay`: `yay --mflags="--nocheck" ...`
- For `paru`: `paru --nocheck ...`
2. Or, check their build directory's length beforehand
- For those building with `makepkg`: `pwd | wc -c`
## Option 5: Building from source
Building Guix from source is a rather involved process but a rewarding one for
those looking to minimize trust and maximize customizability (e.g. building a
particular commit of Guix). Previous experience with using autotools-style build
systems to build packages from source will be helpful. *hic sunt dracones.*
I strongly urge you to at least skim through the entire section once before you
start issuing commands, as it will save you a lot of unnecessary pain and
anguish.
### Installing common build tools
There are a few basic build tools that are required for most things we'll build,
so let's install them now:
Text transformation/i18n:
- `autopoint` (sometimes packaged in `gettext`)
- `help2man`
- `po4a`
- `texinfo`
Build system tools:
- `g++` w/ C++11 support
- `libtool`
- `autoconf`
- `automake`
- `pkg-config` (sometimes packaged as `pkgconf`)
- `make`
- `cmake`
Miscellaneous:
- `git`
- `gnupg`
- `python3`
### Building and Installing Guix's dependencies
In order to build Guix itself from source, we need to first make sure that the
necessary dependencies are installed and discoverable. The most up-to-date list
of Guix's dependencies is kept in the ["Requirements"
section](https://guix.gnu.org/manual/en/html_node/Requirements.html) of the Guix
Reference Manual.
Depending on your distribution, most or all of these dependencies may already be
packaged and installable without manually building and installing.
For reference, the graphic below outlines Guix v1.3.0's dependency graph:
![bootstrap map](https://user-images.githubusercontent.com/6399679/125064185-a9a59880-e0b0-11eb-82c1-9b8e5dc9950d.png)
#### Consider /tmp on tmpfs
If you use an NVME (SSD) drive, you may encounter [cryptic build errors](#coreutils-fail-teststail-2inotify-dir-recreate). Mounting a [tmpfs at /tmp](https://ubuntu.com/blog/data-driven-analysis-tmp-on-tmpfs) should prevent this and may improve performance as a bonus.
#### Guile
##### Choosing a Guile version and sticking to it
One of the first things you need to decide is which Guile version you want to
use: Guile v2.2 or Guile v3.0. Unlike the python2 to python3 transition, Guile
v2.2 and Guile v3.0 are largely compatible, as evidenced by the fact that most
Guile packages and even [Guix
itself](https://guix.gnu.org/en/blog/2020/guile-3-and-guix/) support running on
both.
What is important here is that you **choose one**, and you **remain consistent**
with your choice throughout **all Guile-related packages**, no matter if they
are installed via the distribution's package manager or installed from source.
This is because the files for Guile packages are installed to directories which
are separated based on the Guile version.
###### Example: Checking that Ubuntu's `guile-git` is compatible with your chosen Guile version
On Ubuntu Focal:
```sh
$ apt show guile-git
Package: guile-git
...
Depends: guile-2.2, guile-bytestructures, libgit2-dev
...
```
As you can see, the package `guile-git` depends on `guile-2.2`, meaning that it
was likely built for Guile v2.2. This means that if you decided to use Guile
v3.0 on Ubuntu Focal, you would need to build guile-git from source instead of
using the distribution package.
On Ubuntu Hirsute:
```sh
$ apt show guile-git
Package: guile-git
...
Depends: guile-3.0 | guile-2.2, guile-bytestructures (>= 1.0.7-3~), libgit2-dev (>= 1.0)
...
```
In this case, `guile-git` depends on either `guile-3.0` or `guile-2.2`, meaning
that it would work no matter what Guile version you decided to use.
###### Corner case: Multiple versions of Guile on one system
It is recommended to only install one version of Guile, so that build systems do
not get confused about which Guile to use.
However, if you insist on having both Guile v2.2 and Guile v3.0 installed on
your system, then you need to **consistently** specify one of
`GUILE_EFFECTIVE_VERSION=3.0` or `GUILE_EFFECTIVE_VERSION=2.2` to all
`./configure` invocations for Guix and its dependencies.
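For example (illustrative invocation; adjust the prefix to your setup):
```sh
GUILE_EFFECTIVE_VERSION=3.0 ./configure --prefix=/usr/local
```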
##### Installing Guile
Guile is most likely already packaged for your distribution, so after you have
[chosen a Guile version](#choosing-a-guile-version-and-sticking-to-it), install
it via your distribution's package manager.
If your distribution splits packages into `-dev`-suffixed and
non-`-dev`-suffixed sub-packages (as is the case for Debian-derived
distributions), please make sure to install both. For example, to install Guile
v2.2 on Debian/Ubuntu:
```sh
apt install guile-2.2 guile-2.2-dev
```
#### Mixing distribution packages and source-built packages
At the time of writing, most distributions have _some_ of Guix's dependencies
packaged, but not all. This means that you may want to install the distribution
package for some dependencies, and manually build-from-source for others.
Distribution packages usually install to `/usr`, which is different from the
default `./configure` prefix of source-built packages: `/usr/local`.
This means that if you mix-and-match distribution packages and source-built
packages and do not specify exactly `--prefix=/usr` to `./configure` for
source-built packages, you will need to augment the `GUILE_LOAD_PATH` and
`GUILE_LOAD_COMPILED_PATH` environment variables so that Guile will look
under the right prefix and find your source-built packages.
For example, if you are using Guile v2.2, and have Guile packages in the
`/usr/local` prefix, either add the following lines to your `.profile` or
`.bash_profile` so that the environment variable is properly set for all future
shell logins, or paste the lines into a POSIX-style shell to temporarily modify
the environment variables of your current shell session.
```sh
# Help Guile v2.2.x find packages in /usr/local
export GUILE_LOAD_PATH="/usr/local/share/guile/site/2.2${GUILE_LOAD_PATH:+:}$GUILE_LOAD_PATH"
export GUILE_LOAD_COMPILED_PATH="/usr/local/lib/guile/2.2/site-ccache${GUILE_LOAD_COMPILED_PATH:+:}$GUILE_LOAD_COMPILED_PATH"
```
Note that these environment variables are used to check for packages during
`./configure`, so they should be set as soon as possible should you want to use
a prefix other than `/usr`.
#### Building and installing source-built packages
***IMPORTANT**: A few dependencies have non-obvious quirks/errata which are
documented in the sub-sections immediately below. Please read these sections
before proceeding to build and install these packages.*
Although you should always refer to the README or INSTALL files for the most
accurate information, most of these dependencies use autoconf-style build
systems (check if there's a `configure.ac` file), and will likely do the right
thing with the following:
Clone the repository and check out the latest release:
```sh
git clone <git-repo-of-dependency>/<dependency>.git
cd <dependency>
git tag -l # check for the latest release
git checkout <latest-release>
```
For autoconf-based build systems (if `./autogen.sh` or `configure.ac` exists at
the root of the repository):
```sh
./autogen.sh || autoreconf -vfi
./configure --prefix=<prefix>
make
sudo make install
```
For CMake-based build systems (if `CMakeLists.txt` exists at the root of the
repository):
```sh
mkdir build && cd build
cmake .. -DCMAKE_INSTALL_PREFIX=<prefix>
sudo cmake --build . --target install
```
If you choose not to specify exactly `--prefix=/usr` to `./configure`, please
make sure you've carefully read the [previous section] on mixing distribution
packages and source-built packages.
##### Binding packages require `-dev`-suffixed packages
Relevant for:
- Everyone
When building bindings, the `-dev`-suffixed version of the original package
needs to be installed. For example, building `Guile-zlib` on Debian-derived
distributions requires that `zlib1g-dev` is installed.
When using bindings, the `-dev`-suffixed version of the original package still
needs to be installed. This is particularly problematic when distribution
packages are mispackaged like `guile-sqlite3` is in Ubuntu Focal such that
installing `guile-sqlite3` does not automatically install `libsqlite3-dev` as a
dependency.
Below is a list of relevant Guile bindings and their corresponding `-dev`
packages in Debian at the time of writing.
| Guile binding package | -dev Debian package |
|-----------------------|---------------------|
| guile-gcrypt | libgcrypt-dev |
| guile-git | libgit2-dev |
| guile-gnutls | (none) |
| guile-json | (none) |
| guile-lzlib | liblz-dev |
| guile-ssh | libssh-dev |
| guile-sqlite3 | libsqlite3-dev |
| guile-zlib | zlib1g-dev |
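For example, to install the `guile-sqlite3` binding on a Debian-derived
distribution together with the `-dev` package it needs:
```sh
apt install guile-sqlite3 libsqlite3-dev
```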
##### `guile-git` actually depends on `libgit2 >= 1.1`
Relevant for:
- Those building `guile-git` from source against `libgit2 < 1.1`
- Those installing `guile-git` from their distribution where `guile-git` is
built against `libgit2 < 1.1`
As of v0.4.0, `guile-git` claims to only require `libgit2 >= 0.28.0`; however,
it actually requires `libgit2 >= 1.1`. Otherwise, it will be confused by a
reference like `origin/keyring`: instead of interpreting the reference as "the
'keyring' branch of the 'origin' remote", the reference is interpreted as "the
branch literally named 'origin/keyring'".
This is especially notable because Ubuntu Focal packages `libgit2 v0.28.4`, and
`guile-git` is built against it.
Should you be in this situation, you need to build both `libgit2 v1.1.x` and
`guile-git` from source.
Source: https://logs.guix.gnu.org/guix/2020-11-12.log#232527
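To check which `libgit2` version your distribution provides (and thus whether
you are affected), you can query `pkg-config`, assuming the corresponding
`-dev` package is installed:
```sh
pkg-config --modversion libgit2  # should print >= 1.1
```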
##### `{scheme,guile}-bytestructures` v1.0.8 and v1.0.9 are broken for Guile v2.2
Relevant for:
- Those building `{scheme,guile}-bytestructures` from source against Guile v2.2
Commit
[707eea3](https://github.com/TaylanUB/scheme-bytestructures/commit/707eea3a85e1e375e86702229ebf73d496377669)
introduced a regression for Guile v2.2 and was first included in v1.0.8; this
was later corrected in commit
[ec9a721](https://github.com/TaylanUB/scheme-bytestructures/commit/ec9a721957c17bcda13148f8faa5f06934431ff7)
and included in v1.1.0.
TL;DR If you decided to use Guile v2.2, do not use `{scheme,guile}-bytestructures` v1.0.8 or v1.0.9.
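If you are building from source, you can confirm that your checkout is not one
of the broken releases before building:
```sh
git describe --tags  # run inside the scheme-bytestructures checkout; avoid v1.0.8 and v1.0.9
```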
### Building and Installing Guix itself
Start by cloning Guix:
```
git clone https://git.savannah.gnu.org/git/guix.git
cd guix
```
You will likely want to build the latest release.
```
git branch -a -l 'origin/version-*' # check for the latest release
git checkout <latest-release>
```
Bootstrap the build system:
```
./bootstrap
```
Configure with the recommended `--localstatedir` flag:
```
./configure --localstatedir=/var
```
Note: If you intend to hack on Guix in the future, you will need to supply the
same `--localstatedir=` flag for all future Guix `./configure` invocations. See
the last paragraph of this
[section](https://guix.gnu.org/manual/en/html_node/Requirements.html) for more
details.
Build Guix (this will take a while):
```
make -j$(nproc)
```
Install Guix:
```
sudo make install
```
### Post-"build from source" Setup
#### Creating and starting a `guix-daemon-original` service with a fixed `argv[0]`
At this point, guix will be installed to `${bindir}`, which is likely
`/usr/local/bin` if you did not override directory variables at
`./configure`-time. More information on standard Automake directory variables
can be found
[here](https://www.gnu.org/software/automake/manual/html_node/Standard-Directory-Variables.html).
However, the Guix init scripts and service configurations for Upstart, systemd,
SysV, and OpenRC are installed (in `${libdir}`) to launch
`${localstatedir}/guix/profiles/per-user/root/current-guix/bin/guix-daemon`,
which does not yet exist, and will only exist after [`root` performs their first
`guix pull`](#guix-pull-as-root).
We need to create a `-original` version of these init scripts that's pointed to
the binaries we just built and `make install`'ed in `${bindir}` (normally,
`/usr/local/bin`).
Example for `systemd`, run as `root`:
```sh
# Create guix-daemon-original.service by modifying guix-daemon.service
libdir=# set according to your PREFIX (default is /usr/local/lib)
bindir="$(dirname $(command -v guix-daemon))"
sed -E -e "s|/\S*/guix/profiles/per-user/root/current-guix/bin/guix-daemon|${bindir}/guix-daemon|" "${libdir}"/systemd/system/guix-daemon.service > /etc/systemd/system/guix-daemon-original.service
chmod 664 /etc/systemd/system/guix-daemon-original.service
# Make systemd recognize the new service
systemctl daemon-reload
# Make sure that the non-working guix-daemon.service is stopped and disabled
systemctl stop guix-daemon
systemctl disable guix-daemon
# Make sure that the working guix-daemon-original.service is started and enabled
systemctl enable guix-daemon-original
systemctl start guix-daemon-original
```
#### Creating `guix-daemon` users / groups
Please see the [relevant
section](https://guix.gnu.org/manual/en/html_node/Build-Environment-Setup.html)
in the Guix Reference Manual for more details.
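For reference, a minimal sketch of creating the build group and users, closely
following the Guix Reference Manual (run as `root`; adjust the user count to
taste):
```sh
groupadd --system guixbuild
for i in $(seq -w 1 10); do
    useradd -g guixbuild -G guixbuild           \
            -d /var/empty -s "$(command -v nologin)" \
            -c "Guix build user $i" --system    \
            "guixbuild$i"
done
```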
## Optional setup
At this point, you are set up to [use Guix to build Bitcoin
Core](./README.md#usage). However, if you want to polish your setup a bit and
make it "what Guix intended", then read the next few subsections.
### Add an `/etc/profile.d` entry
This section definitely does not apply to you if you installed Guix using:
1. The shell installer script
2. fanquake's Docker image
3. Debian's `guix` package
#### Background
Although Guix knows how to update itself and its packages, it does so in a
non-invasive way (it does not modify `/usr/local/bin/guix`).
Instead, it does the following:
- After a `guix pull`, it updates
`/var/guix/profiles/per-user/$USER/current-guix`, and creates a symlink
targeting this directory at `$HOME/.config/guix/current`
- After a `guix install`, it updates
`/var/guix/profiles/per-user/$USER/guix-profile`, and creates a symlink
targeting this directory at `$HOME/.guix-profile`
Therefore, in order for these operations to affect your shell/desktop sessions
(and for the principle of least astonishment to hold), their corresponding
directories have to be added to well-known environment variables like `$PATH`,
`$INFOPATH`, `$XDG_DATA_DIRS`, etc.
In other words, if `$HOME/.config/guix/current/bin` does not exist in your
`$PATH`, a `guix pull` will have no effect on what `guix` you are using. Same
goes for `$HOME/.guix-profile/bin`, `guix install`, and installed packages.
Helpfully, after a `guix pull` or `guix install`, a message will be printed like
so:
```
hint: Consider setting the necessary environment variables by running:
GUIX_PROFILE="$HOME/.guix-profile"
. "$GUIX_PROFILE/etc/profile"
Alternately, see `guix package --search-paths -p "$HOME/.guix-profile"'.
```
However, this is somewhat tedious to do for both `guix pull` and `guix install`
for each user on the system who wants to properly use `guix`. I recommend that
you add an entry to `/etc/profile.d` instead. This is done by default when
installing a Debian package later than 1.2.0-4 and when using the shell script
installer.
#### Instructions
Create `/etc/profile.d/guix.sh` with the following content:
```sh
# _GUIX_PROFILE: `guix pull` profile
_GUIX_PROFILE="$HOME/.config/guix/current"
if [ -L "$_GUIX_PROFILE" ]; then
export PATH="$_GUIX_PROFILE/bin${PATH:+:}$PATH"
# Export INFOPATH so that the updated info pages can be found
# and read by both /usr/bin/info and/or $GUIX_PROFILE/bin/info
# When INFOPATH is unset, add a trailing colon so that Emacs
# searches 'Info-default-directory-list'.
export INFOPATH="$_GUIX_PROFILE/share/info:$INFOPATH"
fi
# GUIX_PROFILE: User's default profile
GUIX_PROFILE="$HOME/.guix-profile"
[ -L "$GUIX_PROFILE" ] || return
GUIX_LOCPATH="$GUIX_PROFILE/lib/locale"
export GUIX_PROFILE GUIX_LOCPATH
[ -f "$GUIX_PROFILE/etc/profile" ] && . "$GUIX_PROFILE/etc/profile"
# set XDG_DATA_DIRS to include Guix installations
export XDG_DATA_DIRS="$GUIX_PROFILE/share:${XDG_DATA_DIRS:-/usr/local/share/:/usr/share/}"
```
Please note that this will not take effect until the next shell or desktop
session (log out and log back in).
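To apply the changes to your current shell session without logging out, you can
source the file directly:
```sh
. /etc/profile.d/guix.sh
```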
### `guix pull` as root
Before you do this, you need to read the section on [choosing your security
model][security-model] and adjust `guix` and `guix-daemon` flags according to
your choice, as invoking `guix pull` may pull substitutes from substitute
servers (which you may not want).
As mentioned in a previous section, Guix expects
`${localstatedir}/guix/profiles/per-user/root/current-guix` to be populated with
`root`'s Guix profile, `guix pull`-ed and built by some former version of Guix.
However, this is not the case when we build from source. Therefore, we need to
perform a `guix pull` as `root`:
```sh
sudo --login guix pull --branch=version-<latest-release-version>
# or
sudo --login guix pull --commit=<particular-commit>
```
`guix pull` is quite a long process (especially if you're using
`--no-substitutes`). If you encounter build problems, please refer to the
[troubleshooting section](#troubleshooting).
Note that running a bare `guix pull` with no commit or branch specified will
pull the latest commit on Guix's master branch, which is likely fine, but not
recommended.
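To find the latest release branch without a local Guix checkout, you can list
the release branches directly from the remote:
```sh
git ls-remote --heads https://git.savannah.gnu.org/git/guix.git 'version-*'
```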
If you installed Guix from source, you may get an error like the following:
```sh
error: while creating symlink '/root/.config/guix/current' No such file or directory
```
To resolve this, simply:
```
sudo mkdir -p /root/.config/guix
```
Then try the `guix pull` command again.
After the `guix pull` finishes successfully,
`${localstatedir}/guix/profiles/per-user/root/current-guix` should be populated.
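You can verify this (assuming the default `--localstatedir=/var`) with:
```sh
ls -l /var/guix/profiles/per-user/root/current-guix/bin/guix-daemon
```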
#### Using the newly-pulled `guix` by restarting the daemon
Depending on how you installed Guix, you should now make sure that your init
scripts and service configurations point to the newly-pulled `guix-daemon`.
##### If you built Guix from source
If you followed the instructions for [fixing argv\[0\]][fix-argv0], you can now
do the following:
```sh
systemctl stop guix-daemon-original
systemctl disable guix-daemon-original
systemctl enable guix-daemon
systemctl start guix-daemon
```
Remember to set `--no-substitutes` in `$libdir/systemd/system/guix-daemon.service` (and apply any other customizations) if you used them for `guix-daemon-original.service`.
##### If you installed Guix via the Debian/Ubuntu distribution packages
You will need to create a `guix-daemon-latest` service which points to the new
`guix` rather than a pinned one.
```sh
# Create guix-daemon-latest.service by modifying guix-daemon.service
sed -E -e "s|/usr/bin/guix-daemon|/var/guix/profiles/per-user/root/current-guix/bin/guix-daemon|" /etc/systemd/system/guix-daemon.service > /lib/systemd/system/guix-daemon-latest.service
chmod 664 /lib/systemd/system/guix-daemon-latest.service
# Make systemd recognize the new service
systemctl daemon-reload
# Make sure that the old guix-daemon.service is stopped and disabled
systemctl stop guix-daemon
systemctl disable guix-daemon
# Make sure that the new guix-daemon-latest.service is started and enabled
systemctl enable guix-daemon-latest
systemctl start guix-daemon-latest
```
##### If you installed Guix via lantw44's Arch Linux AUR package
At the time of writing (July 5th, 2021) the systemd unit for "updated Guix" is
`guix-daemon-latest.service`, therefore, you should do the following:
```sh
systemctl stop guix-daemon
systemctl disable guix-daemon
systemctl enable guix-daemon-latest
systemctl start guix-daemon-latest
```
##### Otherwise...
Simply do:
```sh
systemctl restart guix-daemon
```
### Checking everything
If you followed all the steps above to make your Guix setup "prim and proper,"
you can check that you did everything properly by running through this
checklist.
1. `/etc/profile.d/guix.sh` should exist and be sourced at each shell login
2. `guix describe` should not print `guix describe: error: failed to determine
origin`, but rather something like:
```
Generation 38 Feb 22 2021 16:39:31 (current)
guix f350df4
repository URL: https://git.savannah.gnu.org/git/guix.git
branch: version-1.2.0
commit: f350df405fbcd5b9e27e6b6aa500da7f101f41e7
```
3. `guix-daemon` should be running from `${localstatedir}/guix/profiles/per-user/root/current-guix`
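A few quick spot checks for the list above (a sketch, assuming systemd and the
default `--localstatedir=/var`):
```sh
[ -f /etc/profile.d/guix.sh ] && echo "profile.d entry present"  # item 1
guix describe                                                    # item 2: should print generation info
pgrep -af guix-daemon  # item 3: path should be under /var/guix/profiles/per-user/root/current-guix
```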
# Troubleshooting
## Derivation failed to build
When you see a build failure like below:
```
building /gnu/store/...-foo-3.6.12.drv...
/ 'check' phasenote: keeping build directory `/tmp/guix-build-foo-3.6.12.drv-0'
builder for `/gnu/store/...-foo-3.6.12.drv' failed with exit code 1
build of /gnu/store/...-foo-3.6.12.drv failed
View build log at '/var/log/guix/drvs/../...-foo-3.6.12.drv.bz2'.
cannot build derivation `/gnu/store/...-qux-7.69.1.drv': 1 dependencies couldn't be built
cannot build derivation `/gnu/store/...-bar-3.16.5.drv': 1 dependencies couldn't be built
cannot build derivation `/gnu/store/...-baz-2.0.5.drv': 1 dependencies couldn't be built
guix time-machine: error: build of `/gnu/store/...-baz-2.0.5.drv' failed
```
It means that `guix` failed to build a package named `foo`, which was a
dependency of `qux`, `bar`, and `baz`. Importantly, note that the last "failed"
line is not necessarily the root cause; the first "failed" line is.
Most of the time, the build failure is due to a spurious test failure or the
package's build system/test suite breaking when running multi-threaded. To
rebuild _just_ this derivation in a single-threaded fashion (please don't forget
to add other `guix` flags like `--no-substitutes` as appropriate):
```sh
$ guix build --cores=1 /gnu/store/...-foo-3.6.12.drv
```
If the single-threaded rebuild did not succeed, you may need to dig deeper.
You may view `foo`'s build logs in `less` like so (please replace paths with the
path you see in the build failure output):
```sh
$ bzcat /var/log/guix/drvs/../...-foo-3.6.12.drv.bz2 | less
```
`foo`'s build directory is also preserved and available at
`/tmp/guix-build-foo-3.6.12.drv-0`. However, if you fail to build `foo` multiple
times, it may be `/tmp/...drv-1` or `/tmp/...drv-2`. Always consult the build
failure output for the most accurate, up-to-date information.
### openssl-1.1.1l and openssl-1.1.1n
OpenSSL includes tests that will fail once some certificate has expired. A workaround
is to change your system clock:
```sh
sudo timedatectl set-ntp no
sudo date --set "28 may 2022 15:00:00"
sudo --login guix build --cores=1 /gnu/store/g9alz81w4q03ncm542487xd001s6akd4-openssl-1.1.1l.drv
sudo --login guix build --cores=1 /gnu/store/mw6ax0gk33gh082anrdrxp2flrbskxv6-openssl-1.1.1n.drv
sudo timedatectl set-ntp yes
```
### python(-minimal): [Errno 84] Invalid or incomplete multibyte or wide character
This error occurs when your `$TMPDIR` (default: `/tmp`) exists on a filesystem
which rejects characters not present in the UTF-8 character set. An example is
ZFS with the `utf8only=on` option set.
More information: https://bugs.python.org/issue37584
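One possible workaround (a sketch, assuming you have a UTF-8-friendly
filesystem mounted elsewhere and use systemd) is to point the daemon's
`TMPDIR` there via a drop-in override:
```sh
# Open an editor for a guix-daemon override, then add (adjusting the path):
#   [Service]
#   Environment=TMPDIR=/var/tmp
systemctl edit guix-daemon
systemctl daemon-reload && systemctl restart guix-daemon
```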
### GnuTLS: test-suite FAIL: status-request-revoked
*The derivation is likely identified by: `/gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv`*
This unfortunate error is most common for non-substitute builders who installed
Guix v1.2.0. The problem stems from the fact that one of GnuTLS's tests uses a
hardcoded certificate which expired on 2020-10-24.
What's more unfortunate is that this GnuTLS derivation is somewhat special in
Guix's dependency graph and is not affected by the package transformation flags
like `--without-tests=`.
The easiest solution for those encountering this problem is to install a newer
version of Guix. However, there are ways to work around this issue:
#### Workaround 1: Using substitutes for this single derivation
If you've authorized the official Guix build farm's key (more info
[here](./README.md#step-1-authorize-the-signing-keys)), then you can use
substitutes just for this single derivation by invoking the following:
```sh
guix build --substitute-urls="https://ci.guix.gnu.org" /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv
```
See [this section](./README.md#removing-authorized-keys) for instructions on how
to remove authorized keys if you don't want to keep the build farm's key
authorized.
#### Workaround 2: Temporarily setting the system clock back
This workaround was described [here](https://issues.guix.gnu.org/44559#5).
Basically:
1. Turn off networking
2. Turn off NTP
3. Set system time to 2020-10-01
4. guix build --no-substitutes /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv
5. Set system time back to accurate current time
6. Turn NTP back on
7. Turn networking back on
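A sketch of the numbered steps above using `timedatectl`, mirroring the OpenSSL
workaround earlier (steps 1 and 7, toggling networking, are left to you; adjust
to your init system):
```sh
sudo timedatectl set-ntp no
sudo date --set "01 oct 2020 12:00:00"
guix build --no-substitutes /gnu/store/vhphki5sg9xkdhh2pbc8gi6vhpfzryf0-gnutls-3.6.12.drv
sudo timedatectl set-ntp yes
```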
### coreutils: FAIL: tests/tail-2/inotify-dir-recreate
The inotify-dir-create test fails on "remote" filesystems such as overlayfs
(Docker's default filesystem) due to the filesystem being mistakenly recognized
as non-remote.
A relatively easy workaround to this is to make sure that a somewhat traditional
filesystem is mounted at `/tmp` (where `guix-daemon` performs its builds); see
[/tmp on tmpfs](#consider-tmp-on-tmpfs). For Docker users, this might mean
[using a volume][docker/volumes], [bind mounting][docker/bind-mnt] from the
host, or (for those with enough RAM and swap) [mounting a
tmpfs][docker/tmpfs] using the `--tmpfs` flag.
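For example, a Docker invocation mounting a tmpfs at `/tmp` might look like the
following sketch (`<image>` is a placeholder; size the tmpfs to fit your
RAM/swap):
```sh
docker run --tmpfs /tmp:rw,exec,size=8g <image>
```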
Please see the following links for more details:
- An upstream coreutils bug has been filed: [debbugs#47940](https://debbugs.gnu.org/cgi/bugreport.cgi?bug=47940)
- A Guix bug detailing the underlying problem has been filed: [guix-issues#47935](https://issues.guix.gnu.org/47935), [guix-issues#49985](https://issues.guix.gnu.org/49985#5)
- A commit to skip this test in Guix has been merged into the core-updates branch:
[savannah/guix@6ba1058](https://git.savannah.gnu.org/cgit/guix.git/commit/?id=6ba1058df0c4ce5611c2367531ae5c3cdc729ab4)
[install-script]: #options-1-and-2-using-the-official-shell-installer-script-or-binary-tarball
[install-bin-tarball]: #options-1-and-2-using-the-official-shell-installer-script-or-binary-tarball
[install-fanquake-docker]: #option-3-using-fanquakes-docker-image
[install-distro-pkg]: #option-4-using-a-distribution-maintained-package
[install-source]: #option-5-building-from-source
[fix-argv0]: #creating-and-starting-a-guix-daemon-original-service-with-a-fixed-argv0
[security-model]: ./README.md#choosing-your-security-model
[docker/volumes]: https://docs.docker.com/storage/volumes/
[docker/bind-mnt]: https://docs.docker.com/storage/bind-mounts/
[docker/tmpfs]: https://docs.docker.com/storage/tmpfs/
# Purging/Uninstalling Guix
In the extraordinarily rare case where you messed up your Guix installation in
an irreversible way, you may want to completely purge Guix from your system and
start over.
1. Uninstall Guix itself according to the way you installed it (e.g. `sudo apt
purge guix` for Ubuntu packaging, `sudo make uninstall` for a build from source).
2. Remove all build users and groups
You may check for relevant users and groups using:
```
getent passwd | grep guix
getent group | grep guix
```
Then, you may remove users and groups using:
```
sudo userdel <user>
sudo groupdel <group>
```
3. Remove all possible Guix-related directories
- `/var/guix/`
- `/var/log/guix/`
- `/gnu/`
- `/etc/guix/`
- `/home/*/.config/guix/`
- `/home/*/.cache/guix/`
- `/home/*/.guix-profile/`
- `/root/.config/guix/`
- `/root/.cache/guix/`
- `/root/.guix-profile/`
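A sketch of removing the directories above in one go (assuming default
locations; double-check each path before running, as this is irreversible):
```sh
sudo rm -rf /var/guix /var/log/guix /gnu /etc/guix \
            /home/*/.config/guix /home/*/.cache/guix /home/*/.guix-profile \
            /root/.config/guix /root/.cache/guix /root/.guix-profile
```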
| 0 |
bitcoin/contrib | bitcoin/contrib/guix/README.md | # Bootstrappable Bitcoin Core Builds
This directory contains the files necessary to perform bootstrappable Bitcoin
Core builds.
[Bootstrappability][b17e] furthers our binary security guarantees by allowing us
to _audit and reproduce_ our toolchain instead of blindly _trusting_ binary
downloads.
We achieve bootstrappability by using Guix as a functional package manager.
# Requirements
Conservatively, you will need an x86_64 machine with:
- 16GB of free disk space on the partition that /gnu/store will reside in
- 8GB of free disk space **per platform triple** you're planning on building
(see the `HOSTS` [environment variable description][env-vars-list])
# Installation and Setup
If you don't have Guix installed and set up, please follow the instructions in
[INSTALL.md](./INSTALL.md)
# Usage
If you haven't considered your security model yet, please read [the relevant
section](#choosing-your-security-model) before proceeding to perform a build.
## Making the Xcode SDK available for macOS cross-compilation
In order to perform a build for macOS (which is included in the default set of
platform triples to build), you'll need to extract the macOS SDK tarball using
tools found in the [`macdeploy` directory](../macdeploy/README.md).
You can then either point to the SDK using the `SDK_PATH` environment variable:
```sh
# Extract the SDK tarball to /path/to/parent/dir/of/extracted/SDK/Xcode-<foo>-<bar>-extracted-SDK-with-libcxx-headers
tar -C /path/to/parent/dir/of/extracted/SDK -xaf /path/to/Xcode-<foo>-<bar>-extracted-SDK-with-libcxx-headers.tar.gz
# Indicate where to locate the SDK tarball
export SDK_PATH=/path/to/parent/dir/of/extracted/SDK
```
or extract it into `depends/SDKs`:
```sh
mkdir -p depends/SDKs
tar -C depends/SDKs -xaf /path/to/SDK/tarball
```
## Building
*The author highly recommends at least reading over the [common usage patterns
and examples](#common-guix-build-invocation-patterns-and-examples) section below
before starting a build. For a full list of customization options, see the
[recognized environment variables][env-vars-list] section.*
To build Bitcoin Core reproducibly with all default options, invoke the
following from the top of a clean repository:
```sh
./contrib/guix/guix-build
```
## Codesigning build outputs
The `guix-codesign` command attaches codesignatures (produced by codesigners) to
existing non-codesigned outputs. Please see the [release process
documentation](/doc/release-process.md) for more context.
It respects many of the same environment variable flags as `guix-build`, with 2
crucial differences:
1. Since only Windows and macOS build outputs require codesigning, the `HOSTS`
environment variable will have a sane default value of `x86_64-w64-mingw32
x86_64-apple-darwin arm64-apple-darwin` instead of all the platforms.
2. The `guix-codesign` command ***requires*** a `DETACHED_SIGS_REPO` flag.
* _**DETACHED_SIGS_REPO**_
Set the directory where detached codesignatures can be found for the current
Bitcoin Core version being built.
_REQUIRED environment variable_
An invocation with all default options would look like:
```
env DETACHED_SIGS_REPO=<path/to/bitcoin-detached-sigs> ./contrib/guix/guix-codesign
```
## Cleaning intermediate work directories
By default, `guix-build` leaves all intermediate files or "work directories"
(e.g. `depends/work`, `guix-build-*/distsrc-*`) intact at the end of a build so
that they are available to the user (to aid in debugging, etc.). However, these
directories usually take up a large amount of disk space. Therefore, a
`guix-clean` convenience script is provided which cleans the current `git`
worktree to save disk space:
```
./contrib/guix/guix-clean
```
## Attesting to build outputs
Much like how Gitian build outputs are attested to in a `gitian.sigs`
repository, Guix build outputs are attested to in the [`guix.sigs`
repository](https://github.com/bitcoin-core/guix.sigs).
After you've cloned the `guix.sigs` repository, to attest to the current
worktree's commit/tag:
```
env GUIX_SIGS_REPO=<path/to/guix.sigs> SIGNER=<gpg-key-name> ./contrib/guix/guix-attest
```
See `./contrib/guix/guix-attest --help` for more information on the various ways
`guix-attest` can be invoked.
## Verifying build output attestations
After at least one other signer has uploaded their signatures to the `guix.sigs`
repository:
```
git -C <path/to/guix.sigs> pull
env GUIX_SIGS_REPO=<path/to/guix.sigs> ./contrib/guix/guix-verify
```
## Common `guix-build` invocation patterns and examples
### Keeping caches and SDKs outside of the worktree
If you perform a lot of builds and have a bunch of worktrees, you may find it
more efficient to keep the depends tree's download cache, build cache, and SDKs
outside of the worktrees to avoid duplicate downloads and unnecessary builds. To
help with this situation, the `guix-build` script honours the `SOURCES_PATH`,
`BASE_CACHE`, and `SDK_PATH` environment variables and will pass them on to the
depends tree so that you can do something like:
```sh
env SOURCES_PATH="$HOME/depends-SOURCES_PATH" BASE_CACHE="$HOME/depends-BASE_CACHE" SDK_PATH="$HOME/macOS-SDKs" ./contrib/guix/guix-build
```
Note that the paths that these environment variables point to **must be
directories**, and **NOT symlinks to directories**.
See the [recognized environment variables][env-vars-list] section for more
details.
### Building a subset of platform triples
Sometimes you only want to build a subset of the supported platform triples, in
which case you can override the default list by setting the space-separated
`HOSTS` environment variable:
```sh
env HOSTS='x86_64-w64-mingw32 x86_64-apple-darwin' ./contrib/guix/guix-build
```
See the [recognized environment variables][env-vars-list] section for more
details.
### Controlling the number of threads used by `guix` build commands
Depending on your system's RAM capacity, you may want to decrease the number of
threads used to decrease RAM usage or vice versa.
By default, the scripts under `./contrib/guix` will invoke all `guix` build
commands with `--cores="$JOBS"`. Note that `$JOBS` defaults to `$(nproc)` if not
specified. However, astute manual readers will notice that `guix` build
commands also accept a `--max-jobs=` flag (which defaults to 1 if unspecified).
Here is the difference between `--cores=` and `--max-jobs=`:
> Note: When I say "derivation," think "package"
`--cores=`
- controls the number of CPU cores to build each derivation. This is the value
passed to `make`'s `--jobs=` flag.
`--max-jobs=`
- controls how many derivations can be built in parallel
- defaults to 1
Therefore, the default is for `guix` build commands to build one derivation at a
time, utilizing `$JOBS` threads.
Specifying the `$JOBS` environment variable will only modify `--cores=`, but you
can also modify the value for `--max-jobs=` by specifying
`$ADDITIONAL_GUIX_COMMON_FLAGS`. For example, if you have a LOT of memory, you
may want to set:
```sh
export ADDITIONAL_GUIX_COMMON_FLAGS='--max-jobs=8'
```
Which allows for a maximum of 8 derivations to be built at the same time, each
utilizing `$JOBS` threads.
Or, if you'd like to avoid spurious build failures caused by issues with
parallelism within a single package, but would still like to build multiple
packages when the dependency graph allows for it, you may want to try:
```sh
export JOBS=1 ADDITIONAL_GUIX_COMMON_FLAGS='--max-jobs=8'
```
See the [recognized environment variables][env-vars-list] section for more
details.
## Recognized environment variables
* _**HOSTS**_
Override the space-separated list of platform triples for which to perform a
bootstrappable build.
_(defaults to "x86\_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu
riscv64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu
x86\_64-w64-mingw32 x86\_64-apple-darwin arm64-apple-darwin")_
* _**SOURCES_PATH**_
Set the depends tree download cache for sources. This is passed through to the
depends tree. Setting this to the same directory across multiple builds of the
depends tree can eliminate unnecessary redownloading of package sources.
The path that this environment variable points to **must be a directory**, and
**NOT a symlink to a directory**.
* _**BASE_CACHE**_
Set the depends tree cache for built packages. This is passed through to the
depends tree. Setting this to the same directory across multiple builds of the
depends tree can eliminate unnecessary building of packages.
The path that this environment variable points to **must be a directory**, and
**NOT a symlink to a directory**.
* _**SDK_PATH**_
Set the path where _extracted_ SDKs can be found. This is passed through to
the depends tree. Note that this should be set to the _parent_ directory of
the actual SDK (e.g. `SDK_PATH=$HOME/Downloads/macOS-SDKs` instead of
`$HOME/Downloads/macOS-SDKs/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers`).
The path that this environment variable points to **must be a directory**, and
**NOT a symlink to a directory**.
* _**JOBS**_
Override the number of jobs to run simultaneously; you might want to do so on
a memory-limited machine. This may be passed to:
- `guix` build commands as in `guix shell --cores="$JOBS"`
- `make` as in `make --jobs="$JOBS"`
- `xargs` as in `xargs -P"$JOBS"`
See [here](#controlling-the-number-of-threads-used-by-guix-build-commands) for
more details.
_(defaults to the value of `nproc` outside the container)_
* _**SOURCE_DATE_EPOCH**_
Override the reference UNIX timestamp used for bit-for-bit reproducibility,
the variable name conforms to [standard][r12e/source-date-epoch].
_(defaults to the output of `$(git log --format=%at -1)`)_
* _**V**_
If non-empty, will pass `V=1` to all `make` invocations, making `make` output
verbose.
Note that any given value is ignored. The variable is only checked for
emptiness. More concretely, this means that `V=` (setting `V` to the empty
string) is interpreted the same way as not setting `V` at all, and that `V=0`
has the same effect as `V=1`.
* _**SUBSTITUTE_URLS**_
A whitespace-delimited list of URLs from which to download pre-built packages.
A URL is only used if its signing key is authorized (refer to the [substitute
servers section](#option-1-building-with-substitutes) for more details).
* _**ADDITIONAL_GUIX_COMMON_FLAGS**_
Additional flags to be passed to all `guix` commands.
* _**ADDITIONAL_GUIX_TIMEMACHINE_FLAGS**_
Additional flags to be passed to `guix time-machine`.
* _**ADDITIONAL_GUIX_ENVIRONMENT_FLAGS**_
Additional flags to be passed to the invocation of `guix shell` inside
`guix time-machine`.
# Choosing your security model
No matter how you installed Guix, you need to decide on your security model for
building packages with Guix.
Guix allows us to achieve better binary security by using our CPU time to build
everything from scratch. However, it doesn't sacrifice user choice in pursuit of
this: users can decide whether or not to use **substitutes** (pre-built
packages).
## Option 1: Building with substitutes
### Step 1: Authorize the signing keys
Depending on the installation procedure you followed, you may have already
authorized the Guix build farm key. In particular, the official shell installer
script asks you if you want the key installed, and the Debian distribution
package authorizes the key during installation.
You can check the current list of authorized keys at `/etc/guix/acl`.
At the time of writing, a `/etc/guix/acl` with just the Guix build farm key
authorized looks something like:
```lisp
(acl
(entry
(public-key
(ecc
(curve Ed25519)
(q #8D156F295D24B0D9A86FA5741A840FF2D24F60F7B6C4134814AD55625971B394#)
)
)
(tag
(guix import)
)
)
)
```
If you've determined that the official Guix build farm key hasn't been
authorized, and you would like to authorize it, run the following as root:
```
guix archive --authorize < /var/guix/profiles/per-user/root/current-guix/share/guix/ci.guix.gnu.org.pub
```
If
`/var/guix/profiles/per-user/root/current-guix/share/guix/ci.guix.gnu.org.pub`
doesn't exist, try:
```sh
guix archive --authorize < <PREFIX>/share/guix/ci.guix.gnu.org.pub
```
Where `<PREFIX>` is likely:
- `/usr` if you installed from a distribution package
- `/usr/local` if you installed Guix from source and didn't supply any
prefix-modifying flags to Guix's `./configure`
For dongcarl's substitute server at https://guix.carldong.io, run as root:
```sh
wget -qO- 'https://guix.carldong.io/signing-key.pub' | guix archive --authorize
```
#### Removing authorized keys
To remove previously authorized keys, simply edit `/etc/guix/acl` and remove the
`(entry (public-key ...))` entry.
### Step 2: Specify the substitute servers
Once its key is authorized, the official Guix build farm at
https://ci.guix.gnu.org is automatically used unless the `--no-substitutes` flag
is supplied. This default list of substitute servers is overridable both on a
`guix-daemon` level and when you invoke `guix` commands. See examples below for
the various ways of adding dongcarl's substitute server after having [authorized
his signing key](#step-1-authorize-the-signing-keys).
Change the **default list** of substitute servers by starting `guix-daemon` with
the `--substitute-urls` option (you will likely need to edit your init script):
```sh
guix-daemon <cmd> --substitute-urls='https://guix.carldong.io https://ci.guix.gnu.org'
```
Override the default list of substitute servers by passing the
`--substitute-urls` option for invocations of `guix` commands:
```sh
guix <cmd> --substitute-urls='https://guix.carldong.io https://ci.guix.gnu.org'
```
For scripts under `./contrib/guix`, set the `SUBSTITUTE_URLS` environment
variable:
```sh
export SUBSTITUTE_URLS='https://guix.carldong.io https://ci.guix.gnu.org'
```
## Option 2: Disabling substitutes on an ad-hoc basis
If you prefer not to use any substitutes, make sure to supply `--no-substitutes`
like in the following snippet. The first build will take a while, but the
resulting packages will be cached for future builds.
For direct invocations of `guix`:
```sh
guix <cmd> --no-substitutes
```
For the scripts under `./contrib/guix/`:
```sh
export ADDITIONAL_GUIX_COMMON_FLAGS='--no-substitutes'
```
## Option 3: Disabling substitutes by default
`guix-daemon` accepts a `--no-substitutes` flag, which will make sure that,
unless otherwise overridden by a command line invocation, no substitutes will be
used.
If you start `guix-daemon` using an init script, you can edit said script to
supply this flag.
[b17e]: https://bootstrappable.org/
[r12e/source-date-epoch]: https://reproducible-builds.org/docs/source-date-epoch/
[env-vars-list]: #recognized-environment-variables
| 0 |
bitcoin/contrib | bitcoin/contrib/guix/guix-codesign | #!/usr/bin/env bash
export LC_ALL=C
set -e -o pipefail
# Source the common prelude, which:
# 1. Checks if we're at the top directory of the Bitcoin Core repository
# 2. Defines a few common functions and variables
#
# shellcheck source=libexec/prelude.bash
source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
###################
## SANITY CHECKS ##
###################
################
# Required non-builtin commands should be invocable
################
check_tools cat mkdir git guix
################
# Required env vars should be non-empty
################
cmd_usage() {
cat <<EOF
Synopsis:
env DETACHED_SIGS_REPO=<path/to/bitcoin-detached-sigs> \\
./contrib/guix/guix-codesign
EOF
}
if [ -z "$DETACHED_SIGS_REPO" ]; then
cmd_usage
exit 1
fi
################
# GUIX_BUILD_OPTIONS should be empty
################
#
# GUIX_BUILD_OPTIONS is an environment variable recognized by guix commands that
# can perform builds. This seems like what we want instead of
# ADDITIONAL_GUIX_COMMON_FLAGS, but the value of GUIX_BUILD_OPTIONS is actually
# _appended_ to normal command-line options. Meaning that they will take
# precedence over the command-specific ADDITIONAL_GUIX_<CMD>_FLAGS.
#
# This seems like a poor user experience. Thus we check for GUIX_BUILD_OPTIONS's
# existence here and direct users of this script to use our (more flexible)
# custom environment variables.
if [ -n "$GUIX_BUILD_OPTIONS" ]; then
cat << EOF
Error: Environment variable GUIX_BUILD_OPTIONS is not empty:
'$GUIX_BUILD_OPTIONS'
Unfortunately this script is incompatible with GUIX_BUILD_OPTIONS, please unset
GUIX_BUILD_OPTIONS and use ADDITIONAL_GUIX_COMMON_FLAGS to set build options
across guix commands or ADDITIONAL_GUIX_<CMD>_FLAGS to set build options for a
specific guix command.
See contrib/guix/README.md for more details.
EOF
exit 1
fi
################
# The codesignature git worktree should not be dirty
################
if ! git -C "$DETACHED_SIGS_REPO" diff-index --quiet HEAD -- && [ -z "$FORCE_DIRTY_WORKTREE" ]; then
cat << EOF
ERR: The DETACHED CODESIGNATURE git worktree is dirty, which may lead to broken builds.
Aborting...
Hint: To make your git worktree clean, You may want to:
1. Commit your changes,
2. Stash your changes, or
3. Set the 'FORCE_DIRTY_WORKTREE' environment variable if you insist on
using a dirty worktree
EOF
exit 1
fi
################
# Build directories should not exist
################
# Default to building for all supported HOSTs (overridable by environment)
export HOSTS="${HOSTS:-x86_64-w64-mingw32 x86_64-apple-darwin arm64-apple-darwin}"
# Usage: distsrc_for_host HOST
#
# HOST: The current platform triple we're building for
#
distsrc_for_host() {
echo "${DISTSRC_BASE}/distsrc-${VERSION}-${1}-codesigned"
}
# Accumulate a list of build directories that already exist...
hosts_distsrc_exists=""
for host in $HOSTS; do
if [ -e "$(distsrc_for_host "$host")" ]; then
hosts_distsrc_exists+=" ${host}"
fi
done
if [ -n "$hosts_distsrc_exists" ]; then
# ...so that we can print them out nicely in an error message
cat << EOF
ERR: Build directories for this commit already exist for the following platform
triples you're attempting to build, probably because of previous builds.
Please remove, or otherwise deal with them prior to starting another build.
Aborting...
Hint: To blow everything away, you may want to use:
$ ./contrib/guix/guix-clean
Specifically, this will remove all files without an entry in the index,
excluding the SDK directory, the depends download cache, the depends built
packages cache, the garbage collector roots for Guix environments, and the
output directory.
EOF
for host in $hosts_distsrc_exists; do
echo " ${host} '$(distsrc_for_host "$host")'"
done
exit 1
else
mkdir -p "$DISTSRC_BASE"
fi
################
# Unsigned tarballs SHOULD exist
################
# Usage: outdir_for_host HOST SUFFIX
#
# HOST: The current platform triple we're building for
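# SUFFIX: Optional suffix appended (with a hyphen) to the output directory name
#         (e.g. "codesigned")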
#
outdir_for_host() {
echo "${OUTDIR_BASE}/${1}${2:+-${2}}"
}
unsigned_tarball_for_host() {
case "$1" in
*mingw*)
echo "$(outdir_for_host "$1")/${DISTNAME}-win64-unsigned.tar.gz"
;;
*darwin*)
echo "$(outdir_for_host "$1")/${DISTNAME}-${1}-unsigned.tar.gz"
;;
*)
exit 1
;;
esac
}
# Accumulate a list of hosts whose unsigned tarballs are missing...
hosts_unsigned_tarball_missing=""
for host in $HOSTS; do
if [ ! -e "$(unsigned_tarball_for_host "$host")" ]; then
hosts_unsigned_tarball_missing+=" ${host}"
fi
done
if [ -n "$hosts_unsigned_tarball_missing" ]; then
# ...so that we can print them out nicely in an error message
cat << EOF
ERR: Unsigned tarballs do not exist
...
EOF
for host in $hosts_unsigned_tarball_missing; do
echo " ${host} '$(unsigned_tarball_for_host "$host")'"
done
exit 1
fi
################
# Check that we can connect to the guix-daemon
################
cat << EOF
Checking that we can connect to the guix-daemon...
Hint: If this hangs, you may want to try turning your guix-daemon off and on
again.
EOF
if ! guix gc --list-failures > /dev/null; then
cat << EOF
ERR: Failed to connect to the guix-daemon, please ensure that one is running and
reachable.
EOF
exit 1
fi
# Developer note: we could use `guix repl` for this check and run:
#
# (import (guix store)) (close-connection (open-connection))
#
# However, the internal API is likely to change more than the CLI invocation
#########
# SETUP #
#########
# Determine the maximum number of jobs to run simultaneously (overridable by
# environment)
JOBS="${JOBS:-$(nproc)}"
# Determine the reference time used for determinism (overridable by environment)
SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --format=%at -1)}"
# Make sure an output directory exists for our builds
OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}"
mkdir -p "$OUTDIR_BASE"
# Usage: profiledir_for_host HOST SUFFIX
#
# HOST: The current platform triple we're building for
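# SUFFIX: Optional suffix appended (with a hyphen) to the profile directory
#         name (e.g. "codesigned")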
#
profiledir_for_host() {
echo "${PROFILES_BASE}/${1}${2:+-${2}}"
}
#########
# BUILD #
#########
# Function to be called when codesigning for host ${1} and the user interrupts
# the codesign
int_trap() {
cat << EOF
** INT received while codesigning ${1}, you may want to clean up the relevant
work directories (e.g. distsrc-*) before recodesigning
Hint: To blow everything away, you may want to use:
$ ./contrib/guix/guix-clean
Specifically, this will remove all files without an entry in the index,
excluding the SDK directory, the depends download cache, the depends built
packages cache, the garbage collector roots for Guix environments, and the
output directory.
EOF
}
# Deterministically build Bitcoin Core
# shellcheck disable=SC2153
for host in $HOSTS; do
# Display proper warning when the user interrupts the build
trap 'int_trap ${host}' INT
(
# Required for 'contrib/guix/manifest.scm' to output the right manifest
# for the particular $HOST we're building for
export HOST="$host"
# shellcheck disable=SC2030
cat << EOF
INFO: Codesigning ${VERSION:?not set} for platform triple ${HOST:?not set}:
...using reference timestamp: ${SOURCE_DATE_EPOCH:?not set}
...from worktree directory: '${PWD}'
...bind-mounted in container to: '/bitcoin'
...in build directory: '$(distsrc_for_host "$HOST")'
...bind-mounted in container to: '$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")'
...outputting in: '$(outdir_for_host "$HOST" codesigned)'
...bind-mounted in container to: '$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST" codesigned)'
...using detached signatures in: '${DETACHED_SIGS_REPO:?not set}'
...bind-mounted in container to: '/detached-sigs'
EOF
# Run the build script 'contrib/guix/libexec/build.sh' in the build
# container specified by 'contrib/guix/manifest.scm'.
#
# Explanation of `guix shell` flags:
#
# --container run command within an isolated container
#
# Running in an isolated container minimizes build-time differences
# between machines and improves reproducibility
#
# --pure unset existing environment variables
#
# Same rationale as --container
#
# --no-cwd do not share current working directory with an
# isolated container
#
# When --container is specified, the default behavior is to share
# the current working directory with the isolated container at the
# same exact path (e.g. mapping '/home/satoshi/bitcoin/' to
# '/home/satoshi/bitcoin/'). This means that the $PWD inside the
# container becomes a source of irreproducibility. --no-cwd disables
# this behaviour.
#
# --share=SPEC for containers, share writable host file system
# according to SPEC
#
# --share="$PWD"=/bitcoin
#
# maps our current working directory to /bitcoin
# inside the isolated container, which we later cd
# into.
#
# While we don't want to map our current working directory to the
# same exact path (as this introduces irreproducibility), we do want
# it to be at a _fixed_ path _somewhere_ inside the isolated
# container so that we have something to build. '/bitcoin' was
# chosen arbitrarily.
#
# ${SOURCES_PATH:+--share="$SOURCES_PATH"}
#
# make the downloaded depends sources path available
# inside the isolated container
#
# The isolated container has no network access as it's in a
# different network namespace from the main machine, so we have to
# make the downloaded depends sources available to it. The sources
# should have been downloaded prior to this invocation.
#
# ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"}
#
# fetch substitute from SUBSTITUTE_URLS if they are
# authorized
#
# Depending on the user's security model, it may be desirable to use
# substitutes (pre-built packages) from servers that the user trusts.
# Please read the README.md in the same directory as this file for
# more information.
#
# shellcheck disable=SC2086,SC2031
time-machine shell --manifest="${PWD}/contrib/guix/manifest.scm" \
--container \
--pure \
--no-cwd \
--share="$PWD"=/bitcoin \
--share="$DISTSRC_BASE"=/distsrc-base \
--share="$OUTDIR_BASE"=/outdir-base \
--share="$DETACHED_SIGS_REPO"=/detached-sigs \
--expose="$(git rev-parse --git-common-dir)" \
--expose="$(git -C "$DETACHED_SIGS_REPO" rev-parse --git-common-dir)" \
${SOURCES_PATH:+--share="$SOURCES_PATH"} \
--cores="$JOBS" \
--keep-failed \
--fallback \
--link-profile \
--root="$(profiledir_for_host "${HOST}" codesigned)" \
${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \
${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \
-- env HOST="$host" \
DISTNAME="$DISTNAME" \
JOBS="$JOBS" \
SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \
${V:+V=1} \
${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \
DISTSRC="$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")" \
OUTDIR="$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST" codesigned)" \
DIST_ARCHIVE_BASE=/outdir-base/dist-archive \
DETACHED_SIGS_REPO=/detached-sigs \
UNSIGNED_TARBALL="$(OUTDIR_BASE=/outdir-base && unsigned_tarball_for_host "$HOST")" \
bash -c "cd /bitcoin && bash contrib/guix/libexec/codesign.sh"
)
done
| 0 |
bitcoin/contrib | bitcoin/contrib/guix/guix-clean | #!/usr/bin/env bash
export LC_ALL=C
set -e -o pipefail
# Source the common prelude, which:
# 1. Checks if we're at the top directory of the Bitcoin Core repository
# 2. Defines a few common functions and variables
#
# shellcheck source=libexec/prelude.bash
source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
###################
## Sanity Checks ##
###################
################
# Required non-builtin commands should be invokable
################
check_tools cat mkdir make git guix
#############
## Clean ##
#############
# Usage: under_dir MAYBE_PARENT MAYBE_CHILD
#
# If MAYBE_CHILD is a subdirectory of MAYBE_PARENT, print the relative path
# from MAYBE_PARENT to MAYBE_CHILD. Otherwise, return 1 as the error code.
#
# NOTE: This does not perform any symlink-resolving or path canonicalization.
#
under_dir() {
local path_residue
path_residue="${2##"${1}"}"
if [ -z "$path_residue" ] || [ "$path_residue" = "$2" ]; then
return 1
else
echo "$path_residue"
fi
}
# Usage: dir_under_git_root MAYBE_CHILD
#
# If MAYBE_CHILD is under the current git repository and exists, print the
# relative path from the git repository's top-level directory to MAYBE_CHILD,
# otherwise, exit with an error code.
#
dir_under_git_root() {
local rv
rv="$(under_dir "$(git_root)" "$1")"
[ -n "$rv" ] && echo "$rv"
}
shopt -s nullglob
found_precious_dirs_files=( "${version_base_prefix}"*/"${var_base_basename}/precious_dirs" ) # This expands to an array of file paths...
shopt -u nullglob
exclude_flags=()
for precious_dirs_file in "${found_precious_dirs_files[@]}"; do
# Make sure the precious directories (e.g. SOURCES_PATH, BASE_CACHE, SDK_PATH)
# are excluded from git-clean
echo "Found precious_dirs file: '${precious_dirs_file}'"
# Exclude the precious_dirs file itself
if dirs_file_exclude_fragment=$(dir_under_git_root "$(dirname "$precious_dirs_file")"); then
exclude_flags+=( --exclude="${dirs_file_exclude_fragment}/precious_dirs" )
fi
# Read each 'name=dir' pair from the precious_dirs file
while IFS='=' read -r name dir; do
# Add an exclusion flag if the precious directory is under the git root.
if under=$(dir_under_git_root "$dir"); then
echo "Avoiding ${name}: ${under}"
exclude_flags+=( --exclude="$under" )
fi
done < "$precious_dirs_file"
done
git clean -xdff "${exclude_flags[@]}"
| 0 |
bitcoin/contrib | bitcoin/contrib/guix/guix-verify | #!/usr/bin/env bash
export LC_ALL=C
set -e -o pipefail
# Source the common prelude, which:
# 1. Checks if we're at the top directory of the Bitcoin Core repository
# 2. Defines a few common functions and variables
#
# shellcheck source=libexec/prelude.bash
source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
###################
## Sanity Checks ##
###################
################
# Required non-builtin commands should be invokable
################
check_tools cat diff gpg
################
# Required env vars should be non-empty
################
cmd_usage() {
cat <<EOF
Synopsis:
env GUIX_SIGS_REPO=<path/to/guix.sigs> [ SIGNER=<signer> ] ./contrib/guix/guix-verify
Example overriding signer's manifest to use as base
env GUIX_SIGS_REPO=/home/dongcarl/guix.sigs SIGNER=achow101 ./contrib/guix/guix-verify
EOF
}
if [ -z "$GUIX_SIGS_REPO" ]; then
cmd_usage
exit 1
fi
################
# GUIX_SIGS_REPO should exist as a directory
################
if [ ! -d "$GUIX_SIGS_REPO" ]; then
cat << EOF
ERR: The specified GUIX_SIGS_REPO is not an existent directory:
'$GUIX_SIGS_REPO'
Hint: Please clone the guix.sigs repository and point to it with the
GUIX_SIGS_REPO environment variable.
EOF
cmd_usage
exit 1
fi
##############
## Verify ##
##############
OUTSIGDIR_BASE="${GUIX_SIGS_REPO}/${VERSION}"
echo "Looking for signature directories in '${OUTSIGDIR_BASE}'"
echo ""
# Usage: verify compare_manifest current_manifest
verify() {
local compare_manifest="$1"
local current_manifest="$2"
if ! gpg --quiet --batch --verify "$current_manifest".asc "$current_manifest" 1>&2; then
echo "ERR: Failed to verify GPG signature in '${current_manifest}'"
echo ""
echo "Hint: Either the signature is invalid or the public key is missing"
echo ""
failure=1
elif ! diff --report-identical "$compare_manifest" "$current_manifest" 1>&2; then
echo "ERR: The SHA256SUMS attestation in these two directories differ:"
echo " '${compare_manifest}'"
echo " '${current_manifest}'"
echo ""
failure=1
else
echo "Verified: '${current_manifest}'"
echo ""
fi
}
shopt -s nullglob
all_noncodesigned=( "$OUTSIGDIR_BASE"/*/noncodesigned.SHA256SUMS )
shopt -u nullglob
echo "--------------------"
echo ""
if (( ${#all_noncodesigned[@]} )); then
compare_noncodesigned="${all_noncodesigned[0]}"
if [[ -n "$SIGNER" ]]; then
signer_noncodesigned="$OUTSIGDIR_BASE/$SIGNER/noncodesigned.SHA256SUMS"
if [[ -f "$signer_noncodesigned" ]]; then
echo "Using $SIGNER's manifest as the base to compare against"
compare_noncodesigned="$signer_noncodesigned"
else
echo "Unable to find $SIGNER's manifest, using the first one found"
fi
else
echo "No SIGNER provided, using the first manifest found"
fi
for current_manifest in "${all_noncodesigned[@]}"; do
verify "$compare_noncodesigned" "$current_manifest"
done
echo "DONE: Checking output signatures for noncodesigned.SHA256SUMS"
echo ""
else
echo "WARN: No signature directories with noncodesigned.SHA256SUMS found"
echo ""
fi
shopt -s nullglob
all_all=( "$OUTSIGDIR_BASE"/*/all.SHA256SUMS )
shopt -u nullglob
echo "--------------------"
echo ""
if (( ${#all_all[@]} )); then
compare_all="${all_all[0]}"
if [[ -n "$SIGNER" ]]; then
signer_all="$OUTSIGDIR_BASE/$SIGNER/all.SHA256SUMS"
if [[ -f "$signer_all" ]]; then
echo "Using $SIGNER's manifest as the base to compare against"
compare_all="$signer_all"
else
echo "Unable to find $SIGNER's manifest, using the first one found"
fi
else
echo "No SIGNER provided, using the first manifest found"
fi
for current_manifest in "${all_all[@]}"; do
verify "$compare_all" "$current_manifest"
done
# Sanity check: there should be no entries that exist in
# noncodesigned.SHA256SUMS that do not exist in all.SHA256SUMS
if [[ "$(comm -23 <(sort "$compare_noncodesigned") <(sort "$compare_all") | wc -c)" -ne 0 ]]; then
echo "ERR: There are unique lines in noncodesigned.SHA256SUMS which"
echo " do not exist in all.SHA256SUMS, something went very wrong."
exit 1
fi
echo "DONE: Checking output signatures for all.SHA256SUMS"
echo ""
else
echo "WARN: No signature directories with all.SHA256SUMS found"
echo ""
fi
echo "===================="
echo ""
if (( ${#all_noncodesigned[@]} + ${#all_all[@]} == 0 )); then
echo "ERR: Unable to perform any verifications as no signature directories"
echo " were found"
echo ""
exit 1
fi
if [ -n "$failure" ]; then
exit 1
fi
| 0 |
bitcoin/contrib | bitcoin/contrib/guix/guix-attest | #!/usr/bin/env bash
export LC_ALL=C
set -e -o pipefail
# Source the common prelude, which:
# 1. Checks if we're at the top directory of the Bitcoin Core repository
# 2. Defines a few common functions and variables
#
# shellcheck source=libexec/prelude.bash
source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
###################
## Sanity Checks ##
###################
################
# Required non-builtin commands should be invokable
################
check_tools cat env basename mkdir diff sort
if [ -z "$NO_SIGN" ]; then
# make it possible to override the gpg binary
GPG=${GPG:-gpg}
# $GPG can contain extra arguments passed to the binary
# so let's check only the existence of arg[0]
# shellcheck disable=SC2206
GPG_ARRAY=($GPG)
check_tools "${GPG_ARRAY[0]}"
fi
################
# Required env vars should be non-empty
################
cmd_usage() {
cat <<EOF
Synopsis:
env GUIX_SIGS_REPO=<path/to/guix.sigs> \\
SIGNER=GPG_KEY_NAME[=SIGNER_NAME] \\
[ NO_SIGN=1 ]
./contrib/guix/guix-attest
Example w/o overriding signing name:
env GUIX_SIGS_REPO=/home/achow101/guix.sigs \\
SIGNER=achow101 \\
./contrib/guix/guix-attest
Example overriding signing name:
env GUIX_SIGS_REPO=/home/dongcarl/guix.sigs \\
SIGNER=0x96AB007F1A7ED999=dongcarl \\
./contrib/guix/guix-attest
Example w/o signing, just creating SHA256SUMS:
env GUIX_SIGS_REPO=/home/achow101/guix.sigs \\
SIGNER=achow101 \\
NO_SIGN=1 \\
./contrib/guix/guix-attest
EOF
}
if [ -z "$GUIX_SIGS_REPO" ] || [ -z "$SIGNER" ]; then
cmd_usage
exit 1
fi
################
# GUIX_SIGS_REPO should exist as a directory
################
if [ ! -d "$GUIX_SIGS_REPO" ]; then
cat << EOF
ERR: The specified GUIX_SIGS_REPO is not an existent directory:
'$GUIX_SIGS_REPO'
Hint: Please clone the guix.sigs repository and point to it with the
GUIX_SIGS_REPO environment variable.
EOF
cmd_usage
exit 1
fi
################
# The key specified in SIGNER should be usable
################
IFS='=' read -r gpg_key_name signer_name <<< "$SIGNER"
if [ -z "${signer_name}" ]; then
signer_name="$gpg_key_name"
fi
if [ -z "$NO_SIGN" ] && ! ${GPG} --dry-run --list-secret-keys "${gpg_key_name}" >/dev/null 2>&1; then
echo "ERR: GPG can't seem to find any key named '${gpg_key_name}'"
exit 1
fi
################
# We should be able to find at least one output
################
echo "Looking for build output SHA256SUMS fragments in ${OUTDIR_BASE}"
shopt -s nullglob
sha256sum_fragments=( "$OUTDIR_BASE"/*/SHA256SUMS.part ) # This expands to an array of files...
shopt -u nullglob
noncodesigned_fragments=()
codesigned_fragments=()
if (( ${#sha256sum_fragments[@]} )); then
echo "Found build output SHA256SUMS fragments:"
for outdir in "${sha256sum_fragments[@]}"; do
echo " '$outdir'"
case "$outdir" in
"$OUTDIR_BASE"/*-codesigned/SHA256SUMS.part)
codesigned_fragments+=("$outdir")
;;
*)
noncodesigned_fragments+=("$outdir")
;;
esac
done
echo
else
echo "ERR: Could not find any build output SHA256SUMS fragments in ${OUTDIR_BASE}"
exit 1
fi
##############
## Attest ##
##############
# Usage: out_name $outdir
#
# OUTDIR: The output directory being attested
#
out_name() {
basename "$(dirname "$1")"
}
shasum_already_exists() {
cat <<EOF
--
ERR: An ${1} file already exists for '${VERSION}' and attests
differently. You likely previously attested to a partial build (e.g. one
where you specified the HOST environment variable).
See the diff above for more context.
Hint: You may wish to remove the existing attestations and their signatures by
invoking:
rm '${PWD}/${1}'{,.asc}
Then try running this script again.
EOF
}
echo "Attesting to build outputs for version: '${VERSION}'"
echo ""
# Given a SHA256SUMS file as stdin that has lines like:
# 0ba536819b221a91d3d42e978be016aac918f40984754d74058aa0c921cd3ea6 a/b/d/c/d/s/bitcoin-22.0rc2-riscv64-linux-gnu.tar.gz
# ...
#
# Replace each line's file name with its basename:
# 0ba536819b221a91d3d42e978be016aac918f40984754d74058aa0c921cd3ea6 bitcoin-22.0rc2-riscv64-linux-gnu.tar.gz
# ...
#
basenameify_SHA256SUMS() {
sed -E 's@(^[[:xdigit:]]{64}[[:space:]]+).+/([^/]+$)@\1\2@'
}
outsigdir="$GUIX_SIGS_REPO/$VERSION/$signer_name"
mkdir -p "$outsigdir"
(
cd "$outsigdir"
temp_noncodesigned="$(mktemp)"
trap 'rm -rf -- "$temp_noncodesigned"' EXIT
if (( ${#noncodesigned_fragments[@]} )); then
cat "${noncodesigned_fragments[@]}" \
| sort -u \
| sort -k2 \
| basenameify_SHA256SUMS \
> "$temp_noncodesigned"
if [ -e noncodesigned.SHA256SUMS ]; then
# The SHA256SUMS already exists, make sure it's exactly what we
# expect, error out if not
if diff -u noncodesigned.SHA256SUMS "$temp_noncodesigned"; then
echo "A noncodesigned.SHA256SUMS file already exists for '${VERSION}' and is up-to-date."
else
shasum_already_exists noncodesigned.SHA256SUMS
exit 1
fi
else
mv "$temp_noncodesigned" noncodesigned.SHA256SUMS
fi
else
echo "ERR: No noncodesigned outputs found for '${VERSION}', exiting..."
exit 1
fi
temp_all="$(mktemp)"
trap 'rm -rf -- "$temp_all"' EXIT
if (( ${#codesigned_fragments[@]} )); then
# Note: all.SHA256SUMS attests to all of $sha256sum_fragments, but is
# not needed if there are no $codesigned_fragments
cat "${sha256sum_fragments[@]}" \
| sort -u \
| sort -k2 \
| basenameify_SHA256SUMS \
> "$temp_all"
if [ -e all.SHA256SUMS ]; then
# The SHA256SUMS already exists, make sure it's exactly what we
# expect, error out if not
if diff -u all.SHA256SUMS "$temp_all"; then
echo "An all.SHA256SUMS file already exists for '${VERSION}' and is up-to-date."
else
shasum_already_exists all.SHA256SUMS
exit 1
fi
else
mv "$temp_all" all.SHA256SUMS
fi
else
# It is fine for the codesigned outputs to be missing (perhaps the
# detached codesigs have not been published yet); just print a log
# message instead of erroring out
echo "INFO: No codesigned outputs found for '${VERSION}', skipping..."
fi
if [ -z "$NO_SIGN" ]; then
echo "Signing SHA256SUMS to produce SHA256SUMS.asc"
for i in *.SHA256SUMS; do
if [ ! -e "$i".asc ]; then
${GPG} --detach-sign \
--digest-algo sha256 \
--local-user "$gpg_key_name" \
--armor \
--output "$i".asc "$i"
else
echo "Signature already there"
fi
done
else
echo "Not signing SHA256SUMS as \$NO_SIGN is not empty"
fi
echo ""
)
| 0 |
bitcoin/contrib | bitcoin/contrib/guix/guix-build | #!/usr/bin/env bash
export LC_ALL=C
set -e -o pipefail
# Source the common prelude, which:
# 1. Checks if we're at the top directory of the Bitcoin Core repository
# 2. Defines a few common functions and variables
#
# shellcheck source=libexec/prelude.bash
source "$(dirname "${BASH_SOURCE[0]}")/libexec/prelude.bash"
###################
## SANITY CHECKS ##
###################
################
# Required non-builtin commands should be invocable
################
check_tools cat mkdir make getent curl git guix
################
# GUIX_BUILD_OPTIONS should be empty
################
#
# GUIX_BUILD_OPTIONS is an environment variable recognized by guix commands that
# can perform builds. This seems like what we want instead of
# ADDITIONAL_GUIX_COMMON_FLAGS, but the value of GUIX_BUILD_OPTIONS is actually
# _appended_ to normal command-line options, meaning that it will take
# precedence over the command-specific ADDITIONAL_GUIX_<CMD>_FLAGS.
#
# This seems like a poor user experience. Thus we check for GUIX_BUILD_OPTIONS's
# existence here and direct users of this script to use our (more flexible)
# custom environment variables.
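#
# For example (hypothetical invocation), instead of GUIX_BUILD_OPTIONS='--max-jobs=8'
# one would run:
#   env ADDITIONAL_GUIX_COMMON_FLAGS='--max-jobs=8' ./contrib/guix/guix-build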
if [ -n "$GUIX_BUILD_OPTIONS" ]; then
cat << EOF
Error: Environment variable GUIX_BUILD_OPTIONS is not empty:
'$GUIX_BUILD_OPTIONS'
Unfortunately this script is incompatible with GUIX_BUILD_OPTIONS, please unset
GUIX_BUILD_OPTIONS and use ADDITIONAL_GUIX_COMMON_FLAGS to set build options
across guix commands or ADDITIONAL_GUIX_<CMD>_FLAGS to set build options for a
specific guix command.
See contrib/guix/README.md for more details.
EOF
exit 1
fi
################
# The git worktree should not be dirty
################
if ! git diff-index --quiet HEAD -- && [ -z "$FORCE_DIRTY_WORKTREE" ]; then
cat << EOF
ERR: The current git worktree is dirty, which may lead to broken builds.
Aborting...
Hint: To make your git worktree clean, you may want to:
1. Commit your changes,
2. Stash your changes, or
3. Set the 'FORCE_DIRTY_WORKTREE' environment variable if you insist on
using a dirty worktree
EOF
exit 1
fi
mkdir -p "$VERSION_BASE"
################
# Build directories should not exist
################
# Default to building for all supported HOSTs (overridable by environment)
export HOSTS="${HOSTS:-x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu
x86_64-w64-mingw32
x86_64-apple-darwin arm64-apple-darwin}"
# Usage: distsrc_for_host HOST
#
# HOST: The current platform triple we're building for
#
distsrc_for_host() {
echo "${DISTSRC_BASE}/distsrc-${VERSION}-${1}"
}
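# For example, with DISTSRC_BASE=/guix-builds and VERSION=22.0 (hypothetical
# values), 'distsrc_for_host x86_64-linux-gnu' prints
# '/guix-builds/distsrc-22.0-x86_64-linux-gnu'.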
# Accumulate a list of build directories that already exist...
hosts_distsrc_exists=""
for host in $HOSTS; do
if [ -e "$(distsrc_for_host "$host")" ]; then
hosts_distsrc_exists+=" ${host}"
fi
done
if [ -n "$hosts_distsrc_exists" ]; then
# ...so that we can print them out nicely in an error message
cat << EOF
ERR: Build directories for this commit already exist for the following platform
triples you're attempting to build, probably because of previous builds.
Please remove them, or otherwise deal with them, prior to starting another build.
Aborting...
Hint: To blow everything away, you may want to use:
$ ./contrib/guix/guix-clean
Specifically, this will remove all files without an entry in the index,
excluding the SDK directory, the depends download cache, the depends built
packages cache, the garbage collector roots for Guix environments, and the
output directory.
EOF
for host in $hosts_distsrc_exists; do
echo " ${host} '$(distsrc_for_host "$host")'"
done
exit 1
else
mkdir -p "$DISTSRC_BASE"
fi
################
# When building for darwin, the macOS SDK should exist
################
for host in $HOSTS; do
case "$host" in
*darwin*)
OSX_SDK="$(make -C "${PWD}/depends" --no-print-directory HOST="$host" print-OSX_SDK | sed 's@^[^=]\+=@@g')"
if [ -e "$OSX_SDK" ]; then
echo "Found macOS SDK at '${OSX_SDK}', using..."
break
else
echo "macOS SDK does not exist at '${OSX_SDK}', please place the extracted, untarred SDK there to perform darwin builds, or define SDK_PATH environment variable. Exiting..."
exit 1
fi
;;
esac
done
################
# VERSION_BASE should have enough space
################
avail_KiB="$(df -Pk "$VERSION_BASE" | sed 1d | tr -s ' ' | cut -d' ' -f4)"
total_required_KiB=0
for host in $HOSTS; do
case "$host" in
*darwin*) required_KiB=440000 ;;
*mingw*) required_KiB=7600000 ;;
*) required_KiB=6400000 ;;
esac
total_required_KiB=$((total_required_KiB+required_KiB))
done
if (( total_required_KiB > avail_KiB )); then
total_required_GiB=$((total_required_KiB / 1048576))
avail_GiB=$((avail_KiB / 1048576))
echo "Performing a Bitcoin Core Guix build for the selected HOSTS requires ${total_required_GiB} GiB, however, only ${avail_GiB} GiB is available. Please free up some disk space before performing the build."
exit 1
fi
################
# Check that we can connect to the guix-daemon
################
cat << EOF
Checking that we can connect to the guix-daemon...
Hint: If this hangs, you may want to try turning your guix-daemon off and on
again.
EOF
if ! guix gc --list-failures > /dev/null; then
cat << EOF
ERR: Failed to connect to the guix-daemon, please ensure that one is running and
reachable.
EOF
exit 1
fi
# Developer note: we could use `guix repl` for this check and run:
#
# (import (guix store)) (close-connection (open-connection))
#
# However, the internal API is likely to change more than the CLI invocation
################
# Services database must have basic entries
################
if ! getent services http https ftp > /dev/null 2>&1; then
cat << EOF
ERR: Your system's C library cannot find service database entries for at least
one of the following services: http, https, ftp.
Hint: Most likely, /etc/services does not exist yet (common for docker images
and minimal distros), or you don't have permissions to access it.
If /etc/services does not exist yet, you may want to install the
appropriate package for your distro which provides it.
On Debian/Ubuntu: netbase
On Arch Linux: iana-etc
For more information, see: getent(1), services(5)
EOF
fi
#########
# SETUP #
#########
# Determine the maximum number of jobs to run simultaneously (overridable by
# environment)
JOBS="${JOBS:-$(nproc)}"
# Usage: host_to_commonname HOST
#
# HOST: The current platform triple we're building for
#
host_to_commonname() {
case "$1" in
*darwin*) echo osx ;;
*mingw*) echo win ;;
*linux*) echo linux ;;
*) exit 1 ;;
esac
}
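# For example, 'host_to_commonname x86_64-w64-mingw32' prints 'win', which is
# later used to form the depends download target 'download-win'.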
# Determine the reference time used for determinism (overridable by environment)
SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --format=%at -1)}"
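# For example (hypothetical invocation), to reproduce a build made against a
# specific timestamp rather than this commit's, one could run:
#   SOURCE_DATE_EPOCH=1231006505 ./contrib/guix/guix-build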
# Precious directories are those which should not be cleaned between successive
# guix builds
depends_precious_dir_names='SOURCES_PATH BASE_CACHE SDK_PATH'
precious_dir_names="${depends_precious_dir_names} OUTDIR_BASE PROFILES_BASE"
# Usage: contains IFS-SEPARATED-LIST ITEM
contains() {
for i in ${1}; do
if [ "$i" = "${2}" ]; then
return 0 # Found!
fi
done
return 1
}
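# For example, 'contains "$precious_dir_names" OUTDIR_BASE' returns 0 (found),
# whereas 'contains "$depends_precious_dir_names" OUTDIR_BASE' returns 1.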
# If the user explicitly specified a precious directory, create it so we
# can map it into the container
for precious_dir_name in $precious_dir_names; do
precious_dir_path="${!precious_dir_name}"
if [ -n "$precious_dir_path" ]; then
if [ ! -e "$precious_dir_path" ]; then
mkdir -p "$precious_dir_path"
elif [ -L "$precious_dir_path" ]; then
echo "ERR: ${precious_dir_name} cannot be a symbolic link"
exit 1
elif [ ! -d "$precious_dir_path" ]; then
echo "ERR: ${precious_dir_name} must be a directory"
exit 1
fi
fi
done
mkdir -p "$VAR_BASE"
# Record the _effective_ values of precious directories such that guix-clean can
# avoid clobbering them if appropriate.
#
# shellcheck disable=SC2046,SC2086
{
# Get depends precious dir definitions from depends
make -C "${PWD}/depends" \
--no-print-directory \
-- $(printf "print-%s\n" $depends_precious_dir_names)
# Get remaining precious dir definitions from the environment
for precious_dir_name in $precious_dir_names; do
precious_dir_path="${!precious_dir_name}"
if ! contains "$depends_precious_dir_names" "$precious_dir_name"; then
echo "${precious_dir_name}=${precious_dir_path}"
fi
done
} > "${VAR_BASE}/precious_dirs"
# Make sure an output directory exists for our builds
OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}"
mkdir -p "$OUTDIR_BASE"
# Download the depends sources now as we won't have internet access in the build
# container
for host in $HOSTS; do
make -C "${PWD}/depends" -j"$JOBS" download-"$(host_to_commonname "$host")" ${V:+V=1} ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"}
done
# Usage: outdir_for_host HOST SUFFIX
#
# HOST: The current platform triple we're building for
# SUFFIX: Optional suffix appended to the output directory name
#
outdir_for_host() {
echo "${OUTDIR_BASE}/${1}${2:+-${2}}"
}
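# For example, 'outdir_for_host x86_64-w64-mingw32 codesigned' prints
# "${OUTDIR_BASE}/x86_64-w64-mingw32-codesigned"; omitting SUFFIX drops the
# trailing '-codesigned'. profiledir_for_host below behaves identically under
# ${PROFILES_BASE}.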
# Usage: profiledir_for_host HOST SUFFIX
#
# HOST: The current platform triple we're building for
# SUFFIX: Optional suffix appended to the profile directory name
#
profiledir_for_host() {
echo "${PROFILES_BASE}/${1}${2:+-${2}}"
}
#########
# BUILD #
#########
# Function to be called when building for host ${1} and the user interrupts the
# build
int_trap() {
cat << EOF
** INT received while building ${1}, you may want to clean up the relevant
work directories (e.g. distsrc-*) before rebuilding
Hint: To blow everything away, you may want to use:
$ ./contrib/guix/guix-clean
Specifically, this will remove all files without an entry in the index,
excluding the SDK directory, the depends download cache, the depends built
packages cache, the garbage collector roots for Guix environments, and the
output directory.
EOF
}
# Deterministically build Bitcoin Core
# shellcheck disable=SC2153
for host in $HOSTS; do
# Display proper warning when the user interrupts the build
trap 'int_trap ${host}' INT
(
# Required for 'contrib/guix/manifest.scm' to output the right manifest
# for the particular $HOST we're building for
export HOST="$host"
# shellcheck disable=SC2030
cat << EOF
INFO: Building ${VERSION:?not set} for platform triple ${HOST:?not set}:
...using reference timestamp: ${SOURCE_DATE_EPOCH:?not set}
...running at most ${JOBS:?not set} jobs
...from worktree directory: '${PWD}'
...bind-mounted in container to: '/bitcoin'
...in build directory: '$(distsrc_for_host "$HOST")'
...bind-mounted in container to: '$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")'
...outputting in: '$(outdir_for_host "$HOST")'
...bind-mounted in container to: '$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST")'
EOF
# Run the build script 'contrib/guix/libexec/build.sh' in the build
# container specified by 'contrib/guix/manifest.scm'.
#
# Explanation of `guix shell` flags:
#
# --container run command within an isolated container
#
# Running in an isolated container minimizes build-time differences
# between machines and improves reproducibility
#
# --pure unset existing environment variables
#
# Same rationale as --container
#
# --no-cwd do not share current working directory with an
# isolated container
#
# When --container is specified, the default behavior is to share
# the current working directory with the isolated container at the
# same exact path (e.g. mapping '/home/satoshi/bitcoin/' to
# '/home/satoshi/bitcoin/'). This means that the $PWD inside the
# container becomes a source of irreproducibility. --no-cwd disables
# this behaviour.
#
# --share=SPEC for containers, share writable host file system
# according to SPEC
#
# --share="$PWD"=/bitcoin
#
# maps our current working directory to /bitcoin
# inside the isolated container, which we later cd
# into.
#
# While we don't want to map our current working directory to the
# same exact path (as this introduces irreproducibility), we do want
# it to be at a _fixed_ path _somewhere_ inside the isolated
# container so that we have something to build. '/bitcoin' was
# chosen arbitrarily.
#
# ${SOURCES_PATH:+--share="$SOURCES_PATH"}
#
# make the downloaded depends sources path available
# inside the isolated container
#
# The isolated container has no network access as it's in a
# different network namespace from the main machine, so we have to
# make the downloaded depends sources available to it. The sources
# should have been downloaded prior to this invocation.
#
# --keep-failed keep build tree of failed builds
#
# When builds of the Guix environment itself (not Bitcoin Core)
# fail, it is useful for the build tree to be kept for debugging
# purposes.
#
# ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"}
#
# fetch substitutes from SUBSTITUTE_URLS if they are
# authorized
#
# Depending on the user's security model, it may be desirable to use
# substitutes (pre-built packages) from servers that the user trusts.
# Please read the README.md in the same directory as this file for
# more information.
#
# shellcheck disable=SC2086,SC2031
time-machine shell --manifest="${PWD}/contrib/guix/manifest.scm" \
--container \
--pure \
--no-cwd \
--share="$PWD"=/bitcoin \
--share="$DISTSRC_BASE"=/distsrc-base \
--share="$OUTDIR_BASE"=/outdir-base \
--expose="$(git rev-parse --git-common-dir)" \
${SOURCES_PATH:+--share="$SOURCES_PATH"} \
${BASE_CACHE:+--share="$BASE_CACHE"} \
${SDK_PATH:+--share="$SDK_PATH"} \
--cores="$JOBS" \
--keep-failed \
--fallback \
--link-profile \
--root="$(profiledir_for_host "${HOST}")" \
${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \
${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \
-- env HOST="$host" \
DISTNAME="$DISTNAME" \
JOBS="$JOBS" \
SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \
${V:+V=1} \
${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \
${BASE_CACHE:+BASE_CACHE="$BASE_CACHE"} \
${SDK_PATH:+SDK_PATH="$SDK_PATH"} \
DISTSRC="$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")" \
OUTDIR="$(OUTDIR_BASE=/outdir-base && outdir_for_host "$HOST")" \
DIST_ARCHIVE_BASE=/outdir-base/dist-archive \
bash -c "cd /bitcoin && bash contrib/guix/libexec/build.sh"
)
done
| 0 |
bitcoin/contrib | bitcoin/contrib/guix/manifest.scm | (use-modules (gnu packages)
(gnu packages autotools)
((gnu packages bash) #:select (bash-minimal))
(gnu packages bison)
((gnu packages certs) #:select (nss-certs))
((gnu packages cmake) #:select (cmake-minimal))
(gnu packages commencement)
(gnu packages compression)
(gnu packages cross-base)
(gnu packages file)
(gnu packages gawk)
(gnu packages gcc)
((gnu packages installers) #:select (nsis-x86_64))
((gnu packages linux) #:select (linux-libre-headers-6.1 util-linux))
(gnu packages llvm)
(gnu packages mingw)
(gnu packages moreutils)
(gnu packages pkg-config)
((gnu packages python) #:select (python-minimal))
((gnu packages python-build) #:select (python-tomli))
((gnu packages python-crypto) #:select (python-asn1crypto))
((gnu packages tls) #:select (openssl))
((gnu packages version-control) #:select (git-minimal))
(guix build-system cmake)
(guix build-system gnu)
(guix build-system python)
(guix build-system trivial)
(guix gexp)
(guix git-download)
((guix licenses) #:prefix license:)
(guix packages)
((guix utils) #:select (substitute-keyword-arguments)))
(define-syntax-rule (search-our-patches file-name ...)
"Return the list of absolute file names corresponding to each
FILE-NAME found in ./patches relative to the current file."
(parameterize
((%patch-path (list (string-append (dirname (current-filename)) "/patches"))))
(list (search-patch file-name) ...)))
(define building-on (string-append "--build=" (list-ref (string-split (%current-system) #\-) 0) "-guix-linux-gnu"))
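;; For example, on an x86_64 Guix system (where (%current-system) is
;; "x86_64-linux"), building-on evaluates to "--build=x86_64-guix-linux-gnu".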
(define (make-cross-toolchain target
base-gcc-for-libc
base-kernel-headers
base-libc
base-gcc)
"Create a cross-compilation toolchain package for TARGET"
(let* ((xbinutils (cross-binutils target))
;; 1. Build a cross-compiling gcc without targeting any libc, derived
;; from BASE-GCC-FOR-LIBC
(xgcc-sans-libc (cross-gcc target
#:xgcc base-gcc-for-libc
#:xbinutils xbinutils))
;; 2. Build cross-compiled kernel headers with XGCC-SANS-LIBC, derived
;; from BASE-KERNEL-HEADERS
(xkernel (cross-kernel-headers target
#:linux-headers base-kernel-headers
#:xgcc xgcc-sans-libc
#:xbinutils xbinutils))
;; 3. Build a cross-compiled libc with XGCC-SANS-LIBC and XKERNEL,
;; derived from BASE-LIBC
(xlibc (cross-libc target
#:libc base-libc
#:xgcc xgcc-sans-libc
#:xbinutils xbinutils
#:xheaders xkernel))
;; 4. Build a cross-compiling gcc targeting XLIBC, derived from
;; BASE-GCC
(xgcc (cross-gcc target
#:xgcc base-gcc
#:xbinutils xbinutils
#:libc xlibc)))
;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and
;; XGCC
(package
(name (string-append target "-toolchain"))
(version (package-version xgcc))
(source #f)
(build-system trivial-build-system)
(arguments '(#:builder (begin (mkdir %output) #t)))
(propagated-inputs
(list xbinutils
xlibc
xgcc
`(,xlibc "static")
`(,xgcc "lib")))
(synopsis (string-append "Complete GCC tool chain for " target))
(description (string-append "This package provides a complete GCC tool
chain for " target " development."))
(home-page (package-home-page xgcc))
(license (package-license xgcc)))))
(define base-gcc gcc-10)
(define base-linux-kernel-headers linux-libre-headers-6.1)
(define* (make-bitcoin-cross-toolchain target
#:key
(base-gcc-for-libc linux-base-gcc)
(base-kernel-headers base-linux-kernel-headers)
(base-libc glibc-2.27)
(base-gcc linux-base-gcc))
"Convenience wrapper around MAKE-CROSS-TOOLCHAIN with default values
desirable for building Bitcoin Core release binaries."
(make-cross-toolchain target
base-gcc-for-libc
base-kernel-headers
base-libc
base-gcc))
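;; For example, (make-bitcoin-cross-toolchain "riscv64-linux-gnu") produces a
;; "riscv64-linux-gnu-toolchain" meta-package built with the default glibc 2.27,
;; kernel headers, and GCC declared above.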
(define (gcc-mingw-patches gcc)
(package-with-extra-patches gcc
(search-our-patches "gcc-remap-guix-store.patch"
"vmov-alignment.patch")))
(define (make-mingw-pthreads-cross-toolchain target)
"Create a cross-compilation toolchain package for TARGET"
(let* ((xbinutils (cross-binutils target))
(pthreads-xlibc mingw-w64-x86_64-winpthreads)
(pthreads-xgcc (cross-gcc target
#:xgcc (gcc-mingw-patches mingw-w64-base-gcc)
#:xbinutils xbinutils
#:libc pthreads-xlibc)))
;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and
;; XGCC
(package
(name (string-append target "-posix-toolchain"))
(version (package-version pthreads-xgcc))
(source #f)
(build-system trivial-build-system)
(arguments '(#:builder (begin (mkdir %output) #t)))
(propagated-inputs
(list xbinutils
pthreads-xlibc
pthreads-xgcc
`(,pthreads-xgcc "lib")))
(synopsis (string-append "Complete GCC tool chain for " target))
(description (string-append "This package provides a complete GCC tool
chain for " target " development."))
(home-page (package-home-page pthreads-xgcc))
(license (package-license pthreads-xgcc)))))
;; While LIEF is packaged in Guix, we maintain our own package,
;; to simplify building, and more easily apply updates.
;; Moreover, Guix's package uses CMake, which caused a build
;; failure; see https://github.com/bitcoin/bitcoin/pull/27296.
(define-public python-lief
(package
(name "python-lief")
(version "0.13.2")
(source (origin
(method git-fetch)
(uri (git-reference
(url "https://github.com/lief-project/LIEF")
(commit version)))
(file-name (git-file-name name version))
(modules '((guix build utils)))
(snippet
'(begin
;; Configure build for Python bindings.
(substitute* "api/python/config-default.toml"
(("(ninja = )true" all m)
(string-append m "false"))
(("(parallel-jobs = )0" all m)
(string-append m (number->string (parallel-job-count)))))))
(sha256
(base32
"0y48x358ppig5xp97ahcphfipx7cg9chldj2q5zrmn610fmi4zll"))))
(build-system python-build-system)
(native-inputs (list cmake-minimal python-tomli))
(arguments
(list
#:tests? #f ;needs network
#:phases #~(modify-phases %standard-phases
(add-before 'build 'change-directory
(lambda _
(chdir "api/python")))
(replace 'build
(lambda _
(invoke "python" "setup.py" "build"))))))
(home-page "https://github.com/lief-project/LIEF")
(synopsis "Library to instrument executable formats")
(description
"@code{python-lief} is a cross platform library which can parse, modify
and abstract ELF, PE and MachO formats.")
(license license:asl2.0)))
(define osslsigncode
(package
(name "osslsigncode")
(version "2.5")
(source (origin
(method git-fetch)
(uri (git-reference
(url "https://github.com/mtrojnar/osslsigncode")
(commit version)))
(sha256
(base32
"1j47vwq4caxfv0xw68kw5yh00qcpbd56d7rq6c483ma3y7s96yyz"))))
(build-system cmake-build-system)
(inputs (list openssl))
(home-page "https://github.com/mtrojnar/osslsigncode")
(synopsis "Authenticode signing and timestamping tool")
(description "osslsigncode is a small tool that implements part of the
functionality of the Microsoft tool signtool.exe - more exactly the Authenticode
signing and timestamping. But osslsigncode is based on OpenSSL and cURL, and
thus should be able to compile on most platforms where these exist.")
(license license:gpl3+))) ; license is with openssl exception
(define-public python-elfesteem
(let ((commit "2eb1e5384ff7a220fd1afacd4a0170acff54fe56"))
(package
(name "python-elfesteem")
(version (git-version "0.1" "1" commit))
(source
(origin
(method git-fetch)
(uri (git-reference
(url "https://github.com/LRGH/elfesteem")
(commit commit)))
(file-name (git-file-name name commit))
(sha256
(base32
"07x6p8clh11z8s1n2kdxrqwqm2almgc5qpkcr9ckb6y5ivjdr5r6"))))
(build-system python-build-system)
;; There are no tests, but attempting to run python setup.py test leads to
;; PYTHONPATH problems, just disable the test
(arguments '(#:tests? #f))
(home-page "https://github.com/LRGH/elfesteem")
(synopsis "ELF/PE/Mach-O parsing library")
(description "elfesteem parses ELF, PE and Mach-O files.")
(license license:lgpl2.1))))
(define-public python-oscrypto
(package
(name "python-oscrypto")
(version "1.3.0")
(source
(origin
(method git-fetch)
(uri (git-reference
(url "https://github.com/wbond/oscrypto")
(commit version)))
(file-name (git-file-name name version))
(sha256
(base32
"1v5wkmzcyiqy39db8j2dvkdrv2nlsc48556h73x4dzjwd6kg4q0a"))
(patches (search-our-patches "oscrypto-hard-code-openssl.patch"))))
(build-system python-build-system)
(native-search-paths
(list (search-path-specification
(variable "SSL_CERT_FILE")
(file-type 'regular)
(separator #f) ;single entry
(files '("etc/ssl/certs/ca-certificates.crt")))))
(propagated-inputs
(list python-asn1crypto openssl))
(arguments
`(#:phases
(modify-phases %standard-phases
(add-after 'unpack 'hard-code-path-to-libscrypt
(lambda* (#:key inputs #:allow-other-keys)
(let ((openssl (assoc-ref inputs "openssl")))
(substitute* "oscrypto/__init__.py"
(("@GUIX_OSCRYPTO_USE_OPENSSL@")
(string-append openssl "/lib/libcrypto.so" "," openssl "/lib/libssl.so")))
#t)))
(add-after 'unpack 'disable-broken-tests
(lambda _
;; This test is broken as there is no keyboard interrupt.
(substitute* "tests/test_trust_list.py"
(("^(.*)class TrustListTests" line indent)
(string-append indent
"@unittest.skip(\"Disabled by Guix\")\n"
line)))
(substitute* "tests/test_tls.py"
(("^(.*)class TLSTests" line indent)
(string-append indent
"@unittest.skip(\"Disabled by Guix\")\n"
line)))
#t))
(replace 'check
(lambda _
(invoke "python" "run.py" "tests")
#t)))))
(home-page "https://github.com/wbond/oscrypto")
(synopsis "Compiler-free Python crypto library backed by the OS")
(description "oscrypto is a compilation-free, always up-to-date encryption library for Python.")
(license license:expat)))
(define-public python-oscryptotests
(package (inherit python-oscrypto)
(name "python-oscryptotests")
(propagated-inputs
(list python-oscrypto))
(arguments
`(#:tests? #f
#:phases
(modify-phases %standard-phases
(add-after 'unpack 'change-directory-to-tests
(lambda* (#:key inputs #:allow-other-keys)
(chdir "tests")
#t)))))))
(define-public python-certvalidator
(let ((commit "a145bf25eb75a9f014b3e7678826132efbba6213"))
(package
(name "python-certvalidator")
(version (git-version "0.1" "1" commit))
(source
(origin
(method git-fetch)
(uri (git-reference
(url "https://github.com/achow101/certvalidator")
(commit commit)))
(file-name (git-file-name name commit))
(sha256
(base32
"1qw2k7xis53179lpqdqyylbcmp76lj7sagp883wmxg5i7chhc96k"))))
(build-system python-build-system)
(propagated-inputs
(list python-asn1crypto
python-oscrypto
python-oscryptotests)) ;; certvalidator tests import oscryptotests
(arguments
`(#:phases
(modify-phases %standard-phases
(add-after 'unpack 'disable-broken-tests
(lambda _
(substitute* "tests/test_certificate_validator.py"
(("^(.*)class CertificateValidatorTests" line indent)
(string-append indent
"@unittest.skip(\"Disabled by Guix\")\n"
line)))
(substitute* "tests/test_crl_client.py"
(("^(.*)def test_fetch_crl" line indent)
(string-append indent
"@unittest.skip(\"Disabled by Guix\")\n"
line)))
(substitute* "tests/test_ocsp_client.py"
(("^(.*)def test_fetch_ocsp" line indent)
(string-append indent
"@unittest.skip(\"Disabled by Guix\")\n"
line)))
(substitute* "tests/test_registry.py"
(("^(.*)def test_build_paths" line indent)
(string-append indent
"@unittest.skip(\"Disabled by Guix\")\n"
line)))
(substitute* "tests/test_validate.py"
(("^(.*)def test_revocation_mode_hard" line indent)
(string-append indent
"@unittest.skip(\"Disabled by Guix\")\n"
line)))
(substitute* "tests/test_validate.py"
(("^(.*)def test_revocation_mode_soft" line indent)
(string-append indent
"@unittest.skip(\"Disabled by Guix\")\n"
line)))
#t))
(replace 'check
(lambda _
(invoke "python" "run.py" "tests")
#t)))))
(home-page "https://github.com/wbond/certvalidator")
(synopsis "Python library for validating X.509 certificates and paths")
(description "certvalidator is a Python library for validating X.509
certificates or paths. Supports various options, including: validation at a
specific moment in time, whitelisting and revocation checks.")
(license license:expat))))
(define-public python-signapple
(let ((commit "62155712e7417aba07565c9780a80e452823ae6a"))
(package
(name "python-signapple")
(version (git-version "0.1" "1" commit))
(source
(origin
(method git-fetch)
(uri (git-reference
(url "https://github.com/achow101/signapple")
(commit commit)))
(file-name (git-file-name name commit))
(sha256
(base32
"1nm6rm4h4m7kbq729si4cm8rzild62mk4ni8xr5zja7l33fhv3gb"))))
(build-system python-build-system)
(propagated-inputs
(list python-asn1crypto
python-oscrypto
python-certvalidator
python-elfesteem))
;; There are no tests, but attempting to run python setup.py test leads to
;; problems, so just disable the tests
(arguments '(#:tests? #f))
(home-page "https://github.com/achow101/signapple")
(synopsis "Mach-O binary signature tool")
(description "signapple is a Python tool for creating, verifying, and
inspecting signatures in Mach-O binaries.")
(license license:expat))))
(define-public mingw-w64-base-gcc
(package
(inherit base-gcc)
(arguments
(substitute-keyword-arguments (package-arguments base-gcc)
((#:configure-flags flags)
`(append ,flags
;; https://gcc.gnu.org/install/configure.html
(list "--enable-threads=posix",
"--enable-default-ssp=yes",
building-on)))))))
(define-public linux-base-gcc
(package
(inherit base-gcc)
(arguments
(substitute-keyword-arguments (package-arguments base-gcc)
((#:configure-flags flags)
`(append ,flags
;; https://gcc.gnu.org/install/configure.html
(list "--enable-initfini-array=yes",
"--enable-default-ssp=yes",
"--enable-default-pie=yes",
building-on)))
((#:phases phases)
`(modify-phases ,phases
;; Given an XGCC package, return a modified package that replaces each instance of
;; -rpath in the default system spec that's inserted by Guix with -rpath-link
(add-after 'pre-configure 'replace-rpath-with-rpath-link
(lambda _
(substitute* (cons "gcc/config/rs6000/sysv4.h"
(find-files "gcc/config"
"^gnu-user.*\\.h$"))
(("-rpath=") "-rpath-link="))
#t))))))))
(define-public glibc-2.27
(package
(inherit glibc-2.31)
(version "2.27")
(source (origin
(method git-fetch)
(uri (git-reference
(url "https://sourceware.org/git/glibc.git")
(commit "73886db6218e613bd6d4edf529f11e008a6c2fa6")))
(file-name (git-file-name "glibc" "73886db6218e613bd6d4edf529f11e008a6c2fa6"))
(sha256
(base32
"0azpb9cvnbv25zg8019rqz48h8i2257ngyjg566dlnp74ivrs9vq"))
(patches (search-our-patches "glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch"
"glibc-2.27-fcommon.patch"
"glibc-2.27-guix-prefix.patch"
"glibc-2.27-no-librt.patch"
"glibc-2.27-powerpc-ldbrx.patch"))))
(arguments
(substitute-keyword-arguments (package-arguments glibc)
((#:configure-flags flags)
`(append ,flags
;; https://www.gnu.org/software/libc/manual/html_node/Configuring-and-compiling.html
(list "--enable-stack-protector=all",
"--enable-bind-now",
"--disable-werror",
building-on)))
((#:phases phases)
`(modify-phases ,phases
(add-before 'configure 'set-etc-rpc-installation-directory
(lambda* (#:key outputs #:allow-other-keys)
;; Install the rpc data base file under `$out/etc/rpc'.
;; Otherwise build will fail with "Permission denied."
(let ((out (assoc-ref outputs "out")))
(substitute* "sunrpc/Makefile"
(("^\\$\\(inst_sysconfdir\\)/rpc(.*)$" _ suffix)
(string-append out "/etc/rpc" suffix "\n"))
(("^install-others =.*$")
(string-append "install-others = " out "/etc/rpc\n"))))))))))))
(packages->manifest
(append
(list ;; The Basics
bash-minimal
which
coreutils-minimal
util-linux
;; File(system) inspection
file
grep
diffutils
findutils
;; File transformation
patch
gawk
sed
moreutils
;; Compression and archiving
tar
bzip2
gzip
xz
;; Build tools
gnu-make
libtool
autoconf-2.71
automake
pkg-config
bison
;; Native GCC 10 toolchain
gcc-toolchain-10
(list gcc-toolchain-10 "static")
;; Scripting
python-minimal ;; (3.10)
;; Git
git-minimal
;; Tests
python-lief)
(let ((target (getenv "HOST")))
(cond ((string-suffix? "-mingw32" target)
;; Windows
(list zip
(make-mingw-pthreads-cross-toolchain "x86_64-w64-mingw32")
nsis-x86_64
nss-certs
osslsigncode))
((string-contains target "-linux-")
(list (make-bitcoin-cross-toolchain target)))
((string-contains target "darwin")
(list clang-toolchain-17 binutils cmake-minimal python-signapple zip))
(else '())))))
| 0 |
bitcoin/contrib/guix | bitcoin/contrib/guix/patches/gcc-remap-guix-store.patch | From aad25427e74f387412e8bc9a9d7bbc6c496c792f Mon Sep 17 00:00:00 2001
From: Andrew Chow <achow101-github@achow101.com>
Date: Wed, 6 Jul 2022 16:49:41 -0400
Subject: [PATCH] guix: remap guix store paths to /usr
---
libgcc/Makefile.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/libgcc/Makefile.in b/libgcc/Makefile.in
index 851e7657d07..476c2becd1c 100644
--- a/libgcc/Makefile.in
+++ b/libgcc/Makefile.in
@@ -854,7 +854,7 @@ endif
# libgcc_eh.a, only LIB2ADDEH matters. If we do, only LIB2ADDEHSTATIC and
# LIB2ADDEHSHARED matter. (Usually all three are identical.)
-c_flags := -fexceptions
+c_flags := -fexceptions $(shell find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;)
ifeq ($(enable_shared),yes)
--
2.37.0
| 0 |
bitcoin/contrib/guix | bitcoin/contrib/guix/patches/oscrypto-hard-code-openssl.patch | diff --git a/oscrypto/__init__.py b/oscrypto/__init__.py
index eb27313..371ab24 100644
--- a/oscrypto/__init__.py
+++ b/oscrypto/__init__.py
@@ -302,3 +302,8 @@ def load_order():
'oscrypto._win.tls',
'oscrypto.tls',
]
+
+
+paths = '@GUIX_OSCRYPTO_USE_OPENSSL@'.split(',')
+assert len(paths) == 2, 'Value for OSCRYPTO_USE_OPENSSL env var must be two paths separated by a comma'
+use_openssl(*paths)
| 0 |
bitcoin/contrib/guix | bitcoin/contrib/guix/patches/glibc-2.27-no-librt.patch | This patch can be dropped when we are building with glibc 2.30+.
commit 6e41ef56c9baab719a02f1377b1e7ce7bff61e73
Author: Florian Weimer <fweimer@redhat.com>
Date: Fri Feb 8 10:21:56 2019 +0100
rt: Turn forwards from librt to libc into compat symbols [BZ #24194]
As the result of commit 6e6249d0b461b952d0f544792372663feb6d792a
("BZ#14743: Move clock_* symbols from librt to libc."), in glibc 2.17,
clock_gettime, clock_getres, clock_settime, clock_getcpuclockid,
clock_nanosleep were added to libc, and the file rt/clock-compat.c
was added with forwarders to the actual implementations in libc.
These forwarders were wrapped in
#if SHLIB_COMPAT (librt, GLIBC_2_2, GLIBC_2_17)
so that they are not present for newer architectures (such as
powerpc64le) with a 2.17 or later ABI baseline. But the forwarders
were not marked as compatibility symbols. As a result, on older
architectures, historic configure checks such as
AC_CHECK_LIB(rt, clock_gettime)
still cause linking against librt, even though this is completely
unnecessary. It also creates a needless porting hazard because
architectures behave differently when it comes to symbol availability.
Reviewed-by: Carlos O'Donell <carlos@redhat.com>
diff --git a/rt/clock-compat.c b/rt/clock-compat.c
index f816973c05..11e71aa890 100644
--- a/rt/clock-compat.c
+++ b/rt/clock-compat.c
@@ -30,14 +30,16 @@
#if HAVE_IFUNC
# undef INIT_ARCH
# define INIT_ARCH()
-# define COMPAT_REDIRECT(name, proto, arglist) libc_ifunc (name, &__##name)
+# define COMPAT_REDIRECT(name, proto, arglist) libc_ifunc (name, &__##name) \
+ compat_symbol (librt, name, name, GLIBC_2_2);
#else
# define COMPAT_REDIRECT(name, proto, arglist) \
int \
name proto \
{ \
return __##name arglist; \
- }
+ } \
+ compat_symbol (librt, name, name, GLIBC_2_2);
#endif
COMPAT_REDIRECT (clock_getres,
| 0 |
bitcoin/contrib/guix | bitcoin/contrib/guix/patches/vmov-alignment.patch | Description: Use unaligned VMOV instructions
Author: Stephen Kitt <skitt@debian.org>
Bug-Debian: https://bugs.debian.org/939559
See also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412
Based on a patch originally by Claude Heiland-Allen <claude@mathr.co.uk>
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -1058,17 +1058,11 @@
{
if (FLOAT_MODE_P (GET_MODE_INNER (<MODE>mode)))
{
- if (misaligned_operand (operands[1], <MODE>mode))
- return "vmovu<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
- else
- return "vmova<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
+ return "vmovu<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
}
else
{
- if (misaligned_operand (operands[1], <MODE>mode))
- return "vmovdqu<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
- else
- return "vmovdqa<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
+ return "vmovdqu<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
}
}
[(set_attr "type" "ssemov")
@@ -1184,17 +1178,11 @@
{
if (FLOAT_MODE_P (GET_MODE_INNER (<MODE>mode)))
{
- if (misaligned_operand (operands[0], <MODE>mode))
- return "vmovu<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
- else
- return "vmova<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
+ return "vmovu<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
}
else
{
- if (misaligned_operand (operands[0], <MODE>mode))
- return "vmovdqu<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
- else
- return "vmovdqa<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
+ return "vmovdqu<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
}
}
[(set_attr "type" "ssemov")
@@ -7806,7 +7794,7 @@
"TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"@
%vmovlps\t{%1, %0|%q0, %1}
- %vmovaps\t{%1, %0|%0, %1}
+ %vmovups\t{%1, %0|%0, %1}
%vmovlps\t{%1, %d0|%d0, %q1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex")
@@ -13997,29 +13985,15 @@
switch (<MODE>mode)
{
case E_V8DFmode:
- if (misaligned_operand (operands[2], <ssequartermode>mode))
- return "vmovupd\t{%2, %x0|%x0, %2}";
- else
- return "vmovapd\t{%2, %x0|%x0, %2}";
+ return "vmovupd\t{%2, %x0|%x0, %2}";
case E_V16SFmode:
- if (misaligned_operand (operands[2], <ssequartermode>mode))
- return "vmovups\t{%2, %x0|%x0, %2}";
- else
- return "vmovaps\t{%2, %x0|%x0, %2}";
+ return "vmovups\t{%2, %x0|%x0, %2}";
case E_V8DImode:
- if (misaligned_operand (operands[2], <ssequartermode>mode))
- return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}"
+ return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}"
: "vmovdqu\t{%2, %x0|%x0, %2}";
- else
- return which_alternative == 2 ? "vmovdqa64\t{%2, %x0|%x0, %2}"
- : "vmovdqa\t{%2, %x0|%x0, %2}";
case E_V16SImode:
- if (misaligned_operand (operands[2], <ssequartermode>mode))
- return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}"
+ return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}"
: "vmovdqu\t{%2, %x0|%x0, %2}";
- else
- return which_alternative == 2 ? "vmovdqa32\t{%2, %x0|%x0, %2}"
- : "vmovdqa\t{%2, %x0|%x0, %2}";
default:
gcc_unreachable ();
}
@@ -21225,63 +21199,27 @@
switch (get_attr_mode (insn))
{
case MODE_V16SF:
- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
- return "vmovups\t{%1, %t0|%t0, %1}";
- else
- return "vmovaps\t{%1, %t0|%t0, %1}";
+ return "vmovups\t{%1, %t0|%t0, %1}";
case MODE_V8DF:
- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
- return "vmovupd\t{%1, %t0|%t0, %1}";
- else
- return "vmovapd\t{%1, %t0|%t0, %1}";
+ return "vmovupd\t{%1, %t0|%t0, %1}";
case MODE_V8SF:
- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
- return "vmovups\t{%1, %x0|%x0, %1}";
- else
- return "vmovaps\t{%1, %x0|%x0, %1}";
+ return "vmovups\t{%1, %x0|%x0, %1}";
case MODE_V4DF:
- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
- return "vmovupd\t{%1, %x0|%x0, %1}";
- else
- return "vmovapd\t{%1, %x0|%x0, %1}";
+ return "vmovupd\t{%1, %x0|%x0, %1}";
case MODE_XI:
- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
- {
- if (which_alternative == 2)
- return "vmovdqu\t{%1, %t0|%t0, %1}";
- else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
- return "vmovdqu64\t{%1, %t0|%t0, %1}";
- else
- return "vmovdqu32\t{%1, %t0|%t0, %1}";
- }
+ if (which_alternative == 2)
+ return "vmovdqu\t{%1, %t0|%t0, %1}";
+ else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
+ return "vmovdqu64\t{%1, %t0|%t0, %1}";
else
- {
- if (which_alternative == 2)
- return "vmovdqa\t{%1, %t0|%t0, %1}";
- else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
- return "vmovdqa64\t{%1, %t0|%t0, %1}";
- else
- return "vmovdqa32\t{%1, %t0|%t0, %1}";
- }
+ return "vmovdqu32\t{%1, %t0|%t0, %1}";
case MODE_OI:
- if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
- {
- if (which_alternative == 2)
- return "vmovdqu\t{%1, %x0|%x0, %1}";
- else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
- return "vmovdqu64\t{%1, %x0|%x0, %1}";
- else
- return "vmovdqu32\t{%1, %x0|%x0, %1}";
- }
+ if (which_alternative == 2)
+ return "vmovdqu\t{%1, %x0|%x0, %1}";
+ else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
+ return "vmovdqu64\t{%1, %x0|%x0, %1}";
else
- {
- if (which_alternative == 2)
- return "vmovdqa\t{%1, %x0|%x0, %1}";
- else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
- return "vmovdqa64\t{%1, %x0|%x0, %1}";
- else
- return "vmovdqa32\t{%1, %x0|%x0, %1}";
- }
+ return "vmovdqu32\t{%1, %x0|%x0, %1}";
default:
gcc_unreachable ();
}
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -4981,13 +4981,13 @@
switch (type)
{
case opcode_int:
- opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32";
+ opcode = "vmovdqu32";
break;
case opcode_float:
- opcode = misaligned_p ? "vmovups" : "vmovaps";
+ opcode = "vmovups";
break;
case opcode_double:
- opcode = misaligned_p ? "vmovupd" : "vmovapd";
+ opcode = "vmovupd";
break;
}
}
@@ -4996,16 +4996,16 @@
switch (scalar_mode)
{
case E_SFmode:
- opcode = misaligned_p ? "%vmovups" : "%vmovaps";
+ opcode = "%vmovups";
break;
case E_DFmode:
- opcode = misaligned_p ? "%vmovupd" : "%vmovapd";
+ opcode = "%vmovupd";
break;
case E_TFmode:
if (evex_reg_p)
- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
+ opcode = "vmovdqu64";
else
- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
+ opcode = "%vmovdqu";
break;
default:
gcc_unreachable ();
@@ -5017,48 +5017,32 @@
{
case E_QImode:
if (evex_reg_p)
- opcode = (misaligned_p
- ? (TARGET_AVX512BW
- ? "vmovdqu8"
- : "vmovdqu64")
- : "vmovdqa64");
+ opcode = TARGET_AVX512BW ? "vmovdqu8" : "vmovdqu64";
else
- opcode = (misaligned_p
- ? (TARGET_AVX512BW
- ? "vmovdqu8"
- : "%vmovdqu")
- : "%vmovdqa");
+ opcode = TARGET_AVX512BW ? "vmovdqu8" : "%vmovdqu";
break;
case E_HImode:
if (evex_reg_p)
- opcode = (misaligned_p
- ? (TARGET_AVX512BW
- ? "vmovdqu16"
- : "vmovdqu64")
- : "vmovdqa64");
+ opcode = TARGET_AVX512BW ? "vmovdqu16" : "vmovdqu64";
else
- opcode = (misaligned_p
- ? (TARGET_AVX512BW
- ? "vmovdqu16"
- : "%vmovdqu")
- : "%vmovdqa");
+ opcode = TARGET_AVX512BW ? "vmovdqu16" : "%vmovdqu";
break;
case E_SImode:
if (evex_reg_p)
- opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32";
+ opcode = "vmovdqu32";
else
- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
+ opcode = "%vmovdqu";
break;
case E_DImode:
case E_TImode:
case E_OImode:
if (evex_reg_p)
- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
+ opcode = "vmovdqu64";
else
- opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
+ opcode = "%vmovdqu";
break;
case E_XImode:
- opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
+ opcode = "vmovdqu64";
break;
default:
gcc_unreachable ();
| 0 |
bitcoin/contrib/guix | bitcoin/contrib/guix/patches/glibc-2.27-guix-prefix.patch | Without ffile-prefix-map, the debug symbols will contain paths for the
guix store which will include the hashes of each package. However, the
hash for the same package will differ when on different architectures.
In order to be reproducible regardless of the architecture used to build
the package, map all guix store prefixes to something fixed, e.g. /usr.
We might be able to drop this in favour of using --with-nonshared-cflags
when we begin using newer versions of glibc.
--- a/Makeconfig
+++ b/Makeconfig
@@ -992,6 +992,10 @@ object-suffixes :=
CPPFLAGS-.o = $(pic-default)
# libc.a must be compiled with -fPIE/-fpie for static PIE.
CFLAGS-.o = $(filter %frame-pointer,$(+cflags)) $(pie-default)
+
+# Map Guix store paths to /usr
+CFLAGS-.o += `find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;`
+
libtype.o := lib%.a
object-suffixes += .o
ifeq (yes,$(build-shared))
| 0 |
bitcoin/contrib/guix | bitcoin/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch | Note that this has been modified from the original commit, to use __has_include
instead of __has_include__, as the latter was causing build failures with GCC 10.
See also: http://lists.busybox.net/pipermail/buildroot/2020-July/590376.html.
https://sourceware.org/git/?p=glibc.git;a=commit;h=0b9c84906f653978fb8768c7ebd0ee14a47e662e
This patch can be dropped when we are building with glibc 2.28+.
From 562c52cc81a4e456a62e6455feb32732049e9070 Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Mon, 31 Dec 2018 09:26:42 -0800
Subject: [PATCH] riscv: Use __has_include__ to include <asm/syscalls.h> [BZ
#24022]
<asm/syscalls.h> has been removed by
commit 27f8899d6002e11a6e2d995e29b8deab5aa9cc25
Author: David Abdurachmanov <david.abdurachmanov@gmail.com>
Date: Thu Nov 8 20:02:39 2018 +0100
riscv: add asm/unistd.h UAPI header
Marcin Juszkiewicz reported issues while generating syscall table for riscv
using 4.20-rc1. The patch refactors our unistd.h files to match some other
architectures.
- Add asm/unistd.h UAPI header, which has __ARCH_WANT_NEW_STAT only for 64-bit
- Remove asm/syscalls.h UAPI header and merge to asm/unistd.h
- Adjust kernel asm/unistd.h
So now asm/unistd.h UAPI header should show all syscalls for riscv.
<asm/syscalls.h> may be restored by
Subject: [PATCH] riscv: restore asm/syscalls.h UAPI header
Date: Tue, 11 Dec 2018 09:09:35 +0100
UAPI header asm/syscalls.h was merged into UAPI asm/unistd.h header,
which did resolve issue with missing syscalls macros resulting in
glibc (2.28) build failure. It also broke glibc in a different way:
asm/syscalls.h is being used by glibc. I noticed this while doing
Fedora 30/Rawhide mass rebuild.
The patch returns asm/syscalls.h header and incl. it into asm/unistd.h.
I plan to send a patch to glibc to use asm/unistd.h instead of
asm/syscalls.h
In the meantime, we use __has_include__, which was added to GCC 5, to
check if <asm/syscalls.h> exists before including it. Tested with
build-many-glibcs.py for riscv against kernel 4.19.12 and 4.20-rc7.
[BZ #24022]
* sysdeps/unix/sysv/linux/riscv/flush-icache.c: Check if
<asm/syscalls.h> exists with __has_include__ before including it.
---
sysdeps/unix/sysv/linux/riscv/flush-icache.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/sysdeps/unix/sysv/linux/riscv/flush-icache.c b/sysdeps/unix/sysv/linux/riscv/flush-icache.c
index d612ef4c6c..0b2042620b 100644
--- a/sysdeps/unix/sysv/linux/riscv/flush-icache.c
+++ b/sysdeps/unix/sysv/linux/riscv/flush-icache.c
@@ -21,7 +21,11 @@
#include <stdlib.h>
#include <atomic.h>
#include <sys/cachectl.h>
-#include <asm/syscalls.h>
+#if __has_include (<asm/syscalls.h>)
+# include <asm/syscalls.h>
+#else
+# include <asm/unistd.h>
+#endif
typedef int (*func_type) (void *, void *, unsigned long int);
--
2.31.1
| 0 |
bitcoin/contrib/guix | bitcoin/contrib/guix/patches/glibc-2.27-powerpc-ldbrx.patch | From 50b0b3c9ff71ffd7ebbd74ae46844c3566478123 Mon Sep 17 00:00:00 2001
From: "Gabriel F. T. Gomes" <gabrielftg@linux.ibm.com>
Date: Mon, 27 May 2019 15:21:22 -0300
Subject: [PATCH] powerpc: Fix build failures with current GCC
Since GCC commit 271500 (svn), also known as the following commit on the
git mirror:
commit e154242724b084380e3221df7c08fcdbd8460674
Author: amodra <amodra@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Wed May 22 04:34:26 2019 +0000
[RS6000] Don't pass -many to the assembler
glibc builds are failing when an assembly implementation does not
declare the correct '.machine' directive, or when no such directive is
declared at all. For example, when a POWER6 instruction is used, but
'.machine power6' is not declared, the assembler will fail with an error
similar to the following:
../sysdeps/powerpc/powerpc64/power8/strcmp.S: Assembler messages:
24 ../sysdeps/powerpc/powerpc64/power8/strcmp.S:55: Error: unrecognized opcode: `cmpb'
This patch adds '.machine powerN' directives where none existed, as well
as it updates '.machine power7' directives on POWER8 files, because the
minimum binutils version required to build glibc (binutils 2.25) now
provides this machine version. It also adds '-many' to the assembler
command used to build tst-set_ppr.c.
Tested for powerpc, powerpc64, and powerpc64le, as well as with
build-many-glibcs.py for powerpc targets.
Reviewed-by: Tulio Magno Quites Machado Filho <tuliom@linux.ibm.com>
---
sysdeps/powerpc/Makefile | 5 +++
sysdeps/powerpc/powerpc64/power4/memcmp.S | 7 ++++
sysdeps/powerpc/powerpc64/power7/strncmp.S | 1 +
.../powerpc/powerpc64/power8/fpu/s_llround.S | 1 +
sysdeps/powerpc/powerpc64/power8/strcasecmp.S | 36 ++++++-------------
sysdeps/powerpc/powerpc64/power8/strcasestr.S | 14 ++------
sysdeps/powerpc/powerpc64/power8/strcmp.S | 1 +
7 files changed, 28 insertions(+), 37 deletions(-)
diff --git a/sysdeps/powerpc/Makefile b/sysdeps/powerpc/Makefile
index 6aa683b03f..23126147df 100644
--- a/sysdeps/powerpc/Makefile
+++ b/sysdeps/powerpc/Makefile
@@ -45,6 +45,11 @@ ifeq ($(subdir),misc)
sysdep_headers += sys/platform/ppc.h
tests += test-gettimebase
tests += tst-set_ppr
+
+# This test is expected to run and exit with EXIT_UNSUPPORTED on
+# processors that do not implement the Power ISA 2.06 or greater.
+# But the test makes use of instructions from Power ISA 2.06 and 2.07.
+CFLAGS-tst-set_ppr.c += -Wa,-many
endif
ifneq (,$(filter %le,$(config-machine)))
diff --git a/sysdeps/powerpc/powerpc64/power4/memcmp.S b/sysdeps/powerpc/powerpc64/power4/memcmp.S
index e5319f101f..38dcf4c9a1 100644
--- a/sysdeps/powerpc/powerpc64/power4/memcmp.S
+++ b/sysdeps/powerpc/powerpc64/power4/memcmp.S
@@ -26,7 +26,14 @@
# define MEMCMP memcmp
#endif
+#ifndef __LITTLE_ENDIAN__
.machine power4
+#else
+/* Little endian is only available since POWER8, so it's safe to
+ specify .machine as power8 (or older), even though this is a POWER4
+ file. Since the little-endian code uses 'ldbrx', power7 is enough. */
+ .machine power7
+#endif
ENTRY_TOCLESS (MEMCMP, 4)
CALL_MCOUNT 3
diff --git a/sysdeps/powerpc/powerpc64/power7/strncmp.S b/sysdeps/powerpc/powerpc64/power7/strncmp.S
index 0c7429d19f..10f898c5a3 100644
--- a/sysdeps/powerpc/powerpc64/power7/strncmp.S
+++ b/sysdeps/powerpc/powerpc64/power7/strncmp.S
@@ -28,6 +28,7 @@
const char *s2 [r4],
size_t size [r5]) */
+ .machine power7
ENTRY_TOCLESS (STRNCMP, 5)
CALL_MCOUNT 3
diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S b/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S
index a22fc63bb3..84c76ba0f9 100644
--- a/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S
+++ b/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S
@@ -26,6 +26,7 @@
/* long long [r3] llround (float x [fp1]) */
+ .machine power8
ENTRY_TOCLESS (__llround)
CALL_MCOUNT 0
frin fp1,fp1 /* Round to nearest +-0.5. */
diff --git a/sysdeps/powerpc/powerpc64/power8/strcasecmp.S b/sysdeps/powerpc/powerpc64/power8/strcasecmp.S
index 3a2efe2a64..eeacd40c7f 100644
--- a/sysdeps/powerpc/powerpc64/power8/strcasecmp.S
+++ b/sysdeps/powerpc/powerpc64/power8/strcasecmp.S
@@ -91,21 +91,7 @@
3: \
TOLOWER()
-#ifdef _ARCH_PWR8
-# define VCLZD_V8_v7 vclzd v8, v7;
-# define MFVRD_R3_V1 mfvrd r3, v1;
-# define VSUBUDM_V9_V8 vsubudm v9, v9, v8;
-# define VPOPCNTD_V8_V8 vpopcntd v8, v8;
-# define VADDUQM_V7_V8 vadduqm v9, v7, v8;
-#else
-# define VCLZD_V8_v7 .long 0x11003fc2
-# define MFVRD_R3_V1 .long 0x7c230067
-# define VSUBUDM_V9_V8 .long 0x112944c0
-# define VPOPCNTD_V8_V8 .long 0x110047c3
-# define VADDUQM_V7_V8 .long 0x11274100
-#endif
-
- .machine power7
+ .machine power8
ENTRY (__STRCASECMP)
#ifdef USE_AS_STRNCASECMP
@@ -265,15 +251,15 @@ L(different):
#ifdef __LITTLE_ENDIAN__
/* Count trailing zero. */
vspltisb v8, -1
- VADDUQM_V7_V8
+ vadduqm v9, v7, v8
vandc v8, v9, v7
- VPOPCNTD_V8_V8
+ vpopcntd v8, v8
vspltb v6, v8, 15
vcmpequb. v6, v6, v1
blt cr6, L(shift8)
#else
/* Count leading zero. */
- VCLZD_V8_v7
+ vclzd v8, v7
vspltb v6, v8, 7
vcmpequb. v6, v6, v1
blt cr6, L(shift8)
@@ -291,7 +277,7 @@ L(skipsum):
/* Merge and move to GPR. */
vmrglb v6, v6, v7
vslo v1, v6, v1
- MFVRD_R3_V1
+ mfvrd r3, v1
/* Place the characters that are different in first position. */
sldi rSTR2, rRTN, 56
srdi rSTR2, rSTR2, 56
@@ -301,7 +287,7 @@ L(skipsum):
vslo v6, v5, v8
vslo v7, v4, v8
vmrghb v1, v6, v7
- MFVRD_R3_V1
+ mfvrd r3, v1
srdi rSTR2, rRTN, 48
sldi rSTR2, rSTR2, 56
srdi rSTR2, rSTR2, 56
@@ -320,15 +306,15 @@ L(null_found):
#ifdef __LITTLE_ENDIAN__
/* Count trailing zero. */
vspltisb v8, -1
- VADDUQM_V7_V8
+ vadduqm v9, v7, v8
vandc v8, v9, v7
- VPOPCNTD_V8_V8
+ vpopcntd v8, v8
vspltb v6, v8, 15
vcmpequb. v6, v6, v10
blt cr6, L(shift_8)
#else
/* Count leading zero. */
- VCLZD_V8_v7
+ vclzd v8, v7
vspltb v6, v8, 7
vcmpequb. v6, v6, v10
blt cr6, L(shift_8)
@@ -343,10 +329,10 @@ L(skipsum1):
vspltisb v10, 7
vslb v10, v10, v10
vsldoi v9, v0, v10, 1
- VSUBUDM_V9_V8
+ vsubudm v9, v9, v8
vspltisb v8, 8
vsldoi v8, v0, v8, 1
- VSUBUDM_V9_V8
+ vsubudm v9, v9, v8
/* Shift and remove junk after null character. */
#ifdef __LITTLE_ENDIAN__
vslo v5, v5, v9
diff --git a/sysdeps/powerpc/powerpc64/power8/strcasestr.S b/sysdeps/powerpc/powerpc64/power8/strcasestr.S
index 9fc24c29f9..e10f06fd86 100644
--- a/sysdeps/powerpc/powerpc64/power8/strcasestr.S
+++ b/sysdeps/powerpc/powerpc64/power8/strcasestr.S
@@ -73,18 +73,8 @@
vor reg, v8, reg; \
vcmpequb. v6, reg, v4;
-/* TODO: change these to the actual instructions when the minimum required
- binutils allows it. */
-#ifdef _ARCH_PWR8
-#define VCLZD_V8_v7 vclzd v8, v7;
-#else
-#define VCLZD_V8_v7 .long 0x11003fc2
-#endif
-
#define FRAMESIZE (FRAME_MIN_SIZE+48)
-/* TODO: change this to .machine power8 when the minimum required binutils
- allows it. */
- .machine power7
+ .machine power8
ENTRY (STRCASESTR, 4)
CALL_MCOUNT 2
mflr r0 /* Load link register LR to r0. */
@@ -291,7 +281,7 @@ L(nullchk1):
vcmpequb. v6, v0, v7
/* Shift r3 by 16 bytes and proceed. */
blt cr6, L(shift16)
- VCLZD_V8_v7
+ vclzd v8, v7
#ifdef __LITTLE_ENDIAN__
vspltb v6, v8, 15
#else
diff --git a/sysdeps/powerpc/powerpc64/power8/strcmp.S b/sysdeps/powerpc/powerpc64/power8/strcmp.S
index 15e7351d1b..d592266d1d 100644
--- a/sysdeps/powerpc/powerpc64/power8/strcmp.S
+++ b/sysdeps/powerpc/powerpc64/power8/strcmp.S
@@ -31,6 +31,7 @@
64K as default, the page cross handling assumes minimum page size of
4k. */
+ .machine power8
ENTRY_TOCLESS (STRCMP, 4)
li r0,0
--
2.41.0
| 0 |
bitcoin/contrib/guix | bitcoin/contrib/guix/patches/glibc-2.27-fcommon.patch | commit 264a4a0dbe1f4369db315080034b500bed66016c
Author: fanquake <fanquake@gmail.com>
Date: Fri May 6 11:03:04 2022 +0100
build: use -fcommon to retain legacy behaviour with GCC 10
GCC 10 started using -fno-common by default, which causes issues with
the powerpc builds using glibc 2.27. A patch was committed to glibc to fix
the issue, 18363b4f010da9ba459b13310b113ac0647c2fcc, but it is non-trivial
to backport, and was broken in at least one way; see the followup in
commit 7650321ce037302bfc2f026aa19e0213b8d02fe6.
For now, retain the legacy GCC behaviour by passing -fcommon when
building glibc.
https://gcc.gnu.org/onlinedocs/gcc/Code-Gen-Options.html.
https://sourceware.org/git/?p=glibc.git;a=commit;h=18363b4f010da9ba459b13310b113ac0647c2fcc
https://sourceware.org/git/?p=glibc.git;a=commit;h=7650321ce037302bfc2f026aa19e0213b8d02fe6
This patch can be dropped when we are building with glibc 2.31+.
diff --git a/Makeconfig b/Makeconfig
index 86a71e5802..aa2166be60 100644
--- a/Makeconfig
+++ b/Makeconfig
@@ -896,7 +896,7 @@ ifeq "$(strip $(+cflags))" ""
endif # $(+cflags) == ""
+cflags += $(cflags-cpu) $(+gccwarn) $(+merge-constants) $(+math-flags) \
- $(+stack-protector)
+ $(+stack-protector) -fcommon
+gcc-nowarn := -w
# Don't duplicate options if we inherited variables from the parent.
| 0 |
bitcoin/contrib/guix | bitcoin/contrib/guix/libexec/build.sh | #!/usr/bin/env bash
# Copyright (c) 2019-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
set -e -o pipefail
export TZ=UTC
# Although Guix _does_ set umask when building its own packages (in our case,
# this is all packages in manifest.scm), it does not set it for `guix
# shell`. It does make sense for at least `guix shell --container`
# to set umask, so if that change gets merged upstream and we bump the
# time-machine to a commit which includes the aforementioned change, we can
# remove this line.
#
# This line should be placed before any commands which create files.
umask 0022
if [ -n "$V" ]; then
# Print both unexpanded (-v) and expanded (-x) forms of commands as they are
# read from this file.
set -vx
# Set VERBOSE for CMake-based builds
export VERBOSE="$V"
fi
# Check that required environment variables are set
cat << EOF
Required environment variables as seen inside the container:
DIST_ARCHIVE_BASE: ${DIST_ARCHIVE_BASE:?not set}
DISTNAME: ${DISTNAME:?not set}
HOST: ${HOST:?not set}
SOURCE_DATE_EPOCH: ${SOURCE_DATE_EPOCH:?not set}
JOBS: ${JOBS:?not set}
DISTSRC: ${DISTSRC:?not set}
OUTDIR: ${OUTDIR:?not set}
EOF
ACTUAL_OUTDIR="${OUTDIR}"
OUTDIR="${DISTSRC}/output"
#####################
# Environment Setup #
#####################
# The depends folder also serves as a base-prefix for depends packages for
# $HOSTs after they have been successfully built.
BASEPREFIX="${PWD}/depends"
# Given a package name and an output name, return the path of that output in our
# current guix environment
store_path() {
grep --extended-regexp "/[^-]{32}-${1}-[^-]+${2:+-${2}}" "${GUIX_ENVIRONMENT}/manifest" \
| head --lines=1 \
| sed --expression='s|\x29*$||' \
--expression='s|^[[:space:]]*"||' \
--expression='s|"[[:space:]]*$||'
}
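# For example (hypothetical store item), if the manifest lists
# "/gnu/store/<32-char-hash>-gcc-toolchain-10.3.0", then 'store_path gcc-toolchain'
# prints that path, and 'store_path gcc-toolchain static' prints the path of the
# toolchain's "static" output.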
# Set environment variables to point the NATIVE toolchain to the right
# includes/libs
NATIVE_GCC="$(store_path gcc-toolchain)"
NATIVE_GCC_STATIC="$(store_path gcc-toolchain static)"
unset LIBRARY_PATH
unset CPATH
unset C_INCLUDE_PATH
unset CPLUS_INCLUDE_PATH
unset OBJC_INCLUDE_PATH
unset OBJCPLUS_INCLUDE_PATH
export LIBRARY_PATH="${NATIVE_GCC}/lib:${NATIVE_GCC_STATIC}/lib"
export C_INCLUDE_PATH="${NATIVE_GCC}/include"
export CPLUS_INCLUDE_PATH="${NATIVE_GCC}/include/c++:${NATIVE_GCC}/include"
export OBJC_INCLUDE_PATH="${NATIVE_GCC}/include"
export OBJCPLUS_INCLUDE_PATH="${NATIVE_GCC}/include/c++:${NATIVE_GCC}/include"
# Set environment variables to point the CROSS toolchain to the right
# includes/libs for $HOST
case "$HOST" in
*mingw*)
# Determine output paths to use in CROSS_* environment variables
CROSS_GLIBC="$(store_path "mingw-w64-x86_64-winpthreads")"
CROSS_GCC="$(store_path "gcc-cross-${HOST}")"
CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)"
CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories...
CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one)
# The search path ordering is generally:
# 1. gcc-related search paths
# 2. libc-related search paths
# 3. kernel-header-related search paths (not applicable to mingw-w64 hosts)
export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include"
export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}"
export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib"
;;
*darwin*)
# The CROSS toolchain for darwin uses the SDK and ignores environment variables.
# See depends/hosts/darwin.mk for more details.
;;
*linux*)
CROSS_GLIBC="$(store_path "glibc-cross-${HOST}")"
CROSS_GLIBC_STATIC="$(store_path "glibc-cross-${HOST}" static)"
CROSS_KERNEL="$(store_path "linux-libre-headers-cross-${HOST}")"
CROSS_GCC="$(store_path "gcc-cross-${HOST}")"
CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)"
CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories...
CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one)
export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include:${CROSS_KERNEL}/include"
export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}"
export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib:${CROSS_GLIBC_STATIC}/lib"
;;
*)
exit 1 ;;
esac
# Sanity check CROSS_*_PATH directories
IFS=':' read -ra PATHS <<< "${CROSS_C_INCLUDE_PATH}:${CROSS_CPLUS_INCLUDE_PATH}:${CROSS_LIBRARY_PATH}"
for p in "${PATHS[@]}"; do
if [ -n "$p" ] && [ ! -d "$p" ]; then
echo "'$p' doesn't exist or isn't a directory... Aborting..."
exit 1
fi
done
# Disable Guix ld auto-rpath behavior
case "$HOST" in
*darwin*)
# The auto-rpath behavior is necessary for darwin builds as some native
# tools built by depends refer to and depend on Guix-built native
# libraries
#
# After the native packages in depends are built, the ld wrapper should
# no longer affect our build, as clang would instead reach for
# x86_64-apple-darwin-ld from cctools
;;
*) export GUIX_LD_WRAPPER_DISABLE_RPATH=yes ;;
esac
# Make /usr/bin if it doesn't exist
[ -e /usr/bin ] || mkdir -p /usr/bin
# Symlink file and env to a conventional path
[ -e /usr/bin/file ] || ln -s --no-dereference "$(command -v file)" /usr/bin/file
[ -e /usr/bin/env ] || ln -s --no-dereference "$(command -v env)" /usr/bin/env
# Determine the correct value for -Wl,--dynamic-linker for the current $HOST
case "$HOST" in
*linux*)
glibc_dynamic_linker=$(
case "$HOST" in
x86_64-linux-gnu) echo /lib64/ld-linux-x86-64.so.2 ;;
arm-linux-gnueabihf) echo /lib/ld-linux-armhf.so.3 ;;
aarch64-linux-gnu) echo /lib/ld-linux-aarch64.so.1 ;;
riscv64-linux-gnu) echo /lib/ld-linux-riscv64-lp64d.so.1 ;;
powerpc64-linux-gnu) echo /lib64/ld64.so.1;;
powerpc64le-linux-gnu) echo /lib64/ld64.so.2;;
*) exit 1 ;;
esac
)
;;
esac
# Environment variables for determinism
export TAR_OPTIONS="--owner=0 --group=0 --numeric-owner --mtime='@${SOURCE_DATE_EPOCH}' --sort=name"
export TZ="UTC"
case "$HOST" in
*darwin*)
# cctools AR, unlike GNU binutils AR, does not have a deterministic mode
# or a configure flag to enable determinism by default, it only
# understands if this env-var is set or not. See:
#
# https://github.com/tpoechtrager/cctools-port/blob/55562e4073dea0fbfd0b20e0bf69ffe6390c7f97/cctools/ar/archive.c#L334
export ZERO_AR_DATE=yes
;;
esac
####################
# Depends Building #
####################
# Build the depends tree, overriding variables that assume multilib gcc
make -C depends --jobs="$JOBS" HOST="$HOST" \
${V:+V=1} \
${SOURCES_PATH+SOURCES_PATH="$SOURCES_PATH"} \
${BASE_CACHE+BASE_CACHE="$BASE_CACHE"} \
${SDK_PATH+SDK_PATH="$SDK_PATH"} \
x86_64_linux_CC=x86_64-linux-gnu-gcc \
x86_64_linux_CXX=x86_64-linux-gnu-g++ \
x86_64_linux_AR=x86_64-linux-gnu-gcc-ar \
x86_64_linux_RANLIB=x86_64-linux-gnu-gcc-ranlib \
x86_64_linux_NM=x86_64-linux-gnu-gcc-nm \
x86_64_linux_STRIP=x86_64-linux-gnu-strip \
FORCE_USE_SYSTEM_CLANG=1
###########################
# Source Tarball Building #
###########################
GIT_ARCHIVE="${DIST_ARCHIVE_BASE}/${DISTNAME}.tar.gz"
# Create the source tarball if not already there
if [ ! -e "$GIT_ARCHIVE" ]; then
mkdir -p "$(dirname "$GIT_ARCHIVE")"
git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD
fi
mkdir -p "$OUTDIR"
###########################
# Binary Tarball Building #
###########################
# CONFIGFLAGS
CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary"
# CFLAGS
HOST_CFLAGS="-O2 -g"
HOST_CFLAGS+=$(find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;)
case "$HOST" in
*linux*) HOST_CFLAGS+=" -ffile-prefix-map=${PWD}=." ;;
*mingw*) HOST_CFLAGS+=" -fno-ident" ;;
*darwin*) unset HOST_CFLAGS ;;
esac
# CXXFLAGS
HOST_CXXFLAGS="$HOST_CFLAGS"
case "$HOST" in
arm-linux-gnueabihf) HOST_CXXFLAGS="${HOST_CXXFLAGS} -Wno-psabi" ;;
esac
# LDFLAGS
case "$HOST" in
*linux*) HOST_LDFLAGS="-Wl,--as-needed -Wl,--dynamic-linker=$glibc_dynamic_linker -static-libstdc++ -Wl,-O2" ;;
*mingw*) HOST_LDFLAGS="-Wl,--no-insert-timestamp" ;;
esac
# Make $HOST-specific native binaries from depends available in $PATH
export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}"
mkdir -p "$DISTSRC"
(
cd "$DISTSRC"
# Extract the source tarball
tar --strip-components=1 -xf "${GIT_ARCHIVE}"
./autogen.sh
# Configure this DISTSRC for $HOST
# shellcheck disable=SC2086
env CONFIG_SITE="${BASEPREFIX}/${HOST}/share/config.site" \
./configure --prefix=/ \
--disable-ccache \
--disable-maintainer-mode \
--disable-dependency-tracking \
${CONFIGFLAGS} \
${HOST_CFLAGS:+CFLAGS="${HOST_CFLAGS}"} \
${HOST_CXXFLAGS:+CXXFLAGS="${HOST_CXXFLAGS}"} \
${HOST_LDFLAGS:+LDFLAGS="${HOST_LDFLAGS}"}
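    # Remove stray '-lstdc++' references so the C++ runtime is not pulled in
    # dynamically behind our backs (e.g. when -static-libstdc++ is in use).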
sed -i.old 's/-lstdc++ //g' config.status libtool
# Build Bitcoin Core
make --jobs="$JOBS" ${V:+V=1}
# Check that symbol/security checks tools are sane.
make test-security-check ${V:+V=1}
# Perform basic security checks on a series of executables.
make -C src --jobs=1 check-security ${V:+V=1}
# Check that executables only contain allowed version symbols.
make -C src --jobs=1 check-symbols ${V:+V=1}
mkdir -p "$OUTDIR"
# Make the os-specific installers
case "$HOST" in
*mingw*)
make deploy ${V:+V=1} BITCOIN_WIN_INSTALLER="${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe"
;;
esac
# Setup the directory where our Bitcoin Core build for HOST will be
# installed. This directory will also later serve as the input for our
# binary tarballs.
INSTALLPATH="${PWD}/installed/${DISTNAME}"
mkdir -p "${INSTALLPATH}"
# Install built Bitcoin Core to $INSTALLPATH
case "$HOST" in
*darwin*)
make install-strip DESTDIR="${INSTALLPATH}" ${V:+V=1}
;;
*)
make install DESTDIR="${INSTALLPATH}" ${V:+V=1}
;;
esac
case "$HOST" in
*darwin*)
make osx_volname ${V:+V=1}
make deploydir ${V:+V=1}
mkdir -p "unsigned-app-${HOST}"
cp --target-directory="unsigned-app-${HOST}" \
osx_volname \
contrib/macdeploy/detached-sig-create.sh
mv --target-directory="unsigned-app-${HOST}" dist
(
cd "unsigned-app-${HOST}"
find . -print0 \
| sort --zero-terminated \
| tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \
| gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.tar.gz" \
|| ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}-unsigned.tar.gz" && exit 1 )
)
make deploy ${V:+V=1} OSX_ZIP="${OUTDIR}/${DISTNAME}-${HOST}-unsigned.zip"
;;
esac
(
cd installed
case "$HOST" in
*mingw*)
mv --target-directory="$DISTNAME"/lib/ "$DISTNAME"/bin/*.dll
;;
esac
# Prune libtool and object archives
find . -name "lib*.la" -delete
find . -name "lib*.a" -delete
# Prune pkg-config files
rm -rf "${DISTNAME}/lib/pkgconfig"
case "$HOST" in
*darwin*) ;;
*)
# Split binaries and libraries from their debug symbols
{
find "${DISTNAME}/bin" -type f -executable -print0
find "${DISTNAME}/lib" -type f -print0
} | xargs -0 -P"$JOBS" -I{} "${DISTSRC}/contrib/devtools/split-debug.sh" {} {} {}.dbg
;;
esac
case "$HOST" in
*mingw*)
cp "${DISTSRC}/doc/README_windows.txt" "${DISTNAME}/readme.txt"
;;
*linux*)
cp "${DISTSRC}/README.md" "${DISTNAME}/"
;;
esac
# copy over the example bitcoin.conf file. if contrib/devtools/gen-bitcoin-conf.sh
# has not been run before building, this file will be a stub
cp "${DISTSRC}/share/examples/bitcoin.conf" "${DISTNAME}/"
cp -r "${DISTSRC}/share/rpcauth" "${DISTNAME}/share/"
# Finally, deterministically produce {non-,}debug binary tarballs ready
# for release
case "$HOST" in
*mingw*)
find "${DISTNAME}" -not -name "*.dbg" -print0 \
| xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}"
find "${DISTNAME}" -not -name "*.dbg" \
| sort \
| zip -X@ "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}.zip" \
|| ( rm -f "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}.zip" && exit 1 )
find "${DISTNAME}" -name "*.dbg" -print0 \
| xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}"
find "${DISTNAME}" -name "*.dbg" \
| sort \
| zip -X@ "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}-debug.zip" \
|| ( rm -f "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}-debug.zip" && exit 1 )
;;
*linux*)
find "${DISTNAME}" -not -name "*.dbg" -print0 \
| sort --zero-terminated \
| tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \
| gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" \
|| ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" && exit 1 )
find "${DISTNAME}" -name "*.dbg" -print0 \
| sort --zero-terminated \
| tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \
| gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}-debug.tar.gz" \
|| ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}-debug.tar.gz" && exit 1 )
;;
*darwin*)
find "${DISTNAME}" -print0 \
| sort --zero-terminated \
| tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \
| gzip -9n > "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" \
|| ( rm -f "${OUTDIR}/${DISTNAME}-${HOST}.tar.gz" && exit 1 )
;;
esac
) # $DISTSRC/installed
case "$HOST" in
*mingw*)
cp -rf --target-directory=. contrib/windeploy
(
cd ./windeploy
mkdir -p unsigned
cp --target-directory=unsigned/ "${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe"
find . -print0 \
| sort --zero-terminated \
| tar --create --no-recursion --mode='u+rw,go+r-w,a+X' --null --files-from=- \
| gzip -9n > "${OUTDIR}/${DISTNAME}-win64-unsigned.tar.gz" \
|| ( rm -f "${OUTDIR}/${DISTNAME}-win64-unsigned.tar.gz" && exit 1 )
)
;;
esac
) # $DISTSRC
rm -rf "$ACTUAL_OUTDIR"
mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \
|| ( rm -rf "$ACTUAL_OUTDIR" && exit 1 )
(
cd /outdir-base
{
echo "$GIT_ARCHIVE"
find "$ACTUAL_OUTDIR" -type f
} | xargs realpath --relative-base="$PWD" \
| xargs sha256sum \
| sort -k2 \
| sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part
)
| 0 |
bitcoin/contrib/guix | bitcoin/contrib/guix/libexec/codesign.sh | #!/usr/bin/env bash
# Copyright (c) 2021-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
set -e -o pipefail
export TZ=UTC
# Although Guix _does_ set umask when building its own packages (in our case,
# this is all packages in manifest.scm), it does not set it for `guix
# shell`. It does make sense for at least `guix shell --container`
# to set umask, so if that change gets merged upstream and we bump the
# time-machine to a commit which includes the aforementioned change, we can
# remove this line.
#
# This line should be placed before any commands which creates files.
umask 0022
if [ -n "$V" ]; then
# Print both unexpanded (-v) and expanded (-x) forms of commands as they are
# read from this file.
set -vx
# Set VERBOSE for CMake-based builds
export VERBOSE="$V"
fi
# Check that required environment variables are set
cat << EOF
Required environment variables as seen inside the container:
UNSIGNED_TARBALL: ${UNSIGNED_TARBALL:?not set}
DETACHED_SIGS_REPO: ${DETACHED_SIGS_REPO:?not set}
DIST_ARCHIVE_BASE: ${DIST_ARCHIVE_BASE:?not set}
DISTNAME: ${DISTNAME:?not set}
HOST: ${HOST:?not set}
SOURCE_DATE_EPOCH: ${SOURCE_DATE_EPOCH:?not set}
DISTSRC: ${DISTSRC:?not set}
OUTDIR: ${OUTDIR:?not set}
EOF
ACTUAL_OUTDIR="${OUTDIR}"
OUTDIR="${DISTSRC}/output"
git_head_version() {
local recent_tag
if recent_tag="$(git -C "$1" describe --exact-match HEAD 2> /dev/null)"; then
echo "${recent_tag#v}"
else
git -C "$1" rev-parse --short=12 HEAD
fi
}
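# Example: a checkout at tag "v24.0.1" yields "24.0.1"; an untagged checkout
# yields the 12-character short commit id.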
CODESIGNATURE_GIT_ARCHIVE="${DIST_ARCHIVE_BASE}/${DISTNAME}-codesignatures-$(git_head_version "$DETACHED_SIGS_REPO").tar.gz"
# Create the codesignature tarball if not already there
if [ ! -e "$CODESIGNATURE_GIT_ARCHIVE" ]; then
mkdir -p "$(dirname "$CODESIGNATURE_GIT_ARCHIVE")"
git -C "$DETACHED_SIGS_REPO" archive --output="$CODESIGNATURE_GIT_ARCHIVE" HEAD
fi
mkdir -p "$OUTDIR"
mkdir -p "$DISTSRC"
(
cd "$DISTSRC"
tar -xf "$UNSIGNED_TARBALL"
mkdir -p codesignatures
tar -C codesignatures -xf "$CODESIGNATURE_GIT_ARCHIVE"
case "$HOST" in
*mingw*)
find "$PWD" -name "*-unsigned.exe" | while read -r infile; do
infile_base="$(basename "$infile")"
# Codesign each *-unsigned.exe and write the signed output to OUTDIR
osslsigncode attach-signature \
-in "$infile" \
-out "${OUTDIR}/${infile_base/-unsigned}" \
-CAfile "$GUIX_ENVIRONMENT/etc/ssl/certs/ca-certificates.crt" \
-sigin codesignatures/win/"$infile_base".pem
done
;;
*darwin*)
# Apply detached codesignatures to dist/ (in-place)
signapple apply dist/Bitcoin-Qt.app codesignatures/osx/dist
# Make a .zip from dist/
cd dist/
find . -print0 \
| xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}"
find . | sort \
| zip -X@ "${OUTDIR}/${DISTNAME}-${HOST}.zip"
;;
*)
exit 1
;;
esac
) # $DISTSRC
rm -rf "$ACTUAL_OUTDIR"
mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \
|| ( rm -rf "$ACTUAL_OUTDIR" && exit 1 )
(
cd /outdir-base
{
echo "$UNSIGNED_TARBALL"
echo "$CODESIGNATURE_GIT_ARCHIVE"
find "$ACTUAL_OUTDIR" -type f
} | xargs realpath --relative-base="$PWD" \
| xargs sha256sum \
| sort -k2 \
| sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part
)
| 0 |
bitcoin/contrib/guix | bitcoin/contrib/guix/libexec/prelude.bash | #!/usr/bin/env bash
export LC_ALL=C
set -e -o pipefail
# shellcheck source=contrib/shell/realpath.bash
source contrib/shell/realpath.bash
# shellcheck source=contrib/shell/git-utils.bash
source contrib/shell/git-utils.bash
################
# Required non-builtin commands should be invocable
################
check_tools() {
for cmd in "$@"; do
if ! command -v "$cmd" > /dev/null 2>&1; then
echo "ERR: This script requires that '$cmd' is installed and available in your \$PATH"
exit 1
fi
done
}
check_tools cat env readlink dirname basename git
################
# We should be at the top directory of the repository
################
same_dir() {
local resolved1 resolved2
resolved1="$(bash_realpath "${1}")"
resolved2="$(bash_realpath "${2}")"
[ "$resolved1" = "$resolved2" ]
}
if ! same_dir "${PWD}" "$(git_root)"; then
cat << EOF
ERR: This script must be invoked from the top level of the git repository
Hint: This may look something like:
env FOO=BAR ./contrib/guix/guix-<blah>
EOF
exit 1
fi
################
# Execute "$@" in a pinned, possibly older version of Guix, for reproducibility
# across time.
time-machine() {
# shellcheck disable=SC2086
guix time-machine --url=https://git.savannah.gnu.org/git/guix.git \
--commit=d5ca4d4fd713a9f7e17e074a1e37dda99bbb09fc \
--cores="$JOBS" \
--keep-failed \
--fallback \
${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \
${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_TIMEMACHINE_FLAGS} \
-- "$@"
}
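# Example (hypothetical invocation): run a command inside the pinned Guix, e.g.
#   time-machine shell --manifest="${PWD}/contrib/guix/manifest.scm" -- make -C depends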
################
# Set common variables
################
VERSION="${FORCE_VERSION:-$(git_head_version)}"
DISTNAME="${DISTNAME:-bitcoin-${VERSION}}"
version_base_prefix="${PWD}/guix-build-"
VERSION_BASE="${version_base_prefix}${VERSION}" # TOP
DISTSRC_BASE="${DISTSRC_BASE:-${VERSION_BASE}}"
OUTDIR_BASE="${OUTDIR_BASE:-${VERSION_BASE}/output}"
var_base_basename="var"
VAR_BASE="${VAR_BASE:-${VERSION_BASE}/${var_base_basename}}"
profiles_base_basename="profiles"
PROFILES_BASE="${PROFILES_BASE:-${VAR_BASE}/${profiles_base_basename}}"
| 0 |
bitcoin/contrib | bitcoin/contrib/verify-binaries/test.py | #!/usr/bin/env python3
import json
import sys
import subprocess
from pathlib import Path
def main():
"""Tests ordered roughly from faster to slower."""
expect_code(run_verify("", "pub", '0.32'), 4, "Nonexistent version should fail")
expect_code(run_verify("", "pub", '0.32.awefa.12f9h'), 11, "Malformed version should fail")
expect_code(run_verify('--min-good-sigs 20', "pub", "22.0"), 9, "--min-good-sigs 20 should fail")
print("- testing verification (22.0)", flush=True)
_220 = run_verify("--json", "pub", "22.0")
try:
result = json.loads(_220.stdout.decode())
except Exception:
print("failed on 22.0 --json:")
print_process_failure(_220)
raise
expect_code(_220, 0, "22.0 should succeed")
v = result['verified_binaries']
assert result['good_trusted_sigs']
assert v['bitcoin-22.0-aarch64-linux-gnu.tar.gz'] == 'ac718fed08570a81b3587587872ad85a25173afa5f9fbbd0c03ba4d1714cfa3e'
assert v['bitcoin-22.0-osx64.tar.gz'] == '2744d199c3343b2d94faffdfb2c94d75a630ba27301a70e47b0ad30a7e0155e9'
assert v['bitcoin-22.0-x86_64-linux-gnu.tar.gz'] == '59ebd25dd82a51638b7a6bb914586201e67db67b919b2a1ff08925a7936d1b16'
def run_verify(global_args: str, command: str, command_args: str) -> subprocess.CompletedProcess:
maybe_here = Path.cwd() / 'verify.py'
path = maybe_here if maybe_here.exists() else Path.cwd() / 'contrib' / 'verify-binaries' / 'verify.py'
if command == "pub":
command += " --cleanup"
return subprocess.run(
f"{path} {global_args} {command} {command_args}",
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
def expect_code(completed: subprocess.CompletedProcess, expected_code: int, msg: str):
if completed.returncode != expected_code:
print(f"{msg!r} failed: got code {completed.returncode}, expected {expected_code}")
print_process_failure(completed)
sys.exit(1)
else:
print(f"✓ {msg!r} passed")
def print_process_failure(completed: subprocess.CompletedProcess):
print(f"stdout:\n{completed.stdout.decode()}")
print(f"stderr:\n{completed.stderr.decode()}")
if __name__ == '__main__':
main()
| 0 |
bitcoin/contrib | bitcoin/contrib/verify-binaries/README.md | ### Verify Binaries
#### Preparation
As of Bitcoin Core v22.0, releases are signed by a number of public keys on the basis
of the [guix.sigs repository](https://github.com/bitcoin-core/guix.sigs/). When
verifying binary downloads, you (the end user) decide which of these public keys you
trust and then use that trust model to evaluate the signature on a file that contains
hashes of the release binaries. The downloaded binaries are then hashed and compared to
the signed checksum file.
First, you have to figure out which public keys to recognize. Browse the [list of frequent
builder-keys](https://github.com/bitcoin-core/guix.sigs/tree/main/builder-keys) and
decide which of these keys you would like to trust. For each key you want to trust, you
must obtain that key for your local GPG installation.
You can obtain these keys:
- through a browser, using a key server (e.g. keyserver.ubuntu.com),
- manually, using the `gpg --keyserver <url> --recv-keys <key>` command, or
- by running the packaged `verify.py --import-keys ...` script to
  have it automatically retrieve unrecognized keys.
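For example, to fetch a single builder key manually (the fingerprint below is a
placeholder; substitute a key you have decided to trust):
```sh
gpg --keyserver hkps://keys.openpgp.org --recv-keys <builder-key-fingerprint>
```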
#### Usage
This script attempts to download the checksum file (`SHA256SUMS`) and corresponding
signature file `SHA256SUMS.asc` from https://bitcoincore.org and https://bitcoin.org.
It first checks if the checksum file is valid based upon a plurality of signatures, and
then downloads the release files specified in the checksum file, and checks if the
hashes of the release files are as expected.
If we encounter pubkeys in the signature file that we do not recognize, the script
can prompt the user as to whether they'd like to download the pubkeys. To enable
this behavior, use the `--import-keys` flag.
The script returns 0 if everything passes the checks. It returns 1 if either the
signature check or the hash check doesn't pass. An exit code of 2 or greater indicates an error.
See the `Config` object for various options.
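For example, a minimal wrapper sketch that distinguishes verification failures
from other errors based on the exit code:
```sh
./contrib/verify-binaries/verify.py pub 22.0
case $? in
  0) echo "verified" ;;
  1) echo "signature or hash check failed" ;;
  *) echo "error during verification" ;;
esac
```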
#### Examples
Validate releases with default settings:
```sh
./contrib/verify-binaries/verify.py pub 22.0
./contrib/verify-binaries/verify.py pub 22.0-rc3
```
Get JSON output and don't prompt for user input (no auto key import):
```sh
./contrib/verify-binaries/verify.py --json pub 22.0-x86
```
Rely only on local GPG state and manually specified keys, while requiring a
threshold of at least 10 trusted signatures:
```sh
./contrib/verify-binaries/verify.py \
--trusted-keys 74E2DEF5D77260B98BC19438099BAD163C70FBFA,9D3CC86A72F8494342EA5FD10A41BDC3F4FAFF1C \
--min-good-sigs 10 pub 22.0-x86
```
If you only want to download the binaries for a certain platform, add the corresponding suffix, e.g.:
```sh
./contrib/verify-binaries/verify.py pub 24.0.1-darwin
./contrib/verify-binaries/verify.py pub 23.1-rc1-win64
```
If you do not want to keep the downloaded binaries, specify the cleanup option.
```sh
./contrib/verify-binaries/verify.py pub --cleanup 22.0
```
Use the `bin` subcommand to verify all files listed in a local checksum file:
```sh
./contrib/verify-binaries/verify.py bin SHA256SUMS
```
Verify only a subset of the files listed in a local checksum file:
```sh
./contrib/verify-binaries/verify.py bin ~/Downloads/SHA256SUMS \
~/Downloads/bitcoin-24.0.1-x86_64-linux-gnu.tar.gz \
~/Downloads/bitcoin-24.0.1-arm-linux-gnueabihf.tar.gz
```
| 0 |
bitcoin/contrib | bitcoin/contrib/verify-binaries/verify.py | #!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Script for verifying Bitcoin Core release binaries.
This script attempts to download the sum file SHA256SUMS and corresponding
signature file SHA256SUMS.asc from bitcoincore.org and bitcoin.org and
compares them.
The sum-signature file is signed by a number of builder keys. This script
ensures that there is a minimum threshold of signatures from pubkeys that
we trust. This trust is articulated on the basis of configuration options
here, but by default is based upon local GPG trust settings.
The builder keys are available in the guix.sigs repo:
https://github.com/bitcoin-core/guix.sigs/tree/main/builder-keys
If a minimum good, trusted signature threshold is met on the sum file, we then
download the files specified in SHA256SUMS, and check if the hashes of these
files match those that are specified. The script returns 0 if everything passes
the checks. It returns 1 if either the signature check or the hash check
doesn't pass. If an error occurs the return value is >= 2.
Logging output goes to stderr and final binary verification data goes to stdout.
JSON output can by obtained by setting env BINVERIFY_JSON=1.
"""
import argparse
import difflib
import json
import logging
import os
import subprocess
import typing as t
import re
import sys
import shutil
import tempfile
import textwrap
import urllib.request
import urllib.error
import enum
from hashlib import sha256
from pathlib import PurePath, Path
# The primary host; this will fail if we can't retrieve files from here.
HOST1 = "https://bitcoincore.org"
HOST2 = "https://bitcoin.org"
VERSIONPREFIX = "bitcoin-core-"
SUMS_FILENAME = 'SHA256SUMS'
SIGNATUREFILENAME = f"{SUMS_FILENAME}.asc"
class ReturnCode(enum.IntEnum):
SUCCESS = 0
INTEGRITY_FAILURE = 1
FILE_GET_FAILED = 4
FILE_MISSING_FROM_ONE_HOST = 5
FILES_NOT_EQUAL = 6
NO_BINARIES_MATCH = 7
NOT_ENOUGH_GOOD_SIGS = 9
BINARY_DOWNLOAD_FAILED = 10
BAD_VERSION = 11
def set_up_logger(is_verbose: bool = True) -> logging.Logger:
"""Set up a logger that writes to stderr."""
log = logging.getLogger(__name__)
log.setLevel(logging.INFO if is_verbose else logging.WARNING)
console = logging.StreamHandler(sys.stderr) # log to stderr
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
console.setFormatter(formatter)
log.addHandler(console)
return log
log = set_up_logger()
def indent(output: str) -> str:
return textwrap.indent(output, ' ')
def bool_from_env(key, default=False) -> bool:
if key not in os.environ:
return default
raw = os.environ[key]
if raw.lower() in ('1', 'true'):
return True
elif raw.lower() in ('0', 'false'):
return False
raise ValueError(f"Unrecognized environment value {key}={raw!r}")
VERSION_FORMAT = "<major>.<minor>[.<patch>][-rc[0-9]][-platform]"
VERSION_EXAMPLE = "22.0-x86_64 or 23.1-rc1-darwin"
def parse_version_string(version_str):
parts = version_str.split('-')
version_base = parts[0]
version_rc = ""
version_os = ""
if len(parts) == 2: # "<version>-rcN" or "version-platform"
if "rc" in parts[1]:
version_rc = parts[1]
else:
version_os = parts[1]
elif len(parts) == 3: # "<version>-rcN-platform"
version_rc = parts[1]
version_os = parts[2]
return version_base, version_rc, version_os
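# For example:
#   parse_version_string("22.0")            -> ("22.0", "", "")
#   parse_version_string("23.1-rc1-darwin") -> ("23.1", "rc1", "darwin")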
def download_with_wget(remote_file, local_file):
result = subprocess.run(['wget', '-O', local_file, remote_file],
stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
return result.returncode == 0, result.stdout.decode().rstrip()
def download_lines_with_urllib(url) -> tuple[bool, list[str]]:
"""Get (success, text lines of a file) over HTTP."""
try:
return (True, [
line.strip().decode() for line in urllib.request.urlopen(url).readlines()])
except urllib.error.HTTPError as e:
log.warning(f"HTTP request to {url} failed (HTTPError): {e}")
except Exception as e:
log.warning(f"HTTP request to {url} failed ({e})")
return (False, [])
def verify_with_gpg(
filename,
signature_filename,
output_filename: t.Optional[str] = None
) -> tuple[int, str]:
with tempfile.NamedTemporaryFile() as status_file:
args = [
'gpg', '--yes', '--verify', '--verify-options', 'show-primary-uid-only', "--status-file", status_file.name,
'--output', output_filename if output_filename else '', signature_filename, filename]
env = dict(os.environ, LANGUAGE='en')
result = subprocess.run(args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env)
gpg_data = status_file.read().decode().rstrip()
log.debug(f'Result from GPG ({result.returncode}): {result.stdout.decode()}')
log.debug(f"{gpg_data}")
return result.returncode, gpg_data
def remove_files(filenames):
for filename in filenames:
os.remove(filename)
class SigData:
"""GPG signature data as parsed from GPG stdout."""
def __init__(self):
self.key = None
self.name = ""
self.trusted = False
self.status = ""
def __bool__(self):
return self.key is not None
def __repr__(self):
return (
"SigData(%r, %r, trusted=%s, status=%r)" %
(self.key, self.name, self.trusted, self.status))
def parse_gpg_result(
output: list[str]
) -> tuple[list[SigData], list[SigData], list[SigData]]:
"""Returns good, unknown, and bad signatures from GPG stdout."""
good_sigs: list[SigData] = []
unknown_sigs: list[SigData] = []
bad_sigs: list[SigData] = []
total_resolved_sigs = 0
# Ensure that all lines we match on include a prefix that prevents malicious input
# from fooling the parser.
def line_begins_with(patt: str, line: str) -> t.Optional[re.Match]:
return re.match(r'^(\[GNUPG:\])\s+' + patt, line)
curr_sigs = unknown_sigs
curr_sigdata = SigData()
for line in output:
if line_begins_with(r"NEWSIG(?:\s|$)", line):
total_resolved_sigs += 1
if curr_sigdata:
curr_sigs.append(curr_sigdata)
curr_sigdata = SigData()
newsig_split = line.split()
if len(newsig_split) == 3:
curr_sigdata.name = newsig_split[2]
elif line_begins_with(r"GOODSIG(?:\s|$)", line):
curr_sigdata.key, curr_sigdata.name = line.split(maxsplit=3)[2:4]
curr_sigs = good_sigs
elif line_begins_with(r"EXPKEYSIG(?:\s|$)", line):
curr_sigdata.key, curr_sigdata.name = line.split(maxsplit=3)[2:4]
curr_sigs = good_sigs
curr_sigdata.status = "expired"
elif line_begins_with(r"REVKEYSIG(?:\s|$)", line):
curr_sigdata.key, curr_sigdata.name = line.split(maxsplit=3)[2:4]
curr_sigs = good_sigs
curr_sigdata.status = "revoked"
elif line_begins_with(r"BADSIG(?:\s|$)", line):
curr_sigdata.key, curr_sigdata.name = line.split(maxsplit=3)[2:4]
curr_sigs = bad_sigs
elif line_begins_with(r"ERRSIG(?:\s|$)", line):
curr_sigdata.key, _, _, _, _, _ = line.split()[2:8]
curr_sigs = unknown_sigs
elif line_begins_with(r"TRUST_(UNDEFINED|NEVER)(?:\s|$)", line):
curr_sigdata.trusted = False
elif line_begins_with(r"TRUST_(MARGINAL|FULLY|ULTIMATE)(?:\s|$)", line):
curr_sigdata.trusted = True
# The last one won't have been added, so add it now
assert curr_sigdata
curr_sigs.append(curr_sigdata)
all_found = len(good_sigs + bad_sigs + unknown_sigs)
if all_found != total_resolved_sigs:
raise RuntimeError(
f"failed to evaluate all signatures: found {all_found} "
f"but expected {total_resolved_sigs}")
return (good_sigs, unknown_sigs, bad_sigs)
def files_are_equal(filename1, filename2):
with open(filename1, 'rb') as file1:
contents1 = file1.read()
with open(filename2, 'rb') as file2:
contents2 = file2.read()
eq = contents1 == contents2
if not eq:
with open(filename1, 'r', encoding='utf-8') as f1, \
open(filename2, 'r', encoding='utf-8') as f2:
f1lines = f1.readlines()
f2lines = f2.readlines()
diff = indent(
''.join(difflib.unified_diff(f1lines, f2lines)))
log.warning(f"found diff in files ({filename1}, {filename2}):\n{diff}\n")
return eq
def get_files_from_hosts_and_compare(
hosts: list[str], path: str, filename: str, require_all: bool = False
) -> ReturnCode:
"""
Retrieve the same file from a number of hosts and ensure they have the same contents.
The first host given will be treated as the "primary" host, and is required to succeed.
Args:
filename: for writing the file locally.
"""
assert len(hosts) > 1
primary_host = hosts[0]
other_hosts = hosts[1:]
got_files = []
def join_url(host: str) -> str:
return host.rstrip('/') + '/' + path.lstrip('/')
url = join_url(primary_host)
success, output = download_with_wget(url, filename)
if not success:
log.error(
f"couldn't fetch file ({url}). "
"Have you specified the version number in the following format?\n"
f"{VERSION_FORMAT} "
f"(example: {VERSION_EXAMPLE})\n"
f"wget output:\n{indent(output)}")
return ReturnCode.FILE_GET_FAILED
else:
log.info(f"got file {url} as {filename}")
got_files.append(filename)
for i, host in enumerate(other_hosts):
url = join_url(host)
fname = filename + f'.{i + 2}'
success, output = download_with_wget(url, fname)
if require_all and not success:
log.error(
f"{host} failed to provide file ({url}), but {primary_host} did?\n"
f"wget output:\n{indent(output)}")
return ReturnCode.FILE_MISSING_FROM_ONE_HOST
elif not success:
log.warning(
f"{host} failed to provide file ({url}). "
f"Continuing based solely upon {primary_host}.")
else:
log.info(f"got file {url} as {fname}")
got_files.append(fname)
for i, got_file in enumerate(got_files):
if got_file == got_files[-1]:
break # break on last file, nothing after it to compare to
compare_to = got_files[i + 1]
if not files_are_equal(got_file, compare_to):
log.error(f"files not equal: {got_file} and {compare_to}")
return ReturnCode.FILES_NOT_EQUAL
return ReturnCode.SUCCESS
def check_multisig(sums_file: str, sigfilename: str, args: argparse.Namespace) -> tuple[int, str, list[SigData], list[SigData], list[SigData]]:
# check signature
#
# We don't write output to a file because this command will almost certainly
# fail with GPG exit code '2' (and so not writing to --output) because of the
# likely presence of multiple untrusted signatures.
retval, output = verify_with_gpg(sums_file, sigfilename)
if args.verbose:
log.info(f"gpg output:\n{indent(output)}")
good, unknown, bad = parse_gpg_result(output.splitlines())
if unknown and args.import_keys:
# Retrieve unknown keys and then try GPG again.
for unsig in unknown:
if prompt_yn(f" ? Retrieve key {unsig.key} ({unsig.name})? (y/N) "):
ran = subprocess.run(
["gpg", "--keyserver", args.keyserver, "--recv-keys", unsig.key])
if ran.returncode != 0:
log.warning(f"failed to retrieve key {unsig.key}")
# Reparse the GPG output now that we have more keys
retval, output = verify_with_gpg(sums_file, sigfilename)
good, unknown, bad = parse_gpg_result(output.splitlines())
return retval, output, good, unknown, bad
def prompt_yn(prompt) -> bool:
"""Return true if the user inputs 'y'."""
got = ''
while got not in ['y', 'n']:
got = input(prompt).lower()
return got == 'y'
def verify_shasums_signature(
signature_file_path: str, sums_file_path: str, args: argparse.Namespace
) -> tuple[
ReturnCode, list[SigData], list[SigData], list[SigData], list[SigData]
]:
min_good_sigs = args.min_good_sigs
gpg_allowed_codes = [0, 2] # 2 is returned when untrusted signatures are present.
gpg_retval, gpg_output, good, unknown, bad = check_multisig(sums_file_path, signature_file_path, args)
if gpg_retval not in gpg_allowed_codes:
if gpg_retval == 1:
log.critical(f"Bad signature (code: {gpg_retval}).")
else:
log.critical(f"unexpected GPG exit code ({gpg_retval})")
log.error(f"gpg output:\n{indent(gpg_output)}")
return (ReturnCode.INTEGRITY_FAILURE, [], [], [], [])
# Decide which keys we trust, though not "trust" in the GPG sense, but rather
# which pubkeys convince us that this sums file is legitimate. In other words,
# which pubkeys within the Bitcoin community do we trust for the purposes of
# binary verification?
trusted_keys = set()
if args.trusted_keys:
trusted_keys |= set(args.trusted_keys.split(','))
# Tally signatures and make sure we have enough goods to fulfill
# our threshold.
good_trusted = [sig for sig in good if sig.trusted or sig.key in trusted_keys]
good_untrusted = [sig for sig in good if sig not in good_trusted]
num_trusted = len(good_trusted) + len(good_untrusted)
log.info(f"got {num_trusted} good signatures")
if num_trusted < min_good_sigs:
log.info("Maybe you need to import "
f"(`gpg --keyserver {args.keyserver} --recv-keys <key-id>`) "
"some of the following keys: ")
log.info('')
for sig in unknown:
log.info(f" {sig.key} ({sig.name})")
log.info('')
log.error(
"not enough trusted sigs to meet threshold "
f"({num_trusted} vs. {min_good_sigs})")
return (ReturnCode.NOT_ENOUGH_GOOD_SIGS, [], [], [], [])
for sig in good_trusted:
log.info(f"GOOD SIGNATURE: {sig}")
for sig in good_untrusted:
log.info(f"GOOD SIGNATURE (untrusted): {sig}")
for sig in [sig for sig in good if sig.status == 'expired']:
log.warning(f"key {sig.key} for {sig.name} is expired")
for sig in bad:
log.warning(f"BAD SIGNATURE: {sig}")
for sig in unknown:
log.warning(f"UNKNOWN SIGNATURE: {sig}")
return (ReturnCode.SUCCESS, good_trusted, good_untrusted, unknown, bad)
def parse_sums_file(sums_file_path: str, filename_filter: list[str]) -> list[list[str]]:
# extract hashes/filenames of binaries to verify from hash file;
# each line has the following format: "<hash> <binary_filename>"
with open(sums_file_path, 'r', encoding='utf8') as hash_file:
return [line.split()[:2] for line in hash_file if len(filename_filter) == 0 or any(f in line for f in filename_filter)]
def verify_binary_hashes(hashes_to_verify: list[list[str]]) -> tuple[ReturnCode, dict[str, str]]:
offending_files = []
files_to_hashes = {}
for hash_expected, binary_filename in hashes_to_verify:
with open(binary_filename, 'rb') as binary_file:
hash_calculated = sha256(binary_file.read()).hexdigest()
if hash_calculated != hash_expected:
offending_files.append(binary_filename)
else:
files_to_hashes[binary_filename] = hash_calculated
if offending_files:
joined_files = '\n'.join(offending_files)
log.critical(
"Hashes don't match.\n"
f"Offending files:\n{joined_files}")
return (ReturnCode.INTEGRITY_FAILURE, files_to_hashes)
return (ReturnCode.SUCCESS, files_to_hashes)
def verify_published_handler(args: argparse.Namespace) -> ReturnCode:
WORKINGDIR = Path(tempfile.gettempdir()) / f"bitcoin_verify_binaries.{args.version}"
def cleanup():
log.info("cleaning up files")
os.chdir(Path.home())
shutil.rmtree(WORKINGDIR)
# determine remote dir dependent on provided version string
try:
version_base, version_rc, os_filter = parse_version_string(args.version)
version_tuple = [int(i) for i in version_base.split('.')]
except Exception as e:
log.debug(e)
log.error(f"unable to parse version; expected format is {VERSION_FORMAT}")
log.error(f" e.g. {VERSION_EXAMPLE}")
return ReturnCode.BAD_VERSION
remote_dir = f"/bin/{VERSIONPREFIX}{version_base}/"
if version_rc:
remote_dir += f"test.{version_rc}/"
remote_sigs_path = remote_dir + SIGNATUREFILENAME
remote_sums_path = remote_dir + SUMS_FILENAME
# create working directory
os.makedirs(WORKINGDIR, exist_ok=True)
os.chdir(WORKINGDIR)
hosts = [HOST1, HOST2]
got_sig_status = get_files_from_hosts_and_compare(
hosts, remote_sigs_path, SIGNATUREFILENAME, args.require_all_hosts)
if got_sig_status != ReturnCode.SUCCESS:
return got_sig_status
# Multi-sig verification is available after 22.0.
if version_tuple[0] < 22:
log.error("Version too old - single sig not supported. Use a previous "
"version of this script from the repo.")
return ReturnCode.BAD_VERSION
got_sums_status = get_files_from_hosts_and_compare(
hosts, remote_sums_path, SUMS_FILENAME, args.require_all_hosts)
if got_sums_status != ReturnCode.SUCCESS:
return got_sums_status
# Verify the signature on the SHA256SUMS file
sigs_status, good_trusted, good_untrusted, unknown, bad = verify_shasums_signature(SIGNATUREFILENAME, SUMS_FILENAME, args)
if sigs_status != ReturnCode.SUCCESS:
if sigs_status == ReturnCode.INTEGRITY_FAILURE:
cleanup()
return sigs_status
# Extract hashes and filenames
hashes_to_verify = parse_sums_file(SUMS_FILENAME, [os_filter])
if not hashes_to_verify:
log.error("no files matched the platform specified")
return ReturnCode.NO_BINARIES_MATCH
# remove binaries that are known not to be hosted by bitcoincore.org
fragments_to_remove = ['-unsigned', '-debug', '-codesignatures']
for fragment in fragments_to_remove:
nobinaries = [i for i in hashes_to_verify if fragment in i[1]]
if nobinaries:
remove_str = ', '.join(i[1] for i in nobinaries)
log.info(
f"removing *{fragment} binaries ({remove_str}) from verification "
f"since {HOST1} does not host *{fragment} binaries")
hashes_to_verify = [i for i in hashes_to_verify if fragment not in i[1]]
# download binaries
for _, binary_filename in hashes_to_verify:
log.info(f"downloading {binary_filename} to {WORKINGDIR}")
success, output = download_with_wget(
HOST1 + remote_dir + binary_filename, binary_filename)
if not success:
log.error(
f"failed to download {binary_filename}\n"
f"wget output:\n{indent(output)}")
return ReturnCode.BINARY_DOWNLOAD_FAILED
# verify hashes
hashes_status, files_to_hashes = verify_binary_hashes(hashes_to_verify)
if hashes_status != ReturnCode.SUCCESS:
return hashes_status
if args.cleanup:
cleanup()
else:
log.info(f"did not clean up {WORKINGDIR}")
if args.json:
output = {
'good_trusted_sigs': [str(s) for s in good_trusted],
'good_untrusted_sigs': [str(s) for s in good_untrusted],
'unknown_sigs': [str(s) for s in unknown],
'bad_sigs': [str(s) for s in bad],
'verified_binaries': files_to_hashes,
}
print(json.dumps(output, indent=2))
else:
for filename in files_to_hashes:
print(f"VERIFIED: {filename}")
return ReturnCode.SUCCESS
def verify_binaries_handler(args: argparse.Namespace) -> ReturnCode:
binary_to_basename = {}
for file in args.binary:
binary_to_basename[PurePath(file).name] = file
sums_sig_path = None
if args.sums_sig_file:
sums_sig_path = Path(args.sums_sig_file)
else:
log.info(f"No signature file specified, assuming it is {args.sums_file}.asc")
sums_sig_path = Path(args.sums_file).with_suffix(".asc")
# Verify the signature on the SHA256SUMS file
sigs_status, good_trusted, good_untrusted, unknown, bad = verify_shasums_signature(str(sums_sig_path), args.sums_file, args)
if sigs_status != ReturnCode.SUCCESS:
return sigs_status
# Extract hashes and filenames
hashes_to_verify = parse_sums_file(args.sums_file, [k for k, n in binary_to_basename.items()])
if not hashes_to_verify:
log.error(f"No files in {args.sums_file} match the specified binaries")
return ReturnCode.NO_BINARIES_MATCH
# Make sure all files are accounted for
sums_file_path = Path(args.sums_file)
missing_files = []
files_to_hash = []
if len(binary_to_basename) > 0:
for file_hash, file in hashes_to_verify:
files_to_hash.append([file_hash, binary_to_basename[file]])
del binary_to_basename[file]
if len(binary_to_basename) > 0:
log.error(f"Not all specified binaries are in {args.sums_file}")
return ReturnCode.NO_BINARIES_MATCH
else:
log.info(f"No binaries specified, assuming all files specified in {args.sums_file} are located relatively")
for file_hash, file in hashes_to_verify:
file_path = Path(sums_file_path.parent.joinpath(file))
if file_path.exists():
files_to_hash.append([file_hash, str(file_path)])
else:
missing_files.append(file)
# verify hashes
hashes_status, files_to_hashes = verify_binary_hashes(files_to_hash)
if hashes_status != ReturnCode.SUCCESS:
return hashes_status
if args.json:
output = {
'good_trusted_sigs': [str(s) for s in good_trusted],
'good_untrusted_sigs': [str(s) for s in good_untrusted],
'unknown_sigs': [str(s) for s in unknown],
'bad_sigs': [str(s) for s in bad],
'verified_binaries': files_to_hashes,
"missing_binaries": missing_files,
}
print(json.dumps(output, indent=2))
else:
for filename in files_to_hashes:
print(f"VERIFIED: {filename}")
for filename in missing_files:
print(f"MISSING: {filename}")
return ReturnCode.SUCCESS
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'-v', '--verbose', action='store_true',
default=bool_from_env('BINVERIFY_VERBOSE'),
)
parser.add_argument(
'-q', '--quiet', action='store_true',
default=bool_from_env('BINVERIFY_QUIET'),
)
parser.add_argument(
'--import-keys', action='store_true',
default=bool_from_env('BINVERIFY_IMPORTKEYS'),
help='if specified, ask to import each unknown builder key'
)
parser.add_argument(
'--min-good-sigs', type=int, action='store', nargs='?',
default=int(os.environ.get('BINVERIFY_MIN_GOOD_SIGS', 3)),
help=(
'The minimum number of good signatures to require successful termination.'),
)
parser.add_argument(
'--keyserver', action='store', nargs='?',
default=os.environ.get('BINVERIFY_KEYSERVER', 'hkps://keys.openpgp.org'),
help='which keyserver to use',
)
parser.add_argument(
'--trusted-keys', action='store', nargs='?',
default=os.environ.get('BINVERIFY_TRUSTED_KEYS', ''),
help='A list of trusted signer GPG keys, separated by commas. Not "trusted keys" in the GPG sense.',
)
parser.add_argument(
'--json', action='store_true',
default=bool_from_env('BINVERIFY_JSON'),
help='If set, output the result as JSON',
)
subparsers = parser.add_subparsers(title="Commands", required=True, dest="command")
pub_parser = subparsers.add_parser("pub", help="Verify a published release.")
pub_parser.set_defaults(func=verify_published_handler)
pub_parser.add_argument(
'version', type=str, help=(
f'version of the bitcoin release to download; of the format '
f'{VERSION_FORMAT}. Example: {VERSION_EXAMPLE}')
)
pub_parser.add_argument(
'--cleanup', action='store_true',
default=bool_from_env('BINVERIFY_CLEANUP'),
help='if specified, clean up files afterwards'
)
pub_parser.add_argument(
'--require-all-hosts', action='store_true',
default=bool_from_env('BINVERIFY_REQUIRE_ALL_HOSTS'),
help=(
f'If set, require all hosts ({HOST1}, {HOST2}) to provide signatures. '
'(Sometimes bitcoin.org lags behind bitcoincore.org.)')
)
bin_parser = subparsers.add_parser("bin", help="Verify local binaries.")
bin_parser.set_defaults(func=verify_binaries_handler)
bin_parser.add_argument("--sums-sig-file", "-s", help="Path to the SHA256SUMS.asc file to verify")
bin_parser.add_argument("sums_file", help="Path to the SHA256SUMS file to verify")
bin_parser.add_argument(
"binary", nargs="*",
help="Path to a binary distribution file to verify. Can be specified multiple times for multiple files to verify."
)
args = parser.parse_args()
if args.quiet:
log.setLevel(logging.WARNING)
return args.func(args)
if __name__ == '__main__':
sys.exit(main())
| 0 |
bitcoin/contrib | bitcoin/contrib/zmq/zmq_sub.py | #!/usr/bin/env python3
# Copyright (c) 2014-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Bitcoin should be started with the command line arguments:
bitcoind -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332 \
-zmqpubsequence=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if (sys.version_info.major, sys.version_info.minor) < (3, 5):
print("This example only works with Python 3.5 and greater")
sys.exit(1)
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = asyncio.get_event_loop()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "sequence")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
async def handle(self) :
topic, body, seq = await self.zmqSubSocket.recv_multipart()
sequence = "Unknown"
if len(seq) == 4:
sequence = str(struct.unpack('<I', seq)[-1])
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(body.hex())
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(body.hex())
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(body[:80].hex())
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(body.hex())
elif topic == b"sequence":
hash = body[:32].hex()
label = chr(body[32])
mempool_sequence = None if len(body) != 32+1+8 else struct.unpack("<Q", body[32+1:])[0]
print('- SEQUENCE ('+sequence+') -')
print(hash, label, mempool_sequence)
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
daemon = ZMQHandler()
daemon.start()
| 0 |
bitcoin | bitcoin/doc/tor.md | # TOR SUPPORT IN BITCOIN
It is possible to run Bitcoin Core as a Tor onion service, and connect to such services.
The following directions assume you have a Tor proxy running on port 9050. Many distributions default to having a SOCKS proxy listening on port 9050, but others may not. In particular, the Tor Browser Bundle defaults to listening on port 9150.
## Compatibility
- Starting with version 22.0, Bitcoin Core only supports Tor version 3 hidden
services (Tor v3). Tor v2 addresses are ignored by Bitcoin Core and neither
relayed nor stored.
- Tor removed v2 support beginning with version 0.4.6.
## How to see information about your Tor configuration via Bitcoin Core
There are several ways to see your local onion address in Bitcoin Core:
- in the "Local addresses" output of CLI `-netinfo`
- in the "localaddresses" output of RPC `getnetworkinfo`
- in the debug log (grep for "AddLocal"; the Tor address ends in `.onion`)
You may set the `-debug=tor` config logging option to have additional
information in the debug log about your Tor configuration.
CLI `-addrinfo` returns the number of addresses known to your node per
network. This can be useful to see how many onion peers your node knows,
e.g. for `-onlynet=onion`.
You can use the `getnodeaddresses` RPC to fetch a number of onion peers known to your node; run `bitcoin-cli help getnodeaddresses` for details.
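For example (output will vary with your node's state; the `onion` network
filter requires Bitcoin Core 22.0 or later):
```
./bitcoin-cli -addrinfo
./bitcoin-cli getnodeaddresses 5 onion
```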
## 1. Run Bitcoin Core behind a Tor proxy
The first step is running Bitcoin Core behind a Tor proxy. This will already anonymize all
outgoing connections, but more is possible.
-proxy=ip:port Set the proxy server. If SOCKS5 is selected (default), this proxy
server will be used to try to reach .onion addresses as well.
You need to use -noonion or -onion=0 to explicitly disable
outbound access to onion services.
-onion=ip:port Set the proxy server to use for Tor onion services. You do not
need to set this if it's the same as -proxy. You can use -onion=0
to explicitly disable access to onion services.
------------------------------------------------------------------
Note: Only the -proxy option sets the proxy for DNS requests;
with -onion they will not route over Tor, so use -proxy if you
have privacy concerns.
------------------------------------------------------------------
-listen When using -proxy, listening is disabled by default. If you want
to manually configure an onion service (see section 3), you'll
need to enable it explicitly.
-connect=X When behind a Tor proxy, you can specify .onion addresses instead
-addnode=X of IP addresses or hostnames in these parameters. It requires
-seednode=X SOCKS5. In Tor mode, such addresses can also be exchanged with
other P2P nodes.
-onlynet=onion Make automatic outbound connections only to .onion addresses.
Inbound and manual connections are not affected by this option.
It can be specified multiple times to allow multiple networks,
e.g. onlynet=onion, onlynet=i2p, onlynet=cjdns.
In a typical situation, this suffices to run behind a Tor proxy:
./bitcoind -proxy=127.0.0.1:9050
## 2. Automatically create a Bitcoin Core onion service
Bitcoin Core makes use of Tor's control socket API to create and destroy
ephemeral onion services programmatically. This means that if Tor is running and
proper authentication has been configured, Bitcoin Core automatically creates an
onion service to listen on. The goal is to increase the number of available
onion nodes.
This feature is enabled by default if Bitcoin Core is listening (`-listen`) and
it requires a Tor connection to work. It can be explicitly disabled with
`-listenonion=0`. If it is not disabled, it can be configured using the
`-torcontrol` and `-torpassword` settings.
To see verbose Tor information in the bitcoind debug log, pass `-debug=tor`.
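For example, assuming the control port authentication described below is
already set up, a typical invocation is:
```
./bitcoind -proxy=127.0.0.1:9050 -listen -debug=tor
```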
### Control Port
You may need to set up the Tor Control Port. On Linux distributions there may be
some or all of the following settings in `/etc/tor/torrc`, generally commented
out by default (if not, add them):
```
ControlPort 9051
CookieAuthentication 1
CookieAuthFileGroupReadable 1
DataDirectoryGroupReadable 1
```
Add or uncomment those, save, and restart Tor (usually `systemctl restart tor`
or `sudo systemctl restart tor` on most systemd-based systems, including recent
Debian and Ubuntu, or just restart the computer).
### Authentication
Connecting to Tor's control socket API requires one of two authentication
methods to be configured: cookie authentication or bitcoind's `-torpassword`
configuration option.
#### Cookie authentication
For cookie authentication, the user running bitcoind must have read access to
the `CookieAuthFile` specified in the Tor configuration. In some cases this is
preconfigured and the creation of an onion service is automatic. Don't forget to
use the `-debug=tor` bitcoind configuration option to enable Tor debug logging.
If a permissions problem is seen in the debug log, e.g. `tor: Authentication
cookie /run/tor/control.authcookie could not be opened (check permissions)`, it
can be resolved by adding both the user running Tor and the user running
bitcoind to the same Tor group and setting permissions appropriately.
On Debian-derived systems, the Tor group will likely be `debian-tor` and one way
to verify could be to list the groups and grep for a "tor" group name:
```
getent group | cut -d: -f1 | grep -i tor
```
You can also check the group of the cookie file. On most Linux systems, the Tor
auth cookie will usually be `/run/tor/control.authcookie`:
```
TORGROUP=$(stat -c '%G' /run/tor/control.authcookie)
```
Once you have determined the `${TORGROUP}` and selected the `${USER}` that will
run bitcoind, run this as root:
```
usermod -a -G ${TORGROUP} ${USER}
```
Then restart the computer (or log out) and log in as the `${USER}` that will run
bitcoind.
#### `torpassword` authentication
For the `-torpassword=password` option, the password is the clear text form that
was used when generating the hashed password for the `HashedControlPassword`
option in the Tor configuration file.
The hashed password can be obtained with the command `tor --hash-password
password` (refer to the [Tor Dev
Manual](https://2019.www.torproject.org/docs/tor-manual.html.en) for more
details).
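For example (a sketch; `mypassphrase` is a placeholder, and the full hash
output should be copied verbatim):
```
tor --hash-password "mypassphrase"
```
Put the resulting `16:...` hash in the `HashedControlPassword` line of the Tor
configuration file, restart Tor, and start bitcoind with
`-torpassword=mypassphrase`.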
## 3. Manually create a Bitcoin Core onion service
You can also manually configure your node to be reachable from the Tor network.
Add these lines to your `/etc/tor/torrc` (or equivalent config file):
HiddenServiceDir /var/lib/tor/bitcoin-service/
HiddenServicePort 8333 127.0.0.1:8334
The directory can be different, of course, but the virtual port number should match
your bitcoind's P2P listen port (8333 by default), and the target address and port
should match the binding address and port for inbound Tor connections (127.0.0.1:8334 by default).
-externalip=X You can tell bitcoin about its publicly reachable addresses using
this option, and this can be an onion address. Given the above
configuration, you can find your onion address in
/var/lib/tor/bitcoin-service/hostname. For connections
coming from unroutable addresses (such as 127.0.0.1, where the
Tor proxy typically runs), onion addresses are given
preference for your node to advertise itself with.
You can set multiple local addresses with -externalip. The
one that will be rumoured to a particular peer is the most
compatible one, also chosen using heuristics, e.g. the address
with the most incoming connections.
-listen You'll need to enable listening for incoming connections, as this
is off by default behind a proxy.
-discover When -externalip is specified, no attempt is made to discover local
IPv4 or IPv6 addresses. If you want to run a dual stack, reachable
from both Tor and IPv4 (or IPv6), you'll need to either pass your
other addresses using -externalip, or explicitly enable -discover.
Note that both addresses of a dual-stack system may be easily
linkable using traffic analysis.
In a typical situation, where you're only reachable via Tor, this should suffice:
./bitcoind -proxy=127.0.0.1:9050 -externalip=7zvj7a2imdgkdbg4f2dryd5rgtrn7upivr5eeij4cicjh65pooxeshid.onion -listen
(obviously, replace the .onion address with your own). Note that you still
listen on all interfaces, so another node that knows your address could
establish a clearnet connection. To mitigate this, additionally bind the address of your Tor proxy:
./bitcoind ... -bind=127.0.0.1
If you don't care too much about hiding your node, and want to be reachable on IPv4
as well, use `-discover` instead:
./bitcoind ... -discover
and open port 8333 on your firewall (or use port mapping, i.e., `-upnp` or `-natpmp`).
If you only want to use Tor to reach .onion addresses, but not use it as a proxy
for normal IPv4/IPv6 communication, use:
./bitcoind -onion=127.0.0.1:9050 -externalip=7zvj7a2imdgkdbg4f2dryd5rgtrn7upivr5eeij4cicjh65pooxeshid.onion -discover
## 4. Privacy recommendations
- Do not add anything but Bitcoin Core ports to the onion service created in section 3.
If you run a web service too, create a new onion service for that.
Otherwise it is trivial to link them, which may reduce privacy. Onion
services created automatically (as in section 2) always have only one port
open.
| 0 |
bitcoin | bitcoin/doc/offline-signing-tutorial.md | # Offline Signing Tutorial
This tutorial will describe how to use two instances of Bitcoin Core, one online and one offline, to greatly increase security by not having private keys reside on a networked device.
Maintaining an air-gap between private keys and any network connections drastically reduces the opportunity for those keys to be exfiltrated from the user.
This workflow uses [Partially Signed Bitcoin Transactions](https://github.com/bitcoin/bitcoin/blob/master/doc/psbt.md) (PSBTs) to transfer the transaction to and from the offline wallet for signing using the private keys.
> [!NOTE]
> While this tutorial demonstrates the process using the `signet` network, you should omit the `-signet` flag in the provided commands when working with `mainnet`.
## Overview
In this tutorial we have two hosts, both running Bitcoin Core v25.0:
* `offline` host which is disconnected from all networks (internet, Tor, wifi, bluetooth etc.) and does not have, or need, a copy of the blockchain.
* `online` host which is a regular online node with a synced blockchain.
We are going to first create an `offline_wallet` on the offline host. We will then create a `watch_only_wallet` on the online host using public key descriptors exported from the `offline_wallet`. Next we will receive some coins into the wallet. In order to spend these coins we'll create an unsigned PSBT using the `watch_only_wallet`, sign the PSBT using the private keys in the `offline_wallet`, and finally broadcast the signed PSBT using the online host.
### Requirements
- [jq](https://jqlang.github.io/jq/) installation - This tutorial uses jq to process certain fields from JSON RPC responses, but this convenience is optional.
### Create and Prepare the `offline_wallet`
1. On the offline machine create a wallet named `offline_wallet` secured by a wallet `passphrase`. This wallet will contain private keys and must remain unconnected to any networks at all times.
```sh
[offline]$ ./src/bitcoin-cli -signet -named createwallet \
wallet_name="offline_wallet" \
passphrase="** enter passphrase **"
{
"name": "offline_wallet"
}
```
> [!NOTE]
> The use of a passphrase is crucial to encrypt the wallet.dat file. This encryption ensures that even if an unauthorized individual gains access to the offline host, they won't be able to access the wallet's contents. Further details about securing your wallet can be found in [Managing the Wallet](https://github.com/bitcoin/bitcoin/blob/master/doc/managing-wallets.md#12-encrypting-the-wallet)
2. Export the public key-only descriptors from the offline host to a JSON file named `descriptors.json`. We use `jq` here to extract the `.descriptors` field from the full RPC response.
```sh
[offline]$ ./src/bitcoin-cli -signet -rpcwallet="offline_wallet" listdescriptors \
| jq -r '.descriptors' \
>> /path/to/descriptors.json
```
> [!NOTE]
> The `descriptors.json` file will be transferred to the online machine (e.g. using a USB flash drive) where it can be imported to create a related watch-only wallet.
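Optionally, before transferring the file, you can sanity-check that it contains
only public descriptors and no private key material (a quick sketch; a count of
`0` is the expected result, since `listdescriptors` exports public data unless
explicitly asked otherwise):

```sh
[offline]$ grep -c "xprv" /path/to/descriptors.json
0
```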
### Create the online `watch_only_wallet`
1. On the online machine create a blank watch-only wallet which has private keys disabled and is named `watch_only_wallet`. This is achieved by using the `createwallet` options: `disable_private_keys=true, blank=true`.
The `watch_only_wallet` wallet will be used to track and validate incoming transactions, create unsigned PSBTs when spending coins, and broadcast signed and finalized PSBTs.
> [!NOTE]
> `disable_private_keys` indicates that the wallet should refuse to import private keys, i.e. it will be a dedicated watch-only wallet, while `blank` prevents the automatic creation of any keys or descriptors.
```sh
[online]$ ./src/bitcoin-cli -signet -named createwallet \
wallet_name="watch_only_wallet" \
disable_private_keys=true \
blank=true
{
"name": "watch_only_wallet"
}
```
2. Import the `offline_wallet`'s public key descriptors to the online `watch_only_wallet` using the `descriptors.json` file created on the offline wallet.
```sh
[online]$ ./src/bitcoin-cli -signet -rpcwallet="watch_only_wallet" importdescriptors "$(cat /path/to/descriptors.json)"
[
{
"success": true
},
{
"success": true
},
{
"success": true
},
{
"success": true
},
{
"success": true
},
{
"success": true
},
{
"success": true
},
{
"success": true
}
]
```
> [!NOTE]
> Multiple success values indicate that multiple descriptors, for different address types, have been successfully imported. This allows generating different address types on the `watch_only_wallet`.
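If you want to double-check what was imported, you can list the imported
descriptors on the online host (an optional verification step):

```sh
[online]$ ./src/bitcoin-cli -signet -rpcwallet="watch_only_wallet" listdescriptors | jq -r '.descriptors[].desc'
```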
### Fund the `offline_wallet`
At this point, it's important to understand that both the `offline_wallet` and online `watch_only_wallet` share the same public keys. As a result, they generate the same addresses. Transactions can be created using either wallet, but valid signatures can only be added by the `offline_wallet` as only it has the private keys.
1. Generate an address to receive coins. You can use _either_ the `offline_wallet` or the online `watch_only_wallet` to generate this address, as they will produce the same addresses. For the sake of this guide, we'll use the online `watch_only_wallet` to generate the address.
```sh
[online]$ ./src/bitcoin-cli -signet -rpcwallet="watch_only_wallet" getnewaddress
tb1qtu5qgc6ddhmqm5yqjvhg83qgk2t4ewajg0h6yh
```
2. Visit a faucet like https://signet.bc-2.jp and enter your address from the previous command to receive a small amount of signet coins to this address.
3. Confirm that coins were received using the online `watch_only_wallet`. Note that the transaction may take a few moments before being received on your local node, depending on its connectivity. Just re-run the command periodically until the transaction is received.
```sh
[online]$ ./src/bitcoin-cli -signet -rpcwallet="watch_only_wallet" listunspent
[
{
"txid": "0f3953dfc3eb8e753cd1633151837c5b9953992914ff32b7de08c47f1f29c762",
"vout": 1,
"address": "tb1qtu5qgc6ddhmqm5yqjvhg83qgk2t4ewajg0h6yh",
"label": "",
"scriptPubKey": "00145f2804634d6df60dd080932e83c408b2975cbbb2",
"amount": 0.01000000,
"confirmations": 4,
"spendable": true,
"solvable": true,
"desc": "wpkh([306c734f/84h/1h/0h/0/0]025932ccee7590158f7e08bb36290d135d30a0b045163da896e1cd7645ec4223a9)#xytvyr4a",
"parent_descs": [
"wpkh([306c734f/84h/1h/0h]tpubDCJnY92ib4Zu3qd6wrBXEjG436tQdA2tDiJU2iSJYjkNS1darssPWKaBfojhjUF5vMLBcxbN2r93pmFMz2zyTEZuNx9JDo9rWqoHhATW3Uz/0/*)#7mh08dkg"
],
"safe": true
}
]
```
### Create and Export an Unsigned PSBT
1. Get a destination address for the transaction. In this tutorial we'll be sending funds to the address `tb1q9k5w0nhnhyeh78snpxh0t5t7c3lxdeg3erez32`, but if you don't need the coins for further testing you could send the coins back to the faucet.
2. Create a funded but unsigned PSBT to the destination address with the online `watch_only_wallet` by using `send [{"address":amount},...]` and export the unsigned PSBT to a file `funded_psbt.txt` for easy portability to the `offline_wallet` for signing:
```sh
[online]$ ./src/bitcoin-cli -signet -rpcwallet="watch_only_wallet" send \
'{"tb1q9k5w0nhnhyeh78snpxh0t5t7c3lxdeg3erez32": 0.009}' \
| jq -r '.psbt' \
>> /path/to/funded_psbt.txt
[online]$ cat /path/to/funded_psbt.txt
cHNidP8BAHECAAAAAWLHKR9/xAjetzL/FCmZU5lbfINRMWPRPHWO68PfUzkPAQAAAAD9////AoA4AQAAAAAAFgAULajnzvO5M38eEwmu9dF+xH5m5RGs0g0AAAAAABYAFMaT0f/Wp2DCZzL6dkJ3GhWj4Y9vAAAAAAABAHECAAAAAY+dRPEBrGopkw4ugSzS9npzJDEIrE/bq1XXI0KbYnYrAQAAAAD+////ArKaXgAAAAAAFgAUwEc4LdoxSjbWo/2Ue+HS+QjwfiBAQg8AAAAAABYAFF8oBGNNbfYN0ICTLoPECLKXXLuyYW8CAAEBH0BCDwAAAAAAFgAUXygEY01t9g3QgJMug8QIspdcu7IiBgJZMszudZAVj34IuzYpDRNdMKCwRRY9qJbhzXZF7EIjqRgwbHNPVAAAgAEAAIAAAACAAAAAAAAAAAAAACICA7BlBnyAR4F2UkKuSX9MFhYCsn6j//z9i7lHDm1O0CU0GDBsc09UAACAAQAAgAAAAIABAAAAAAAAAAA=
```
> [!NOTE]
> Not specifying the `inputs` option in the above `send` command is permitted and will cause the wallet to automatically select appropriate inputs for the transaction.
### Decode and Analyze the Unsigned PSBT
Decode and analyze the unsigned PSBT on the `offline_wallet` using the `funded_psbt.txt` file:
```sh
[offline]$ ./src/bitcoin-cli -signet decodepsbt $(cat /path/to/funded_psbt.txt)
{
...
}
[offline]$ ./src/bitcoin-cli -signet analyzepsbt $(cat /path/to/funded_psbt.txt)
{
"inputs": [
{
"has_utxo": true,
"is_final": false,
"next": "signer",
"missing": {
"signatures": [
"5f2804634d6df60dd080932e83c408b2975cbbb2"
]
}
}
],
"estimated_vsize": 141,
"estimated_feerate": 0.00100000,
"fee": 0.00014100,
"next": "signer"
}
```
Notice that the analysis of the PSBT shows that "signatures" are missing and must be provided by the private key corresponding to the public key hash (hash160) `5f2804634d6df60dd080932e83c408b2975cbbb2`.
### Process and Sign the PSBT
1. Unlock the `offline_wallet` with the passphrase:
Use the `walletpassphrase` command to unlock the `offline_wallet` with the passphrase. You should specify the passphrase and a timeout (in seconds) for how long you want the wallet to remain unlocked.
```sh
[offline]$ ./src/bitcoin-cli -signet -rpcwallet="offline_wallet" walletpassphrase "** enter passphrase **" 60
```
2. Process, sign and finalize the PSBT on the `offline_wallet` using the `walletprocesspsbt` command, saving the output to a file `final_psbt.txt`.
```sh
[offline]$ ./src/bitcoin-cli -signet -rpcwallet="offline_wallet" walletprocesspsbt \
$(cat /path/to/funded_psbt.txt) \
| jq -r .hex \
>> /path/to/final_psbt.txt
```
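Once signing is complete, you can optionally re-lock the `offline_wallet`
immediately rather than waiting for the unlock timeout to expire:

```sh
[offline]$ ./src/bitcoin-cli -signet -rpcwallet="offline_wallet" walletlock
```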
### Broadcast the Signed and Finalized PSBT
Broadcast the funded, signed and finalized PSBT `final_psbt.txt` using `sendrawtransaction` with an online node:
```sh
[online]$ ./src/bitcoin-cli -signet sendrawtransaction $(cat /path/to/final_psbt.txt)
c2430a0e46df472b04b0ca887bbcd5c4abf7b2ce2eb71de981444a80e2b96d52
```
### Confirm Wallet Balance
Confirm the updated balance of the offline wallet using the `watch_only_wallet`.
```sh
[online]$ ./src/bitcoin-cli -signet -rpcwallet="watch_only_wallet" getbalances
{
"mine": {
"trusted": 0.00085900,
"untrusted_pending": 0.00000000,
"immature": 0.00000000
},
"lastprocessedblock": {
"hash": "0000003065c0669fff27edb4a71928cb48e5a6cfcdf06f491a83fd86822d18a6",
"height": 159592
}
}
```
You can also show transactions related to the wallet using `listtransactions`:
```sh
[online]$ ./src/bitcoin-cli -signet -rpcwallet="watch_only_wallet" listtransactions
{
...
}
```

---

`bitcoin/doc/bitcoin-conf.md`

# `bitcoin.conf` Configuration File
The configuration file is used by `bitcoind`, `bitcoin-qt` and `bitcoin-cli`.
All command-line options (except for `-?`, `-help`, `-version` and `-conf`) may be specified in a configuration file, and all configuration file options (except for `includeconf`) may also be specified on the command line. Command-line options override values set in the configuration file and configuration file options override values set in the GUI.
Changes to the configuration file while `bitcoind` or `bitcoin-qt` is running only take effect after restarting.
Users should never make any configuration changes which they do not understand. Furthermore, users should always be wary of accepting any configuration changes provided to them by another source (even if they believe that they do understand them).
## Configuration File Format
The configuration file is a plain text file and consists of `option=value` entries, one per line. Leading and trailing whitespace is removed.
In contrast to the command-line usage (an example follows this list):
- an option must be specified without leading `-`;
- a value of the given option is mandatory; e.g., `testnet=1` (for chain selection options), `noconnect=1` (for negated options).
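For example, the command-line invocation `bitcoind -testnet -noconnect`
corresponds to the following configuration file entries:

```
testnet=1
noconnect=1
```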
### Blank lines
Blank lines are allowed and ignored by the parser.
### Comments
A comment starts with a number sign (`#`) and extends to the end of the line. All comments are ignored by the parser.
Comments may appear in two ways (both appear in the snippet below):
- on their own on an otherwise empty line (_preferable_);
- after an `option=value` entry.
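An illustrative snippet showing both comment styles (the option names are real,
the values are only examples):

```
# Limit the memory pool to 300 MB (comment on its own line)
maxmempool=300
rpcport=8332 # comment after an entry
```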
### Network specific options
Network specific options can be:
- placed into sections with headers `[main]` (not `[mainnet]`), `[test]` (not `[testnet]`), `[signet]` or `[regtest]`;
- prefixed with a chain name; e.g., `regtest.maxmempool=100`.
Network specific options take precedence over non-network specific options.
If multiple values for the same option are found with the same precedence, the
first one is generally chosen.
This means that given the following configuration, `regtest.rpcport` is set to `3000`:
```
regtest=1
rpcport=2000
regtest.rpcport=3000
[regtest]
rpcport=4000
```
## Configuration File Path
The configuration file is not automatically created; you can create it using your favorite text editor. By default, the configuration file name is `bitcoin.conf` and it is located in the Bitcoin data directory, but both the Bitcoin data directory and the configuration file path may be changed using the `-datadir` and `-conf` command-line options.
The `includeconf=<file>` option in the `bitcoin.conf` file can be used to include additional configuration files.
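For example, RPC-related settings could be kept in a separate file (a sketch;
`rpc.conf` is an arbitrary name, resolved relative to the data directory):

```
# bitcoin.conf
includeconf=rpc.conf

# rpc.conf
server=1
rpcport=8332
```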
### Default configuration file locations
Operating System | Data Directory | Example Path
-- | -- | --
Windows | `%APPDATA%\Bitcoin\` | `C:\Users\username\AppData\Roaming\Bitcoin\bitcoin.conf`
Linux | `$HOME/.bitcoin/` | `/home/username/.bitcoin/bitcoin.conf`
macOS | `$HOME/Library/Application Support/Bitcoin/` | `/Users/username/Library/Application Support/Bitcoin/bitcoin.conf`
An example configuration file can be generated by [contrib/devtools/gen-bitcoin-conf.sh](../contrib/devtools/gen-bitcoin-conf.sh).
Run this script after compiling to generate an up-to-date configuration file.
The output is placed under `share/examples/bitcoin.conf`.
To use the generated configuration file, copy the example file into your data directory and edit it there, like so:
```
# example copy command for a Linux user
cp share/examples/bitcoin.conf ~/.bitcoin
```

---

`bitcoin/doc/release-notes-empty-template.md`

*The release notes draft is a temporary file that can be added to by anyone. See
[/doc/developer-notes.md#release-notes](/doc/developer-notes.md#release-notes)
for the process.*
*version* Release Notes Draft
===============================
Bitcoin Core version *version* is now available from:
<https://bitcoincore.org/bin/bitcoin-core-*version*/>
This release includes new features, various bug fixes and performance
improvements, as well as updated translations.
Please report bugs using the issue tracker at GitHub:
<https://github.com/bitcoin/bitcoin/issues>
To receive security and update notifications, please subscribe to:
<https://bitcoincore.org/en/list/announcements/join/>
How to Upgrade
==============
If you are running an older version, shut it down. Wait until it has completely
shut down (which might take a few minutes in some cases), then run the
installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on macOS)
or `bitcoind`/`bitcoin-qt` (on Linux).
Upgrading directly from a version of Bitcoin Core that has reached its EOL is
possible, but it might take some time if the data directory needs to be migrated. Old
wallet versions of Bitcoin Core are generally supported.
Compatibility
==============
Bitcoin Core is supported and extensively tested on operating systems
using the Linux Kernel 3.17+, macOS 11.0+, and Windows 7 and newer. Bitcoin
Core should also work on most other Unix-like systems but is not as
frequently tested on them. It is not recommended to use Bitcoin Core on
unsupported systems.
Notable changes
===============
P2P and network changes
-----------------------
Updated RPCs
------------
Changes to wallet related RPCs can be found in the Wallet section below.
New RPCs
--------
Build System
------------
Updated settings
----------------
Changes to GUI or wallet related settings can be found in the GUI or Wallet section below.
New settings
------------
Tools and Utilities
-------------------
Wallet
------
GUI changes
-----------
Low-level changes
=================
RPC
---
Tests
-----
*version* change log
====================
Credits
=======
Thanks to everyone who directly contributed to this release:
As well as to everyone that helped with translations on
[Transifex](https://www.transifex.com/bitcoin/bitcoin/).

---

`bitcoin/doc/descriptors.md`

# Support for Output Descriptors in Bitcoin Core
Since Bitcoin Core v0.17, there is support for Output Descriptors. This is a
simple language which can be used to describe collections of output scripts.
Supporting RPCs are:
- `scantxoutset` takes as input descriptors to scan for, and also reports
specialized descriptors for the matching UTXOs.
- `getdescriptorinfo` analyzes a descriptor, and reports a canonicalized version
with checksum added.
- `deriveaddresses` takes as input a descriptor and computes the corresponding
addresses.
- `listunspent` outputs a specialized descriptor for the reported unspent outputs.
- `getaddressinfo` outputs a descriptor for solvable addresses (since v0.18).
- `importmulti` takes as input descriptors to import into a legacy wallet
(since v0.18).
- `generatetodescriptor` takes as input a descriptor and generates coins to it
(`regtest` only, since v0.19).
- `utxoupdatepsbt` takes as input descriptors to add information to the psbt
(since v0.19).
- `createmultisig` and `addmultisigaddress` return descriptors as well (since v0.20).
- `importdescriptors` takes as input descriptors to import into a descriptor wallet
(since v0.21).
- `listdescriptors` outputs descriptors imported into a descriptor wallet (since v22).
- `scanblocks` takes as input descriptors to scan for in blocks and returns the
relevant blockhashes (since v25).
This document describes the language. For the specifics on usage, see the RPC
documentation for the functions mentioned above.
## Features
Output descriptors currently support:
- Pay-to-pubkey scripts (P2PK), through the `pk` function.
- Pay-to-pubkey-hash scripts (P2PKH), through the `pkh` function.
- Pay-to-witness-pubkey-hash scripts (P2WPKH), through the `wpkh` function.
- Pay-to-script-hash scripts (P2SH), through the `sh` function.
- Pay-to-witness-script-hash scripts (P2WSH), through the `wsh` function.
- Pay-to-taproot outputs (P2TR), through the `tr` function.
- Multisig scripts, through the `multi` function.
- Multisig scripts where the public keys are sorted lexicographically, through the `sortedmulti` function.
- Multisig scripts inside taproot script trees, through the `multi_a` (and `sortedmulti_a`) function.
- Any type of supported address through the `addr` function.
- Raw hex scripts through the `raw` function.
- Public keys (compressed and uncompressed) in hex notation, or BIP32 extended pubkeys with derivation paths.
## Examples
- `pk(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798)` describes a P2PK output with the specified public key.
- `pkh(02c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5)` describes a P2PKH output with the specified public key.
- `wpkh(02f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9)` describes a P2WPKH output with the specified public key.
- `sh(wpkh(03fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556))` describes a P2SH-P2WPKH output with the specified public key.
- `combo(0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798)` describes any P2PK, P2PKH, P2WPKH, or P2SH-P2WPKH output with the specified public key.
- `sh(wsh(pkh(02e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd13)))` describes an (overly complicated) P2SH-P2WSH-P2PKH output with the specified public key.
- `multi(1,022f8bde4d1a07209355b4a7250a5c5128e88b84bddc619ab7cba8d569b240efe4,025cbdf0646e5db4eaa398f365f2ea7a0e3d419b7e0330e39ce92bddedcac4f9bc)` describes a bare *1-of-2* multisig output with keys in the specified order.
- `sh(multi(2,022f01e5e15cca351daff3843fb70f3c2f0a1bdd05e5af888a67784ef3e10a2a01,03acd484e2f0c7f65309ad178a9f559abde09796974c57e714c35f110dfc27ccbe))` describes a P2SH *2-of-2* multisig output with keys in the specified order.
- `sh(sortedmulti(2,03acd484e2f0c7f65309ad178a9f559abde09796974c57e714c35f110dfc27ccbe,022f01e5e15cca351daff3843fb70f3c2f0a1bdd05e5af888a67784ef3e10a2a01))` describes a P2SH *2-of-2* multisig output with keys sorted lexicographically in the resulting redeemScript.
- `wsh(multi(2,03a0434d9e47f3c86235477c7b1ae6ae5d3442d49b1943c2b752a68e2a47e247c7,03774ae7f858a9411e5ef4246b70c65aac5649980be5c17891bbec17895da008cb,03d01115d548e7561b15c38f004d734633687cf4419620095bc5b0f47070afe85a))` describes a P2WSH *2-of-3* multisig output with keys in the specified order.
- `sh(wsh(multi(1,03f28773c2d975288bc7d1d205c3748651b075fbc6610e58cddeeddf8f19405aa8,03499fdf9e895e719cfd64e67f07d38e3226aa7b63678949e6e49b241a60e823e4,02d7924d4f7d43ea965a465ae3095ff41131e5946f3c85f79e44adbcf8e27e080e)))` describes a P2SH-P2WSH *1-of-3* multisig output with keys in the specified order.
- `pk(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8)` describes a P2PK output with the public key of the specified xpub.
- `pkh(xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw/1/2)` describes a P2PKH output with child key *1/2* of the specified xpub.
- `pkh([d34db33f/44'/0'/0']xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/1/*)` describes a set of P2PKH outputs, but additionally specifies that the specified xpub is a child of a master with fingerprint `d34db33f`, and derived using path `44'/0'/0'`.
- `wsh(multi(1,xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/1/0/*,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/0/0/*))` describes a set of *1-of-2* P2WSH multisig outputs where the first multisig key is the *1/0/`i`* child of the first specified xpub and the second multisig key is the *0/0/`i`* child of the second specified xpub, and `i` is any number in a configurable range (`0-1000` by default).
- `wsh(sortedmulti(1,xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/1/0/*,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/0/0/*))` describes a set of *1-of-2* P2WSH multisig outputs where one multisig key is the *1/0/`i`* child of the first specified xpub and the other multisig key is the *0/0/`i`* child of the second specified xpub, and `i` is any number in a configurable range (`0-1000` by default). The order of public keys in the resulting witnessScripts is determined by the lexicographic order of the public keys at that index.
- `tr(c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5,{pk(fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556),pk(e493dbf1c10d80f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd13)})` describes a P2TR output with the `c6...` x-only pubkey as internal key, and two script paths.
- `tr(c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5,sortedmulti_a(2,2f8bde4d1a07209355b4a7250a5c5128e88b84bddc619ab7cba8d569b240efe4,5cbdf0646e5db4eaa398f365f2ea7a0e3d419b7e0330e39ce92bddedcac4f9bc))` describes a P2TR output with the `c6...` x-only pubkey as internal key, and a single `multi_a` script that needs 2 signatures with 2 specified x-only keys, which will be sorted lexicographically.
## Reference
Descriptors consist of several types of expressions. The top level expression is either a `SCRIPT`, or `SCRIPT#CHECKSUM` where `CHECKSUM` is an 8-character alphanumeric descriptor checksum.
`SCRIPT` expressions:
- `sh(SCRIPT)` (top level only): P2SH embed the argument.
- `wsh(SCRIPT)` (top level or inside `sh` only): P2WSH embed the argument.
- `pk(KEY)` (anywhere): P2PK output for the given public key.
- `pkh(KEY)` (not inside `tr`): P2PKH output for the given public key (use `addr` if you only know the pubkey hash).
- `wpkh(KEY)` (top level or inside `sh` only): P2WPKH output for the given compressed pubkey.
- `combo(KEY)` (top level only): an alias for the collection of `pk(KEY)` and `pkh(KEY)`. If the key is compressed, it also includes `wpkh(KEY)` and `sh(wpkh(KEY))`.
- `multi(k,KEY_1,KEY_2,...,KEY_n)` (not inside `tr`): k-of-n multisig script using OP_CHECKMULTISIG.
- `sortedmulti(k,KEY_1,KEY_2,...,KEY_n)` (not inside `tr`): k-of-n multisig script with keys sorted lexicographically in the resulting script.
- `multi_a(k,KEY_1,KEY_2,...,KEY_N)` (only inside `tr`): k-of-n multisig script using OP_CHECKSIG, OP_CHECKSIGADD, and OP_NUMEQUAL.
- `sortedmulti_a(k,KEY_1,KEY_2,...,KEY_N)` (only inside `tr`): similar to `multi_a`, but the (x-only) public keys in it will be sorted lexicographically.
- `tr(KEY)` or `tr(KEY,TREE)` (top level only): P2TR output with the specified key as internal key, and optionally a tree of script paths.
- `addr(ADDR)` (top level only): the script which ADDR expands to.
- `raw(HEX)` (top level only): the script whose hex encoding is HEX.
- `rawtr(KEY)` (top level only): P2TR output with the specified key as output key. NOTE: while it's possible to use this to construct wallets, it has several downsides, like being unable to prove no hidden script path exists. Use at your own risk.
`KEY` expressions:
- Optionally, key origin information, consisting of:
- An open bracket `[`
- Exactly 8 hex characters for the fingerprint of the key where the derivation starts (see BIP32 for details)
- Followed by zero or more `/NUM` or `/NUM'` path elements to indicate unhardened or hardened derivation steps between the fingerprint and the key or xpub/xprv root that follows
- A closing bracket `]`
- Followed by the actual key, which is either:
- Hex encoded public keys (either 66 characters starting with `02` or `03` for a compressed pubkey, or 130 characters starting with `04` for an uncompressed pubkey).
- Inside `wpkh` and `wsh`, only compressed public keys are permitted.
- Inside `tr` and `rawtr`, x-only pubkeys are also permitted (64 hex characters).
- [WIF](https://en.bitcoin.it/wiki/Wallet_import_format) encoded private keys may be specified instead of the corresponding public key, with the same meaning.
- `xpub` encoded extended public key or `xprv` encoded extended private key (as defined in [BIP 32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)).
- Followed by zero or more `/NUM` unhardened and `/NUM'` hardened BIP32 derivation steps.
- Optionally followed by a single `/*` or `/*'` final step to denote all (direct) unhardened or hardened children.
- The usage of hardened derivation steps requires providing the private key.
(Anywhere a `'` suffix is permitted to denote hardened derivation, the suffix `h` can be used instead.)
`TREE` expressions:
- any `SCRIPT` expression
- An open brace `{`, a `TREE` expression, a comma `,`, a `TREE` expression, and a closing brace `}`
`ADDR` expressions are any type of supported address:
- P2PKH addresses (base58, of the form `1...` for mainnet or `[nm]...` for testnet). Note that P2PKH addresses in descriptors cannot be used for P2PK outputs (use the `pk` function instead).
- P2SH addresses (base58, of the form `3...` for mainnet or `2...` for testnet, defined in [BIP 13](https://github.com/bitcoin/bips/blob/master/bip-0013.mediawiki)).
- Segwit addresses (bech32 and bech32m, of the form `bc1...` for mainnet or `tb1...` for testnet, defined in [BIP 173](https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki) and [BIP 350](https://github.com/bitcoin/bips/blob/master/bip-0350.mediawiki)).
## Explanation
### Single-key scripts
Many single-key constructions are used in practice, generally including
P2PK, P2PKH, P2WPKH, and P2SH-P2WPKH. Many more combinations are
imaginable, though they may not be optimal: P2SH-P2PK, P2SH-P2PKH,
P2WSH-P2PK, P2WSH-P2PKH, P2SH-P2WSH-P2PK, P2SH-P2WSH-P2PKH.
To describe these, we model these as functions. The functions `pk`
(P2PK), `pkh` (P2PKH) and `wpkh` (P2WPKH) take as input a `KEY` expression, and return the
corresponding *scriptPubKey*. The functions `sh` (P2SH) and `wsh` (P2WSH)
take as input a `SCRIPT` expression, and return the script describing P2SH and P2WSH
outputs with the input as embedded script. The names of the functions do
not contain "p2" for brevity.
### Multisig
Several pieces of software use multi-signature (multisig) scripts based
on Bitcoin's OP_CHECKMULTISIG opcode. To support these, we introduce the
`multi(k,key_1,key_2,...,key_n)` and `sortedmulti(k,key_1,key_2,...,key_n)`
functions. They represent a *k-of-n*
multisig policy, where any *k* out of the *n* provided `KEY` expressions must
sign.
Key order is significant for `multi()`. A `multi()` expression describes a multisig script
with keys in the specified order, and in a search for TXOs, it will not match
outputs with multisig scriptPubKeys that have the same keys in a different
order. Also, to prevent a combinatorial explosion of the search space, if more
than one of the `multi()` key arguments is a BIP32 wildcard path ending in `/*`
or `*'`, the `multi()` expression only matches multisig scripts with the `i`th
child key from each wildcard path in lockstep, rather than scripts with any
combination of child keys from each wildcard path.
Key order does not matter for `sortedmulti()`. `sortedmulti()` behaves in the same way
as `multi()` does but the keys are reordered in the resulting script such that they
are lexicographically ordered as described in BIP67.
#### Basic multisig example
For a good example of a basic M-of-N multisig between multiple participants using descriptor
wallets and PSBTs, as well as a signing flow, see [this functional test](/test/functional/wallet_multisig_descriptor_psbt.py).
Disclaimers: It is important to note that this example serves as a quick-start and is kept basic for readability. A downside of the approach
outlined here is that each participant must maintain (and backup) two separate wallets: a signer and the corresponding multisig.
It should also be noted that privacy best-practices are not "by default" here - participants should take care to only use the signer to sign
transactions related to the multisig. Lastly, it is not recommended to use anything other than a Bitcoin Core descriptor wallet to serve as your
signer(s). Other wallets, whether hardware or software, likely impose additional checks and safeguards to prevent users from signing transactions that
could lead to loss of funds, or are deemed security hazards. Conforming to various 3rd-party checks and verifications is not in the scope of this example.
The basic steps are:
1. Every participant generates an xpub. The most straightforward way is to create a new descriptor wallet which we will refer to as
the participant's signer wallet. Avoid reusing this wallet for any purpose other than signing transactions from the
corresponding multisig we are about to create. Hint: extract the wallet's xpubs using `listdescriptors` and pick the one from the
`pkh` descriptor since it's least likely to be accidentally reused (legacy addresses)
2. Create a watch-only descriptor wallet (blank, private keys disabled). Now the multisig is created by importing the two descriptors:
`wsh(sortedmulti(<M>,XPUB1/0/*,XPUB2/0/*,…,XPUBN/0/*))` and `wsh(sortedmulti(<M>,XPUB1/1/*,XPUB2/1/*,…,XPUBN/1/*))`
(one descriptor w/ `0` for receiving addresses and another w/ `1` for change). Every participant does this
3. A receiving address is generated for the multisig. As a check to ensure step 2 was done correctly, every participant
should verify they get the same addresses
4. Funds are sent to the resulting address
5. A sending transaction from the multisig is created using `walletcreatefundedpsbt` (anyone can initiate this). It is simple to do
this in the GUI by going to the `Send` tab in the multisig wallet and creating an unsigned transaction (PSBT)
6. At least `M` participants check the PSBT with their multisig using `decodepsbt` to verify the transaction is OK before signing it.
7. (If OK) the participant signs the PSBT with their signer wallet using `walletprocesspsbt`. It is simple to do this in the GUI by
loading the PSBT from file and signing it
8. The signed PSBTs are collected with `combinepsbt`, finalized with `finalizepsbt`, and then the resulting transaction is broadcast
to the network (see the sketch after this list). Note that any wallet (e.g. one of the signers or the multisig) is capable of doing this.
9. Check that balances are correct after the transaction has been included in a block
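A minimal sketch of step 8, assuming *M*=2 and that the hypothetical shell
variables `$PSBT1` and `$PSBT2` hold the two signers' base64 PSBT outputs:

```sh
# Combine the two individually signed PSBTs into one.
COMBINED=$(bitcoin-cli combinepsbt "[\"$PSBT1\", \"$PSBT2\"]")
# Finalize it and extract the network-serialized transaction hex.
HEX=$(bitcoin-cli finalizepsbt "$COMBINED" | jq -r '.hex')
# Broadcast the transaction.
bitcoin-cli sendrawtransaction "$HEX"
```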
You may prefer a daisy chained signing flow where each participant signs the PSBT one after another until
the PSBT has been signed `M` times and is "complete." For the most part, the steps above remain the same, except (6, 7)
change slightly from signing the original PSBT in parallel to signing it in series. `combinepsbt` is not necessary with
this signing flow and the last (`M`th) signer can just broadcast the PSBT after signing. Note that a parallel signing flow may be
preferable in cases where there are more signers. This signing flow is also included in the test / Python example.
[The test](/test/functional/wallet_multisig_descriptor_psbt.py) is meant to be documentation as much as it is a functional test, so
it is kept as simple and readable as possible.
### BIP32 derived keys and chains
Most modern wallet software and hardware uses keys that are derived using
BIP32 ("HD keys"). We support these directly by permitting strings
consisting of an extended public key (commonly referred to as an *xpub*)
plus derivation path anywhere a public key is expected. The derivation
path consists of a sequence of 0 or more integers (in the range
*0..2<sup>31</sup>-1*) each optionally followed by `'` or `h`, and
separated by `/` characters. The string may optionally end with the
literal `/*` or `/*'` (or `/*h`) to refer to all unhardened or hardened
child keys in a configurable range (by default `0-1000`, inclusive).
Whenever a public key is described using a hardened derivation step, the
script cannot be computed without access to the corresponding private
key.
### Key origin identification
In order to describe scripts whose signing keys reside on another device,
it may be necessary to identify the master key and derivation path an
xpub was derived with.
For example, when following BIP44, it would be useful to describe a
change chain directly as `xpub.../44'/0'/0'/1/*` where `xpub...`
corresponds with the master key `m`. Unfortunately, since there are
hardened derivation steps that follow the xpub, this descriptor does not
let you compute scripts without access to the corresponding private keys.
Instead, it should be written as `xpub.../1/*`, where xpub corresponds to
`m/44'/0'/0'`.
When interacting with a hardware device, it may be necessary to include
the entire path from the master down. [BIP174](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki) standardizes this by
providing the master key *fingerprint* (first 32 bits of the Hash160 of
the master pubkey), plus all derivation steps. To support constructing
these, we permit providing this key origin information inside the
descriptor language, even though it does not affect the actual
scriptPubKeys it refers to.
Every public key can be prefixed by an 8-character hexadecimal
fingerprint plus optional derivation steps (hardened and unhardened)
surrounded by brackets, identifying the master and derivation path the key or xpub
that follows was derived with.
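For example, combining the pieces above, the BIP44 change chain from the
previous section can be written as `[d34db33f/44'/0'/0']xpub.../1/*`, which
lets software reconstruct the full BIP174 key origin while still deriving
scripts from the xpub alone.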
Note that the fingerprint of the parent only serves as a fast way to detect
parent and child nodes in software, and software must be willing to deal with
collisions.
### Including private keys
Often it is useful to communicate a description of scripts along with the
necessary private keys. For this reason, anywhere a public key or xpub is
supported, a private key in WIF format or xprv may be provided instead.
This is useful when private keys are necessary for hardened derivation
steps, or for dumping wallet descriptors including private key material.
### Compatibility with old wallets
In order to easily represent the sets of scripts currently supported by
existing Bitcoin Core wallets, a convenience function `combo` is
provided, which takes as input a public key, and describes a set of P2PK,
P2PKH, P2WPKH, and P2SH-P2WPKH scripts for that key. In case the key is
uncompressed, the set only includes P2PK and P2PKH scripts.
### Checksums
Descriptors can optionally be suffixed with a checksum to protect against
typos or copy-paste errors.
These checksums consist of 8 alphanumeric characters. As long as errors are
restricted to substituting characters in `0123456789()[],'/*abcdefgh@:$%{}`
for others in that set and changes in letter case, up to 4 errors will always
be detected in descriptors up to 501 characters, and up to 3 errors in longer
ones. For larger numbers of errors, or other types of errors, there is a
roughly 1 in a trillion chance of not detecting the errors.
All RPCs in Bitcoin Core will include the checksum in their output. Only
certain RPCs require checksums on input, including `deriveaddresses` and
`importmulti`. The checksum for a descriptor without one can be computed
using the `getdescriptorinfo` RPC.
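For instance, a checksum can be computed like this (a sketch reusing the
compressed public key from the examples above; the reply's `checksum` field
holds the 8-character value, and `descriptor` holds the canonicalized
descriptor with the checksum appended):

```sh
bitcoin-cli getdescriptorinfo "wpkh(02f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9)"
```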

---

`bitcoin/doc/release-notes-28207.md`

mempool.dat compatibility
========================
The `mempool.dat` file created by -persistmempool or the savemempool RPC will
be written in a new format, which cannot be read by previous software
releases. To allow for a downgrade, a temporary setting `-persistmempoolv1` has
been added to fall back to the legacy format.

---

`bitcoin/doc/build-osx.md`

# macOS Build Guide
**Updated for macOS [11.2](https://www.apple.com/macos/big-sur/)**
This guide describes how to build bitcoind, the command-line utilities, and the GUI on macOS.
## Preparation
The commands in this guide should be executed in a Terminal application.
macOS comes with a built-in Terminal located in:
```
/Applications/Utilities/Terminal.app
```
### 1. Xcode Command Line Tools
The Xcode Command Line Tools are a collection of build tools for macOS.
These tools must be installed in order to build Bitcoin Core from source.
To install, run the following command from your terminal:
``` bash
xcode-select --install
```
Upon running the command, you should see a popup appear.
Click on `Install` to continue the installation process.
### 2. Homebrew Package Manager
Homebrew is a package manager for macOS that allows one to install packages from the command line easily.
While several package managers are available for macOS, this guide will focus on Homebrew as it is the most popular.
Since the examples in this guide which walk through the installation of a package will use Homebrew, it is recommended that you install it to follow along.
Otherwise, you can adapt the commands to your package manager of choice.
To install the Homebrew package manager, see: https://brew.sh
Note: If you run into issues while installing Homebrew or pulling packages, refer to [Homebrew's troubleshooting page](https://docs.brew.sh/Troubleshooting).
### 3. Install Required Dependencies
The first step is to download the required dependencies.
These dependencies represent the packages required to get a barebones installation up and running.
See [dependencies.md](dependencies.md) for a complete overview.
To install, run the following from your terminal:
``` bash
brew install automake libtool boost pkg-config libevent
```
### 4. Clone Bitcoin repository
`git` should already be installed by default on your system.
Now that all the required dependencies are installed, let's clone the Bitcoin Core repository to a directory.
All build scripts and commands will run from this directory.
``` bash
git clone https://github.com/bitcoin/bitcoin.git
```
### 5. Install Optional Dependencies
#### Wallet Dependencies
It is not necessary to build wallet functionality to run `bitcoind` or `bitcoin-qt`.
###### Descriptor Wallet Support
`sqlite` is required to support descriptor wallets.
macOS ships with a usable `sqlite` package, meaning you don't need to
install anything.
###### Legacy Wallet Support
`berkeley-db@4` is only required to support legacy wallets.
Skip if you don't intend to use legacy wallets.
``` bash
brew install berkeley-db@4
```
---
#### GUI Dependencies
###### Qt
Bitcoin Core includes a GUI built with the cross-platform Qt Framework.
To compile the GUI, we need to install `qt@5`.
Skip if you don't intend to use the GUI.
``` bash
brew install qt@5
```
Note: Building with Qt binaries downloaded from the Qt website is not officially supported.
See the notes in [#7714](https://github.com/bitcoin/bitcoin/issues/7714).
###### qrencode
The GUI can encode addresses in a QR Code. To build in QR support for the GUI, install `qrencode`.
Skip if not using the GUI or don't want QR code functionality.
``` bash
brew install qrencode
```
---
#### Port Mapping Dependencies
###### miniupnpc
miniupnpc may be used for UPnP port mapping.
Skip if you do not need this functionality.
``` bash
brew install miniupnpc
```
###### libnatpmp
libnatpmp may be used for NAT-PMP port mapping.
Skip if you do not need this functionality.
``` bash
brew install libnatpmp
```
Note: UPnP and NAT-PMP support will be compiled in and disabled by default.
Check out the [further configuration](#further-configuration) section for more information.
---
#### ZMQ Dependencies
Support for ZMQ notifications requires the following dependency.
Skip if you do not need ZMQ functionality.
``` bash
brew install zeromq
```
ZMQ is automatically compiled in and enabled if the dependency is detected.
Check out the [further configuration](#further-configuration) section for more information.
For more information on ZMQ, see: [zmq.md](zmq.md)
---
#### Test Suite Dependencies
There is an included test suite that is useful for testing code changes when developing.
To run the test suite (recommended), you will need to have Python 3 installed:
``` bash
brew install python
```
---
#### Deploy Dependencies
You can deploy a `.zip` containing the Bitcoin Core application using `make deploy`.
It is required that you have `python` installed.
## Building Bitcoin Core
### 1. Configuration
There are many ways to configure Bitcoin Core, here are a few common examples:
##### Wallet (BDB + SQLite) Support, No GUI:
If `berkeley-db@4` is installed, then legacy wallet support will be built.
If `sqlite` is installed, then descriptor wallet support will also be built.
Additionally, this explicitly disables the GUI.
``` bash
./autogen.sh
./configure --with-gui=no
```
##### Wallet (only SQLite) and GUI Support:
This explicitly enables the GUI and disables legacy wallet support.
If `qt` is not installed, this will throw an error.
If `sqlite` is installed then descriptor wallet functionality will be built.
If `sqlite` is not installed, then wallet functionality will be disabled.
``` bash
./autogen.sh
./configure --without-bdb --with-gui=yes
```
##### No Wallet or GUI
``` bash
./autogen.sh
./configure --without-wallet --with-gui=no
```
##### Further Configuration
You may want to dig deeper into the configuration options to achieve your desired behavior.
Examine the output of the following command for a full list of configuration options:
``` bash
./configure -help
```
### 2. Compile
After configuration, you are ready to compile.
Run the following in your terminal to compile Bitcoin Core:
``` bash
make # use "-j N" here for N parallel jobs
make check # Run tests if Python 3 is available
```
### 3. Deploy (optional)
You can also create a `.zip` containing the `.app` bundle by running the following command:
``` bash
make deploy
```
## Running Bitcoin Core
Bitcoin Core should now be available at `./src/bitcoind`.
If you compiled support for the GUI, it should be available at `./src/qt/bitcoin-qt`.
The first time you run `bitcoind` or `bitcoin-qt`, it will start downloading the blockchain.
This process could take many hours, or even days on slower than average systems.
By default, blockchain and wallet data files will be stored in:
``` bash
/Users/${USER}/Library/Application Support/Bitcoin/
```
Before running, you may create an empty configuration file:
```shell
mkdir -p "/Users/${USER}/Library/Application Support/Bitcoin"
touch "/Users/${USER}/Library/Application Support/Bitcoin/bitcoin.conf"
chmod 600 "/Users/${USER}/Library/Application Support/Bitcoin/bitcoin.conf"
```
You can monitor the download process by looking at the debug.log file:
```shell
tail -f $HOME/Library/Application\ Support/Bitcoin/debug.log
```
## Other commands:
```shell
./src/bitcoind -daemon # Starts the bitcoin daemon.
./src/bitcoin-cli --help # Outputs a list of command-line options.
./src/bitcoin-cli help # Outputs a list of RPC commands when the daemon is running.
./src/qt/bitcoin-qt -server # Starts the bitcoin-qt server mode, allows bitcoin-cli control
```

---

`bitcoin/doc/build-windows.md`

WINDOWS BUILD NOTES
====================
Below are some notes on how to build Bitcoin Core for Windows.
The options known to work for building Bitcoin Core on Windows are:
* On Linux, using the [Mingw-w64](https://www.mingw-w64.org/) cross compiler tool chain.
* On Windows, using [Windows Subsystem for Linux (WSL)](https://learn.microsoft.com/en-us/windows/wsl/about) and Mingw-w64.
* On Windows, using [Microsoft Visual Studio](https://visualstudio.microsoft.com). See [README.md](/build_msvc/README.md).
Other options which may work, but which have not been extensively tested are (please contribute instructions):
* On Windows, using a POSIX compatibility layer application such as [cygwin](https://www.cygwin.com/) or [msys2](https://www.msys2.org/).
Installing Windows Subsystem for Linux
---------------------------------------
Follow the upstream installation instructions, available [here](https://learn.microsoft.com/en-us/windows/wsl/install).
Cross-compilation for Ubuntu and Windows Subsystem for Linux
------------------------------------------------------------
The steps below can be performed on Ubuntu or WSL. The depends system
will also work on other Linux distributions, however the commands for
installing the toolchain will be different.
First, install the general dependencies:
    sudo apt update
    sudo apt upgrade
    sudo apt install build-essential libtool autotools-dev automake pkg-config bsdmainutils curl git
A host toolchain (`build-essential`) is necessary because some dependency
packages need to build host utilities that are used in the build process.
See [dependencies.md](dependencies.md) for a complete overview.
If you want to build the windows installer with `make deploy` you need [NSIS](https://nsis.sourceforge.io/Main_Page):
    sudo apt install nsis
Acquire the source in the usual way:
    git clone https://github.com/bitcoin/bitcoin.git
    cd bitcoin
## Building for 64-bit Windows
The first step is to install the mingw-w64 cross-compilation tool chain:
```sh
sudo apt install g++-mingw-w64-x86-64-posix
```
Once the toolchain is installed the build steps are common:
Note that for WSL the Bitcoin Core source path MUST be somewhere in the default mount file system, for
example /usr/src/bitcoin, AND not under /mnt/d/. If this is not the case the dependency autoconf scripts will fail.
This means you cannot use a directory that is located directly on the host Windows file system to perform the build.
Additional WSL Note: WSL support for [launching Win32 applications](https://learn.microsoft.com/en-us/archive/blogs/wsl/windows-and-ubuntu-interoperability#launching-win32-applications-from-within-wsl)
results in `Autoconf` configure scripts being able to execute Windows Portable Executable files. This can cause
unexpected behaviour during the build, such as Win32 error dialogs for missing libraries. The recommended approach
is to temporarily disable WSL support for Win32 applications.
Build using:
    PATH=$(echo "$PATH" | sed -e 's/:\/mnt.*//g') # strip out problematic Windows %PATH% imported var
    sudo bash -c "echo 0 > /proc/sys/fs/binfmt_misc/status" # Disable WSL support for Win32 applications.
    cd depends
    make HOST=x86_64-w64-mingw32
    cd ..
    ./autogen.sh
    CONFIG_SITE=$PWD/depends/x86_64-w64-mingw32/share/config.site ./configure --prefix=/
    make # use "-j N" for N parallel jobs
    sudo bash -c "echo 1 > /proc/sys/fs/binfmt_misc/status" # Enable WSL support for Win32 applications.
## Depends system
For further documentation on the depends system see [README.md](../depends/README.md) in the depends directory.
Installation
-------------
After building using the Windows subsystem it can be useful to copy the compiled
executables to a directory on the Windows drive in the same directory structure
as they appear in the release `.zip` archive. This can be done in the following
way. This will install to `c:\workspace\bitcoin`, for example:
    make install DESTDIR=/mnt/c/workspace/bitcoin
You can also create an installer using:
    make deploy

---

`bitcoin/doc/zmq.md`

# Block and Transaction Broadcasting with ZeroMQ
[ZeroMQ](https://zeromq.org/) is a lightweight wrapper around TCP
connections, inter-process communication, and shared-memory,
providing various message-oriented semantics such as publish/subscribe,
request/reply, and push/pull.
The Bitcoin Core daemon can be configured to act as a trusted "border
router", implementing the bitcoin wire protocol and relay, making
consensus decisions, maintaining the local blockchain database,
broadcasting locally generated transactions into the network, and
providing a queryable RPC interface to interact on a polled basis for
requesting blockchain related data. However, there exists only a
limited service to notify external software of events like the arrival
of new blocks or transactions.
The ZeroMQ facility implements a notification interface through a set
of specific notifiers. Currently there are notifiers that publish
blocks and transactions. This read-only facility requires only the
connection of a corresponding ZeroMQ subscriber port in receiving
software; it is not authenticated nor is there any two-way protocol
involvement. Therefore, subscribers should validate the received data
since it may be out of date, incomplete or even invalid.
ZeroMQ sockets are self-connecting and self-healing; that is,
connections made between two endpoints will be automatically restored
after an outage, and either end may be freely started or stopped in
any order.
Because ZeroMQ is message oriented, subscribers receive transactions
and blocks all-at-once and do not need to implement any sort of
buffering or reassembly.
## Prerequisites
The ZeroMQ feature in Bitcoin Core requires the ZeroMQ API >= 4.0.0
[libzmq](https://github.com/zeromq/libzmq/releases).
For version information, see [dependencies.md](dependencies.md).
Typically, it is packaged by distributions as something like
*libzmq3-dev*. The C++ wrapper for ZeroMQ is *not* needed.
In order to run the example Python client scripts in the `contrib/zmq/`
directory, one must also install [PyZMQ](https://github.com/zeromq/pyzmq)
(generally with `pip install pyzmq`), though this is not necessary for daemon
operation.
## Enabling
By default, the ZeroMQ feature is automatically compiled in if the
necessary prerequisites are found. To disable, use --disable-zmq
during the *configure* step of building bitcoind:
    $ ./configure --disable-zmq (other options)
To actually enable operation, one must set the appropriate options on
the command line or in the configuration file.
## Usage
Currently, the following notifications are supported:
    -zmqpubhashtx=address
    -zmqpubhashblock=address
    -zmqpubrawblock=address
    -zmqpubrawtx=address
    -zmqpubsequence=address
The socket type is PUB and the address must be a valid ZeroMQ socket
address. The same address can be used in more than one notification.
The same notification can be specified more than once.
The option to set the PUB socket's outbound message high water mark
(SNDHWM) may be set individually for each notification:
    -zmqpubhashtxhwm=n
    -zmqpubhashblockhwm=n
    -zmqpubrawblockhwm=n
    -zmqpubrawtxhwm=n
    -zmqpubsequencehwm=n
The high water mark value must be an integer greater than or equal to 0.
For instance:
    $ bitcoind -zmqpubhashtx=tcp://127.0.0.1:28332 \
               -zmqpubhashtx=tcp://192.168.1.2:28332 \
               -zmqpubhashblock="tcp://[::1]:28333" \
               -zmqpubrawtx=ipc:///tmp/bitcoind.tx.raw \
               -zmqpubhashtxhwm=10000
Each PUB notification has a topic and body, where the topic
corresponds to the notification type. For instance, for the
notification `-zmqpubhashtx` the topic is `hashtx` (no null
terminator). These options can also be provided in bitcoin.conf.
The topics are:
`sequence`: the body is structured as the following based on the type of message:
    <32-byte hash>C :                 Blockhash connected
    <32-byte hash>D :                 Blockhash disconnected
    <32-byte hash>R<8-byte LE uint> : Transactionhash removed from mempool for non-block inclusion reason
    <32-byte hash>A<8-byte LE uint> : Transactionhash added mempool
Where the 8-byte uints correspond to the mempool sequence number.
`rawtx`: Notifies about all transactions, both when they are added to mempool or when a new block arrives. This means a transaction could be published multiple times. First, when it enters the mempool and then again in each block that includes it. The messages are ZMQ multipart messages with three parts. The first part is the topic (`rawtx`), the second part is the serialized transaction, and the last part is a sequence number (representing the message count to detect lost messages).
    | rawtx | <serialized transaction> | <uint32 sequence number in Little Endian>
`hashtx`: Notifies about all transactions, both when they are added to mempool or when a new block arrives. This means a transaction could be published multiple times. First, when it enters the mempool and then again in each block that includes it. The messages are ZMQ multipart messages with three parts. The first part is the topic (`hashtx`), the second part is the 32-byte transaction hash, and the last part is a sequence number (representing the message count to detect lost messages).
    | hashtx | <32-byte transaction hash in Little Endian> | <uint32 sequence number in Little Endian>
`rawblock`: Notifies when the chain tip is updated. When assumeutxo is in use, this notification will not be issued for historical blocks connected to the background validation chainstate. Messages are ZMQ multipart messages with three parts. The first part is the topic (`rawblock`), the second part is the serialized block, and the last part is a sequence number (representing the message count to detect lost messages).
    | rawblock | <serialized block> | <uint32 sequence number in Little Endian>
`hashblock`: Notifies when the chain tip is updated. When assumeutxo is in use, this notification will not be issued for historical blocks connected to the background validation chainstate. Messages are ZMQ multipart messages with three parts. The first part is the topic (`hashblock`), the second part is the 32-byte block hash, and the last part is a sequence number (representing the message count to detect lost messages).
    | hashblock | <32-byte block hash in Little Endian> | <uint32 sequence number in Little Endian>
**_NOTE:_** The 32-byte hashes are in Little Endian and not in the Big Endian format that the RPC interface and block explorers use to display transaction and block hashes.
ZeroMQ endpoint specifiers for TCP (and others) are documented in the
[ZeroMQ API](http://api.zeromq.org/4-0:_start).
On the client side, the ZeroMQ subscriber socket must have the
ZMQ_SUBSCRIBE option set to one (or more) of these prefixes (for
instance, just `hash`); otherwise, no messages will arrive.
Please see [`contrib/zmq/zmq_sub.py`](/contrib/zmq/zmq_sub.py) for a working example.
The ZMQ_PUB socket's ZMQ_TCP_KEEPALIVE option is enabled. This means that
the underlying SO_KEEPALIVE option is enabled when using a TCP transport.
The effective TCP keepalive values are managed through the underlying
operating system configuration and must be configured prior to connection establishment.
For example, when running on GNU/Linux, one might use the following
to lower the keepalive setting to 10 minutes:
    sudo sysctl -w net.ipv4.tcp_keepalive_time=600
Setting the keepalive values appropriately for your operating environment may
improve connectivity in situations where long-lived connections are silently
dropped by network middle boxes.
Also, the socket's ZMQ_IPV6 option is enabled to accept connections from IPv6
hosts as well. If needed, this option has to be set on the client side too.
## Remarks
From the perspective of bitcoind, the ZeroMQ socket is write-only; PUB
sockets don't even have a read function. Thus, there is no state
introduced into bitcoind directly. Furthermore, no information is
broadcast that wasn't already received from the public P2P network.
No authentication or authorization is done on connecting clients; it
is assumed that the ZeroMQ port is exposed only to trusted entities,
using other means such as firewalling.
Note that for `*block` topics, when the block chain tip changes,
a reorganisation may occur and only the new tip will be notified.
It is up to the subscriber to retrieve the chain from the last known
block to the new tip. Also note that no notification will occur if the new tip
was already part of the active chain, as is the case after calling the `invalidateblock` RPC.
In contrast, the `sequence` topic publishes all block connections and
disconnections.
Depending on the transport in use, there are several ways a ZMQ
notification can get lost during transmission. Bitcoind appends an
up-counting sequence number to each notification, which allows listeners
to detect lost notifications.
The `sequence` topic refers specifically to the mempool sequence
number, which is also published along with all mempool events. This
is a different value from the per-message ZMQ sequence number, and it allows
a total ordering of mempool events to be constructed.
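Continuing the subscriber sketch above, detecting lost notifications amounts to checking the trailing per-message sequence number for gaps (again only a sketch):
```
expected = None
while True:
    topic, body, seq = sock.recv_multipart()
    n = int.from_bytes(seq, "little")
    if expected is not None and n != expected:
        print(f"missed {n - expected} notification(s) on {topic.decode()}")
    # Track one counter per topic if subscribed to several topics.
    expected = n + 1
```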
| 0 |
bitcoin | bitcoin/doc/reduce-traffic.md | Reduce Traffic
==============
Some node operators need to deal with bandwidth caps imposed by their ISPs.
By default, Bitcoin Core allows up to 125 connections to different peers, 11 of
which are outbound. You can therefore have at most 114 inbound connections.
Of the 11 outbound peers, there can be 8 full-relay connections, 2
block-relay-only ones and occasionally 1 short-lived feeler or an extra block-relay-only connection.
The default settings can result in relatively significant traffic consumption.
Ways to reduce traffic:
## 1. Use `-maxuploadtarget=<MiB per day>`
A major component of the traffic is caused by serving historic blocks to other nodes
during the initial block download phase (syncing up a new node).
This option can be specified in MiB per day and is turned off by default.
This is *not* a hard limit, only a threshold to minimize the outbound
traffic. When the limit is about to be reached, the uploaded data is cut by no
longer serving historic blocks (blocks older than one week).
Keep in mind that new nodes require other nodes that are willing to serve
historic blocks.
Peers with the `download` permission will never be disconnected, although their traffic counts for
calculating the target.
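To see how the limit is doing at runtime, the `getnettotals` RPC reports an `uploadtarget` object. A minimal sketch over the JSON-RPC interface (the URL and credentials are placeholders; `requests` is a third-party HTTP library):
```
import requests

resp = requests.post("http://127.0.0.1:8332",  # placeholder node URL
                     json={"jsonrpc": "1.0", "id": "traffic",
                           "method": "getnettotals", "params": []},
                     auth=("rpcuser", "rpcpassword"))  # placeholder credentials
up = resp.json()["result"]["uploadtarget"]
print("target:", up["target"], "bytes left in cycle:", up["bytes_left_in_cycle"])
```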
## 2. Disable "listening" (`-listen=0`)
Disabling listening will result in fewer nodes connected (remember the maximum of 11
outbound peers). Fewer nodes will result in less traffic usage as you are relaying
blocks and transactions to fewer nodes.
## 3. Reduce maximum connections (`-maxconnections=<num>`)
Reducing the maximum connected nodes to a minimum could be desirable if traffic
limits are tiny. Keep in mind that bitcoin's trustless model works best if you are
connected to a handful of nodes.
## 4. Turn off transaction relay (`-blocksonly`)
Forwarding transactions to peers increases the P2P traffic. To only sync blocks
with other peers, you can disable transaction relay.
Be aware of the effects of this setting:
- Fee estimation will no longer work.
- It sets the flag `-walletbroadcast` to `0`, but only if it is currently unset.
Doing so disables the automatic broadcasting of transactions from the wallet. Not
relaying others' transactions could hurt your privacy if used while a wallet
is loaded or if you use the node to broadcast transactions.
- If a peer has the forcerelay permission, we will still receive and relay
their transactions.
- It makes block propagation slower because compact block relay can only be
used when transaction relay is enabled.
| 0 |
bitcoin | bitcoin/doc/psbt.md | # PSBT Howto for Bitcoin Core
Since Bitcoin Core 0.17, an RPC interface exists for Partially Signed Bitcoin
Transactions (PSBTs, as specified in
[BIP 174](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki)).
This document describes the overall workflow for producing signed transactions
through the use of PSBT, and the specific RPC commands used in typical
scenarios.
## PSBT in general
PSBT is an interchange format for Bitcoin transactions that are not fully signed
yet, together with relevant metadata to help entities work towards signing it.
It is intended to simplify workflows where multiple parties need to cooperate to
produce a transaction. Examples include hardware wallets, multisig setups, and
[CoinJoin](https://bitcointalk.org/?topic=279249) transactions.
### Overall workflow
Overall, the construction of a fully signed Bitcoin transaction goes through the
following steps:
- A **Creator** proposes a particular transaction to be created. They construct
a PSBT that contains certain inputs and outputs, but no additional metadata.
- For each input, an **Updater** adds information about the UTXOs being spent by
the transaction to the PSBT. They also add information about the scripts and
public keys involved in each of the inputs (and possibly outputs) of the PSBT.
- **Signers** inspect the transaction and its metadata to decide whether they
agree with the transaction. They can use amount information from the UTXOs
to assess the values and fees involved. If they agree, they produce a
partial signature for the inputs for which they have relevant key(s).
- A **Finalizer** is run for each input to convert the partial signatures and
possibly script information into a final `scriptSig` and/or `scriptWitness`.
- An **Extractor** produces a valid Bitcoin transaction (in network format)
from a PSBT for which all inputs are finalized.
Generally, each of the above (excluding Creator and Extractor) will simply
add more and more data to a particular PSBT, until all inputs are fully signed.
In a naive workflow, they all have to operate sequentially, passing the PSBT
from one to the next, until the Extractor can convert it to a real transaction.
In order to permit parallel operation, **Combiners** can be employed which merge
metadata from different PSBTs for the same unsigned transaction.
The names above in bold are the names of the roles defined in BIP174. They're
useful in understanding the underlying steps, but in practice, software and
hardware implementations will typically implement multiple roles simultaneously.
## PSBT in Bitcoin Core
### RPCs
- **`converttopsbt` (Creator)** is a utility RPC that converts an
unsigned raw transaction to PSBT format. It ignores existing signatures.
- **`createpsbt` (Creator)** is a utility RPC that takes a list of inputs and
outputs and converts them to a PSBT with no additional information. It is
equivalent to calling `createrawtransaction` followed by `converttopsbt`.
- **`walletcreatefundedpsbt` (Creator, Updater)** is a wallet RPC that creates a
PSBT with the specified inputs and outputs, adds additional inputs and change
to it to balance it out, and adds relevant metadata. In particular, for inputs
that the wallet knows about (counting towards its normal or watch-only
balance), UTXO information will be added. For outputs and inputs with UTXO
information present, any key and script information the wallet knows about
will be added. It is equivalent to running `createrawtransaction`, followed by
`fundrawtransaction`, and `converttopsbt`.
- **`walletprocesspsbt` (Updater, Signer, Finalizer)** is a wallet RPC that takes as
input a PSBT, adds UTXO, key, and script data to inputs and outputs that miss
it, and optionally signs inputs. Where possible it also finalizes the partial
signatures.
- **`utxoupdatepsbt` (Updater)** is a node RPC that takes a PSBT and updates it
to include information available from the UTXO set (works only for SegWit
inputs).
- **`finalizepsbt` (Finalizer, Extractor)** is a utility RPC that finalizes any
partial signatures, and if all inputs are finalized, converts the result to a
fully signed transaction which can be broadcast with `sendrawtransaction`.
- **`combinepsbt` (Combiner)** is a utility RPC that implements a Combiner. It
can be used at any point in the workflow to merge information added to
different versions of the same PSBT. In particular it is useful to combine the
output of multiple Updaters or Signers.
- **`joinpsbts`** (Creator) is a utility RPC that joins multiple PSBTs together,
concatenating the inputs and outputs. This can be used to construct CoinJoin
transactions.
- **`decodepsbt`** is a diagnostic utility RPC which will show all information in
a PSBT in human-readable form, as well as compute its eventual fee if known.
- **`analyzepsbt`** is a utility RPC that examines a PSBT and reports the
current status of its inputs, the next step in the workflow if known, and if
possible, computes the fee of the resulting transaction and estimates the
final weight and feerate.
### Workflows
#### Multisig with multiple Bitcoin Core instances
For a quick start see [Basic M-of-N multisig example using descriptor wallets and PSBTs](./descriptors.md#basic-multisig-example).
If you are using legacy wallets, feel free to continue with the example provided here.
Alice, Bob, and Carol want to create a 2-of-3 multisig address. They're all using
Bitcoin Core. We assume their wallets only contain the multisig funds. In case
they also have a personal wallet, this can be accomplished through the
multiwallet feature - possibly resulting in a need to add `-rpcwallet=name` to
the command line in case `bitcoin-cli` is used.
Setup:
- All three call `getnewaddress` to create a new address; call these addresses
*Aalice*, *Abob*, and *Acarol*.
- All three call `getaddressinfo "X"`, with *X* their respective address, and
remember the corresponding public keys. Call these public keys *Kalice*,
*Kbob*, and *Kcarol*.
- All three now run `addmultisigaddress 2 ["Kalice","Kbob","Kcarol"]` to teach
their wallet about the multisig script. Call the address produced by this
  command *Amulti*. They may each need to explicitly specify the same
  addresstype option, to avoid constructing different versions due to
  differences in configuration.
- They also run `importaddress "Amulti" "" false` to make their wallets treat
payments to *Amulti* as contributing to the watch-only balance.
- Others can verify the produced address by running
`createmultisig 2 ["Kalice","Kbob","Kcarol"]`, and expecting *Amulti* as
output. Again, it may be necessary to explicitly specify the addresstype
in order to get a result that matches. This command won't enable them to
initiate transactions later, however.
- They can now give out *Amulti* as an address others can pay to.
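For readers scripting this setup over Bitcoin Core's JSON-RPC interface, one participant's steps might look like the following sketch (the wallet names, credentials, the other participants' keys, and the `legacy` address type are placeholder assumptions; `requests` is a third-party HTTP library):
```
import requests

RPC_URL = "http://127.0.0.1:8332"   # placeholder node URL
AUTH = ("rpcuser", "rpcpassword")   # placeholder credentials

def rpc(method, *params, wallet=None):
    """Minimal JSON-RPC helper; `wallet` selects a -rpcwallet endpoint."""
    url = RPC_URL + (f"/wallet/{wallet}" if wallet else "")
    r = requests.post(url, json={"jsonrpc": "1.0", "id": "psbt-doc",
                                 "method": method, "params": list(params)},
                      auth=AUTH)
    r.raise_for_status()
    return r.json()["result"]

# Carol's view of the setup; Alice and Bob run the same steps.
acarol = rpc("getnewaddress", wallet="carol")
kcarol = rpc("getaddressinfo", acarol, wallet="carol")["pubkey"]

# After exchanging public keys out of band ("Kalice"/"Kbob" are placeholders):
keys = ["Kalice", "Kbob", kcarol]
amulti = rpc("addmultisigaddress", 2, keys, "", "legacy", wallet="carol")["address"]
rpc("importaddress", amulti, "", False, wallet="carol")
```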
Later, *V* BTC has been received on *Amulti*, and Bob and Carol want to
move the coins in their entirety to address *Asend*, with no change. Alice
does not need to be involved.
- One of them - let's assume Carol here - initiates the creation. She runs
`walletcreatefundedpsbt [] {"Asend":V} 0 {"subtractFeeFromOutputs":[0], "includeWatching":true}`.
We call the resulting PSBT *P*. *P* does not contain any signatures.
- Carol needs to sign the transaction herself. In order to do so, she runs
`walletprocesspsbt "P"`, and gives the resulting PSBT *P2* to Bob.
- Bob inspects the PSBT using `decodepsbt "P2"` to determine if the transaction
has indeed just the expected input, and an output to *Asend*, and the fee is
reasonable. If he agrees, he calls `walletprocesspsbt "P2"` to sign. The
resulting PSBT *P3* contains both Carol's and Bob's signature.
- Now anyone can call `finalizepsbt "P3"` to extract a fully signed transaction
*T*.
- Finally anyone can broadcast the transaction using `sendrawtransaction "T"`.
In case there are more signers, it may be advantageous to let them all sign in
parallel, rather than passing the PSBT from one signer to the next one. In the
above example this would translate to Carol handing a copy of *P* to each signer
separately. They can then all invoke `walletprocesspsbt "P"`, and end up with
their individually-signed PSBT structures. They then all send those back to
Carol (or anyone) who can combine them using `combinepsbt`. The last two steps
(`finalizepsbt` and `sendrawtransaction`) remain unchanged.
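Scripted with the same hypothetical `rpc` helper from the setup sketch above, the sequential signing round reduces to a few calls (the destination address and amount are placeholders):
```
# Carol creates and signs the funded PSBT (P -> P2).
p = rpc("walletcreatefundedpsbt", [], {"Asend": 0.1}, 0,
        {"subtractFeeFromOutputs": [0], "includeWatching": True},
        wallet="carol")["psbt"]
p2 = rpc("walletprocesspsbt", p, wallet="carol")["psbt"]

# Bob inspects the decoded PSBT, then signs it (P2 -> P3).
print(rpc("decodepsbt", p2))
p3 = rpc("walletprocesspsbt", p2, wallet="bob")["psbt"]

# Anyone finalizes and, if complete, broadcasts the extracted transaction.
final = rpc("finalizepsbt", p3)
if final["complete"]:
    print("txid:", rpc("sendrawtransaction", final["hex"]))

# Parallel variant: each signer processes a copy of P, then anyone merges
# the results before finalizing, e.g. p3 = rpc("combinepsbt", [pa, pb]).
```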
| 0 |
bitcoin | bitcoin/doc/assets-attribution.md | The list of assets used in the bitcoin source and their attribution can now be found in [contrib/debian/copyright](../contrib/debian/copyright).
| 0 |
bitcoin | bitcoin/doc/cjdns.md | # CJDNS support in Bitcoin Core
It is possible to run Bitcoin Core over CJDNS, an encrypted IPv6 network that
uses public-key cryptography for address allocation and a distributed hash table
for routing.
## What is CJDNS?
CJDNS is like a distributed, shared VPN with multiple entry points where every
participant can reach any other participant. All participants use addresses from
the `fc00::/8` network (reserved IPv6 range). Installation and configuration is
done outside of Bitcoin Core, similarly to a VPN (either in the host/OS or on
the network router). See https://github.com/cjdelisle/cjdns#readme and
https://github.com/hyperboria/docs#hyperboriadocs for more information.
Compared to IPv4/IPv6, CJDNS provides end-to-end encryption and protects nodes
from traffic analysis and filtering.
Used with Tor and I2P, CJDNS is a complementary option that can enhance network
redundancy and robustness for both the Bitcoin network and individual nodes.
Each network has different characteristics. For instance, Tor is widely used but
somewhat centralized. I2P connections have a source address and I2P is slow.
CJDNS is fast but does not hide the sender and the recipient from intermediate
routers.
## Installing CJDNS and finding a peer to connect to the network
To install and set up CJDNS, follow the instructions at
https://github.com/cjdelisle/cjdns#how-to-install-cjdns.
You need to initiate an outbound connection to a peer on the CJDNS network
before it will work with your Bitcoin Core node. This is described in steps
["2. Find a friend"](https://github.com/cjdelisle/cjdns#2-find-a-friend) and
["3. Connect your node to your friend's
node"](https://github.com/cjdelisle/cjdns#3-connect-your-node-to-your-friends-node)
in the CJDNS documentation.
One quick way to accomplish these two steps is to query for available public
peers on [Hyperboria](https://github.com/hyperboria) by running the following:
```
git clone https://github.com/hyperboria/peers hyperboria-peers
cd hyperboria-peers
./testAvailable.py
```
For each peer, the `./testAvailable.py` script prints the filename of the peer's
credentials followed by the ping result.
Choose one or several peers, copy their credentials from their respective files,
paste them into the relevant IPv4 or IPv6 "connectTo" JSON object in the
`cjdroute.conf` file you created in step ["1. Generate a new configuration
file"](https://github.com/cjdelisle/cjdns#1-generate-a-new-configuration-file),
and save the file.
## Launching CJDNS
Typically, CJDNS might be launched from its directory with
`sudo ./cjdroute < cjdroute.conf` and it sheds permissions after setting up the
[TUN](https://en.wikipedia.org/wiki/TUN/TAP) interface. You may also [launch it as an
unprivileged user](https://github.com/cjdelisle/cjdns/blob/master/doc/non-root-user.md)
with some additional setup.
The network connection can be checked by running `./tools/peerStats` from the
CJDNS directory.
## Run Bitcoin Core with CJDNS
Once you are connected to the CJDNS network, the following Bitcoin Core
configuration option makes CJDNS peers automatically reachable:
```
-cjdnsreachable
```
When enabled, this option tells Bitcoin Core that it is running in an
environment where a connection to an `fc00::/8` address will be to the CJDNS
network instead of to an [RFC4193](https://datatracker.ietf.org/doc/html/rfc4193)
IPv6 local network. This helps Bitcoin Core perform better address management:
- Your node can consider incoming `fc00::/8` connections to be from the CJDNS
network rather than from an IPv6 private one.
- If one of your node's local addresses is in `fc00::/8`, then it can choose to
gossip that address to peers.
## Additional configuration options related to CJDNS
```
-onlynet=cjdns
```
Make automatic outbound connections only to CJDNS addresses. Inbound and manual
connections are not affected by this option. It can be specified multiple times
to allow multiple networks, e.g. onlynet=cjdns, onlynet=i2p, onlynet=onion.
CJDNS support was added to Bitcoin Core in version 23.0 and there may be fewer
CJDNS peers than Tor or IP ones. You can use `bitcoin-cli -addrinfo` to see the
number of CJDNS addresses known to your node.
In general, a node can be run with both an onion service and CJDNS (or any/all
of IPv4/IPv6/onion/I2P/CJDNS), which can provide a potential fallback if one of
the networks has issues. There are a number of ways to configure this; see
[doc/tor.md](https://github.com/bitcoin/bitcoin/blob/master/doc/tor.md) for
details.
## CJDNS-related information in Bitcoin Core
There are several ways to see your CJDNS address in Bitcoin Core:
- in the "Local addresses" output of CLI `-netinfo`
- in the "localaddresses" output of RPC `getnetworkinfo`
To see which CJDNS peers your node is connected to, use `bitcoin-cli -netinfo 4`
or the `getpeerinfo` RPC (i.e. `bitcoin-cli getpeerinfo`).
You can use the `getnodeaddresses` RPC to fetch a number of CJDNS peers known to your node; run `bitcoin-cli help getnodeaddresses` for details.
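For example, fetching a handful of known CJDNS peers over JSON-RPC might look like this sketch (the URL, credentials, and count are placeholders; `requests` is a third-party HTTP library):
```
import requests

resp = requests.post("http://127.0.0.1:8332",  # placeholder node URL
                     json={"jsonrpc": "1.0", "id": "cjdns",
                           "method": "getnodeaddresses", "params": [8, "cjdns"]},
                     auth=("rpcuser", "rpcpassword"))  # placeholder credentials
for peer in resp.json()["result"]:
    print(peer["address"], peer["port"])
```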
| 0 |
bitcoin | bitcoin/doc/benchmarking.md | Benchmarking
============
Bitcoin Core has an internal benchmarking framework, with benchmarks
for cryptographic algorithms (e.g. SHA1, SHA256, SHA512, RIPEMD160, Poly1305, ChaCha20), the rolling bloom filter, coin selection,
the thread queue, and wallet balance.
Running
---------------------
For benchmarking, you only need to compile `bitcoin_bench`. The bench runner
warns if you configure with `--enable-debug`; consider whether building without
it will impact the benchmark(s) you are interested in, since doing so disables
the extra log printers and lock analysis.
make -C src bitcoin_bench
After compiling bitcoin-core, the benchmarks can be run with:
src/bench/bench_bitcoin
The output will look similar to:
```
| ns/op | op/s | err% | total | benchmark
|--------------------:|--------------------:|--------:|----------:|:----------
| 57,927,463.00 | 17.26 | 3.6% | 0.66 | `AddrManAdd`
| 677,816.00 | 1,475.33 | 4.9% | 0.01 | `AddrManGetAddr`
...
| ns/byte | byte/s | err% | total | benchmark
|--------------------:|--------------------:|--------:|----------:|:----------
| 127.32 | 7,854,302.69 | 0.3% | 0.00 | `Base58CheckEncode`
| 31.95 | 31,303,226.99 | 0.2% | 0.00 | `Base58Decode`
...
```
Help
---------------------
src/bench/bench_bitcoin -?
This prints the various options, such as listing the benchmarks without running
them, or using a regex filter to run only certain benchmarks.
Notes
---------------------
More benchmarks are needed for, in no particular order:
- Script Validation
- Coins database
- Memory pool
- Cuckoo Cache
- P2P throughput
Going Further
--------------------
To monitor Bitcoin Core performance more in depth (like reindex or IBD): https://github.com/chaincodelabs/bitcoinperf
To generate Flame Graphs for Bitcoin Core: https://github.com/eklitzke/bitcoin/blob/flamegraphs/doc/flamegraphs.md
| 0 |
bitcoin | bitcoin/doc/Doxyfile.in | # Doxyfile 1.8.12
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
#
# All text after a double hash (##) is considered a comment and is placed in
# front of the TAG it is preceding.
#
# All text after a single hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists, items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (\" \").
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the config file
# that follow. The default is UTF-8 which is also the encoding used for all text
# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
# for the list of possible encodings.
# The default value is: UTF-8.
DOXYFILE_ENCODING = UTF-8
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
# double-quotes, unless you are using Doxywizard) that should identify the
# project for which the documentation is generated. This name is used in the
# title of most generated pages and in a few other places.
# The default value is: My Project.
PROJECT_NAME = "Bitcoin Core"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = @PACKAGE_VERSION@
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give the viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF = "P2P Digital Currency"
# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
# the logo to the output directory.
PROJECT_LOGO = doc/bitcoin_logo_doxygen.png
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
# entered, it will be relative to the location where doxygen was started. If
# left blank the current directory will be used.
OUTPUT_DIRECTORY = doc/doxygen
# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
# directories (in 2 levels) under the output directory of each output format and
# will distribute the generated files over these directories. Enabling this
# option can be useful when feeding doxygen a huge amount of source files, where
# putting all generated files in the same directory would otherwise cause
# performance problems for the file system.
# The default value is: NO.
CREATE_SUBDIRS = NO
# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
# characters to appear in the names of generated files. If set to NO, non-ASCII
# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
# U+3044.
# The default value is: NO.
ALLOW_UNICODE_NAMES = NO
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
# Ukrainian and Vietnamese.
# The default value is: English.
OUTPUT_LANGUAGE = English
# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
# The default value is: YES.
BRIEF_MEMBER_DESC = YES
# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
# description of a member or function before the detailed description
#
# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.
# The default value is: YES.
REPEAT_BRIEF = YES
# This tag implements a quasi-intelligent brief description abbreviator that is
# used to form the text in various listings. Each string in this list, if found
# as the leading text of the brief description, will be stripped from the text
# and the result, after processing the whole list, is used as the annotated
# text. Otherwise, the brief description is used as-is. If left blank, the
# following values are used ($name is automatically replaced with the name of
# the entity):The $name class, The $name widget, The $name file, is, provides,
# specifies, contains, represents, a, an and the.
ABBREVIATE_BRIEF = "The $name class" \
"The $name widget" \
"The $name file" \
is \
provides \
specifies \
contains \
represents \
a \
an \
the
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# doxygen will generate a detailed section even if there is only a brief
# description.
# The default value is: NO.
ALWAYS_DETAILED_SEC = NO
# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
# inherited members of a class in the documentation of that class as if those
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
# The default value is: NO.
INLINE_INHERITED_MEMB = NO
# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
# before each file's name in the file list and in the header files. If set to NO the
# shortest path that makes the file name unique will be used
# The default value is: YES.
FULL_PATH_NAMES = YES
# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
# Stripping is only done if one of the specified strings matches the left-hand
# part of the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which doxygen is run is used as the path to
# strip.
#
# Note that you can specify absolute paths here, but also relative paths, which
# will be relative from the directory where doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
STRIP_FROM_PATH =
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
# path mentioned in the documentation of a class, which tells the reader which
# header file to include in order to use a class. If left blank only the name of
# the header file containing the class definition is used. Otherwise one should
# specify the list of include paths that are normally passed to the compiler
# using the -I flag.
STRIP_FROM_INC_PATH =
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful if your file system doesn't
# support long names like on DOS, Mac, or CD-ROM.
# The default value is: NO.
SHORT_NAMES = NO
# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
# first line (until the first dot) of a Javadoc-style comment as the brief
# description. If set to NO, the Javadoc-style will behave just like regular Qt-
# style comments (thus requiring an explicit @brief command for a brief
# description.)
# The default value is: NO.
JAVADOC_AUTOBRIEF = YES
# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
# line (until the first dot) of a Qt-style comment as the brief description. If
# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
# requiring an explicit \brief command for a brief description.)
# The default value is: NO.
QT_AUTOBRIEF = NO
# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
# a brief description. This used to be the default behavior. The new default is
# to treat a multi-line C++ comment block as a detailed description. Set this
# tag to YES if you prefer the old behavior instead.
#
# Note that setting this tag to YES also means that rational rose comments are
# not recognized any more.
# The default value is: NO.
MULTILINE_CPP_IS_BRIEF = NO
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
# The default value is: YES.
INHERIT_DOCS = YES
# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
# page for each member. If set to NO, the documentation of a member will be part
# of the file/class/namespace that contains it.
# The default value is: NO.
SEPARATE_MEMBER_PAGES = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
# uses this value to replace tabs by spaces in code fragments.
# Minimum value: 1, maximum value: 16, default value: 4.
TAB_SIZE = 8
# This tag can be used to specify a number of aliases that act as commands in
# the documentation. An alias has the form:
# name=value
# For example adding
# "sideeffect=@par Side Effects:\n"
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines.
ALIASES =
# This tag can be used to specify a number of word-keyword mappings (TCL only).
# A mapping has the form "name=value". For example adding "class=itcl::class"
# will allow you to use the command class in the itcl::class meaning.
TCL_SUBST =
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
# instance, some of the names that are used will be different. The list of all
# members will be omitted, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_FOR_C = NO
# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
# Python sources only. Doxygen will then generate output that is more tailored
# for that language. For instance, namespaces will be presented as packages,
# qualified scopes will look different, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_JAVA = NO
# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
# sources. Doxygen will then generate output that is tailored for Fortran.
# The default value is: NO.
OPTIMIZE_FOR_FORTRAN = NO
# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
# sources. Doxygen will then generate output that is tailored for VHDL.
# The default value is: NO.
OPTIMIZE_OUTPUT_VHDL = NO
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
# Fortran. In the latter case the parser tries to guess whether the code is fixed
# or free formatted code, this is the default for Fortran type files), VHDL. For
# instance to make doxygen treat .inc files as Fortran files (default is PHP),
# and .f files as C (default is Fortran), use: inc=Fortran f=C.
#
# Note: For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
# the files are not read by doxygen.
EXTENSION_MAPPING =
# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
# according to the Markdown format, which allows for more readable
# documentation. See http://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you can
# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
# case of backward compatibilities issues.
# The default value is: YES.
MARKDOWN_SUPPORT = YES
# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
# to that level are automatically included in the table of contents, even if
# they do not have an id attribute.
# Note: This feature currently applies only to Markdown headings.
# Minimum value: 0, maximum value: 99, default value: 0.
# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
TOC_INCLUDE_HEADINGS = 0
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word or
# globally by setting AUTOLINK_SUPPORT to NO.
# The default value is: YES.
AUTOLINK_SUPPORT = YES
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should set this
# tag to YES in order to let doxygen match functions declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string);
# versus func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
# The default value is: NO.
BUILTIN_STL_SUPPORT = NO
# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
# The default value is: NO.
CPP_CLI_SUPPORT = NO
# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
# will parse them like normal C++ but will assume all classes use public instead
# of private inheritance when no explicit protection keyword is present.
# The default value is: NO.
SIP_SUPPORT = NO
# For Microsoft's IDL there are propget and propput attributes to indicate
# getter and setter methods for a property. Setting this option to YES will make
# doxygen to replace the get and set methods by a property in the documentation.
# This will only work if the methods are indeed getting or setting a simple
# type. If this is not the case, or you want to show the methods anyway, you
# should set this option to NO.
# The default value is: YES.
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
# tag is set to YES then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
# The default value is: NO.
DISTRIBUTE_GROUP_DOC = NO
# If one adds a struct or class to a group and this option is enabled, then also
# any nested class or struct is added to the same group. By default this option
# is disabled and one has to add nested compounds explicitly via \ingroup.
# The default value is: NO.
GROUP_NESTED_COMPOUNDS = NO
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
# subgrouping. Alternatively, this can be done per class using the
# \nosubgrouping command.
# The default value is: YES.
SUBGROUPING = YES
# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
# are shown inside the group in which they are included (e.g. using \ingroup)
# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
# and RTF).
#
# Note that this feature does not work in combination with
# SEPARATE_MEMBER_PAGES.
# The default value is: NO.
INLINE_GROUPED_CLASSES = NO
# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
# with only public data fields or simple typedef fields will be shown inline in
# the documentation of the scope in which they are defined (i.e. file,
# namespace, or group documentation), provided this scope is documented. If set
# to NO, structs, classes, and unions are shown on a separate page (for HTML and
# Man pages) or section (for LaTeX and RTF).
# The default value is: NO.
INLINE_SIMPLE_STRUCTS = NO
# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
# enum is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
# with name TypeT. When disabled the typedef will appear as a member of a file,
# namespace, or class. And the struct will be named TypeS. This can typically be
# useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
# The default value is: NO.
TYPEDEF_HIDES_STRUCT = NO
# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
# cache is used to resolve symbols given their name and scope. Since this can be
# an expensive process and often the same symbol appears multiple times in the
# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
# doxygen will become slower. If the cache is too large, memory is wasted. The
# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
# symbols. At the end of a run doxygen will report the cache usage and suggest
# the optimal cache size from a speed point of view.
# Minimum value: 0, maximum value: 9, default value: 0.
LOOKUP_CACHE_SIZE = 0
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
# documentation are documented, even if no documentation was available. Private
# class members and static file members will be hidden unless the
# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
# Note: This will also disable the warnings about undocumented members that are
# normally produced when WARNINGS is set to YES.
# The default value is: NO.
EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = YES
# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
EXTRACT_PACKAGE = NO
# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
# included in the documentation.
# The default value is: NO.
EXTRACT_STATIC = YES
# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
# locally in source files will be included in the documentation. If set to NO,
# only classes defined in header files are included. Does not have any effect
# for Java sources.
# The default value is: YES.
EXTRACT_LOCAL_CLASSES = YES
# This flag is only useful for Objective-C code. If set to YES, local methods,
# which are defined in the implementation section but not in the interface are
# included in the documentation. If set to NO, only methods in the interface are
# included.
# The default value is: NO.
EXTRACT_LOCAL_METHODS = NO
# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base name of
# the file that contains the anonymous namespace. By default anonymous namespace
# are hidden.
# The default value is: NO.
EXTRACT_ANON_NSPACES = NO
# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
# undocumented members inside documented classes or files. If set to NO these
# members will be included in the various overviews, but no documentation
# section is generated. This option has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_MEMBERS = NO
# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy. If set
# to NO, these classes will be included in the various overviews. This option
# has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
# (class|struct|union) declarations. If set to NO, these declarations will be
# included in the documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
# documentation blocks found inside the body of a function. If set to NO, these
# blocks will be appended to the function's detailed documentation block.
# The default value is: NO.
HIDE_IN_BODY_DOCS = NO
# The INTERNAL_DOCS tag determines if documentation that is typed after a
# \internal command is included. If the tag is set to NO then the documentation
# will be excluded. Set it to YES to include the internal documentation.
# The default value is: NO.
INTERNAL_DOCS = NO
# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
# names in lower-case letters. If set to YES, upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# and Mac users are advised to set this option to NO.
# The default value is: system dependent.
CASE_SENSE_NAMES = NO
# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
# their full class and namespace scopes in the documentation. If set to YES, the
# scope will be hidden.
# The default value is: NO.
HIDE_SCOPE_NAMES = NO
# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
# append additional text to a page's title, such as Class Reference. If set to
# YES the compound reference will be hidden.
# The default value is: NO.
HIDE_COMPOUND_REFERENCE= NO
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
SHOW_INCLUDE_FILES = YES
# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
# grouped member an include statement to the documentation, telling the reader
# which file to include in order to use the member.
# The default value is: NO.
SHOW_GROUPED_MEMB_INC = NO
# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
# files with double quotes in the documentation rather than with sharp brackets.
# The default value is: NO.
FORCE_LOCAL_INCLUDES = NO
# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
# documentation for inline members.
# The default value is: YES.
INLINE_INFO = YES
# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
# (detailed) documentation of file and class members alphabetically by member
# name. If set to NO, the members will appear in declaration order.
# The default value is: YES.
SORT_MEMBER_DOCS = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
# descriptions of file, namespace and class members alphabetically by member
# name. If set to NO, the members will appear in declaration order. Note that
# this will also influence the order of the classes in the class list.
# The default value is: NO.
SORT_BRIEF_DOCS = NO
# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
# (brief and detailed) documentation of class members so that constructors and
# destructors are listed first. If set to NO the constructors will appear in the
# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
# member documentation.
# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
# detailed member documentation.
# The default value is: NO.
SORT_MEMBERS_CTORS_1ST = NO
# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
# of group names into alphabetical order. If set to NO the group names will
# appear in their defined order.
# The default value is: NO.
SORT_GROUP_NAMES = NO
# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
# fully-qualified names, including namespaces. If set to NO, the class list will
# be sorted only by class name, not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
# Note: This option applies only to the class list, not to the alphabetical
# list.
# The default value is: NO.
SORT_BY_SCOPE_NAME = NO
# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
# type resolution of all parameters of a function it will reject a match between
# the prototype and the implementation of a member function even if there is
# only one candidate or it is obvious which candidate to choose by doing a
# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
# accept a match between prototype and implementation in such cases.
# The default value is: NO.
STRICT_PROTO_MATCHING = NO
# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
# list. This list is created by putting \todo commands in the documentation.
# The default value is: YES.
GENERATE_TODOLIST = YES
# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
# list. This list is created by putting \test commands in the documentation.
# The default value is: YES.
GENERATE_TESTLIST = YES
# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
# list. This list is created by putting \bug commands in the documentation.
# The default value is: YES.
GENERATE_BUGLIST = YES
# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
# the deprecated list. This list is created by putting \deprecated commands in
# the documentation.
# The default value is: YES.
GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional documentation
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
# ... \endcond blocks.
ENABLED_SECTIONS =
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
# documentation. If the initializer consists of more lines than specified here
# it will be hidden. Use a value of 0 to hide initializers completely. The
# appearance of the value of individual variables and macros / defines can be
# controlled using \showinitializer or \hideinitializer command in the
# documentation regardless of this setting.
# Minimum value: 0, maximum value: 10000, default value: 30.
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
# the bottom of the documentation of classes and structs. If set to YES, the
# list will mention the files that were used to generate the documentation.
# The default value is: YES.
SHOW_USED_FILES = YES
# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
# will remove the Files entry from the Quick Index and from the Folder Tree View
# (if specified).
# The default value is: YES.
SHOW_FILES = YES
# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
# page. This will remove the Namespaces entry from the Quick Index and from the
# Folder Tree View (if specified).
# The default value is: YES.
SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
# popen()) the command command input-file, where command is the value of the
# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
# by doxygen. Whatever the program writes to standard output is used as the file
# version. For an example see the documentation.
FILE_VERSION_FILTER =
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option. You can
# optionally specify a file name after the option, if omitted DoxygenLayout.xml
# will be used as the name of the layout file.
#
# Note that if you run doxygen from a directory containing a file called
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
# tag is left empty.
LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
# extension is automatically appended if omitted. This requires the bibtex tool
# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
# search path. See also \cite for info how to create references.
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
# The QUIET tag can be used to turn on/off the messages that are generated to
# standard output by doxygen. If QUIET is set to YES this implies that the
# messages are off.
# The default value is: NO.
QUIET = NO
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
# The default value is: YES.
WARNINGS = YES
# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: YES.
WARN_IF_UNDOCUMENTED = YES
# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as not documenting some parameters
# in a documented function, or documenting parameters that don't exist or using
# markup commands wrongly.
# The default value is: YES.
WARN_IF_DOC_ERROR = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO, doxygen will only warn about wrong or incomplete
# parameter documentation, but not about the absence of documentation.
# The default value is: NO.
WARN_NO_PARAMDOC = NO
# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
# a warning is encountered.
# The default value is: NO.
WARN_AS_ERROR = NO
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
# and the warning text. Optionally the format may contain $version, which will
# be replaced by the version of the file (if it could be obtained via
# FILE_VERSION_FILTER)
# The default value is: $file:$line: $text.
WARN_FORMAT = "$file:$line: $text"
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
# error (stderr).
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
INPUT = src doc/README_doxygen.md
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
# documentation (see: http://www.gnu.org/software/libiconv) for the list of
# possible encodings.
# The default value is: UTF-8.
INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# read by doxygen.
#
# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08,
# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf.
FILE_PATTERNS = *.c \
*.cc \
*.cxx \
*.cpp \
*.c++ \
*.d \
*.java \
*.ii \
*.ixx \
*.ipp \
*.i++ \
*.inl \
*.h \
*.hh \
*.hxx \
*.hpp \
*.h++ \
*.idl \
*.odl \
*.cs \
*.php \
*.php3 \
*.inc \
*.m \
*.mm \
*.dox \
*.py \
*.f90 \
*.f \
*.for \
*.vhd \
*.vhdl
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = YES
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
#
# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE = src/crc32c \
src/leveldb \
src/json
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
# The default value is: NO.
EXCLUDE_SYMLINKS = NO
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories.
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
EXCLUDE_PATTERNS =
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories use the pattern */test/*
EXCLUDE_SYMBOLS = boost \
google
# The EXAMPLE_PATH tag can be used to specify one or more files or directories
# that contain example code fragments that are included (see the \include
# command).
EXAMPLE_PATH =
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
EXAMPLE_PATTERNS = *
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
# irrespective of the value of the RECURSIVE tag.
# The default value is: NO.
EXAMPLE_RECURSIVE = NO
# The IMAGE_PATH tag can be used to specify one or more files or directories
# that contain images that are to be included in the documentation (see the
# \image command).
IMAGE_PATH =
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command:
#
# <filter> <input-file>
#
# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
# name of an input file. Doxygen will then use the output that the filter
# program writes to standard output. If FILTER_PATTERNS is specified, this tag
# will be ignored.
#
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
INPUT_FILTER =
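# Illustrative example only (commented out; the sed expression is a
# hypothetical filter, not part of this configuration): any command that reads
# the input file named by its argument and writes the result to standard
# output can serve as a filter, provided it does not add or remove lines, e.g.:
# INPUT_FILTER = "sed -e 's/TODO:/@todo /'"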
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis. Doxygen will compare the file name with each pattern and apply the
# filter if there is a match. The filters are a list of the form: pattern=filter
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
FILTER_PATTERNS =
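# Illustrative example only (commented out): following the pattern=filter form
# described above, a hypothetical "my_py_filter" script on the PATH could be
# applied to Python sources only:
# FILTER_PATTERNS = *.py=my_py_filter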
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER) will also be used to filter the input files that are used for
# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO.
FILTER_SOURCE_FILES = NO
# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
# it is also possible to disable source filtering for a specific pattern using
# *.ext= (so without naming a filter).
# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
FILTER_SOURCE_PATTERNS =
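# Illustrative example only (commented out): using the *.ext= form described
# above, source filtering could be switched off for C++ files while leaving
# any FILTER_PATTERNS in effect elsewhere:
# FILTER_SOURCE_PATTERNS = *.cpp=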
# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
# is part of the input, its contents will be placed on the main page
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
USE_MDFILE_AS_MAINPAGE = doc/README_doxygen.md
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
# generated. Documented entities will be cross-referenced with these sources.
#
# Note: To get rid of all source code in the generated output, make sure that
# also VERBATIM_HEADERS is set to NO.
# The default value is: NO.
SOURCE_BROWSER = YES
# Setting the INLINE_SOURCES tag to YES will include the body of functions,
# classes and enums directly into the documentation.
# The default value is: NO.
INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
# special comment blocks from generated source code fragments. Normal C, C++ and
# Fortran comments will always remain visible.
# The default value is: YES.
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
# function all documented functions referencing it will be listed.
# The default value is: NO.
REFERENCED_BY_RELATION = NO
# If the REFERENCES_RELATION tag is set to YES then for each documented function
# all documented entities called/used by that function will be listed.
# The default value is: NO.
REFERENCES_RELATION = NO
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
# to YES then the hyperlinks from functions in REFERENCES_RELATION and
# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
# link to the documentation.
# The default value is: YES.
REFERENCES_LINK_SOURCE = YES
# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
# source code will show a tooltip with additional information such as prototype,
# brief description and links to the definition and documentation. Since this
# will make the HTML file larger and loading of large files a bit slower, you
# can opt to disable this feature.
# The default value is: YES.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
SOURCE_TOOLTIPS = YES
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of doxygen built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see http://www.gnu.org/software/global/global.html). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
# Doxygen will invoke htags (and that will in turn invoke gtags), so these
# tools must be available from the command line (i.e. in the search path).
#
# The result: instead of the source browser generated by doxygen, the links to
# source code will now point to the output of htags.
# The default value is: NO.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
USE_HTAGS = NO
# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
# verbatim copy of the header file for each class for which an include is
# specified. Set to NO to disable this.
# See also: Section \class.
# The default value is: YES.
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
# compounds will be generated. Enable this if the project contains a lot of
# classes, structs, unions or interfaces.
# The default value is: YES.
ALPHABETICAL_INDEX = YES
# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
# which the alphabetical index list will be split.
# Minimum value: 1, maximum value: 20, default value: 5.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
COLS_IN_ALPHA_INDEX = 5
# In case all classes in a project start with a common prefix, all classes will
# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
# can be used to specify a prefix (or a list of prefixes) that should be ignored
# while generating the index headers.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
IGNORE_PREFIX =
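# Illustrative example only (commented out): if every class in the project
# carried a hypothetical "C" prefix, the index headers could skip it with:
# IGNORE_PREFIX = C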
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
# The default value is: YES.
GENERATE_HTML = YES
# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_OUTPUT = html
# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
# generated HTML page (for example: .htm, .php, .asp).
# The default value is: .html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FILE_EXTENSION = .html
# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
# each generated HTML page. If the tag is left blank doxygen will generate a
# standard header.
#
# To get valid HTML, the header file must include any scripts and style sheets
# that doxygen needs, which depend on the configuration options used (e.g. the
# setting GENERATE_TREEVIEW). It is highly recommended to start from a default
# header generated using
# doxygen -w html new_header.html new_footer.html new_stylesheet.css
# YourConfigFile
# and then modify the file new_header.html. See also section "Doxygen usage"
# for information on how to generate the default header that doxygen normally
# uses.
# Note: The header is subject to change so you typically have to regenerate the
# default header when upgrading to a newer version of doxygen. For a description
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_HEADER =
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank doxygen will generate a standard
# footer. See HTML_HEADER for more information on how to generate a default
# footer and what special commands can be used inside the footer. See also
# section "Doxygen usage" for information on how to generate the default footer
# that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
# the HTML output. If left blank doxygen will generate a default style sheet.
# See also section "Doxygen usage" for information on how to generate the style
# sheet that doxygen normally uses.
# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
# it is more robust and this tag (HTML_STYLESHEET) will in the future become
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_STYLESHEET =
# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
# standard style sheet and is therefore more robust against future updates.
# Doxygen will copy the style sheet files to the output directory.
# Note: The order of the extra style sheet files is of importance (e.g. the last
# style sheet in the list overrules the setting of the previous ones in the
# list). For an example see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET =
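# Illustrative example only (commented out; the file names are hypothetical):
# extra style sheets are applied in order, so the last one listed wins on
# conflicting rules:
# HTML_EXTRA_STYLESHEET = doc/base-tweaks.css doc/dark-tweaks.css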
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
# files will be copied as-is; there are no commands or markers available.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# is purple, and 360 is red again.
# Minimum value: 0, maximum value: 359, default value: 220.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_HUE = 220
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
# in the HTML output. For a value of 0 the output will use grayscales only. A
# value of 255 will produce the most vivid colors.
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_SAT = 100
# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
# luminance component of the colors in the HTML output. Values below 100
# gradually make the output lighter, whereas values above 100 make the output
# darker. The value divided by 100 is the actual gamma applied, so 80 represents
# a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100 does not
# change the gamma.
# Minimum value: 40, maximum value: 240, default value: 80.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
# to YES can help to show when doxygen was last run and thus if the
# documentation is up to date.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_SECTIONS = NO
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
# shown in the various tree structured indices initially; the user can expand
# and collapse entries dynamically later on. Doxygen will expand the tree to
# such a level that at most the specified number of entries are visible (unless
# a fully collapsed tree already exceeds this amount). So setting the number of
# entries to 1 will produce a fully collapsed tree by default. 0 is a special
# value representing an infinite number of entries and will result in a fully
# expanded tree by default.
# Minimum value: 0, maximum value: 9999, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated development
# environment (see: http://developer.apple.com/tools/xcode/), introduced with
# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
# Makefile in the HTML output directory. Running make will produce the docset in
# that directory and running make install will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
# for more information.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_DOCSET = NO
# This tag determines the name of the docset feed. A documentation feed provides
# an umbrella under which multiple documentation sets from a single provider
# (such as a company or product suite) can be grouped.
# The default value is: Doxygen generated docs.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDNAME = "Doxygen generated docs"
# This tag specifies a string that should uniquely identify the documentation
# set bundle. This should be a reverse domain-name style string, e.g.
# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_BUNDLE_ID = org.bitcoin.Bitcoin-Core
# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
# the documentation publisher. This should be a reverse domain-name style
# string, e.g. com.mycompany.MyDocSet.documentation.
# The default value is: org.doxygen.Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_ID = org.bitcoin.Bitcoin-Core
# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
# The default value is: Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
# Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
# files are now used as the Windows 98 help format, and will replace the old
# Windows help format (.hlp) on all Windows platforms in the future. Compressed
# HTML files also contain an index, a table of contents, and you can search for
# words in the documentation. The HTML workshop also contains a viewer for
# compressed HTML files.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_HTMLHELP = NO
# The CHM_FILE tag can be used to specify the file name of the resulting .chm
# file. You can add a path in front of the file if the result should not be
# written to the html output directory.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_FILE =
# The HHC_LOCATION tag can be used to specify the location (absolute path
# including file name) of the HTML help compiler (hhc.exe). If non-empty,
# doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
# The GENERATE_CHI flag controls if a separate .chi index file is generated
# (YES) or that it should be included in the master .chm file (NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
GENERATE_CHI = NO
# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
# The BINARY_TOC flag controls whether a binary table of contents is generated
# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
BINARY_TOC = NO
# The TOC_EXPAND flag can be set to YES to add extra items for group members to
# the table of contents of the HTML help documentation and to the tree view.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
TOC_EXPAND = NO
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
# (.qch) of the generated HTML documentation.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_QHP = NO
# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
# the file name of the resulting .qch file. The path specified is relative to
# the HTML output folder.
# This tag requires that the tag GENERATE_QHP is set to YES.
QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
# Help Project output. For more information please see Qt Help Project / Virtual
# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
# folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_VIRTUAL_FOLDER = doc
# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
# The QHG_LOCATION tag can be used to specify the location of Qt's
# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
# generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHG_LOCATION =
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
# generated; together with the HTML files, they form an Eclipse help plugin. To
# install this plugin and make it available under the help contents menu in
# Eclipse, the contents of the directory containing the HTML and XML files need
# to be copied into the plugins directory of Eclipse. The name of the directory
# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
# After copying, Eclipse needs to be restarted before the help appears.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_ECLIPSEHELP = NO
# A unique identifier for the Eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have this
# name. Each documentation set should have its own identifier.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
ECLIPSE_DOC_ID = org.doxygen.Project
# If you want full control over the layout of the generated HTML pages it might
# be necessary to disable the index and replace it with your own. The
# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
# of each HTML page. A value of NO enables the index and the value YES disables
# it. Since the tabs in the index contain the same information as the navigation
# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
DISABLE_INDEX = NO
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information. If the tag
# value is set to YES, a side panel will be generated containing a tree-like
# index structure (just like the one that is generated for HTML Help). For this
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
# further fine-tune the look of the index. As an example, the default style
# sheet generated by doxygen has an example that shows how to put an image at
# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
# the same information as the tab index, you could consider setting
# DISABLE_INDEX to YES when enabling this option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = NO
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
# doxygen will group on one line in the generated HTML documentation.
#
# Note that a value of 0 will completely suppress the enum values from appearing
# in the overview section.
# Minimum value: 0, maximum value: 20, default value: 4.
# This tag requires that the tag GENERATE_HTML is set to YES.
ENUM_VALUES_PER_LINE = 4
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
# to set the initial width (in pixels) of the frame in which the tree is shown.
# Minimum value: 0, maximum value: 1500, default value: 250.
# This tag requires that the tag GENERATE_HTML is set to YES.
TREEVIEW_WIDTH = 250
# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
# external symbols imported via tag files in a separate window.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
EXT_LINKS_IN_WINDOW = NO
# Use this tag to change the font size of LaTeX formulas included as images in
# the HTML documentation. When you change the font size after a successful
# doxygen run you need to manually remove any form_*.png images from the HTML
# output directory to force them to be regenerated.
# Minimum value: 8, maximum value: 50, default value: 10.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_FONTSIZE = 10
# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are not
# supported properly for IE 6.0, but are supported on all modern browsers.
#
# Note that when changing this option you need to delete any form_*.png files in
# the HTML output directory before the changes take effect.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_TRANSPARENT = YES
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# http://www.mathjax.org) which uses client side Javascript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want the formulas to look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
# to it using the MATHJAX_RELPATH option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
USE_MATHJAX = NO
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. See the MathJax site (see:
# http://docs.mathjax.org/en/latest/output.html) for more details.
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility), NativeMML (i.e. MathML) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_FORMAT = HTML-CSS
# When MathJax is enabled you need to specify the location relative to the HTML
# output directory using the MATHJAX_RELPATH option. The destination directory
# should contain the MathJax.js script. For instance, if the mathjax directory
# is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from http://www.mathjax.org before deployment.
# The default value is: http://cdn.mathjax.org/mathjax/latest.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = http://www.mathjax.org/mathjax
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
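# Illustrative example only (commented out): the extension names given in the
# comment above would be enabled like this:
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols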
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
# example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_CODEFILE =
# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
# the HTML output. The underlying search engine uses javascript and DHTML and
# should work on any modern browser. Note that when using HTML help
# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
# there is already a search function so this one should typically be disabled.
# For large projects the javascript based search engine can be slow; enabling
# SERVER_BASED_SEARCH may then provide a better solution. It is possible to
# search using the keyboard; to jump to the search box use <access key> + S
# (what the <access key> is depends on the OS and browser, but it is typically
# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
# key> to jump into the search results window, the results can be navigated
# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
# the search. The filter options can be selected when the cursor is inside the
# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
# to select a filter and <Enter> or <escape> to activate or cancel the filter
# option.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using Javascript. There
# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
# setting. When disabled, doxygen will generate a PHP script for searching and
# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
# and searching needs to be provided by external tools. See the section
# "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
SERVER_BASED_SEARCH = NO
# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
# script for searching. Instead the search results are written to an XML file
# which needs to be processed by an external indexer. Doxygen will invoke an
# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
# search results.
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: http://xapian.org/).
#
# See the section "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH = NO
# The SEARCHENGINE_URL should point to a search engine hosted by a web server
# which will return the search results when EXTERNAL_SEARCH is enabled.
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: http://xapian.org/). See the section "External Indexing and
# Searching" for details.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHENGINE_URL =
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
# search data is written to a file for indexing by an external tool. With the
# SEARCHDATA_FILE tag the name of this file can be specified.
# The default file is: searchdata.xml.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHDATA_FILE = searchdata.xml
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
# projects and redirect the results back to the right project.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH_ID =
# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
# projects other than the one defined by this configuration file, but that are
# all added to the same external search index. Each project needs to have a
# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
# each project to a relative location where the documentation can be found. The
# format is:
# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTRA_SEARCH_MAPPINGS =
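# Illustrative example only (commented out; the project ids and paths are
# hypothetical): following the tagname=loc format shown above:
# EXTRA_SEARCH_MAPPINGS = libfoo=../libfoo/html libbar=../libbar/html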
#---------------------------------------------------------------------------
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
# The default value is: YES.
GENERATE_LATEX = NO
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked.
#
# Note that when enabling USE_PDFLATEX this option is only used for generating
# bitmaps for formulas in the HTML output, but not in the Makefile that is
# written to the output directory.
# The default file is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
# index for LaTeX.
# The default file is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
MAKEINDEX_CMD_NAME = makeindex
# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
COMPACT_LATEX = NO
# The PAPER_TYPE tag can be used to set the paper type that is used by the
# printer.
# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
# 14 inches) and executive (7.25 x 10.5 inches).
# The default value is: a4.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
# that should be included in the LaTeX output. The package can be specified just
# by its name or with the correct syntax as to be used with the LaTeX
# \usepackage command. To get the times font for instance you can specify :
# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
# To use the option intlimits with the amsmath package you can specify:
# EXTRA_PACKAGES=[intlimits]{amsmath}
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
EXTRA_PACKAGES =
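# Illustrative example only (commented out): the two forms described above, a
# bare package name and a package with options, would be written as:
# EXTRA_PACKAGES = times [intlimits]{amsmath}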
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
# generated LaTeX document. The header should contain everything until the first
# chapter. If it is left blank doxygen will generate a standard header. See
# section "Doxygen usage" for information on how to let doxygen write the
# default header to a separate file.
#
# Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title,
# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
# string; for the replacement values of the other commands the user is referred
# to HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
# generated LaTeX document. The footer should contain everything after the last
# chapter. If it is left blank doxygen will generate a standard footer. See
# LATEX_HEADER for more information on how to generate a default footer and what
# special commands can be used inside the footer.
#
# Note: Only use a user-defined footer if you know what you are doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# LaTeX style sheets that are included after the standard style sheets created
# by doxygen. Using this option one can overrule certain style aspects. Doxygen
# will copy the style sheet files to the output directory.
# Note: The order of the extra style sheet files is of importance (e.g. the last
# style sheet in the list overrules the setting of the previous ones in the
# list).
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_STYLESHEET =
# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the LATEX_OUTPUT output
# directory. Note that the files will be copied as-is; there are no commands or
# markers available.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_FILES =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
# contain links (just like the HTML output) instead of page references. This
# makes the output suitable for online browsing using a PDF viewer.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PDF_HYPERLINKS = YES
# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
# the PDF file directly from the LaTeX files. Set this option to YES to get
# higher quality PDF documentation.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
USE_PDFLATEX = YES
# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep running
# if errors occur, instead of asking the user for help. This option is also used
# when generating formulas in HTML.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BATCHMODE = NO
# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
# index chapters (such as File Index, Compound Index, etc.) in the output.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HIDE_INDICES = NO
# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
# code with syntax highlighting in the LaTeX output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_SOURCE_CODE = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
# The default value is: plain.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BIB_STYLE = plain
# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
# page will contain the date and time when the page was generated. Setting this
# to NO can help when comparing the output of multiple runs.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_TIMESTAMP = NO
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
# RTF output is optimized for Word 97 and may not look too pretty with other RTF
# readers/editors.
# The default value is: NO.
GENERATE_RTF = NO
# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: rtf.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_OUTPUT = rtf
# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
COMPACT_RTF = NO
# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
# contain hyperlink fields. The RTF file will contain links (just like the HTML
# output) instead of page references. This makes the output suitable for online
# browsing using Word or some other Word compatible readers that support those
# fields.
#
# Note: WordPad (write) and others do not support links.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_HYPERLINKS = NO
# Load stylesheet definitions from file. Syntax is similar to doxygen's config
# file, i.e. a series of assignments. You only have to provide replacements,
# missing definitions are set to their default value.
#
# See also section "Doxygen usage" for information on how to generate the
# default style sheet that doxygen normally uses.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_STYLESHEET_FILE =
# Set optional variables used in the generation of an RTF document. Syntax is
# similar to doxygen's config file. A template extensions file can be generated
# using doxygen -e rtf extensionFile.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_EXTENSIONS_FILE =
# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
# with syntax highlighting in the RTF output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_SOURCE_CODE = NO
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
# classes and files.
# The default value is: NO.
GENERATE_MAN = NO
# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it. A directory man3 will be created inside the directory specified by
# MAN_OUTPUT.
# The default directory is: man.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_OUTPUT = man
# The MAN_EXTENSION tag determines the extension that is added to the generated
# man pages. In case the manual section does not start with a number, the number
# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
# optional.
# The default value is: .3.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_EXTENSION = .3
# The MAN_SUBDIR tag determines the name of the directory created within
# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
# MAN_EXTENSION with the initial . removed.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_SUBDIR =
# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
# will generate one additional man file for each entity documented in the real
# man page(s). These additional files only source the real man page, but without
# them the man command would be unable to find the correct page.
# The default value is: NO.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_LINKS = NO
#---------------------------------------------------------------------------
# Configuration options related to the XML output
#---------------------------------------------------------------------------
# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
# captures the structure of the code including all documentation.
# The default value is: NO.
GENERATE_XML = NO
# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: xml.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_OUTPUT = xml
# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
# listings (including syntax highlighting and cross-referencing information) to
# the XML output. Note that enabling this will significantly increase the size
# of the XML output.
# The default value is: YES.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_PROGRAMLISTING = YES
#---------------------------------------------------------------------------
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
# that can be used to generate PDF.
# The default value is: NO.
GENERATE_DOCBOOK = NO
# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
# front of it.
# The default directory is: docbook.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_OUTPUT = docbook
# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
# program listings (including syntax highlighting and cross-referencing
# information) to the DOCBOOK output. Note that enabling this will significantly
# increase the size of the DOCBOOK output.
# The default value is: NO.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
# AutoGen Definitions (see http://autogen.sf.net) file that captures the
# structure of the code including all documentation. Note that this feature is
# still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
# file that captures the structure of the code including all documentation.
#
# Note that this feature is still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_PERLMOD = NO
# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
# output from the Perl module output.
# The default value is: NO.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_LATEX = NO
# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
# formatted so it can be parsed by a human reader. This is useful if you want to
# understand what is going on. On the other hand, if this tag is set to NO, the
# size of the Perl module output will be much smaller and Perl will parse it
# just the same.
# The default value is: YES.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_PRETTY = YES
# The names of the make variables in the generated doxyrules.make file are
# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
# so different doxyrules.make files included by the same Makefile don't
# overwrite each other's variables.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
# C-preprocessor directives found in the sources and include files.
# The default value is: YES.
ENABLE_PREPROCESSING = YES
# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
# in the source code. If set to NO, only conditional compilation will be
# performed. Macro expansion can be done in a controlled way by setting
# EXPAND_ONLY_PREDEF to YES.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
MACRO_EXPANSION = NO
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
# the macro expansion is limited to the macros specified with the PREDEFINED and
# EXPAND_AS_DEFINED tags.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_ONLY_PREDEF = NO
# If the SEARCH_INCLUDES tag is set to YES, the include files in the
# INCLUDE_PATH will be searched if a #include is found.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by the
# preprocessor.
# This tag requires that the tag SEARCH_INCLUDES is set to YES.
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will be
# used.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
INCLUDE_FILE_PATTERNS =
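# Illustrative example only (commented out): restricting preprocessing to the
# header patterns mentioned above:
# INCLUDE_FILE_PATTERNS = *.h *.hpp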
# The PREDEFINED tag can be used to specify one or more macro names that are
# defined before the preprocessor is started (similar to the -D option of e.g.
# gcc). The argument of the tag is a list of macros of the form: name or
# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
# is assumed. To prevent a macro definition from being undefined via #undef or
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED = ENABLE_EXTERNAL_SIGNER
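# Illustrative example only (commented out; FOO and BAR are hypothetical macro
# names, not used by this project): the name=definition and := forms described
# above would be written as:
# PREDEFINED = ENABLE_EXTERNAL_SIGNER FOO=1 BAR:=2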
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
# macro definition that is found in the sources will be used. Use the PREDEFINED
# tag if you want to use a different macro definition that overrules the
# definition found in the source code.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
# remove all references to function-like macros that are alone on a line, have
# an all uppercase name, and do not end with a semicolon. Such function macros
# are typically used for boiler-plate code, and will confuse the parser if not
# removed.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration options related to external references
#---------------------------------------------------------------------------
# The TAGFILES tag can be used to specify one or more tag files. For each tag
# file the location of the external documentation should be added. The format of
# a tag file without this location is as follows:
# TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
# TAGFILES = file1=loc1 "file2 = loc2" ...
# where loc1 and loc2 can be relative or absolute paths or URLs. See the
# section "Linking to external documentation" for more information about the use
# of tag files.
# Note: Each tag file must have a unique name (where the name does NOT include
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
TAGFILES =
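# Illustrative example only (commented out; the tag file name and location are
# hypothetical): following the file=location format shown above:
# TAGFILES = mylib.tag=../mylib/html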
# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
# tag file that is based on the input files it reads. See section "Linking to
# external documentation" for more information about the usage of tag files.
GENERATE_TAGFILE =
# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
# the class index. If set to NO, only the inherited external classes will be
# listed.
# The default value is: NO.
ALLEXTERNALS = NO
# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
# in the modules index. If set to NO, only the current project's groups will be
# listed.
# The default value is: YES.
EXTERNAL_GROUPS = YES
# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
# the related pages index. If set to NO, only the current project's pages will
# be listed.
# The default value is: YES.
EXTERNAL_PAGES = YES
# The PERL_PATH should be the absolute path and name of the perl script
# interpreter (i.e. the result of 'which perl').
# The default file (with absolute path) is: /usr/bin/perl.
PERL_PATH = /usr/bin/perl
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
# NO turns the diagrams off. Note that this option also works with HAVE_DOT
# disabled, but it is recommended to install and use dot, since it yields more
# powerful graphs.
# The default value is: YES.
CLASS_DIAGRAMS = YES
# You can define message sequence charts within doxygen comments using the \msc
# command. Doxygen will then run the mscgen tool (see:
# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
# documentation. The MSCGEN_PATH tag allows you to specify the directory where
# the mscgen tool resides. If left empty the tool is assumed to be found in the
# default search path.
MSCGEN_PATH =
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
# If left empty dia is assumed to be found in the default search path.
DIA_PATH =
# If set to YES the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
HIDE_UNDOC_RELATIONS = YES
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz (see:
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO.
# The default value is: NO.
HAVE_DOT = YES
# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
# to run in parallel. When set to 0 doxygen will base this on the number of
# processors available in the system. You can set it explicitly to a value
# larger than 0 to get control over the balance between CPU load and processing
# speed.
# Minimum value: 0, maximum value: 32, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_NUM_THREADS = 0
# When you want a differently looking font in the dot files that doxygen
# generates you can specify the font name using DOT_FONTNAME. You need to make
# sure dot is able to find the font, which can be done by putting it in a
# standard location or by setting the DOTFONTPATH environment variable or by
# setting DOT_FONTPATH to the directory containing the font.
# The default value is: Helvetica.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTNAME = Helvetica
# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
# dot graphs.
# Minimum value: 4, maximum value: 24, default value: 10.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTSIZE = 10
# By default doxygen will tell dot to use the default font as specified with
# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
# the path where dot can find it using this tag.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
# each documented class showing the direct and indirect inheritance relations.
# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
CLASS_GRAPH = YES
# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
# graph for each documented class showing the direct and indirect implementation
# dependencies (inheritance, containment, and class references variables) of the
# class with other documented classes.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
COLLABORATION_GRAPH = YES
# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
# groups, showing the direct groups dependencies.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GROUP_GRAPHS = YES
# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LOOK = NO
# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
# class node. If there are many fields or methods and many nodes the graph may
# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
# number of items for each type to make the size more manageable. Set this to 0
# for no limit. Note that the threshold may be exceeded by 50% before the limit
# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
# but if the number exceeds 15, the total amount of fields shown is limited to
# 10.
# Minimum value: 0, maximum value: 100, default value: 10.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LIMIT_NUM_FIELDS = 10
# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
# collaboration graphs will show the relations between templates and their
# instances.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
TEMPLATE_RELATIONS = NO
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then doxygen will generate a graph for each documented file showing the
# direct and indirect include dependencies of the file with other documented
# files.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDE_GRAPH = YES
# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
# set to YES then doxygen will generate a graph for each documented file showing
# the direct and indirect include dependencies of the file with other documented
# files.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDED_BY_GRAPH = YES
# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable call graphs for selected
# functions only using the \callgraph command. Disabling a call graph can be
# accomplished by means of the command \hidecallgraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALL_GRAPH = YES
# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable caller graphs for selected
# functions only using the \callergraph command. Disabling a caller graph can be
# accomplished by means of the command \hidecallergraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALLER_GRAPH = YES
# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
# graphical hierarchy of all classes instead of a textual one.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GRAPHICAL_HIERARCHY = YES
# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
# dependencies a directory has on other directories in a graphical way. The
# dependency relations are determined by the #include relations between the
# files in the directories.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. For an explanation of the image formats see the section
# output formats in the documentation of the dot tool (Graphviz (see:
# http://www.graphviz.org/)).
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
# png:gdiplus:gdiplus.
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_IMAGE_FORMAT = svg
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
#
# Note that this requires a modern browser other than Internet Explorer. Tested
# and working are Firefox, Chrome, Safari, and Opera.
# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
# the SVG files visible. Older versions of IE do not have SVG support.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
INTERACTIVE_SVG = NO
# The DOT_PATH tag can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_PATH =
# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the \dotfile
# command).
# This tag requires that the tag HAVE_DOT is set to YES.
DOTFILE_DIRS =
# The MSCFILE_DIRS tag can be used to specify one or more directories that
# contain msc files that are included in the documentation (see the \mscfile
# command).
MSCFILE_DIRS =
# The DIAFILE_DIRS tag can be used to specify one or more directories that
# contain dia files that are included in the documentation (see the \diafile
# command).
DIAFILE_DIRS =
# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
# path where java can find the plantuml.jar file. If left blank, it is assumed
# PlantUML is not used or called during a preprocessing step. Doxygen will
# generate a warning when it encounters a \startuml command in this case and
# will not generate output for the diagram.
PLANTUML_JAR_PATH =
# When using plantuml, the specified paths are searched for files specified by
# the !include statement in a plantuml block.
PLANTUML_INCLUDE_PATH =
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
# that will be shown in the graph. If the number of nodes in a graph becomes
# larger than this value, doxygen will truncate the graph, which is visualized
# by representing a node as a red box. Note that if the number of direct
# children of the root node in a graph is already larger than
# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
# Minimum value: 0, maximum value: 10000, default value: 50.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_GRAPH_MAX_NODES = 50
# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
# generated by dot. A depth value of 3 means that only nodes reachable from the
# root by following a path via at most 3 edges will be shown. Nodes that lay
# further from the root node will be omitted. Note that setting this option to 1
# or 2 may greatly reduce the computation time needed for large code bases. Also
# note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
# Minimum value: 0, maximum value: 1000, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
MAX_DOT_GRAPH_DEPTH = 0
# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
# background. This is disabled by default, because dot on Windows does not seem
# to support this out of the box.
#
# Warning: Depending on the platform used, enabling this option may lead to
# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
# read).
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_TRANSPARENT = NO
# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
# this, this feature is disabled by default.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_MULTI_TARGETS = NO
# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
# explaining the meaning of the various boxes and arrows in the dot generated
# graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GENERATE_LEGEND = YES
# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
# files that are used to generate the various graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_CLEANUP = YES
| 0 |
bitcoin | bitcoin/doc/JSON-RPC-interface.md | # JSON-RPC Interface
The headless daemon `bitcoind` has the JSON-RPC API enabled by default, while the
GUI `bitcoin-qt` has it disabled by default. This can be changed with the `-server`
option. In the GUI it is possible to execute RPC methods in the Debug Console
Dialog.
## Endpoints
There are two JSON-RPC endpoints on the server:
1. `/`
2. `/wallet/<walletname>/`
### `/` endpoint
This endpoint is always active.
It can always service non-wallet requests and can service wallet requests when
exactly one wallet is loaded.
### `/wallet/<walletname>/` endpoint
This endpoint is only activated when the wallet component has been compiled in.
It can service both wallet and non-wallet requests.
It MUST be used for wallet requests when two or more wallets are loaded.
This is the endpoint used by bitcoin-cli when a `-rpcwallet=` parameter is passed in.
Best practice would dictate using the `/wallet/<walletname>/` endpoint for ALL
requests when multiple wallets are in use.
### Examples
```sh
# Get block count from the / endpoint when rpcuser=alice and rpcport=38332
$ curl --user alice --data-binary '{"jsonrpc": "1.0", "id": "0", "method": "getblockcount", "params": []}' -H 'content-type: text/plain;' localhost:38332/
# Get balance from the /wallet/walletname endpoint when rpcuser=alice, rpcport=38332 and rpcwallet=desc-wallet
$ curl --user alice --data-binary '{"jsonrpc": "1.0", "id": "0", "method": "getbalance", "params": []}' -H 'content-type: text/plain;' localhost:38332/wallet/desc-wallet
```
## Parameter passing
The JSON-RPC server supports both _by-position_ and _by-name_ [parameter
structures](https://www.jsonrpc.org/specification#parameter_structures)
described in the JSON-RPC specification. For extra convenience, to avoid the
need to name every parameter value, all RPC methods accept a named parameter
called `args`, which can be set to an array of initial positional values that
are combined with named values.
Examples:
```sh
# "params": ["mywallet", false, false, "", false, false, true]
bitcoin-cli createwallet mywallet false false "" false false true
# "params": {"wallet_name": "mywallet", "load_on_startup": true}
bitcoin-cli -named createwallet wallet_name=mywallet load_on_startup=true
# "params": {"args": ["mywallet"], "load_on_startup": true}
bitcoin-cli -named createwallet mywallet load_on_startup=true
```
## Versioning
The RPC interface might change from one major version of Bitcoin Core to the
next. This makes the RPC interface implicitly versioned on the major version.
The version tuple can be retrieved by e.g. the `getnetworkinfo` RPC in
`version`.
Usually, deprecated features can be re-enabled during the grace period of one
major version via the `-deprecatedrpc=` command line option. The release notes
of a new major release come with detailed instructions on what RPC features
were deprecated and how to re-enable them temporarily.
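For example (a hedged sketch; `somefeature` is a placeholder for an actual
deprecated feature name taken from the release notes):
```sh
# The version tuple is reported in the "version" field
bitcoin-cli getnetworkinfo
# Re-enable a hypothetical feature deprecated in this major version
bitcoind -deprecatedrpc=somefeature
```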
## Security
The RPC interface allows other programs to control Bitcoin Core,
including the ability to spend funds from your wallets, affect consensus
verification, read private data, and otherwise perform operations that
can cause loss of money, data, or privacy. This section suggests how
you should use and configure Bitcoin Core to reduce the risk that its
RPC interface will be abused.
- **Securing the executable:** Anyone with physical or remote access to
the computer, container, or virtual machine running Bitcoin Core can
compromise either the whole program or just the RPC interface. This
includes being able to record any passphrases you enter for unlocking
your encrypted wallets or changing settings so that your Bitcoin Core
program tells you that certain transactions have multiple
confirmations even when they aren't part of the best block chain. For
this reason, you should not use Bitcoin Core for security sensitive
operations on systems you do not exclusively control, such as shared
computers or virtual private servers.
- **Securing local network access:** By default, the RPC interface can
only be accessed by a client running on the same computer and only
after the client provides a valid authentication credential (username
and passphrase). Any program on your computer with access to the file
system and local network can obtain this level of access.
Additionally, other programs on your computer can attempt to provide
an RPC interface on the same port as used by Bitcoin Core in order to
trick you into revealing your authentication credentials. For this
reason, it is important to only use Bitcoin Core for
security-sensitive operations on a computer whose other programs you
trust.
- **Securing remote network access:** You may optionally allow other
computers to remotely control Bitcoin Core by setting the `rpcallowip`
and `rpcbind` configuration parameters. These settings are only meant
for enabling connections over secure private networks or connections
that have been otherwise secured (e.g. using a VPN or port forwarding
with SSH or stunnel). **Do not enable RPC connections over the public
Internet.** Although Bitcoin Core's RPC interface does use
authentication, it does not use encryption, so your login credentials
are sent as clear text that can be read by anyone on your network
path. Additionally, the RPC interface has not been hardened to
withstand arbitrary Internet traffic, so changing the above settings
to expose it to the Internet (even using something like a Tor onion
service) could expose you to unconsidered vulnerabilities. See
`bitcoind -help` for more information about these settings and other
settings described in this document.
  Relatedly, if you run Bitcoin Core inside a Docker container, you may
need to expose the RPC port to the host system. The default way to
do this in Docker also exposes the port to the public Internet.
Instead, expose it only on the host system's localhost, for example:
`-p 127.0.0.1:8332:8332`
- **Secure authentication:** By default, when no `rpcpassword` is specified, Bitcoin Core generates unique
login credentials each time it restarts and puts them into a file
readable only by the user that started Bitcoin Core, allowing any of
that user's RPC clients with read access to the file to login
automatically. The file is `.cookie` in the Bitcoin Core
configuration directory, and using these credentials is the preferred
RPC authentication method. If you need to generate static login
credentials for your programs, you can use the script in the
`share/rpcauth` directory in the Bitcoin Core source tree. As a final
fallback, you can directly use manually-chosen `rpcuser` and
`rpcpassword` configuration parameters---but you must ensure that you
choose a strong and unique passphrase (and still don't use insecure
networks, as mentioned above).
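  As a hedged sketch, static credentials can be generated with the helper
  script (the exact script name and arguments may vary between versions; check
  the `share/rpcauth` directory in your source tree):
  ```sh
  # Generate an rpcauth line and a password for user "alice"
  ./share/rpcauth/rpcauth.py alice
  # Paste the printed rpcauth=... line into bitcoin.conf and give the printed
  # password to your RPC client; the node never stores the password itself.
  ```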
- **Secure string handling:** The RPC interface does not guarantee any
escaping of data beyond what's necessary to encode it as JSON,
although it does usually provide serialized data using a hex
representation of the bytes. If you use RPC data in your programs or
provide its data to other programs, you must ensure any problem strings
are properly escaped. For example, the `createwallet` RPC accepts
arguments such as `wallet_name` which is a string and could be used
for a path traversal attack without application level checks. Multiple
websites have been manipulated because they displayed decoded hex strings
that included HTML `<script>` tags. For this reason, and others, it is
recommended to display all serialized data in hex form only.
## RPC consistency guarantees
State that can be queried via RPCs is guaranteed to be at least up-to-date with
the chain state immediately prior to the call's execution. However, the state
returned by RPCs that reflect the mempool may not be up-to-date with the
current mempool state.
### Transaction Pool
The mempool state returned via an RPC is consistent with itself and with the
chain state at the time of the call. Thus, the mempool state only encompasses
transactions that are considered mine-able by the node at the time of the RPC.
The mempool state returned via an RPC reflects all effects of mempool and chain
state related RPCs that returned prior to this call.
### Wallet
The wallet state returned via an RPC is consistent with itself and with the
chain state at the time of the call.
Wallet RPCs will return the latest chain state consistent with prior non-wallet
RPCs. The effects of all blocks (and transactions in blocks) at the time of the
call are reflected in the state of all wallet transactions. For example, if a
block contains transactions that conflicted with mempool transactions, the
wallet would reflect the removal of these mempool transactions in the state.
However, the wallet may not be up-to-date with the current state of the mempool
or the state of the mempool as reported by an RPC that returned before this RPC. For
example, a wallet transaction that was BIP-125-replaced in the mempool prior to
this RPC may not yet be reflected as such in this RPC response.
## Limitations
There is a known issue in the JSON-RPC interface that can cause a node to crash if
too many HTTP connections are opened at the same time, because the system runs
out of available file descriptors. To prevent this from happening, you might
want to increase the maximum number of allowed file descriptors on your system
and, where this is under your control, avoid opening too many connections to
your JSON-RPC interface at the same time. It is hard to give general advice
since this depends on your system, but if you make several hundred requests at
once you are definitely at risk of encountering this issue.
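As a hedged example on Linux, the per-process file descriptor limit can be
inspected and raised in the shell that starts `bitcoind` (exact limits and how
to make them persistent vary across systems):
```sh
# Show the current limit of open file descriptors
ulimit -n
# Raise the limit for this shell session before starting bitcoind
ulimit -n 4096
bitcoind
```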
| 0 |
bitcoin | bitcoin/doc/reduce-memory.md | # Reduce Memory
There are a few parameters that can be dialed down to reduce the memory usage of `bitcoind`. This can be useful on embedded systems or small VPSes.
## In-memory caches
The size of some in-memory caches can be reduced. As caches trade off memory usage for performance, reducing these will usually have a negative effect on performance.
- `-dbcache=<n>` - the UTXO database cache size; this defaults to `450`. The unit is MiB (1024 × 1024 bytes).
- The minimum value for `-dbcache` is 4.
- A lower `-dbcache` makes initial sync time much longer. After the initial sync, the effect is less pronounced for most use-cases, unless fast validation of blocks is important, such as for mining.
## Memory pool
- In Bitcoin Core there is a memory pool limiter which can be configured with `-maxmempool=<n>`, where `<n>` is the size in MB (1000 × 1000 bytes). The default value is `300`.
- The minimum value for `-maxmempool` is 5.
- A lower maximum mempool size means that transactions will be evicted sooner. This will affect any uses of `bitcoind` that process unconfirmed transactions.
- Since `0.14.0`, unused memory allocated to the mempool (default: 300MB) is shared with the UTXO cache, so when trying to reduce memory usage you should limit the mempool with the `-maxmempool` command line argument.
- To disable most of the mempool functionality there is the `-blocksonly` option. This will reduce the default memory usage to 5MB and make the client opt out of receiving (and thus relaying) transactions, except from peers who have the `relay` permission set (e.g. whitelisted peers), and as part of blocks.
- Do not use this when using the client to broadcast transactions as any transaction sent will stick out like a sore thumb, affecting privacy. When used with the wallet it should be combined with `-walletbroadcast=0` and `-spendzeroconfchange=0`. Another mechanism for broadcasting outgoing transactions (if any) should be used.
## Number of peers
- `-maxconnections=<n>` - the maximum number of connections, which defaults to 125. Each active connection takes up some
memory. This option applies only if inbound connections are enabled; otherwise, the number of connections will not
be more than 11. Of the 11 outbound peers, there can be 8 full-relay connections, 2 block-relay-only ones,
and occasionally 1 short-lived feeler or extra outbound block-relay-only connection.
- These limits do not apply to connections added manually with the `-addnode` configuration option or
the `addnode` RPC, which have a separate limit of 8 connections.
## Thread configuration
For each thread a thread stack needs to be allocated. By default on Linux,
threads take up 8MiB for the thread stack on a 64-bit system, and 4MiB in a
32-bit system.
- `-par=<n>` - the number of script verification threads, defaults to the number of cores in the system minus one.
- `-rpcthreads=<n>` - the number of threads used for processing RPC requests, defaults to `4`.
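Putting the options above together, a low-memory invocation could look like the
following sketch (the values are illustrative, not recommendations; mind the
`-blocksonly` caveats described above):
```sh
# Illustrative low-memory configuration; tune each value for your system
bitcoind -dbcache=50 -maxmempool=5 -blocksonly -maxconnections=16 -par=1 -rpcthreads=2
```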
## Linux specific
By default, glibc's implementation of `malloc` may use more than one arena. This is known to cause excessive memory usage in some scenarios. To avoid this, make a script that sets `MALLOC_ARENA_MAX` before starting bitcoind:
```bash
#!/usr/bin/env bash
export MALLOC_ARENA_MAX=1
bitcoind
```
The behavior was introduced to increase CPU locality of allocated memory and performance with concurrent allocation, so this setting could in theory reduce performance. However, in Bitcoin Core very little parallel allocation happens, so the impact is expected to be small or absent.
| 0 |
bitcoin | bitcoin/doc/managing-wallets.md | # Managing the Wallet
## 1. Backing Up and Restoring The Wallet
### 1.1 Creating the Wallet
Since version 0.21, Bitcoin Core no longer has a default wallet.
Wallets can be created with the `createwallet` RPC or with the `Create wallet` GUI menu item.
In the GUI, the `Create a new wallet` button is displayed on the main screen when there is no wallet loaded. Alternatively, there is the option `File` -> `Create wallet`.
The following command, for example, creates a descriptor wallet. More information about this command may be found by running `bitcoin-cli help createwallet`.
```
$ bitcoin-cli createwallet "wallet-01"
```
By default, wallets are created in the `wallets` folder of the data directory, which varies by operating system, as shown below. The user can change the default by using the `-datadir` or `-walletdir` initialization parameters.
| Operating System | Default wallet directory |
| -----------------|:------------------------------------------------------------|
| Linux | `/home/<user>/.bitcoin/wallets` |
| Windows | `C:\Users\<user>\AppData\Roaming\Bitcoin\wallets` |
| macOS | `/Users/<user>/Library/Application Support/Bitcoin/wallets` |
### 1.2 Encrypting the Wallet
The `wallet.dat` file is not encrypted by default and is, therefore, vulnerable if an attacker gains access to the device where the wallet or the backups are stored.
Wallet encryption may prevent unauthorized access. However, this significantly increases the risk of losing coins due to forgotten passphrases. There is no way to recover a passphrase. This tradeoff should be well thought out by the user.
Wallet encryption may also not protect against more sophisticated attacks. An attacker can, for example, obtain the password by installing a keylogger on the user's machine.
After encrypting the wallet or changing the passphrase, a new backup needs to be created immediately. The reason is that the keypool is flushed and a new HD seed is generated after encryption. Any bitcoins received by the new seed cannot be recovered from the previous backups.
The wallet's private key may be encrypted with the following command:
```
$ bitcoin-cli -rpcwallet="wallet-01" encryptwallet "passphrase"
```
Once encrypted, the passphrase can be changed with the `walletpassphrasechange` command.
```
$ bitcoin-cli -rpcwallet="wallet-01" walletpassphrasechange "oldpassphrase" "newpassphrase"
```
The argument passed to `-rpcwallet` is the name of the wallet to be encrypted.
Only the wallet's private key is encrypted. All other wallet information, such as transactions, is still visible.
The wallet's private key can also be encrypted in the `createwallet` command via the `passphrase` argument:
```
$ bitcoin-cli -named createwallet wallet_name="wallet-01" passphrase="passphrase"
```
Note that if the passphrase is lost, all the coins in the wallet will also be lost forever.
### 1.3 Unlocking the Wallet
If the wallet is encrypted and the user tries any operation related to private keys, such as sending bitcoins, an error message will be displayed.
```
$ bitcoin-cli -rpcwallet="wallet-01" sendtoaddress "tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx" 0.01
error code: -13
error message:
Error: Please enter the wallet passphrase with walletpassphrase first.
```
To unlock the wallet and allow it to run these operations, the `walletpassphrase` RPC is required.
This command takes the passphrase and an argument called `timeout`, which specifies the time in seconds that the wallet decryption key is stored in memory. After this period expires, the user needs to execute this RPC again.
```
$ bitcoin-cli -rpcwallet="wallet-01" walletpassphrase "passphrase" 120
```
In the GUI, there is no specific menu item to unlock the wallet. When the user sends bitcoins, the passphrase will be prompted automatically.
### 1.4 Backing Up the Wallet
To back up the wallet, the `backupwallet` RPC or the `Backup Wallet` GUI menu item must be used to ensure the file is in a safe state when the copy is made.
In the RPC, the destination parameter must include the name of the file. Otherwise, the command will return an error message like "Error: Wallet backup failed!" for descriptor wallets. If it is a legacy wallet, it will be copied and a file will be created with the default file name `wallet.dat`.
```
$ bitcoin-cli -rpcwallet="wallet-01" backupwallet /home/node01/Backups/backup-01.dat
```
In the GUI, the wallet is selected in the `Wallet` drop-down list in the upper right corner. If this list is not present, the wallet can be loaded in `File` -> `Open Wallet` if necessary. Then, the backup can be done in `File` -> `Backup Wallet…`.
This backup file can be stored on one or multiple offline devices, which must be reliable enough to work in an emergency and be malware free. Backup files should be tested regularly to avoid problems in the future.
If the computer has malware, it can compromise the wallet when recovering the backup file. One way to minimize this risk is to not connect the backup to an online device.
If both the wallet and all backups are lost for any reason, the bitcoins related to this wallet will become permanently inaccessible.
### 1.5 Backup Frequency
The original Bitcoin Core wallet was a collection of unrelated private keys. If a non-HD wallet had received funds to an address and then was restored from a backup made before the address was generated, then any funds sent to that address would have been lost because there was no deterministic mechanism to derive the address again.
Bitcoin Core [version 0.13](https://github.com/bitcoin/bitcoin/blob/master/doc/release-notes/release-notes-0.13.0.md) introduced HD wallets with deterministic key derivation. With HD wallets, users no longer lose funds when restoring old backups because all addresses are derived from the HD wallet seed.
This means that a single backup is enough to recover the coins at any time. It is still recommended to make regular backups (once a week) or after a significant number of new transactions to maintain the metadata, such as labels. Metadata cannot be retrieved from a blockchain rescan, so if the backup is too old, the metadata will be lost forever.
Wallets created before version 0.13 are not HD and must be backed up every 100 keys used since the previous backup, or even more often to maintain the metadata.
### 1.6 Restoring the Wallet From a Backup
To restore a wallet, the `restorewallet` RPC or the `Restore Wallet` GUI menu item (`File` -> `Restore Wallet…`) must be used.
```
$ bitcoin-cli restorewallet "restored-wallet" /home/node01/Backups/backup-01.dat
```
After that, `getwalletinfo` can be used to check if the wallet has been fully restored.
```
$ bitcoin-cli -rpcwallet="restored-wallet" getwalletinfo
```
The restored wallet can also be loaded in the GUI via `File` -> `Open wallet`.
## Migrating Legacy Wallets to Descriptor Wallets
Legacy wallets (traditional non-descriptor wallets) can be migrated to become Descriptor wallets
through the use of the `migratewallet` RPC. Migrated wallets will have all of their addresses and private keys added to
a newly created Descriptor wallet that has the same name as the original wallet. Because Descriptor
wallets do not support having both private keys and watch-only scripts in the same wallet, there may be up to two
additional wallets created after migration. In addition to a descriptor wallet of the same name,
there may also be a wallet named `<name>_watchonly` and `<name>_solvables`. `<name>_watchonly`
contains all of the watch-only scripts. `<name>_solvables` contains any scripts which the wallet
knows of but for which it is not watching the corresponding P2(W)SH scripts.
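For example (a hedged invocation; see `bitcoin-cli help migratewallet` for the
exact arguments supported by your version):
```
$ bitcoin-cli migratewallet "wallet-01"
```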
Migrated wallets will also generate new addresses differently. While the same BIP 32 seed will be
used, the BIP 44, 49, 84, and 86 standard derivation paths will be used. After migrating, a new
backup of the wallet(s) will need to be created.
Given that there is an extremely large number of possible configurations for the scripts that
Legacy wallets can know about, be watching for, and be able to sign for, `migratewallet` only
makes a best effort attempt to capture all of these things into Descriptor wallets. There may be
unforeseen configurations which result in some scripts being excluded. If a migration fails
unexpectedly or otherwise misses any scripts, please create an issue on GitHub. A backup of the
original wallet can be found in the wallet directory with the name `<name>-<timestamp>.legacy.bak`.
The backup can be restored using the methods discussed in the
[Restoring the Wallet From a Backup](#16-restoring-the-wallet-from-a-backup) section.
| 0 |
bitcoin | bitcoin/doc/dependencies.md | # Dependencies
These are the dependencies used by Bitcoin Core.
You can find installation instructions in the `build-*.md` file for your platform.
"Runtime" and "Version Used" are both in reference to the release binaries.
| Dependency | Minimum required |
| --- | --- |
| [Autoconf](https://www.gnu.org/software/autoconf/) | [2.69](https://github.com/bitcoin/bitcoin/pull/17769) |
| [Automake](https://www.gnu.org/software/automake/) | [1.13](https://github.com/bitcoin/bitcoin/pull/18290) |
| [Clang](https://clang.llvm.org) | [13.0](https://github.com/bitcoin/bitcoin/pull/28210) |
| [GCC](https://gcc.gnu.org) | [10.1](https://github.com/bitcoin/bitcoin/pull/28348) |
| [Python](https://www.python.org) (scripts, tests) | [3.9](https://github.com/bitcoin/bitcoin/pull/28211) |
| [systemtap](https://sourceware.org/systemtap/) ([tracing](tracing.md))| N/A |
## Required
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| [Boost](../depends/packages/boost.mk) | [link](https://www.boost.org/users/download/) | [1.81.0](https://github.com/bitcoin/bitcoin/pull/26557) | [1.73.0](https://github.com/bitcoin/bitcoin/pull/29066) | No |
| [libevent](../depends/packages/libevent.mk) | [link](https://github.com/libevent/libevent/releases) | [2.1.12-stable](https://github.com/bitcoin/bitcoin/pull/21991) | [2.1.8](https://github.com/bitcoin/bitcoin/pull/24681) | No |
| glibc | [link](https://www.gnu.org/software/libc/) | N/A | [2.27](https://github.com/bitcoin/bitcoin/pull/27029) | Yes |
| Linux Kernel | [link](https://www.kernel.org/) | N/A | [3.17.0](https://github.com/bitcoin/bitcoin/pull/27699) | Yes |
## Optional
### GUI
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| [Fontconfig](../depends/packages/fontconfig.mk) | [link](https://www.freedesktop.org/wiki/Software/fontconfig/) | [2.12.6](https://github.com/bitcoin/bitcoin/pull/23495) | 2.6 | Yes |
| [FreeType](../depends/packages/freetype.mk) | [link](https://freetype.org) | [2.11.0](https://github.com/bitcoin/bitcoin/commit/01544dd78ccc0b0474571da854e27adef97137fb) | 2.3.0 | Yes |
| [qrencode](../depends/packages/qrencode.mk) | [link](https://fukuchi.org/works/qrencode/) | [4.1.1](https://github.com/bitcoin/bitcoin/pull/27312) | | No |
| [Qt](../depends/packages/qt.mk) | [link](https://download.qt.io/official_releases/qt/) | [5.15.11](https://github.com/bitcoin/bitcoin/pull/28769) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No |
### Networking
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| [libnatpmp](../depends/packages/libnatpmp.mk) | [link](https://github.com/miniupnp/libnatpmp/) | commit [07004b9...](https://github.com/bitcoin/bitcoin/pull/25917) | | No |
| [MiniUPnPc](../depends/packages/miniupnpc.mk) | [link](https://miniupnp.tuxfamily.org/) | [2.2.2](https://github.com/bitcoin/bitcoin/pull/20421) | 2.1 | No |
### Notifications
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| [ZeroMQ](../depends/packages/zeromq.mk) | [link](https://github.com/zeromq/libzmq/releases) | [4.3.4](https://github.com/bitcoin/bitcoin/pull/23956) | 4.0.0 | No |
### Wallet
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| [Berkeley DB](../depends/packages/bdb.mk) (legacy wallet) | [link](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.30 | 4.8.x | No |
| [SQLite](../depends/packages/sqlite.mk) | [link](https://sqlite.org) | [3.38.5](https://github.com/bitcoin/bitcoin/pull/25378) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | No |
| 0 |
bitcoin | bitcoin/doc/developer-notes.md | Developer Notes
===============
<!-- markdown-toc start -->
**Table of Contents**
- [Developer Notes](#developer-notes)
- [Coding Style (General)](#coding-style-general)
  - [Coding Style (C++)](#coding-style-c)
  - [Coding Style (C++ functions and methods)](#coding-style-c-functions-and-methods)
  - [Coding Style (C++ named arguments)](#coding-style-c-named-arguments)
    - [Running clang-tidy](#running-clang-tidy)
- [Coding Style (Python)](#coding-style-python)
- [Coding Style (Doxygen-compatible comments)](#coding-style-doxygen-compatible-comments)
- [Generating Documentation](#generating-documentation)
- [Development tips and tricks](#development-tips-and-tricks)
- [Compiling for debugging](#compiling-for-debugging)
- [Show sources in debugging](#show-sources-in-debugging)
- [Compiling for gprof profiling](#compiling-for-gprof-profiling)
- [`debug.log`](#debuglog)
- [Signet, testnet, and regtest modes](#signet-testnet-and-regtest-modes)
- [DEBUG_LOCKORDER](#debug_lockorder)
    - [DEBUG_LOCKCONTENTION](#debug_lockcontention)
    - [Assertions and Checks](#assertions-and-checks)
- [Valgrind suppressions file](#valgrind-suppressions-file)
- [Compiling for test coverage](#compiling-for-test-coverage)
- [Performance profiling with perf](#performance-profiling-with-perf)
- [Sanitizers](#sanitizers)
- [Locking/mutex usage notes](#lockingmutex-usage-notes)
- [Threads](#threads)
- [Ignoring IDE/editor files](#ignoring-ideeditor-files)
- [Development guidelines](#development-guidelines)
- [General Bitcoin Core](#general-bitcoin-core)
- [Wallet](#wallet)
- [General C++](#general-c)
- [C++ data structures](#c-data-structures)
- [Strings and formatting](#strings-and-formatting)
- [Shadowing](#shadowing)
- [Lifetimebound](#lifetimebound)
- [Threads and synchronization](#threads-and-synchronization)
- [Scripts](#scripts)
- [Shebang](#shebang)
- [Source code organization](#source-code-organization)
- [GUI](#gui)
- [Subtrees](#subtrees)
- [Upgrading LevelDB](#upgrading-leveldb)
- [File Descriptor Counts](#file-descriptor-counts)
- [Consensus Compatibility](#consensus-compatibility)
- [Scripted diffs](#scripted-diffs)
- [Suggestions and examples](#suggestions-and-examples)
- [Release notes](#release-notes)
- [RPC interface guidelines](#rpc-interface-guidelines)
- [Internal interface guidelines](#internal-interface-guidelines)
<!-- markdown-toc end -->
Coding Style (General)
----------------------
Various coding styles have been used during the history of the codebase,
and the result is not very consistent. However, we're now trying to converge to
a single style, which is specified below. When writing patches, favor the new
style over attempting to mimic the surrounding style, except for move-only
commits.
Do not submit patches solely to modify the style of existing code.
Coding Style (C++)
------------------
- **Indentation and whitespace rules** as specified in
[src/.clang-format](/src/.clang-format). You can use the provided
[clang-format-diff script](/contrib/devtools/README.md#clang-format-diffpy)
tool to clean up patches automatically before submission.
- Braces on new lines for classes, functions, methods.
- Braces on the same line for everything else.
- 4 space indentation (no tabs) for every block except namespaces.
- No indentation for `public`/`protected`/`private` or for `namespace`.
- No extra spaces inside parenthesis; don't do `( this )`.
- No space after function names; one space after `if`, `for` and `while`.
- If an `if` only has a single-statement `then`-clause, it can appear
on the same line as the `if`, without braces. In every other case,
braces are required, and the `then` and `else` clauses must appear
correctly indented on a new line.
- There's no hard limit on line width, but prefer to keep lines to <100
characters if doing so does not decrease readability. Break up long
function declarations over multiple lines using the Clang Format
[AlignAfterOpenBracket](https://clang.llvm.org/docs/ClangFormatStyleOptions.html)
style option.
- **Symbol naming conventions**. These are preferred in new code, but are not
required when doing so would need changes to significant pieces of existing
code.
- Variable (including function arguments) and namespace names are all lowercase and may use `_` to
separate words (snake_case).
- Class member variables have a `m_` prefix.
- Global variables have a `g_` prefix.
- Constant names are all uppercase, and use `_` to separate words.
- Enumerator constants may be `snake_case`, `PascalCase` or `ALL_CAPS`.
This is a more tolerant policy than the [C++ Core
Guidelines](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#Renum-caps),
which recommend using `snake_case`. Please use what seems appropriate.
- Class names, function names, and method names are UpperCamelCase
(PascalCase). Do not prefix class names with `C`. See [Internal interface
naming style](#internal-interface-naming-style) for an exception to this
convention.
- Test suite naming convention: The Boost test suite in file
`src/test/foo_tests.cpp` should be named `foo_tests`. Test suite names
must be unique.
- **Miscellaneous**
- `++i` is preferred over `i++`.
- `nullptr` is preferred over `NULL` or `(void*)0`.
  - `static_assert` is preferred over `assert` where possible. Generally, compile-time checking is preferred over run-time checking.
- Use a named cast or functional cast, not a C-Style cast. When casting
between integer types, use functional casts such as `int(x)` or `int{x}`
instead of `(int) x`. When casting between more complex types, use `static_cast`.
Use `reinterpret_cast` and `const_cast` as appropriate.
For function calls a namespace should be specified explicitly, unless such functions have been declared within it.
Otherwise, [argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl), also known as ADL, could be
triggered, which makes the code harder to maintain and reason about:
```c++
#include <filesystem>
namespace fs {
class path : public std::filesystem::path
{
};
// The intention is to disallow this function.
bool exists(const fs::path& p) = delete;
} // namespace fs
int main()
{
//fs::path p; // error
std::filesystem::path p; // compiled
exists(p); // ADL being used for unqualified name lookup
}
```
Block style example:
```c++
int g_count = 0;
namespace foo {
class Class
{
std::string m_name;
public:
bool Function(const std::string& s, int n)
{
// Comment summarising what this section of code does
for (int i = 0; i < n; ++i) {
int total_sum = 0;
// When something fails, return early
if (!Something()) return false;
...
if (SomethingElse(i)) {
total_sum += ComputeSomething(g_count);
} else {
DoSomething(m_name, total_sum);
}
}
// Success return is usually at the end
return true;
}
};
} // namespace foo
```
Coding Style (C++ functions and methods)
--------------------
- When ordering function parameters, place input parameters first, then any
in-out parameters, followed by any output parameters.
- *Rationale*: API consistency.
- Prefer returning values directly to using in-out or output parameters. Use
`std::optional` where helpful for returning values.
- *Rationale*: Less error-prone (no need for assumptions about what the output
is initialized to on failure), easier to read, and often the same or better
performance.
- Generally, use `std::optional` to represent optional by-value inputs (and
instead of a magic default value, if there is no real default). Non-optional
input parameters should usually be values or const references, while
non-optional in-out and output parameters should usually be references, as
they cannot be null.
Coding Style (C++ named arguments)
------------------------------
When passing named arguments, use a format that clang-tidy understands. The
argument names can otherwise not be verified by clang-tidy.
For example:
```c++
void function(Addrman& addrman, bool clear);
int main()
{
function(g_addrman, /*clear=*/false);
}
```
### Running clang-tidy
To run clang-tidy on Ubuntu/Debian, install the dependencies:
```sh
apt install clang-tidy bear clang
```
Then, pass clang as the compiler to configure, and use bear to produce the `compile_commands.json`:
```sh
./autogen.sh && ./configure CC=clang CXX=clang++
make clean && bear --config src/.bear-tidy-config -- make -j $(nproc)
```
The output is denoised of errors from external dependencies.
To run clang-tidy on all source files:
```sh
( cd ./src/ && run-clang-tidy -j $(nproc) )
```
To run clang-tidy on the changed source lines:
```sh
git diff | ( cd ./src/ && clang-tidy-diff -p2 -j $(nproc) )
```
Coding Style (Python)
---------------------
Refer to [/test/functional/README.md#style-guidelines](/test/functional/README.md#style-guidelines).
Coding Style (Doxygen-compatible comments)
------------------------------------------
Bitcoin Core uses [Doxygen](https://www.doxygen.nl/) to generate its official documentation.
Use Doxygen-compatible comment blocks for functions, methods, and fields.
For example, to describe a function use:
```c++
/**
* ... Description ...
*
* @param[in] arg1 input description...
* @param[in] arg2 input description...
* @param[out] arg3 output description...
* @return Return cases...
* @throws Error type and cases...
* @pre Pre-condition for function...
* @post Post-condition for function...
*/
bool function(int arg1, const char *arg2, std::string& arg3)
```
A complete list of `@xxx` commands can be found at https://www.doxygen.nl/manual/commands.html.
As Doxygen recognizes the comments by the delimiters (`/**` and `*/` in this case), you don't
*need* to provide any commands for a comment to be valid; just a description text is fine.
To describe a class, use the same construct above the class definition:
```c++
/**
* Alerts are for notifying old versions if they become too obsolete and
* need to upgrade. The message is displayed in the status bar.
* @see GetWarnings()
*/
class CAlert
```
To describe a member or variable use:
```c++
//! Description before the member
int var;
```
or
```c++
int var; //!< Description after the member
```
Also OK:
```c++
///
/// ... Description ...
///
bool function2(int arg1, const char *arg2)
```
Not picked up by Doxygen:
```c++
//
// ... Description ...
//
```
Also not picked up by Doxygen:
```c++
/*
* ... Description ...
*/
```
A full list of comment syntaxes picked up by Doxygen can be found at https://www.doxygen.nl/manual/docblocks.html,
but the above styles are favored.
Recommendations:
- Avoid duplicating type and input/output information in function
descriptions.
- Use backticks (``) to refer to `argument` names in function and
parameter descriptions.
- Backticks aren't required when referring to functions Doxygen already knows
about; it will build hyperlinks for these automatically. See
https://www.doxygen.nl/manual/autolink.html for complete info.
- Avoid linking to external documentation; links can break.
- Javadoc and all valid Doxygen comments are stripped from Doxygen source code
previews (`STRIP_CODE_COMMENTS = YES` in [Doxyfile.in](doc/Doxyfile.in)). If
you want a comment to be preserved, it must instead use `//` or `/* */`.
### Generating Documentation
The documentation can be generated with `make docs` and cleaned up with `make
clean-docs`. The resulting files are located in `doc/doxygen/html`; open
`index.html` in that directory to view the homepage.
Before running `make docs`, you'll need to install these dependencies:
Linux: `sudo apt install doxygen graphviz`
macOS: `brew install doxygen graphviz`
Development tips and tricks
---------------------------
### Compiling for debugging
Run configure with `--enable-debug` to add additional compiler flags that
produce better debugging builds.
### Show sources in debugging
If you have ccache enabled, absolute paths are stripped from debug information
with the -fdebug-prefix-map and -fmacro-prefix-map options (if supported by the
compiler). This might break source file detection if you move binaries
after compilation, debug from a directory other than the project root, or use
an IDE that only supports absolute paths for debugging.
There are a few possible fixes:
1. Configure source file mapping.
For `gdb` create or append to `.gdbinit` file:
```
set substitute-path ./src /path/to/project/root/src
```
For `lldb` create or append to `.lldbinit` file:
```
settings set target.source-map ./src /path/to/project/root/src
```
2. Add a symlink to the `./src` directory:
```
ln -s /path/to/project/root/src src
```
3. Use `debugedit` to modify debug information in the binary.
### Compiling for gprof profiling
Run configure with the `--enable-gprof` option, then make.
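A minimal sketch of the workflow (paths are illustrative; `gmon.out` is written
to the working directory when the profiled process exits cleanly):
```sh
./configure --enable-gprof
make
src/bitcoind -regtest  # run a workload, then stop the node cleanly
gprof src/bitcoind gmon.out > profile.txt
```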
### `debug.log`
If the code is behaving strangely, take a look in the `debug.log` file in the data directory;
error and debugging messages are written there.
Debug logging can be enabled on startup with the `-debug` and `-loglevel`
configuration options and toggled while bitcoind is running with the `logging`
RPC. For instance, launching bitcoind with `-debug` or `-debug=1` will turn on
all log categories and `-loglevel=trace` will turn on all log severity levels.
The Qt code routes `qDebug()` output to `debug.log` under category "qt": run with `-debug=qt`
to see it.
### Signet, testnet, and regtest modes
If you are testing multi-machine code that needs to operate across the internet,
you can run with either the `-signet` or the `-testnet` config option to test
with "play bitcoins" on a test network.
If you are testing something that can run on one machine, run with the
`-regtest` option. In regression test mode, blocks can be created on demand;
see [test/functional/](/test/functional) for tests that run in `-regtest` mode.
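As a hedged sketch, a throwaway regtest node can be started and blocks created
on demand like this:
```sh
bitcoind -regtest -daemon
bitcoin-cli -regtest createwallet "test"  # -generate needs a wallet for the destination
bitcoin-cli -regtest -generate 1          # mine one block on demand
bitcoin-cli -regtest stop
```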
### DEBUG_LOCKORDER
Bitcoin Core is a multi-threaded application, and deadlocks or other
multi-threading bugs can be very difficult to track down. The `--enable-debug`
configure option adds `-DDEBUG_LOCKORDER` to the compiler flags. This inserts
run-time checks to keep track of which locks are held and adds warnings to the
`debug.log` file if inconsistencies are detected.
### DEBUG_LOCKCONTENTION
Defining `DEBUG_LOCKCONTENTION` adds a "lock" logging category to the logging
RPC that, when enabled, logs the location and duration of each lock contention
to the `debug.log` file.
The `--enable-debug` configure option adds `-DDEBUG_LOCKCONTENTION` to the
compiler flags. You may also enable it manually for a non-debug build by running
configure with `-DDEBUG_LOCKCONTENTION` added to your CPPFLAGS,
i.e. `CPPFLAGS="-DDEBUG_LOCKCONTENTION"`, then build and run bitcoind.
You can then use the `-debug=lock` configuration option at bitcoind startup or
`bitcoin-cli logging '["lock"]'` at runtime to turn on lock contention logging.
It can be toggled off again with `bitcoin-cli logging [] '["lock"]'`.
### Assertions and Checks
The util file `src/util/check.h` offers helpers to protect against coding and
internal logic bugs. They must never be used to validate user, network or any
other input.
* `assert` or `Assert` should be used to document assumptions when any
violation would mean that it is not safe to continue program execution. The
code is always compiled with assertions enabled.
- For example, a nullptr dereference or any other logic bug in validation
code means the program code is faulty and must terminate immediately.
* `CHECK_NONFATAL` should be used for recoverable internal logic bugs. On
failure, it will throw an exception, which can be caught to recover from the
error.
- For example, a nullptr dereference or any other logic bug in RPC code
means that the RPC code is faulty and cannot be executed. However, the
logic bug can be shown to the user and the program can continue to run.
* `Assume` should be used to document assumptions when program execution can
safely continue even if the assumption is violated. In debug builds it
behaves like `Assert`/`assert` to notify developers and testers about
nonfatal errors. In production it doesn't warn or log anything, though the
expression is always evaluated.
- For example it can be assumed that a variable is only initialized once,
but a failed assumption does not result in a fatal bug. A failed
assumption may or may not result in a slightly degraded user experience,
but it is safe to continue program execution.
### Valgrind suppressions file
Valgrind is a programming tool for memory debugging, memory leak detection, and
profiling. The repo contains a Valgrind suppressions file
([`valgrind.supp`](https://github.com/bitcoin/bitcoin/blob/master/contrib/valgrind.supp))
which includes known Valgrind warnings in our dependencies that cannot be fixed
in-tree. Example use:
```shell
$ valgrind --suppressions=contrib/valgrind.supp src/test/test_bitcoin
$ valgrind --suppressions=contrib/valgrind.supp --leak-check=full \
--show-leak-kinds=all src/test/test_bitcoin --log_level=test_suite
$ valgrind -v --leak-check=full src/bitcoind -printtoconsole
$ ./test/functional/test_runner.py --valgrind
```
### Compiling for test coverage
LCOV can be used to generate a test coverage report based upon `make check`
execution. LCOV must be installed on your system (e.g. the `lcov` package
on Debian/Ubuntu).
To enable LCOV report generation during test runs:
```shell
./configure --enable-lcov
make
make cov
# A coverage report will now be accessible at `./test_bitcoin.coverage/index.html`,
# which covers unit tests, and `./total.coverage/index.html`, which covers
# unit and functional tests.
```
### Performance profiling with perf
Profiling is a good way to get a precise idea of where time is being spent in
code. One tool for profiling on Linux platforms is called
[`perf`](https://www.brendangregg.com/perf.html), and it has been integrated into
the functional test framework. Perf can observe a running process and sample,
at some frequency, where it is executing.
Perf installation is contingent on which kernel version you're running; see
[this thread](https://askubuntu.com/questions/50145/how-to-install-perf-monitoring-tool)
for specific instructions.
Certain kernel parameters may need to be set for perf to be able to inspect the
running process's stack.
```sh
$ sudo sysctl -w kernel.perf_event_paranoid=-1
$ sudo sysctl -w kernel.kptr_restrict=0
```
Make sure you [understand the security
trade-offs](https://lwn.net/Articles/420403/) of setting these kernel
parameters.
To profile a running bitcoind process for 60 seconds, you could use an
invocation of `perf record` like this:
```sh
$ perf record \
-g --call-graph dwarf --per-thread -F 140 \
-p `pgrep bitcoind` -- sleep 60
```
You could then analyze the results by running:
```sh
perf report --stdio | c++filt | less
```
or using a graphical tool like [Hotspot](https://github.com/KDAB/hotspot).
See the functional test documentation for how to invoke perf within tests.
### Sanitizers
Bitcoin Core can be compiled with various "sanitizers" enabled, which add
instrumentation for issues regarding things like memory safety, thread race
conditions, or undefined behavior. This is controlled with the
`--with-sanitizers` configure flag, which should be a comma separated list of
sanitizers to enable. The sanitizer list should correspond to supported
`-fsanitize=` options in your compiler. These sanitizers have runtime overhead,
so they are most useful when testing changes or producing debugging builds.
Some examples:
```bash
# Enable both the address sanitizer and the undefined behavior sanitizer
./configure --with-sanitizers=address,undefined
# Enable the thread sanitizer
./configure --with-sanitizers=thread
```
If you are compiling with GCC you will typically need to install corresponding
"san" libraries to actually compile with these flags, e.g. libasan for the
address sanitizer, libtsan for the thread sanitizer, and libubsan for the
undefined sanitizer. If you are missing required libraries, the configure script
will fail with a linker error when testing the sanitizer flags.
The test suite should pass cleanly with the `thread` and `undefined` sanitizers. You
may need to use a suppressions file; see `test/sanitizer_suppressions`. These may be
used as follows:
```bash
export LSAN_OPTIONS="suppressions=$(pwd)/test/sanitizer_suppressions/lsan"
export TSAN_OPTIONS="suppressions=$(pwd)/test/sanitizer_suppressions/tsan:halt_on_error=1:second_deadlock_stack=1"
export UBSAN_OPTIONS="suppressions=$(pwd)/test/sanitizer_suppressions/ubsan:print_stacktrace=1:halt_on_error=1:report_error_type=1"
```
See the CI config for more examples, and upstream documentation for more information
about any additional options.
There are a number of known problems when using the `address` sanitizer. The
address sanitizer is known to fail in
[sha256_sse4::Transform](/src/crypto/sha256_sse4.cpp) which makes it unusable
unless you also use `--disable-asm` when running configure. We would like to fix
sanitizer issues, so please send pull requests if you can fix any errors found
by the address sanitizer (or any other sanitizer).
Not all sanitizer options can be enabled at the same time, e.g. trying to build
with `--with-sanitizers=address,thread` will fail in the configure script as
these sanitizers are mutually incompatible. Refer to your compiler manual to
learn more about these options and which sanitizers are supported by your
compiler.
Additional resources:
* [AddressSanitizer](https://clang.llvm.org/docs/AddressSanitizer.html)
* [LeakSanitizer](https://clang.llvm.org/docs/LeakSanitizer.html)
* [MemorySanitizer](https://clang.llvm.org/docs/MemorySanitizer.html)
* [ThreadSanitizer](https://clang.llvm.org/docs/ThreadSanitizer.html)
* [UndefinedBehaviorSanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html)
* [GCC Instrumentation Options](https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html)
* [Google Sanitizers Wiki](https://github.com/google/sanitizers/wiki)
* [Issue #12691: Enable -fsanitize flags in Travis](https://github.com/bitcoin/bitcoin/issues/12691)
Locking/mutex usage notes
-------------------------
The code is multi-threaded and uses mutexes and the
`LOCK` and `TRY_LOCK` macros to protect data structures.
Deadlocks due to inconsistent lock ordering (thread 1 locks `cs_main` and then
`cs_wallet`, while thread 2 locks them in the opposite order: the result is a
deadlock, as each waits for the other to release its lock) are a problem. Compile with
`-DDEBUG_LOCKORDER` (or use `--enable-debug`) to get lock order inconsistencies
reported in the `debug.log` file.
Re-architecting the core code so there are better-defined interfaces
between the various components is a goal, with any necessary locking
done by the components (e.g. see the self-contained `FillableSigningProvider` class
and its `cs_KeyStore` lock for example).
Threads
-------
- [Main thread (`bitcoind`)](https://doxygen.bitcoincore.org/bitcoind_8cpp.html#a0ddf1224851353fc92bfbff6f499fa97)
: Started from `main()` in `bitcoind.cpp`. Responsible for starting up and
shutting down the application.
- [Init load (`b-initload`)](https://doxygen.bitcoincore.org/namespacenode.html#ab4305679079866f0f420f7dbf278381d)
: Performs various loading tasks that are part of init but shouldn't block the node from being started: external block import,
reindex, reindex-chainstate, main chain activation, spawning index background sync threads, and mempool load.
- [CCheckQueue::Loop (`b-scriptch.x`)](https://doxygen.bitcoincore.org/class_c_check_queue.html#a6e7fa51d3a25e7cb65446d4b50e6a987)
: Parallel script validation threads for transactions in blocks.
- [ThreadHTTP (`b-http`)](https://doxygen.bitcoincore.org/httpserver_8cpp.html#abb9f6ea8819672bd9a62d3695070709c)
: Libevent thread to listen for RPC and REST connections.
- [HTTP worker threads (`b-httpworker.x`)](https://doxygen.bitcoincore.org/httpserver_8cpp.html#aa6a7bc27265043bc0193220c5ae3a55f)
: Threads to service RPC and REST requests.
- [Indexer threads (`b-txindex`, etc)](https://doxygen.bitcoincore.org/class_base_index.html#a96a7407421fbf877509248bbe64f8d87)
: One thread per indexer.
- [SchedulerThread (`b-scheduler`)](https://doxygen.bitcoincore.org/class_c_scheduler.html#a14d2800815da93577858ea078aed1fba)
: Does asynchronous background tasks like dumping wallet contents, dumping
addrman and running asynchronous validationinterface callbacks.
- [TorControlThread (`b-torcontrol`)](https://doxygen.bitcoincore.org/torcontrol_8cpp.html#a52a3efff23634500bb42c6474f306091)
: Libevent thread for tor connections.
- Net threads:
- [ThreadMessageHandler (`b-msghand`)](https://doxygen.bitcoincore.org/class_c_connman.html#aacdbb7148575a31bb33bc345e2bf22a9)
: Application level message handling (sending and receiving). Almost
all net_processing and validation logic runs on this thread.
- [ThreadDNSAddressSeed (`b-dnsseed`)](https://doxygen.bitcoincore.org/class_c_connman.html#aa7c6970ed98a4a7bafbc071d24897d13)
: Loads addresses of peers from the DNS.
- ThreadMapPort (`b-mapport`)
: Universal plug-and-play startup/shutdown.
- [ThreadSocketHandler (`b-net`)](https://doxygen.bitcoincore.org/class_c_connman.html#a765597cbfe99c083d8fa3d61bb464e34)
: Sends/Receives data from peers on port 8333.
- [ThreadOpenAddedConnections (`b-addcon`)](https://doxygen.bitcoincore.org/class_c_connman.html#a0b787caf95e52a346a2b31a580d60a62)
: Opens network connections to added nodes.
- [ThreadOpenConnections (`b-opencon`)](https://doxygen.bitcoincore.org/class_c_connman.html#a55e9feafc3bab78e5c9d408c207faa45)
: Initiates new connections to peers.
- [ThreadI2PAcceptIncoming (`b-i2paccept`)](https://doxygen.bitcoincore.org/class_c_connman.html#a57787b4f9ac847d24065fbb0dd6e70f8)
: Listens for and accepts incoming I2P connections through the I2P SAM proxy.
Ignoring IDE/editor files
--------------------------
In closed-source environments in which everyone uses the same IDE, it is common
to add temporary files it produces to the project-wide `.gitignore` file.
However, in open source software such as Bitcoin Core, where everyone uses
their own editors/IDE/tools, it is less common. Only you know what files your
editor produces, and this may change from version to version. The canonical way
to handle this is thus to create your own local gitignore. Add this to `~/.gitconfig`:
```
[core]
excludesfile = /home/.../.gitignore_global
```
(alternatively, type the command `git config --global core.excludesfile ~/.gitignore_global`
on a terminal)
Then put your favourite tool's temporary filenames in that file, e.g.
```
# NetBeans
nbproject/
```
Another option is to create a per-repository excludes file `.git/info/exclude`.
These are not committed but apply only to one repository.
If a set of tools is used by the build system or scripts of the repository (for
example, lcov), it is perfectly acceptable to add its files to `.gitignore`
and commit them.
Development guidelines
============================
A few non-style-related recommendations for developers, as well as points to
pay attention to for reviewers of Bitcoin Core code.
General Bitcoin Core
----------------------
- New features should be exposed on RPC first, then can be made available in the GUI.
- *Rationale*: RPC allows for better automatic testing. The test suite for
the GUI is very limited.
- Make sure pull requests pass CI before merging.
- *Rationale*: Makes sure that they pass thorough testing, and that the tests will keep passing
on the master branch. Otherwise, all new pull requests will start failing the tests, resulting in
confusion and mayhem.
- *Explanation*: If the test suite is to be updated for a change, this has to
be done first.
Wallet
-------
- Make sure that no crashes happen with run-time option `-disablewallet`.
General C++
-------------
For general C++ guidelines, you may refer to the [C++ Core
Guidelines](https://isocpp.github.io/CppCoreGuidelines/).
Common misconceptions are clarified in the following sections:
- Passing (non-)fundamental types in the [C++ Core
Guideline](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#Rf-conventional).
- If you use the `.h`, you must link the `.cpp`.
- *Rationale*: Include files define the interface for the code in implementation files. Including one but
not linking the other is confusing. Please avoid that. Moving functions from
the `.h` to the `.cpp` should not result in build errors.
- Use the RAII (Resource Acquisition Is Initialization) paradigm where possible. For example, by using
`unique_ptr` for allocations in a function.
- *Rationale*: This avoids memory and resource leaks, and ensures exception safety.
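For example, a minimal sketch (hypothetical type, not from the codebase) where the allocation is released on every exit path:
```cpp
#include <memory>

struct Thing { int x{0}; };

void UseThing()
{
    // The allocation is owned by the unique_ptr and released on every
    // exit path, including early returns and exceptions.
    auto thing{std::make_unique<Thing>()};
    thing->x = 42;
    if (thing->x > 10) return; // no leak here
} // no explicit delete needed
```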
C++ data structures
--------------------
- Never use the `std::map []` syntax when reading from a map, but instead use `.find()`.
- *Rationale*: `[]` does an insert (of the default element) if the item doesn't
exist in the map yet. This has resulted in memory leaks in the past, as well as
race conditions (expecting read-read behavior). Using `[]` is fine for *writing* to a map.
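A minimal illustration of the read and write paths (hypothetical names):
```cpp
#include <map>
#include <string>

std::map<std::string, int> g_balances;

int GetBalance(const std::string& name)
{
    const auto it = g_balances.find(name); // read: never inserts
    return it == g_balances.end() ? 0 : it->second;
}

void SetBalance(const std::string& name, int value)
{
    g_balances[name] = value; // write: insertion via [] is intended here
}
```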
- Do not compare an iterator from one data structure with an iterator of
another data structure (even if of the same type).
- *Rationale*: Behavior is undefined. In C++ parlance this means "may reformat
the universe"; in practice this has resulted in at least one hard-to-debug crash bug.
- Watch out for out-of-bounds vector access. `&vch[vch.size()]` is illegal,
including `&vch[0]` for an empty vector. Use `vch.data()` and `vch.data() +
vch.size()` instead.
- Vector bounds checking is only enabled in debug mode. Do not rely on it.
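A sketch of the safe pattern (hypothetical function):
```cpp
#include <cstring>
#include <vector>

void CopyOut(const std::vector<unsigned char>& vch, unsigned char* out)
{
    // vch.data() is valid to call even on an empty vector, whereas
    // &vch[0] would be out-of-bounds indexing in that case.
    if (!vch.empty()) std::memcpy(out, vch.data(), vch.size());
    const unsigned char* end = vch.data() + vch.size(); // one-past-the-end pointer, legal
    (void)end;
}
```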
- Initialize all non-static class members where they are defined.
If this is skipped for a good reason (i.e., optimization on the critical
path), add an explicit comment about this.
- *Rationale*: Ensure determinism by avoiding accidental use of uninitialized
values. Also, static analyzers balk about this.
Initializing the members in the declaration makes it easy to
spot uninitialized ones.
```cpp
class A
{
uint32_t m_count{0};
};
```
- By default, declare constructors `explicit`.
- *Rationale*: This is a precaution to avoid unintended
[conversions](https://en.cppreference.com/w/cpp/language/converting_constructor).
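For example (hypothetical class), an `explicit` constructor forces the conversion to be spelled out at the call site:
```cpp
#include <cstdint>

class FeeRate
{
public:
    explicit FeeRate(int64_t sats_per_kvb) : m_sats_per_kvb{sats_per_kvb} {}
private:
    int64_t m_sats_per_kvb;
};

void Pay(const FeeRate& rate);

// Pay(1000);          // error: no implicit int64_t -> FeeRate conversion
// Pay(FeeRate{1000}); // OK: the conversion is visible to the reader
```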
- Use explicitly signed or unsigned `char`s, or even better `uint8_t` and
`int8_t`. Do not use bare `char` unless it is to pass to a third-party API.
This type can be signed or unsigned depending on the architecture, which can
lead to interoperability problems or dangerous conditions such as
out-of-bounds array accesses.
- Prefer explicit constructions over implicit ones that rely on 'magical' C++ behavior.
- *Rationale*: Easier to understand what is happening, thus easier to spot mistakes, even for those
that are not language lawyers.
- Use `Span` as function argument when it can operate on any range-like container.
- *Rationale*: Compared to `Foo(const vector<int>&)` this avoids the need for a (potentially expensive)
conversion to vector if the caller happens to have the input stored in another type of container.
However, be aware of the pitfalls documented in [span.h](../src/span.h).
```cpp
void Foo(Span<const int> data);
std::vector<int> vec{1,2,3};
Foo(vec);
```
- Prefer `enum class` (scoped enumerations) over `enum` (traditional enumerations) where possible.
- *Rationale*: Scoped enumerations avoid two potential pitfalls/problems with traditional C++ enumerations: implicit conversions to `int`, and name clashes due to enumerators being exported to the surrounding scope.
- `switch` statement on an enumeration example:
```cpp
enum class Tabs {
info,
console,
network_graph,
peers
};
int GetInt(Tabs tab)
{
switch (tab) {
case Tabs::info: return 0;
case Tabs::console: return 1;
case Tabs::network_graph: return 2;
case Tabs::peers: return 3;
} // no default case, so the compiler can warn about missing cases
assert(false);
}
```
*Rationale*: The comment documents the intentionally skipped `default:` label, and it complies with `clang-format` rules. The assertion prevents the `-Wreturn-type` warning from firing on some compilers.
Strings and formatting
------------------------
- Use `std::string`, avoid C string manipulation functions.
- *Rationale*: C++ string handling is marginally safer, less scope for
buffer overflows, and surprises with `\0` characters. Also, some C string manipulations
tend to act differently depending on platform, or even the user locale.
- Use `ToIntegral` from [`strencodings.h`](/src/util/strencodings.h) for number parsing. In legacy code you might also find the `ParseInt*` family of functions, `ParseDouble` or `LocaleIndependentAtoi`.
- *Rationale*: These functions do overflow checking and avoid pesky locale issues.
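A sketch of the intended usage, assuming the `std::optional`-returning `ToIntegral` signature in `strencodings.h`:
```cpp
#include <util/strencodings.h>

#include <cstdint>
#include <optional>
#include <string>

bool ParsePort(const std::string& str, uint16_t& port_out)
{
    // ToIntegral returns std::nullopt on malformed input or overflow.
    const std::optional<uint16_t> port{ToIntegral<uint16_t>(str)};
    if (!port) return false;
    port_out = *port;
    return true;
}
```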
- Avoid using locale dependent functions if possible. You can use the provided
[`lint-locale-dependence.py`](/test/lint/lint-locale-dependence.py)
to check for accidental use of locale dependent functions.
- *Rationale*: Unnecessary locale dependence can cause bugs that are very tricky to isolate and fix.
- These functions are known to be locale dependent:
`alphasort`, `asctime`, `asprintf`, `atof`, `atoi`, `atol`, `atoll`, `atoq`,
`btowc`, `ctime`, `dprintf`, `fgetwc`, `fgetws`, `fprintf`, `fputwc`,
`fputws`, `fscanf`, `fwprintf`, `getdate`, `getwc`, `getwchar`, `isalnum`,
`isalpha`, `isblank`, `iscntrl`, `isdigit`, `isgraph`, `islower`, `isprint`,
`ispunct`, `isspace`, `isupper`, `iswalnum`, `iswalpha`, `iswblank`,
`iswcntrl`, `iswctype`, `iswdigit`, `iswgraph`, `iswlower`, `iswprint`,
`iswpunct`, `iswspace`, `iswupper`, `iswxdigit`, `isxdigit`, `mblen`,
`mbrlen`, `mbrtowc`, `mbsinit`, `mbsnrtowcs`, `mbsrtowcs`, `mbstowcs`,
`mbtowc`, `mktime`, `putwc`, `putwchar`, `scanf`, `snprintf`, `sprintf`,
`sscanf`, `stoi`, `stol`, `stoll`, `strcasecmp`, `strcasestr`, `strcoll`,
`strfmon`, `strftime`, `strncasecmp`, `strptime`, `strtod`, `strtof`,
`strtoimax`, `strtol`, `strtold`, `strtoll`, `strtoq`, `strtoul`,
`strtoull`, `strtoumax`, `strtouq`, `strxfrm`, `swprintf`, `tolower`,
`toupper`, `towctrans`, `towlower`, `towupper`, `ungetwc`, `vasprintf`,
`vdprintf`, `versionsort`, `vfprintf`, `vfscanf`, `vfwprintf`, `vprintf`,
`vscanf`, `vsnprintf`, `vsprintf`, `vsscanf`, `vswprintf`, `vwprintf`,
`wcrtomb`, `wcscasecmp`, `wcscoll`, `wcsftime`, `wcsncasecmp`, `wcsnrtombs`,
`wcsrtombs`, `wcstod`, `wcstof`, `wcstoimax`, `wcstol`, `wcstold`,
`wcstoll`, `wcstombs`, `wcstoul`, `wcstoull`, `wcstoumax`, `wcswidth`,
`wcsxfrm`, `wctob`, `wctomb`, `wctrans`, `wctype`, `wcwidth`, `wprintf`
- For `strprintf`, `LogPrint`, and `LogPrintf`, formatting characters don't need size specifiers.
- *Rationale*: Bitcoin Core uses tinyformat, which is type safe. Leave them out to avoid confusion.
- Use `.c_str()` sparingly. Its only valid use is to pass C++ strings to C functions that take NULL-terminated
strings.
- Do not use it when passing a sized array (so along with `.size()`). Use `.data()` instead to get a pointer
to the raw data.
- *Rationale*: Although this is guaranteed to be safe starting with C++11, `.data()` communicates the intent better.
- Do not use it when passing strings to `tfm::format`, `strprintf`, `LogPrint[f]`.
- *Rationale*: This is redundant. Tinyformat handles strings.
- Do not use it to convert to `QString`. Use `QString::fromStdString()`.
- *Rationale*: Qt has built-in functionality for converting their string
type from/to C++. No need to roll your own.
- In cases where you do call `.c_str()`, you might want to additionally check that the string does not contain embedded '\0' characters, because
a C consumer will (necessarily) treat the string as truncated at the first NUL. This might be used to hide parts of the string from logging or to circumvent
checks. If a use of strings is sensitive to this, take care to check the string for embedded NULL characters first
and reject it if there are any (see `ParsePrechecks` in `strencodings.cpp` for an example).
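A minimal sketch of such a pre-check (`SomeCApi` is a hypothetical C function):
```cpp
#include <stdexcept>
#include <string>

extern "C" void SomeCApi(const char* s); // hypothetical C function taking a NUL-terminated string

void PassToCApi(const std::string& s)
{
    // A NUL-terminated view would silently truncate at the first '\0',
    // so reject such strings up front.
    if (s.find('\0') != std::string::npos) {
        throw std::runtime_error("unexpected NUL byte in input");
    }
    SomeCApi(s.c_str());
}
```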
Shadowing
--------------
Although the shadowing warning (`-Wshadow`) is not enabled by default (even though it helps
prevent issues arising from accidentally using a different variable with the same name),
please name variables so that they do not shadow variables defined elsewhere in the source code.
When using nested loops, do not name the inner loop variable the same as in
the outer loop, etc.
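For example (hypothetical function):
```cpp
#include <cstddef>
#include <vector>

void Walk(const std::vector<std::vector<int>>& rows)
{
    // Bad (shown commented out): the inner `i` shadows the outer `i`, so
    // `rows[i]` inside the inner loop no longer names the row being walked.
    // for (std::size_t i = 0; i < rows.size(); ++i)
    //     for (std::size_t i = 0; i < rows[i].size(); ++i) { /* ... */ }

    // Better: distinct names for nested loop variables.
    for (std::size_t row = 0; row < rows.size(); ++row) {
        for (std::size_t col = 0; col < rows[row].size(); ++col) {
            (void)rows[row][col]; // placeholder for real work
        }
    }
}
```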
Lifetimebound
--------------
The [Clang `lifetimebound`
attribute](https://clang.llvm.org/docs/AttributeReference.html#lifetimebound)
can be used to tell the compiler that a lifetime is bound to an object, and
potentially raise a compile-time warning if the object has a shorter lifetime, e.g.
from the invalid use of a temporary. You can use the attribute by adding a `LIFETIMEBOUND`
annotation defined in `src/attributes.h`; please grep the codebase for examples.
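A minimal sketch (hypothetical class) of annotating a getter whose result is bound to `*this`:
```cpp
#include <attributes.h> // defines LIFETIMEBOUND

#include <string>

class Settings
{
public:
    // The returned reference must not outlive the Settings object;
    // clang can now warn about e.g. binding it to a temporary.
    const std::string& Name() const LIFETIMEBOUND { return m_name; }
private:
    std::string m_name;
};

// const std::string& n{Settings{}.Name()}; // would trigger a dangling-reference warning
```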
Threads and synchronization
----------------------------
- Prefer `Mutex` type to `RecursiveMutex` one.
- Consistently use [Clang Thread Safety Analysis](https://clang.llvm.org/docs/ThreadSafetyAnalysis.html) annotations to
get compile-time warnings about potential race conditions or deadlocks in code.
- In functions that are declared separately from where they are defined, the
thread safety annotations should be added exclusively to the function
declaration. Annotations on the definition could lead to false positives
(lack of compile failure) at call sites between the two.
- Prefer locks that are in a class rather than global, and that are
internal to a class (private or protected) rather than public.
- Combine annotations in function declarations with run-time asserts in
function definitions (`AssertLockNotHeld()` can be omitted if `LOCK()` is
called unconditionally after it because `LOCK()` does the same check as
`AssertLockNotHeld()` internally, for non-recursive mutexes):
```C++
// txmempool.h
class CTxMemPool
{
public:
...
mutable RecursiveMutex cs;
...
void UpdateTransactionsFromBlock(...) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, cs);
...
};
// txmempool.cpp
void CTxMemPool::UpdateTransactionsFromBlock(...)
{
AssertLockHeld(::cs_main);
AssertLockHeld(cs);
...
}
```
```C++
// validation.h
class Chainstate
{
protected:
...
Mutex m_chainstate_mutex;
...
public:
...
bool ActivateBestChain(
BlockValidationState& state,
std::shared_ptr<const CBlock> pblock = nullptr)
EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex)
LOCKS_EXCLUDED(::cs_main);
...
bool PreciousBlock(BlockValidationState& state, CBlockIndex* pindex)
EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex)
LOCKS_EXCLUDED(::cs_main);
...
};
// validation.cpp
bool Chainstate::PreciousBlock(BlockValidationState& state, CBlockIndex* pindex)
{
AssertLockNotHeld(m_chainstate_mutex);
AssertLockNotHeld(::cs_main);
{
LOCK(cs_main);
...
}
return ActivateBestChain(state, std::shared_ptr<const CBlock>());
}
```
- Build and run tests with `-DDEBUG_LOCKORDER` to verify that no potential
deadlocks are introduced. As of 0.12, this is defined by default when
configuring with `--enable-debug`.
- When using `LOCK`/`TRY_LOCK` be aware that the lock exists in the context of
the current scope, so surround the statement and the code that needs the lock
with braces.
OK:
```c++
{
TRY_LOCK(cs_vNodes, lockNodes);
...
}
```
Wrong:
```c++
TRY_LOCK(cs_vNodes, lockNodes);
{
...
}
```
Scripts
--------------------------
Write scripts in Python rather than bash, when possible.
### Shebang
- Use `#!/usr/bin/env bash` instead of obsolete `#!/bin/bash`.
- [*Rationale*](https://github.com/dylanaraps/pure-bash-bible#shebang):
`#!/bin/bash` assumes it is always installed to /bin/ which can cause issues;
`#!/usr/bin/env bash` searches the user's PATH to find the bash binary.
OK:
```bash
#!/usr/bin/env bash
```
Wrong:
```bash
#!/bin/bash
```
Source code organization
--------------------------
- Implementation code should go into the `.cpp` file and not the `.h`, unless necessary due to template usage or
when performance due to inlining is critical.
- *Rationale*: Shorter and simpler header files are easier to read and reduce compile time.
- Use only the lowercase alphanumerics (`a-z0-9`), underscore (`_`) and hyphen (`-`) in source code filenames.
- *Rationale*: `grep`-ing and auto-completing filenames is easier when using a consistent
naming pattern. Potential problems when building on case-insensitive filesystems are
avoided when using only lowercase characters in source code filenames.
- Every `.cpp` and `.h` file should `#include` every header file it directly uses classes, functions or other
definitions from, even if those headers are already included indirectly through other headers.
- *Rationale*: Excluding headers because they are already indirectly included results in compilation
failures when those indirect dependencies change. Furthermore, it obscures what the real code
dependencies are.
- Don't import anything into the global namespace (`using namespace ...`). Use
fully specified types such as `std::string`.
- *Rationale*: Avoids symbol conflicts.
- Terminate namespaces with a comment (`// namespace mynamespace`). The comment
should be placed on the same line as the brace closing the namespace, e.g.
```c++
namespace mynamespace {
...
} // namespace mynamespace
namespace {
...
} // namespace
```
- *Rationale*: Avoids confusion about the namespace context.
- Use `#include <primitives/transaction.h>` bracket syntax instead of
`#include "primitives/transaction.h"` quote syntax.
- *Rationale*: Bracket syntax is less ambiguous because the preprocessor
searches a fixed list of include directories without taking location of the
source file into account. This allows quoted includes to stand out more when
the location of the source file actually is relevant.
- Use include guards to avoid the problem of double inclusion. The header file
`foo/bar.h` should use the include guard identifier `BITCOIN_FOO_BAR_H`, e.g.
```c++
#ifndef BITCOIN_FOO_BAR_H
#define BITCOIN_FOO_BAR_H
...
#endif // BITCOIN_FOO_BAR_H
```
GUI
-----
- Do not display or manipulate dialogs in model code (classes `*Model`).
- *Rationale*: Model classes pass through events and data from the core, they
should not interact with the user. That's where View classes come in. The converse also
holds: try to not directly access core data structures from Views.
- Avoid adding slow or blocking code in the GUI thread. In particular, do not
add new `interfaces::Node` and `interfaces::Wallet` method calls, even if they
may be fast now, in case they are changed to lock or communicate across
processes in the future.
Prefer to offload work from the GUI thread to worker threads (see
`RPCExecutor` in console code as an example) or take other steps (see
https://doc.qt.io/archives/qq/qq27-responsive-guis.html) to keep the GUI
responsive.
- *Rationale*: Blocking the GUI thread can increase latency, and lead to
hangs and deadlocks.
Subtrees
----------
Several parts of the repository are subtrees of software maintained elsewhere.
Some of these are maintained by active developers of Bitcoin Core, in which case
changes should go directly upstream without being PRed directly against the project.
They will be merged back in the next subtree merge.
Others are external projects without a tight relationship with our project. Changes
to these should also be sent upstream, but bugfixes may also be prudent to PR against
a Bitcoin Core subtree, so that they can be integrated quickly. Cosmetic changes
should be taken upstream.
There is a tool in `test/lint/git-subtree-check.sh` ([instructions](../test/lint#git-subtree-checksh))
to check a subtree directory for consistency with its upstream repository.
Current subtrees include:
- src/leveldb
- Subtree at https://github.com/bitcoin-core/leveldb-subtree ; maintained by Core contributors.
- Upstream at https://github.com/google/leveldb ; maintained by Google. Open
important PRs to the subtree to avoid delay.
- **Note**: Follow the instructions in [Upgrading LevelDB](#upgrading-leveldb) when
merging upstream changes to the LevelDB subtree.
- src/crc32c
- Used by leveldb for hardware acceleration of CRC32C checksums for data integrity.
- Subtree at https://github.com/bitcoin-core/crc32c-subtree ; maintained by Core contributors.
- Upstream at https://github.com/google/crc32c ; maintained by Google.
- src/secp256k1
- Upstream at https://github.com/bitcoin-core/secp256k1/ ; maintained by Core contributors.
- src/crypto/ctaes
- Upstream at https://github.com/bitcoin-core/ctaes ; maintained by Core contributors.
- src/minisketch
- Upstream at https://github.com/sipa/minisketch ; maintained by Core contributors.
Upgrading LevelDB
---------------------
Extra care must be taken when upgrading LevelDB. This section explains issues
you must be aware of.
### File Descriptor Counts
In most configurations, we use the default LevelDB value for `max_open_files`,
which is 1000 at the time of this writing. If LevelDB actually uses this many
file descriptors, it will cause problems with Bitcoin's `select()` loop, because
it may cause new sockets to be created where the fd value is >= 1024. For this
reason, on 64-bit Unix systems, we rely on an internal LevelDB optimization that
uses `mmap()` + `close()` to open table files without actually retaining
references to the table file descriptors. If you are upgrading LevelDB, you must
sanity check the changes to make sure that this assumption remains valid.
In addition to reviewing the upstream changes in `env_posix.cc`, you can use `lsof` to
check this. For example, on Linux this command will show open `.ldb` file counts:
```bash
$ lsof -p $(pidof bitcoind) |\
awk 'BEGIN { fd=0; mem=0; } /ldb$/ { if ($4 == "mem") mem++; else fd++ } END { printf "mem = %s, fd = %s\n", mem, fd}'
mem = 119, fd = 0
```
The `mem` value shows how many files are mmap'ed, and the `fd` value shows how
many file descriptors these files are using. You should check that `fd` is a
small number (usually 0 on 64-bit hosts).
See the notes in the `SetMaxOpenFiles()` function in `dbwrapper.cc` for more
details.
### Consensus Compatibility
It is possible for LevelDB changes to inadvertently change consensus
compatibility between nodes. This happened in Bitcoin 0.8 (when LevelDB was
first introduced). When upgrading LevelDB, you should review the upstream changes
to check for issues affecting consensus compatibility.
For example, if LevelDB had a bug that accidentally prevented a key from being
returned in an edge case, and that bug was fixed upstream, the bug "fix" would
be an incompatible consensus change. In this situation, the correct behavior
would be to revert the upstream fix before applying the updates to Bitcoin's
copy of LevelDB. In general, you should be wary of any upstream changes affecting
what data is returned from LevelDB queries.
Scripted diffs
--------------
For reformatting and refactoring commits where the changes can be easily automated using a bash script, we use
scripted-diff commits. The bash script is included in the commit message and our CI job checks that
the result of the script is identical to the commit. This aids reviewers since they can verify that the script
does exactly what it is supposed to do. It is also helpful for rebasing (since the same script can just be re-run
on the new master commit).
To create a scripted-diff:
- start the commit message with `scripted-diff:` (and then a description of the diff on the same line)
- in the commit message include the bash script between lines containing just the following text:
- `-BEGIN VERIFY SCRIPT-`
- `-END VERIFY SCRIPT-`
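For illustration, a hypothetical scripted-diff commit message (the rename and the `sed` invocation are made up) might look like:
```
scripted-diff: Rename ExampleOldName to ExampleNewName

-BEGIN VERIFY SCRIPT-
sed -i 's/ExampleOldName/ExampleNewName/g' $(git grep -l ExampleOldName -- src)
-END VERIFY SCRIPT-
```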
The scripted-diff is verified by the tool `test/lint/commit-script-check.sh`. The tool's default behavior, when supplied
with a commit, is to verify all scripted-diffs from the beginning of time up to said commit. Internally, the tool passes
the first supplied argument to `git rev-list --reverse` to determine which commits to verify scripted-diffs for, ignoring
commits that don't conform to the commit message format described above.
For development, it might be more convenient to verify all scripted-diffs in a range `A..B`, for example:
```bash
test/lint/commit-script-check.sh origin/master..HEAD
```
### Suggestions and examples
If you need to replace in multiple files, prefer `git ls-files` to `find` or globbing, and `git grep` to `grep`, to
avoid changing files that are not under version control.
For efficient replacement scripts, reduce the selection to the files that potentially need to be modified, so for
example, instead of a blanket `git ls-files src | xargs sed -i s/apple/orange/`, use
`git grep -l apple src | xargs sed -i s/apple/orange/`.
Also, it is good to keep the selection of files as specific as possible — for example, replace only in directories where
you expect replacements — because it reduces the risk that a rebase of your commit by re-running the script will
introduce accidental changes.
Some good examples of scripted-diff:
- [scripted-diff: Rename InitInterfaces to NodeContext](https://github.com/bitcoin/bitcoin/commit/301bd41a2e6765b185bd55f4c541f9e27aeea29d)
uses an elegant script to replace occurrences of multiple terms in all source files.
- [scripted-diff: Remove g_connman, g_banman globals](https://github.com/bitcoin/bitcoin/commit/8922d7f6b751a3e6b3b9f6fb7961c442877fb65a)
replaces specific terms in a list of specific source files.
- [scripted-diff: Replace fprintf with tfm::format](https://github.com/bitcoin/bitcoin/commit/fac03ec43a15ad547161e37e53ea82482cc508f9)
does a global replacement but excludes certain directories.
To find all previous uses of scripted diffs in the repository, do:
```
git log --grep="-BEGIN VERIFY SCRIPT-"
```
Release notes
-------------
Release notes should be written for any PR that:
- introduces a notable new feature
- fixes a significant bug
- changes an API or configuration model
- makes any other visible change to the end-user experience.
Release notes should be added to a PR-specific release note file at
`/doc/release-notes-<PR number>.md` to avoid conflicts between multiple PRs.
All `release-notes*` files are merged into a single
[/doc/release-notes.md](/doc/release-notes.md) file prior to the release.
RPC interface guidelines
--------------------------
A few guidelines for introducing and reviewing new RPC interfaces:
- Method naming: use consecutive lower-case names such as `getrawtransaction` and `submitblock`.
- *Rationale*: Consistency with the existing interface.
- Argument and field naming: please consider whether there is already a naming
style or spelling convention in the API for the type of object in question
(`blockhash`, for example), and if so, try to use that. If not, use snake case
`fee_delta` (and not, e.g. `feedelta` or camel case `feeDelta`).
- *Rationale*: Consistency with the existing interface.
- Use the JSON parser for parsing, don't manually parse integers or strings from
arguments unless absolutely necessary.
- *Rationale*: Introduces hand-rolled string manipulation code at both the caller and callee sites,
which is error-prone, and it is easy to get things such as escaping wrong.
JSON already supports nested data structures, no need to re-invent the wheel.
- *Exception*: AmountFromValue can parse amounts as string. This was introduced because many JSON
parsers and formatters hard-code handling decimal numbers as floating-point
values, resulting in potential loss of precision. This is unacceptable for
monetary values. **Always** use `AmountFromValue` and `ValueFromAmount` when
inputting or outputting monetary values. The only exceptions to this are
`prioritisetransaction` and `getblocktemplate` because their interface
is specified as-is in BIP22.
- Missing arguments and 'null' should be treated the same: as default values. If there is no
default value, both cases should fail in the same way. The easiest way to follow this
guideline is to detect unspecified arguments with `params[x].isNull()` instead of
`params.size() <= x`. The former returns true if the argument is either null or missing,
while the latter returns true only if it is missing, and false if it is null.
- *Rationale*: Avoids surprises when switching to name-based arguments. Missing name-based arguments
are passed as 'null'.
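A sketch of the recommended pattern, assuming UniValue's out-of-range `operator[]` returns a null value and the `getInt<T>()` accessor is available:
```cpp
#include <univalue.h>

// params[0] yields a null UniValue when the argument was not supplied,
// so isNull() covers both "missing" and explicit "null".
int GetVerbosity(const UniValue& params)
{
    return params[0].isNull() ? 1 : params[0].getInt<int>();
}
```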
- Try not to overload methods on argument type. E.g. don't make `getblock(true)` and `getblock("hash")`
do different things.
- *Rationale*: This is impossible to use with `bitcoin-cli`, and can be surprising to users.
- *Exception*: Some RPC calls can take both an `int` and `bool`, most notably when a bool was switched
to a multi-value, or due to other historical reasons. **Always** have false map to 0 and
true to 1 in this case.
- Don't forget to fill in the argument names correctly in the RPC command table.
- *Rationale*: If not, the call cannot be used with name-based arguments.
- Add every non-string RPC argument `(method, idx, name)` to the table `vRPCConvertParams` in `rpc/client.cpp`.
- *Rationale*: `bitcoin-cli` and the GUI debug console use this table to determine how to
convert a plaintext command line to JSON. If the types don't match, the method can be unusable
from there.
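The entries have roughly the shape below (struct simplified, values illustrative):
```cpp
#include <string>

// Shape of the conversion table (simplified sketch of src/rpc/client.cpp):
struct CRPCConvertParam
{
    std::string methodName; // method whose params want conversion
    int paramIdx;           // 0-based index of the param to convert
    std::string paramName;  // parameter name
};

static const CRPCConvertParam vRPCConvertParams[]{
    {"setmocktime", 0, "timestamp"},
    {"getblock", 1, "verbosity"},
};
```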
- An RPC method must either be a wallet method or a non-wallet method. Do not
introduce new methods that differ in behavior based on the presence of a wallet.
- *Rationale*: As well as complicating the implementation and interfering
with the introduction of multi-wallet, wallet and non-wallet code should be
separated to avoid introducing circular dependencies between code units.
- Try to make the RPC response a JSON object.
- *Rationale*: If an RPC response is not a JSON object, then it is harder to avoid API breakage if
new data in the response is needed.
- Wallet RPCs call BlockUntilSyncedToCurrentChain to maintain consistency with
`getblockchaininfo`'s state immediately prior to the call's execution. Wallet
RPCs whose behavior does *not* depend on the current chainstate may omit this
call.
- *Rationale*: In previous versions of Bitcoin Core, the wallet was always
in-sync with the chainstate (by virtue of them all being updated in the
same cs_main lock). In order to maintain the behavior that wallet RPCs
return results as of at least the highest best-known block an RPC
client may be aware of prior to entering a wallet RPC call, we must block
until the wallet is caught up to the chainstate as of the RPC call's entry.
This also makes the API much easier for RPC clients to reason about.
- Be aware of RPC method aliases and generally avoid registering the same
callback function pointer for different RPCs.
- *Rationale*: RPC methods registered with the same function pointer will be
considered aliases and only the first method name will show up in the
`help` RPC command list.
- *Exception*: Using RPC method aliases may be appropriate in cases where a
new RPC is replacing a deprecated RPC, to avoid both RPCs confusingly
showing up in the command list.
- Use *invalid* bech32 addresses (e.g. in the constant array `EXAMPLE_ADDRESS`) for
`RPCExamples` help documentation.
- *Rationale*: Prevent accidental transactions by users and encourage the use
of bech32 addresses by default.
- Use the `UNIX_EPOCH_TIME` constant when describing UNIX epoch time or
timestamps in the documentation.
- *Rationale*: User-facing consistency.
- Use the `fs::path::u8string()`/`fs::path::utf8string()` and `fs::u8path()` functions when converting paths
to JSON strings, not `fs::PathToString` and `fs::PathFromString`.
- *Rationale*: JSON strings are Unicode strings, not byte strings, and
RFC8259 requires JSON to be encoded as UTF-8.
Internal interface guidelines
-----------------------------
Internal interfaces between parts of the codebase that are meant to be
independent (node, wallet, GUI), are defined in
[`src/interfaces/`](../src/interfaces/). The main interface classes defined
there are [`interfaces::Chain`](../src/interfaces/chain.h), used by wallet to
access the node's latest chain state,
[`interfaces::Node`](../src/interfaces/node.h), used by the GUI to control the
node, and [`interfaces::Wallet`](../src/interfaces/wallet.h), used by the GUI
to control an individual wallet. There are also more specialized interface
types like [`interfaces::Handler`](../src/interfaces/handler.h) and
[`interfaces::ChainClient`](../src/interfaces/chain.h) that are passed to and from
various interface methods.
Interface classes are written in a particular style so node, wallet, and GUI
code doesn't need to run in the same process, and so the class declarations
work more easily with tools and libraries supporting interprocess
communication:
- Interface classes should be abstract and have methods that are [pure
virtual](https://en.cppreference.com/w/cpp/language/abstract_class). This
allows multiple implementations to inherit from the same interface class,
particularly so one implementation can execute functionality in the local
process, and other implementations can forward calls to remote processes.
- Interface method definitions should wrap existing functionality instead of
implementing new functionality. Any substantial new node or wallet
functionality should be implemented in [`src/node/`](../src/node/) or
[`src/wallet/`](../src/wallet/) and just exposed in
[`src/interfaces/`](../src/interfaces/) instead of being implemented there,
so it can be more modular and accessible to unit tests.
- Interface method parameter and return types should either be serializable or
be other interface classes. Interface methods shouldn't pass references to
objects that can't be serialized or accessed from another process.
Examples:
```c++
// Good: takes string argument and returns interface class pointer
virtual std::unique_ptr<interfaces::Wallet> loadWallet(std::string filename) = 0;
// Bad: returns CWallet reference that can't be used from another process
virtual CWallet& loadWallet(std::string filename) = 0;
```
```c++
// Good: accepts and returns primitive types
virtual bool findBlock(const uint256& hash, int& out_height, int64_t& out_time) = 0;
// Bad: returns pointer to internal node in a linked list inaccessible to
// other processes
virtual const CBlockIndex* findBlock(const uint256& hash) = 0;
```
```c++
// Good: takes plain callback type and returns interface pointer
using TipChangedFn = std::function<void(int block_height, int64_t block_time)>;
virtual std::unique_ptr<interfaces::Handler> handleTipChanged(TipChangedFn fn) = 0;
// Bad: returns boost connection specific to local process
using TipChangedFn = std::function<void(int block_height, int64_t block_time)>;
virtual boost::signals2::scoped_connection connectTipChanged(TipChangedFn fn) = 0;
```
- Interface methods should not be overloaded.
*Rationale*: consistency and friendliness to code generation tools.
Example:
```c++
// Good: method names are unique
virtual bool disconnectByAddress(const CNetAddr& net_addr) = 0;
virtual bool disconnectById(NodeId id) = 0;
// Bad: methods are overloaded by type
virtual bool disconnect(const CNetAddr& net_addr) = 0;
virtual bool disconnect(NodeId id) = 0;
```
### Internal interface naming style
- Interface method names should be `lowerCamelCase` and standalone function names should be
`UpperCamelCase`.
*Rationale*: consistency and friendliness to code generation tools.
Examples:
```c++
// Good: lowerCamelCase method name
virtual void blockConnected(const CBlock& block, int height) = 0;
// Bad: uppercase class method
virtual void BlockConnected(const CBlock& block, int height) = 0;
```
```c++
// Good: UpperCamelCase standalone function name
std::unique_ptr<Node> MakeNode(LocalInit& init);
// Bad: lowercase standalone function
std::unique_ptr<Node> makeNode(LocalInit& init);
```
Note: This last convention isn't generally followed outside of
[`src/interfaces/`](../src/interfaces/), though it did come up for discussion
before in [#14635](https://github.com/bitcoin/bitcoin/pull/14635).
| 0 |
bitcoin | bitcoin/doc/release-process.md | Release Process
====================
## Branch updates
### Before every release candidate
* Update translations, see [translation_process.md](https://github.com/bitcoin/bitcoin/blob/master/doc/translation_process.md#synchronising-translations).
* Update release candidate version in `configure.ac` (`CLIENT_VERSION_RC`).
* Update manpages (after rebuilding the binaries), see [gen-manpages.py](https://github.com/bitcoin/bitcoin/blob/master/contrib/devtools/README.md#gen-manpagespy).
* Update `bitcoin.conf` and commit, see [gen-bitcoin-conf.sh](https://github.com/bitcoin/bitcoin/blob/master/contrib/devtools/README.md#gen-bitcoin-confsh).
### Before every major and minor release
* Update [bips.md](bips.md) to account for changes since the last release.
* Update version in `configure.ac` (don't forget to set `CLIENT_VERSION_RC` to `0`).
* Update manpages (see previous section)
* Write release notes (see "Write the release notes" below).
### Before every major release
* On both the master branch and the new release branch:
- update `CLIENT_VERSION_MAJOR` in [`configure.ac`](../configure.ac)
* On the new release branch in [`configure.ac`](../configure.ac) (see [this commit](https://github.com/bitcoin/bitcoin/commit/742f7dd)):
- set `CLIENT_VERSION_MINOR` to `0`
- set `CLIENT_VERSION_BUILD` to `0`
- set `CLIENT_VERSION_IS_RELEASE` to `true`
#### Before branch-off
* Update hardcoded [seeds](/contrib/seeds/README.md), see [this pull request](https://github.com/bitcoin/bitcoin/pull/27488) for an example.
* Update the following variables in [`src/kernel/chainparams.cpp`](/src/kernel/chainparams.cpp) for mainnet, testnet, and signet:
- `m_assumed_blockchain_size` and `m_assumed_chain_state_size` with the current size plus some overhead (see
[this](#how-to-calculate-assumed-blockchain-and-chain-state-size) for information on how to calculate them).
- The following updates should be reviewed with `reindex-chainstate` and `assumevalid=0` to catch any defect
that causes rejection of blocks in the past history.
- `chainTxData` with statistics about the transaction count and rate. Use the output of the `getchaintxstats` RPC with an
`nBlocks` of 4096 (28 days) and a `bestblockhash` of RPC `getbestblockhash`; see
[this pull request](https://github.com/bitcoin/bitcoin/pull/28591) for an example. Reviewers can verify the results by running
`getchaintxstats <window_block_count> <window_final_block_hash>` with the `window_block_count` and `window_final_block_hash` from your output.
- `defaultAssumeValid` with the output of RPC `getblockhash` using the `height` of `window_final_block_height` above
(and update the block height comment with that height), taking into account the following:
- On mainnet, the selected value must not be orphaned, so it may be useful to set the height two blocks back from the tip.
- Testnet should be set with a height some tens of thousands back from the tip, due to reorgs there.
- `nMinimumChainWork` with the "chainwork" value of RPC `getblockheader` using the same height as that selected for the previous step.
* Consider updating the headers synchronization tuning parameters to account for the chainparams updates.
The optimal values change very slowly, so this isn't strictly necessary every release, but doing so doesn't hurt.
- Update configuration variables in [`contrib/devtools/headerssync-params.py`](/contrib/devtools/headerssync-params.py):
- Set `TIME` to the software's expected supported lifetime -- after this time, its ability to defend against a high bandwidth timewarp attacker will begin to degrade.
- Set `MINCHAINWORK_HEADERS` to the height used for the `nMinimumChainWork` calculation above.
- Check that the other variables still look reasonable.
- Run the script. It works fine in CPython, but PyPy is much faster (seconds instead of minutes): `pypy3 contrib/devtools/headerssync-params.py`.
- Paste the output defining `HEADER_COMMITMENT_PERIOD` and `REDOWNLOAD_BUFFER_SIZE` into the top of [`src/headerssync.cpp`](/src/headerssync.cpp).
- Clear the release notes and move them to the wiki (see "Write the release notes" below).
- Translations on Transifex:
- Pull translations from Transifex into the master branch.
- Create [a new resource](https://www.transifex.com/bitcoin/bitcoin/content/) named after the major version with the slug `qt-translation-<RRR>x`, where `RRR` is the major branch number padded with zeros. Use `src/qt/locale/bitcoin_en.xlf` to create it.
- In the project workflow settings, ensure that [Translation Memory Fill-up](https://help.transifex.com/en/articles/6224817-setting-up-translation-memory-fill-up) is enabled and that [Translation Memory Context Matching](https://help.transifex.com/en/articles/6224753-translation-memory-with-context) is disabled.
- Update the Transifex slug in [`.tx/config`](/.tx/config) to the slug of the resource created in the first step. This identifies which resource the translations will be synchronized from.
- Make an announcement that translators can start translating for the new version. You can use one of the [previous announcements](https://www.transifex.com/bitcoin/communication/) as a template.
- Change the auto-update URL for the resource to `master`, e.g. `https://raw.githubusercontent.com/bitcoin/bitcoin/master/src/qt/locale/bitcoin_en.xlf`. (Do this only after the previous steps, to prevent an auto-update from interfering.)
#### After branch-off (on the major release branch)
- Update the versions.
- Create the draft, named "*version* Release Notes Draft", as a [collaborative wiki](https://github.com/bitcoin-core/bitcoin-devwiki/wiki/_new).
- Clear the release notes: `cp doc/release-notes-empty-template.md doc/release-notes.md`
- Create a pinned meta-issue for testing the release candidate (see [this issue](https://github.com/bitcoin/bitcoin/issues/27621) for an example) and provide a link to it in the release announcements where useful.
- Translations on Transifex
- Change the auto-update URL for the new major version's resource away from `master` and to the branch, e.g. `https://raw.githubusercontent.com/bitcoin/bitcoin/<branch>/src/qt/locale/bitcoin_en.xlf`. Do not forget this or it will keep tracking the translations on master instead, drifting away from the specific major release.
- Prune inputs from the qa-assets repo (See [pruning
inputs](https://github.com/bitcoin-core/qa-assets#pruning-inputs)).
#### Before final release
- Merge the release notes from [the wiki](https://github.com/bitcoin-core/bitcoin-devwiki/wiki/) into the branch.
- Ensure the "Needs release note" label is removed from all relevant pull
requests and issues:
https://github.com/bitcoin/bitcoin/issues?q=label%3A%22Needs+release+note%22
#### Tagging a release (candidate)
To tag the version (or release candidate) in git, use the `make-tag.py` script from [bitcoin-maintainer-tools](https://github.com/bitcoin-core/bitcoin-maintainer-tools). From the root of the repository run:
../bitcoin-maintainer-tools/make-tag.py v(new version, e.g. 25.0)
This will perform a few last-minute consistency checks in the build system files, and if they pass, create a signed tag.
## Building
### First time / New builders
Install Guix using one of the installation methods detailed in
[contrib/guix/INSTALL.md](/contrib/guix/INSTALL.md).
Check out the source code in the following directory hierarchy.
cd /path/to/your/toplevel/build
git clone https://github.com/bitcoin-core/guix.sigs.git
git clone https://github.com/bitcoin-core/bitcoin-detached-sigs.git
git clone https://github.com/bitcoin/bitcoin.git
### Write the release notes
Open a draft of the release notes for collaborative editing at https://github.com/bitcoin-core/bitcoin-devwiki/wiki.
For the period during which the notes are being edited on the wiki, the version on the branch should be wiped and replaced with a link to the wiki which should be used for all announcements until `-final`.
Generate list of authors:
git log --format='- %aN' v(current version, e.g. 25.0)..v(new version, e.g. 25.1) | grep -v 'merge-script' | sort -fiu
### Setup and perform Guix builds
Checkout the Bitcoin Core version you'd like to build:
```sh
pushd ./bitcoin
SIGNER='(your builder key, ie bluematt, sipa, etc)'
VERSION='(new version without v-prefix, e.g. 25.0)'
git fetch origin "v${VERSION}"
git checkout "v${VERSION}"
popd
```
Ensure your guix.sigs are up-to-date if you wish to `guix-verify` your builds
against other `guix-attest` signatures.
```sh
git -C ./guix.sigs pull
```
### Create the macOS SDK tarball (first time, or when SDK version changes)
Create the macOS SDK tarball, see the [macdeploy
instructions](/contrib/macdeploy/README.md#deterministic-macos-app-notes) for
details.
### Build and attest to build outputs
Follow the relevant Guix README.md sections:
- [Building](/contrib/guix/README.md#building)
- [Attesting to build outputs](/contrib/guix/README.md#attesting-to-build-outputs)
### Verify other builders' signatures to your own (optional)
- [Verifying build output attestations](/contrib/guix/README.md#verifying-build-output-attestations)
### Commit your non codesigned signature to guix.sigs
```sh
pushd ./guix.sigs
git add "${VERSION}/${SIGNER}"/noncodesigned.SHA256SUMS{,.asc}
git commit -m "Add attestations by ${SIGNER} for ${VERSION} non-codesigned"
popd
```
Then open a Pull Request to the [guix.sigs repository](https://github.com/bitcoin-core/guix.sigs).
## Codesigning
### macOS codesigner only: Create detached macOS signatures (assuming [signapple](https://github.com/achow101/signapple/) is installed and up to date with master branch)
tar xf bitcoin-osx-unsigned.tar.gz
./detached-sig-create.sh /path/to/codesign.p12
Enter the keychain password and authorize the signature
signature-osx.tar.gz will be created
### Windows codesigner only: Create detached Windows signatures
tar xf bitcoin-win-unsigned.tar.gz
./detached-sig-create.sh -key /path/to/codesign.key
Enter the passphrase for the key when prompted
signature-win.tar.gz will be created
### Windows and macOS codesigners only: test code signatures
It is advised to test that the code signature attaches properly prior to tagging by performing the `guix-codesign` step.
However if this is done, once the release has been tagged in the bitcoin-detached-sigs repo, the `guix-codesign` step must be performed again in order for the guix attestation to be valid when compared against the attestations of non-codesigner builds.
### Windows and macOS codesigners only: Commit the detached codesign payloads
```sh
pushd ./bitcoin-detached-sigs
# checkout the appropriate branch for this release series
rm -rf ./*
tar xf signature-osx.tar.gz
tar xf signature-win.tar.gz
git add -A
git commit -m "point to ${VERSION}"
git tag -s "v${VERSION}" HEAD
git push origin HEAD "v${VERSION}"  # push the current branch and the new tag (remote name may differ)
popd
```
### Non-codesigners: wait for Windows and macOS detached signatures
- Once the Windows and macOS builds each have 3 matching signatures, they will be signed with their respective release keys.
- Detached signatures will then be committed to the [bitcoin-detached-sigs](https://github.com/bitcoin-core/bitcoin-detached-sigs) repository, which can be combined with the unsigned apps to create signed binaries.
### Create the codesigned build outputs
- [Codesigning build outputs](/contrib/guix/README.md#codesigning-build-outputs)
### Verify other builders' signatures to your own (optional)
- [Verifying build output attestations](/contrib/guix/README.md#verifying-build-output-attestations)
### Commit your codesigned signature to guix.sigs (for the signed macOS/Windows binaries)
```sh
pushd ./guix.sigs
git add "${VERSION}/${SIGNER}"/all.SHA256SUMS{,.asc}
git commit -m "Add attestations by ${SIGNER} for ${VERSION} codesigned"
popd
```
Then open a Pull Request to the [guix.sigs repository](https://github.com/bitcoin-core/guix.sigs).
## After 3 or more people have guix-built and their results match
Combine the `all.SHA256SUMS.asc` file from all signers into `SHA256SUMS.asc`:
```bash
cat "$VERSION"/*/all.SHA256SUMS.asc > SHA256SUMS.asc
```
- Upload to the bitcoincore.org server (`/var/www/bin/bitcoin-core-${VERSION}/`):
1. The contents of each `./bitcoin/guix-build-${VERSION}/output/${HOST}/` directory, except for
`*-debug*` files.
Guix will output all of the results into host subdirectories, but the SHA256SUMS
file does not include these subdirectories. In order for downloads via torrent
to verify without directory structure modification, all of the uploaded files
need to be in the same directory as the SHA256SUMS file.
The `*-debug*` files generated by the guix build contain debug symbols
for troubleshooting by developers. It is assumed that anyone who is
interested in debugging can run guix to generate the files for
themselves. To avoid end-user confusion about which file to pick, as well
as to save storage space, *do not upload these to the bitcoincore.org server,
nor put them in the torrent*.
```sh
find guix-build-${VERSION}/output/ -maxdepth 2 -type f -not -name "SHA256SUMS.part" -and -not -name "*debug*" -exec scp {} user@bitcoincore.org:/var/www/bin/bitcoin-core-${VERSION} \;
```
2. The `SHA256SUMS` file
3. The `SHA256SUMS.asc` combined signature file you just created
- Create a torrent of the `/var/www/bin/bitcoin-core-${VERSION}` directory such
that at the top level there is only one file: the `bitcoin-core-${VERSION}`
directory containing everything else. Name the torrent
`bitcoin-${VERSION}.torrent` (note that there is no `-core-` in this name).
Optionally help seed this torrent. To get the `magnet:` URI use:
```sh
transmission-show -m <torrent file>
```
Insert the magnet URI into the announcement sent to mailing lists. This permits
people without access to `bitcoincore.org` to download the binary distribution.
Also put it into the `optional_magnetlink:` slot in the YAML file for
bitcoincore.org.
- Update other repositories and websites for new version
- bitcoincore.org blog post
- bitcoincore.org maintained versions update:
[table](https://github.com/bitcoin-core/bitcoincore.org/commits/master/_includes/posts/maintenance-table.md)
- Delete post-EOL [release branches](https://github.com/bitcoin/bitcoin/branches/all) and create a tag `v${branch_name}-final`.
- Delete ["Needs backport" labels](https://github.com/bitcoin/bitcoin/labels?q=backport) for non-existing branches.
- bitcoincore.org RPC documentation update
- See https://github.com/bitcoin-core/bitcoincore.org/blob/master/contrib/doc-gen/
- Update packaging repo
- Push the flatpak to flathub, e.g. https://github.com/flathub/org.bitcoincore.bitcoin-qt/pull/2
- Push the snap, see https://github.com/bitcoin-core/packaging/blob/main/snap/local/build.md
- This repo
- Archive the release notes for the new version to `doc/release-notes/` (branch `master` and branch of the release)
- Create a [new GitHub release](https://github.com/bitcoin/bitcoin/releases/new) with a link to the archived release notes
- Announce the release:
- bitcoin-dev and bitcoin-core-dev mailing list
- Bitcoin Core announcements list https://bitcoincore.org/en/list/announcements/join/
- Bitcoin Core Twitter https://twitter.com/bitcoincoreorg
- Celebrate
### Additional information
#### <a name="how-to-calculate-assumed-blockchain-and-chain-state-size"></a>How to calculate `m_assumed_blockchain_size` and `m_assumed_chain_state_size`
Both variables are used as a guideline for how much space the user needs on their drive in total, not just strictly for the blockchain.
Note that all values should be taken from a **fully synced** node and have an overhead of 5-10% added on top of its base value.
To calculate `m_assumed_blockchain_size`, take the size in GiB of these directories:
- For `mainnet` -> the data directory, excluding the `/testnet3`, `/signet`, and `/regtest` directories and any overly large files, e.g. a huge `debug.log`
- For `testnet` -> `/testnet3`
- For `signet` -> `/signet`
To calculate `m_assumed_chain_state_size`, take the size in GiB of these directories:
- For `mainnet` -> `/chainstate`
- For `testnet` -> `/testnet3/chainstate`
- For `signet` -> `/signet/chainstate`
Notes:
- When taking the size for `m_assumed_blockchain_size`, there's no need to exclude the `/chainstate` directory since it's a guideline value and an overhead will be added anyway.
- The expected overhead for growth may change over time. Consider whether the percentage needs to be changed in response; if so, update it here in this section.
| 0 |
bitcoin | bitcoin/doc/dnsseed-policy.md | Expectations for DNS Seed operators
====================================
Bitcoin Core attempts to minimize the level of trust in DNS seeds,
but DNS seeds still pose a small amount of risk for the network.
As such, DNS seeds must be run by entities which have some minimum
level of trust within the Bitcoin community.
Other implementations of Bitcoin software may also use the same
seeds and may be more exposed. In light of this exposure, this
document establishes some basic expectations for operating DNS seeds.
0. A DNS seed operating organization or person is expected to follow good
host security practices, maintain control of applicable infrastructure,
and not sell or transfer control of the DNS seed. Any hosting services
contracted by the operator are equally expected to uphold these expectations.
1. The DNS seed results must consist exclusively of fairly selected and
functioning Bitcoin nodes from the public network to the best of the
operator's understanding and capability.
2. For the avoidance of doubt, the results may be randomized but must not
single out any group of hosts to receive different results unless due to an
urgent technical necessity, and such an exception must be disclosed.
3. The results may not be served with a DNS TTL of less than one minute.
4. Any logging of DNS queries should be only that which is necessary
for the operation of the service or urgent health of the Bitcoin
network and must not be retained longer than necessary nor disclosed
to any third party.
5. Information gathered as a result of the operator's node-spidering
(not from DNS queries) may be freely published or retained, but only
if this data was not made more complete by biasing node connectivity
(a violation of expectation (1)).
6. Operators are encouraged, but not required, to publicly document the
details of their operating practices.
7. A reachable email contact address must be published for inquiries
related to the DNS seed operation.
If these expectations cannot be satisfied the operator should
discontinue providing services and contact the active Bitcoin
Core development team as well as posting on
[bitcoin-dev](https://lists.linuxfoundation.org/mailman/listinfo/bitcoin-dev).
Behavior outside of these expectations may be reasonable in some
situations but should be discussed in public in advance.
See also
----------
- [bitcoin-seeder](https://github.com/sipa/bitcoin-seeder) is a reference implementation of a DNS seed.
| 0 |