diff --git a/.github/node_upgrade.sh b/.github/node_upgrade.sh
index 6d2fcc1a7..989fb1a8a 100755
--- a/.github/node_upgrade.sh
+++ b/.github/node_upgrade.sh
@@ -88,23 +88,22 @@ if [ -z "${BASE_TAR_URL:-}" ]; then
   cardano_bins_build_all "$BASE_REVISION" "" "_base"
   PATH_PREPEND_BASE="$(cardano_bins_print_path_prepend "" "_base")"
 fi
-export PATH_PREPEND_BASE
 
 # Prepare cardano-node for the upgrade revision
 cardano_bins_build_all "${UPGRADE_REVISION:-"master"}" "${UPGRADE_CLI_REVISION:-}" "_upgrade"
 PATH_PREPEND_UPGRADE="$(cardano_bins_print_path_prepend "${UPGRADE_CLI_REVISION:-}" "_upgrade")"
-export PATH_PREPEND_UPGRADE
 
 # Prepare cardano-cli for the upgrade revision if UPGRADE_CLI_REVISION is set
 if [ -n "${UPGRADE_CLI_REVISION:-}" ]; then
-  export CARDANO_CLI_REV="$UPGRADE_CLI_REVISION"
   # shellcheck disable=SC1090,SC1091
   . .github/source_cardano_cli.sh
   cardano_cli_build "$UPGRADE_CLI_REVISION" "_upgrade"
   PATH_PREPEND_UPGRADE="$(cardano_cli_print_path_prepend "_upgrade")${PATH_PREPEND_UPGRADE}"
-  export PATH_PREPEND_UPGRADE
 fi
 
+export PATH_PREPEND_BASE
+export PATH_PREPEND_UPGRADE
+
 # optimize nix store space if requested
 if [ "${CI_OPTIMIZE_SPACE:-"false"}" != "false" ]; then
   nix store gc || :
diff --git a/.github/node_upgrade_pytest.sh b/.github/node_upgrade_pytest.sh
index 06d64ea69..b1382577f 100755
--- a/.github/node_upgrade_pytest.sh
+++ b/.github/node_upgrade_pytest.sh
@@ -22,9 +22,6 @@ export REPORTS_DIR="${REPORTS_DIR:-".reports"}"
 rm -rf "${REPORTS_DIR:?}"
 mkdir -p "$REPORTS_DIR"
 
-# Non-P2P mode is no longer supported since cardano-node 10.6.0
-unset ENABLE_LEGACY MIXED_P2P
-
 #
 # STEP1 - start local cluster and run smoke tests for the first time
 #
@@ -117,27 +114,27 @@ elif [ "$1" = "step2" ]; then
   # add binaries saved in step1 to the PATH
   export PATH="${STEP1_BIN}:${PATH}"
 
-  # generate config and topology files for the "mixed" mode
-  CARDANO_NODE_SOCKET_PATH="$WORKDIR/dry_mixed/state-cluster0/bft1.socket" \
+  # re-generate config and topology files
+  CARDANO_NODE_SOCKET_PATH="$WORKDIR/dry_config_step2/state-cluster0/bft1.socket" \
     DRY_RUN=1 \
     "$CLUSTER_SCRIPTS_DIR/start-cluster"
 
   # copy newly generated topology files to the cluster state dir
-  cp -f "$WORKDIR"/dry_mixed/state-cluster0/topology-*.json "$STATE_CLUSTER"
+  cp -f "$WORKDIR"/dry_config_step2/state-cluster0/topology-*.json "$STATE_CLUSTER"
 
   if [ -n "${REPLACE_GENESIS_STEP2:-""}" ]; then
     # Copy newly generated Alonzo genesis to the cluster state dir
-    cp -f "$WORKDIR/dry_mixed/state-cluster0/shelley/genesis.alonzo.json" "$STATE_CLUSTER/shelley"
+    cp -f "$WORKDIR/dry_config_step2/state-cluster0/shelley/genesis.alonzo.json" "$STATE_CLUSTER/shelley"
 
     # Copy newly generated Conway genesis file to the cluster state dir, use committee members from the original
     # Conway genesis.
     jq \
       --argfile src "$STATE_CLUSTER/shelley/genesis.conway.step1.json" \
       '.committee.members = $src.committee.members' \
-      "$WORKDIR/dry_mixed/state-cluster0/shelley/genesis.conway.json" > "$STATE_CLUSTER/shelley/genesis.conway.json"
+      "$WORKDIR/dry_config_step2/state-cluster0/shelley/genesis.conway.json" > "$STATE_CLUSTER/shelley/genesis.conway.json"
   fi
 
-  # use the original shelley and byron genesis files
+  # use the original Shelley and Byron genesis files
   BYRON_GENESIS_HASH="$(jq -r ".ByronGenesisHash" "$STATE_CLUSTER/config-bft1.json")"
   SHELLEY_GENESIS_HASH="$(jq -r ".ShelleyGenesisHash" "$STATE_CLUSTER/config-bft1.json")"
   # hashes of the original alonzo and conway genesis files
@@ -150,7 +147,7 @@ elif [ "$1" = "step2" ]; then
     "$STATE_CLUSTER/shelley/genesis.conway.json")"
 
   # copy newly generated config files to the cluster state dir
-  for conf in "$WORKDIR"/dry_mixed/state-cluster0/config-*.json; do
+  for conf in "$WORKDIR"/dry_config_step2/state-cluster0/config-*.json; do
     fname="${conf##*/}"
 
     if [ "$fname" = "config-pool3.json" ]; then
@@ -200,10 +197,12 @@ elif [ "$1" = "step2" ]; then
   "$STATE_CLUSTER/supervisorctl" status
 
   # print path to cardano-node binaries
+  echo "pool1 node binary:"
   pool1_pid="$("$STATE_CLUSTER/supervisorctl" pid nodes:pool1)"
-  ls -l "/proc/$pool1_pid/exe"
+  readlink -m "/proc/$pool1_pid/exe"
+  echo "pool3 node binary:"
   pool3_pid="$("$STATE_CLUSTER/supervisorctl" pid nodes:pool3)"
-  ls -l "/proc/$pool3_pid/exe"
+  readlink -m "/proc/$pool3_pid/exe"
 
   # check that nodes are running
   if [ "$pool1_pid" = 0 ] || [ "$pool3_pid" = 0 ]; then
@@ -270,20 +269,20 @@ elif [ "$1" = "step3" ]; then
   NETWORK_MAGIC="$(jq '.networkMagic' "$STATE_CLUSTER/shelley/genesis.json")"
   export NETWORK_MAGIC
 
-  # generate config and topology files for p2p mode
-  CARDANO_NODE_SOCKET_PATH="$WORKDIR/dry_p2p/state-cluster0/bft1.socket" \
+  # re-generate config and topology files
+  CARDANO_NODE_SOCKET_PATH="$WORKDIR/dry_config_step3/state-cluster0/bft1.socket" \
     DRY_RUN=1 \
     "$CLUSTER_SCRIPTS_DIR/start-cluster"
 
   # copy newly generated topology files to the cluster state dir
-  cp -f "$WORKDIR"/dry_p2p/state-cluster0/topology-*.json "$STATE_CLUSTER"
+  cp -f "$WORKDIR"/dry_config_step3/state-cluster0/topology-*.json "$STATE_CLUSTER"
 
   # Copy newly generated config files to the cluster state dir, but use the original genesis files
   BYRON_GENESIS_HASH="$(jq -r ".ByronGenesisHash" "$STATE_CLUSTER/config-bft1.json")"
   SHELLEY_GENESIS_HASH="$(jq -r ".ShelleyGenesisHash" "$STATE_CLUSTER/config-bft1.json")"
   ALONZO_GENESIS_HASH="$(jq -r ".AlonzoGenesisHash" "$STATE_CLUSTER/config-bft1.json")"
   CONWAY_GENESIS_HASH="$(jq -r ".ConwayGenesisHash" "$STATE_CLUSTER/config-bft1.json")"
-  for conf in "$WORKDIR"/dry_p2p/state-cluster0/config-*.json; do
+  for conf in "$WORKDIR"/dry_config_step3/state-cluster0/config-*.json; do
     fname="${conf##*/}"
     jq \
       --arg byron_hash "$BYRON_GENESIS_HASH" \
@@ -306,10 +305,12 @@ elif [ "$1" = "step3" ]; then
   "$STATE_CLUSTER/supervisorctl" status
 
   # print path to cardano-node binaries
+  echo "pool1 node binary:"
   pool1_pid="$("$STATE_CLUSTER/supervisorctl" pid nodes:pool1)"
-  ls -l "/proc/$pool1_pid/exe"
+  readlink -m "/proc/$pool1_pid/exe"
+  echo "pool3 node binary:"
   pool3_pid="$("$STATE_CLUSTER/supervisorctl" pid nodes:pool3)"
-  ls -l "/proc/$pool3_pid/exe"
+  readlink -m "/proc/$pool3_pid/exe"
 
   # check that nodes are running
   if [ "$pool1_pid" = 0 ] || [ "$pool3_pid" = 0 ]; then