{ pkgs
, profile
, nodeSpecs
, workload
}:

let

  # supervisor: make playground-development-hydra
  # nomadexec: nix-shell -A 'workbench-shell' --max-jobs 8 --cores 0 --show-trace --argstr profileName development-hydra-coay --argstr backendName nomadexec

  # Packages
  ##########

  bashInteractive = pkgs.bashInteractive;
  coreutils = pkgs.coreutils;
  jq = pkgs.jq;
  # Avoid rebuilding on every commit because of `set-git-rev`.
  cardano-cli = pkgs.cardanoNodePackages.cardano-cli.passthru.noGitRev;
  # Hydra (Release 1.0.0).
  commit = "b5e33b55e9fba442c562f82cec6c36b1716d9847";
  flake = (__getFlake "github:cardano-scaling/hydra/${commit}");
  hydra = flake.packages.${builtins.currentSystem}.hydra-node;

  # Parameters
  ############

  testnet_magic = 42;
  baseport = workload.parameters.baseport or 31000;
  heads_per_cardano_node = workload.parameters.heads_per_cardano_node or 1;
  # Filter producers from "node-specs.json".
  producers =
    builtins.filter
      (nodeSpec: nodeSpec.isProducer)
      (builtins.attrValues nodeSpecs)
  ;
  # Construct an "array" with node producers to use in BASH `for` loops.
  producers_bash_array =
      "("
    + (builtins.concatStringsSep
        " "
        (builtins.map
          (x: "\"" + x.name + "\"")
          producers
        )
      )
    + ")"
  ;
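  # Illustrative only (node names are assumptions; real names come from
  # "node-specs.json"): with producers "node-0" and "node-1" this evaluates
  # to the literal string: ("node-0" "node-1")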

in ''
${import ./utils/keys.nix
  { inherit coreutils jq cardano-cli testnet_magic;
  }
}

${import ./utils/utxo.nix
  { inherit coreutils jq cardano-cli testnet_magic;
  }
}

# Waits for all jobs to finish independent of their exit status!
# Returns the first error code obtained if any one fails.
wait_all () {
  wait_internal 0 "false" "$@"
}
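
# Usage sketch (illustrative, not part of the workload): run two background
# jobs and collect their exit statuses, waiting for both to finish.
#   sleep 5 & pid_1=$!
#   sleep 9 & pid_2=$!
#   wait_all "''${pid_1}" "''${pid_2}"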

# Waits for any job to fail or all to be OK!
# All processes are killed as soon as one fails!
# Returns the first error code obtained if any one fails.
wait_kill_em_all () {
  # We are scanning the scene in the city tonite ... searching, seek and destroy
  wait_internal 0 "true" "$@"
}
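
# Usage sketch (illustrative): the fail-fast variant of the example above.
#   wait_kill_em_all "''${pid_1}" "''${pid_2}"
# If either job exits with a non-zero status the other is killed right away.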

# Returns 0/success if no process fails, else returns the first error code
# obtained that is not zero.
wait_internal () {
  # The initial status for recursion, on first call it should always be zero!
  local initial_exit_status=''${1}; shift
  # Should all processes be killed as soon as one fails? Else waits for all
  # processes to finish independent of their exit status.
  local kill_em_all=''${1}; shift
  # Array of process IDs or job specifications.
  # If an ID is a job specification, waits for all processes in that job's pipeline.
  local processes_ids=("$@")
  # Are there any processes left to wait for ?
  if test -n "''${processes_ids[*]:-}"
  then
    local wait_exit_status
    local exited_process_id
    # Wait for a single job from the list of processes; its exit status is
    # returned and the identifier of the job that exited is assigned to the
    # variable provided with `-p VAR`.
    wait -n -p exited_process_id "''${processes_ids[@]}"
    wait_exit_status=$?
    # The new exit status only matters if the status to return is still zero.
    if test "''${initial_exit_status}" -eq 0
    then
      initial_exit_status="''${wait_exit_status}"
    fi
    # Create a new array without the newly exited process.
    local processes_ids_p=()
    for p in "''${processes_ids[@]}"
    do
      if test "''${p}" != "''${exited_process_id}"
      then
        processes_ids_p+=("''${p}")
      fi
    done
    # Are there still any processes left to wait for ?
    if test -n "''${processes_ids_p[*]:-}"
    then
      # Keep waiting or kill 'em all ?
      if ! test "''${wait_exit_status}" -eq 0 && test "''${kill_em_all}" = "true"
      then
        kill "''${processes_ids_p[@]}" 2>/dev/null || true
        return "''${wait_exit_status}"
      else
        # Recursion, wiiiiiiiii!
        wait_internal \
          "''${initial_exit_status}" "''${kill_em_all}" "''${processes_ids_p[@]}"
      fi
    else
      return "''${initial_exit_status}"
    fi
  else
    return 0
  fi
}
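
# Illustrative trace (PIDs are made up): `wait_internal 0 "true" 101 102 103`,
# where 102 exits first with status 1, kills 101 and 103 and returns 1;
# called with "false" instead, it would keep waiting on 101 and 103 and
# still return 1 once they are done.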

###############################################################################
# Workload entrypoint #########################################################
###############################################################################

function hydra {
  # Run the workflow for each deployed producer node.
  local producers=${toString producers_bash_array}
  local producers_jobs_array=()
  for producer_name in "''${producers[@]}"
  do
    # Checks if the producer node is deployed on this machine.
    if test -d "../../''${producer_name}"
    then
      hydra_producer "''${producer_name}" &
      producers_jobs_array+=("$!")
    fi
  done
  wait_all "''${producers_jobs_array[@]}"
}
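
# Note: the script is assumed to run from a directory two levels below the
# run's root, so a locally deployed producer is reachable as
# "../../NODE_NAME", next to "../../node-specs.json" and "../../genesis".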

###############################################################################
# Producer node entrypoint ####################################################
###############################################################################

function hydra_producer {
  # Function arguments.
  local producer_name=$1 # node name / folder to find the socket to use.

  local producer_i
  # Look up the producer's numeric index by name.
  producer_i="$( \
    ${jq}/bin/jq --raw-output \
      --arg keyName "''${producer_name}" \
      '.[$keyName].i' \
      ../../node-specs.json \
  )"

  msg "Starting producer \"''${producer_name}\" (''${producer_i})"

  # Parameters for this producer node:
  # - Where to obtain the genesis funds for this producer.
  genesis_funds_vkey="../../genesis/cache-entry/utxo-keys/utxo$((producer_i + 1)).vkey"
  genesis_funds_skey="../../genesis/cache-entry/utxo-keys/utxo$((producer_i + 1)).skey"
  # - IP address and port.
  producer_ip="127.0.0.1"
  producer_port="$((${toString baseport} + producer_i * 10))"
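  # E.g. with the default baseport of 31000: producer 0 listens on 31000,
  # producer 1 on 31010, and so on. The stride of 10 reserves room for the
  # per-head ports assigned in hydra_producer_head below.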

  msg "Producer params: ''${genesis_funds_vkey} - ''${genesis_funds_skey} - ''${producer_port}"

  # Split funds to each of this producer's heads.
  producer_addr="$(build_x_y_z_address "''${producer_name}" "''${producer_i}" 0 0)"
  producer_vkey="$(create_x_y_z_key_files "''${producer_name}" "''${producer_i}" 0 0)".vkey
  producer_skey="$(create_x_y_z_key_files "''${producer_name}" "''${producer_i}" 0 0)".skey
  local producer_head_addr_array=()
  for head_i in {1..${toString heads_per_cardano_node}}
  do
    local producer_head_addr
    producer_head_addr="$(build_x_y_z_address "''${producer_name}" "''${producer_i}" "''${head_i}" 0)"
    producer_head_addr_array+=("''${producer_head_addr}")
    ${coreutils}/bin/echo "hydra_producer: Splitting to: ''${producer_name} - ''${producer_i} - ''${head_i} - ''${producer_head_addr}"
  done
  # Split (no need to wait for the funds or re-submit, the function takes care of that)!
  funds_from_to \
    "''${producer_name}" \
    "''${genesis_funds_vkey}" \
    "''${genesis_funds_skey}" \
    0 \
    0 \
    "''${producer_head_addr_array[@]}"

  for head_i in $(seq 1 ${toString heads_per_cardano_node}); do
    hydra_producer_head \
      "''${producer_name}" \
      "''${producer_i}" \
      "''${producer_port}" \
      "''${head_i}"
  done
}

###############################################################################
# Producer-head entrypoint ####################################################
###############################################################################

function hydra_producer_head {
  # Function arguments.
  local producer_name=$1 # node name / folder to find the socket to use.
  local producer_i=$2
  local producer_port=$3
  local head_i=$4

  msg "Starting head: Producer \"''${producer_name}\" (''${producer_i}) head ''${head_i}"

  # Head parameters.
  head_vkey="$(create_x_y_z_key_files "''${producer_name}" "''${producer_i}" "''${head_i}" 0)".vkey
  head_skey="$(create_x_y_z_key_files "''${producer_name}" "''${producer_i}" "''${head_i}" 0)".skey
  head_addr="$(build_x_y_z_address "''${producer_name}" "''${producer_i}" "''${head_i}" 0)"
  # - IP address and port.
  head_ip="127.0.0.1"
  head_port="$((producer_port + head_i))"
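  # E.g. producer 1 (port 31010 with the default baseport) gets head 1 on
  # port 31011, head 2 on 31012, and so on. With a stride of 10 per producer,
  # heads_per_cardano_node must stay below 10 to avoid port collisions.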

  msg "Head parameters: ''${head_port}"

  wait_any_utxo "''${producer_name}" "''${head_addr}"

  ${hydra}/bin/hydra-node \
    --node-id "''${producer_i}" \
    --listen "''${head_ip}:''${head_port}" \
    --advertise "''${head_ip}:''${head_port}" \
    --cardano-verification-key "''${genesis_funds_vkey}" \
    --cardano-signing-key "''${genesis_funds_skey}" \
    --testnet-magic ${toString testnet_magic} \
    --node-socket "../../''${producer_name}/node.socket"
}

###############################################################################
# Utils #######################################################################
###############################################################################

function msg {
  # Output to stdout unbuffered; otherwise the message may be lost!
  ${coreutils}/bin/stdbuf -o0 \
    ${bashInteractive}/bin/sh -c \
      "${coreutils}/bin/echo -e \"$(${coreutils}/bin/date --rfc-3339=seconds): $1\""
}
''