diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index d7f8af04f8f..b495a8f3c06 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -182,6 +182,7 @@ mod alert; mod blueprints; mod db_metadata; mod ereport; +mod multicast; mod saga; mod sitrep; mod user_data_export; @@ -401,6 +402,8 @@ enum DbCommands { /// Print information about migrations #[clap(alias = "migration")] Migrations(MigrationsArgs), + /// Print information about multicast groups + Multicast(multicast::MulticastArgs), /// Print information about snapshots Snapshots(SnapshotArgs), /// Validate the contents of the database @@ -1367,6 +1370,33 @@ impl DbArgs { }) => { cmd_db_migrations_list(&datastore, &fetch_opts, args).await } + DbCommands::Multicast(multicast::MulticastArgs { + command: multicast::MulticastCommands::Groups(args), + }) => { + multicast::cmd_db_multicast_groups( + &opctx, &datastore, &fetch_opts, &args, + ) + .await + } + DbCommands::Multicast(multicast::MulticastArgs { + command: multicast::MulticastCommands::Members(args), + }) => { + multicast::cmd_db_multicast_members(&datastore, &fetch_opts, &args) + .await + } + DbCommands::Multicast(multicast::MulticastArgs { + command: multicast::MulticastCommands::Pools, + }) => { + multicast::cmd_db_multicast_pools(&datastore, &fetch_opts).await + } + DbCommands::Multicast(multicast::MulticastArgs { + command: multicast::MulticastCommands::Info(args), + }) => { + multicast::cmd_db_multicast_info( + &opctx, &datastore, &fetch_opts, &args, + ) + .await + } DbCommands::Snapshots(SnapshotArgs { command: SnapshotCommands::Info(uuid), }) => cmd_db_snapshot_info(&opctx, &datastore, uuid).await, diff --git a/dev-tools/omdb/src/bin/omdb/db/multicast.rs b/dev-tools/omdb/src/bin/omdb/db/multicast.rs new file mode 100644 index 00000000000..86e72d09835 --- /dev/null +++ b/dev-tools/omdb/src/bin/omdb/db/multicast.rs @@ -0,0 +1,876 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! `omdb db multicast` subcommands +//! +//! # Command Outputs +//! +//! ## `omdb db multicast pools` +//! +//! | Column | Description | +//! |---------------|----------------------------| +//! | POOL_ID | Pool UUID | +//! | POOL_NAME | Pool name | +//! | FIRST_ADDRESS | Range start IP | +//! | LAST_ADDRESS | Range end IP | +//! | CREATED | Creation timestamp | +//! +//! ## `omdb db multicast groups` +//! +//! | Column | Description | +//! |--------------|--------------------------------------| +//! | ID | Group UUID | +//! | NAME | Group name | +//! | STATE | Group state ("Active"/"Creating") | +//! | MULTICAST_IP | Allocated multicast IP | +//! | RANGE | ASM or SSM based on IP range | +//! | UNDERLAY_IP | Underlay group IP (blank if none) | +//! | SOURCES | Union of member source IPs, or "-" | +//! | MEMBERS | Comma-separated "instance@sled" list | +//! | VNI | Virtual network ID | +//! | CREATED | Creation timestamp | +//! +//! Filters: `--state`, `--pool` +//! +//! ## `omdb db multicast members` +//! +//! | Column | Description | +//! |--------------|------------------------------------------| +//! | ID | Member UUID | +//! | GROUP_NAME | Parent group name | +//! | PARENT_ID | Instance UUID | +//! | STATE | Member state ("Joining"/"Joined"/"Left") | +//! | MULTICAST_IP | Group multicast IP | +//! | SOURCES | SSM source IPs, or "-" for ASM | +//! 
| SLED_ID | Assigned sled UUID (blank if none) | +//! | CREATED | Creation timestamp | +//! +//! Filters: `--group-id`, `--group-ip`, `--group-name`, `--state`, +//! `--sled-id`, `--source-ip` +//! +//! ## `omdb db multicast info` +//! +//! Detailed view of a single group with sections: +//! +//! **MULTICAST GROUP** +//! - id, name, state, multicast_ip, vni, source_ips +//! - ip_pool (name + ID), underlay_group, tag, created +//! +//! **UNDERLAY GROUP** (if present) +//! - id, multicast_ip, tag, created +//! +//! **MEMBERS** (table) +//! +//! | Column | Description | +//! |--------------|--------------------------------| +//! | ID | Member UUID | +//! | INSTANCE | Instance name | +//! | STATE | Member state | +//! | MULTICAST_IP | Group multicast IP | +//! | SOURCES | SSM source IPs, or "-" for ASM | +//! | SLED | Sled serial number, or "-" | +//! | CREATED | Creation timestamp | +//! +//! Lookup: `--group-id`, `--ip`, `--name` (exactly one required) + +use std::collections::{BTreeSet, HashMap}; + +use anyhow::Context; +use async_bb8_diesel::AsyncRunQueryDsl; +use chrono::{DateTime, Utc}; +use clap::builder::{PossibleValue, PossibleValuesParser, TypedValueParser}; +use clap::{Args, Subcommand}; +use diesel::prelude::*; +use tabled::Tabled; +use uuid::Uuid; + +use nexus_db_model::{ + ExternalMulticastGroup, IpPool, IpPoolRange, IpPoolType, + MulticastGroupMember, MulticastGroupMemberState, MulticastGroupState, + UnderlayMulticastGroup, +}; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; +use nexus_types::identity::Resource; +use omicron_common::address::is_ssm_address; +use omicron_uuid_kinds::{GenericUuid, MulticastGroupUuid, SledUuid}; + +use crate::db::{DbFetchOptions, check_limit}; +use crate::helpers::{datetime_rfc3339_concise, display_option_blank}; + +// Display labels for multicast address range classification +const RANGE_SSM: &str = "SSM"; +const RANGE_ASM: &str = "ASM"; + +/// `omdb db multicast` subcommand +#[derive(Debug, Args, Clone)] +pub(super) struct MulticastArgs { + #[command(subcommand)] + pub command: MulticastCommands, +} + +#[derive(Debug, Subcommand, Clone)] +pub(super) enum MulticastCommands { + /// List all multicast groups. + /// + /// Shows ID, name, state, multicast IP, address range type (ASM/SSM), + /// underlay IP, source IPs union, VNI, and creation time. + #[clap(alias = "ls")] + Groups(MulticastGroupsArgs), + + /// List all multicast group members. + /// + /// Shows member ID, group name, parent instance ID, state, multicast IP, + /// source IPs, sled ID, and creation time. + Members(MulticastMembersArgs), + + /// List multicast IP pools and their ranges. + /// + /// Shows pool ID, name, first/last addresses, and creation time. + Pools, + + /// Get detailed info for a multicast group. + /// + /// Shows group details, associated underlay group, and all members. 
+    #[clap(alias = "show")]
+    Info(MulticastInfoArgs),
+}
+
+#[derive(Debug, Args, Clone)]
+pub(super) struct MulticastGroupsArgs {
+    /// Filter by state
+    #[arg(
+        long,
+        ignore_case = true,
+        value_parser = PossibleValuesParser::new(
+            MulticastGroupState::ALL_STATES
+                .iter()
+                .map(|v| PossibleValue::new(v.label()))
+        ).try_map(|s| s.parse::<MulticastGroupState>()),
+    )]
+    state: Option<MulticastGroupState>,
+    /// Filter by pool name
+    #[arg(long)]
+    pool: Option<String>,
+}
+
+#[derive(Debug, Args, Clone)]
+pub(super) struct MulticastMembersArgs {
+    /// Filter by group ID
+    #[arg(long)]
+    group_id: Option<Uuid>,
+    /// Filter by group IP address (e.g., 239.1.2.3)
+    #[arg(long)]
+    group_ip: Option<std::net::IpAddr>,
+    /// Filter by group name
+    #[arg(long)]
+    group_name: Option<String>,
+    /// Filter by state
+    #[arg(
+        long,
+        ignore_case = true,
+        value_parser = PossibleValuesParser::new(
+            MulticastGroupMemberState::ALL_STATES
+                .iter()
+                .map(|v| PossibleValue::new(v.label()))
+        ).try_map(|s| s.parse::<MulticastGroupMemberState>()),
+    )]
+    state: Option<MulticastGroupMemberState>,
+    /// Filter by sled ID
+    #[arg(long)]
+    sled_id: Option<SledUuid>,
+    /// Filter by source IP (members subscribed to this source)
+    #[arg(long)]
+    source_ip: Option<std::net::IpAddr>,
+}
+
+#[derive(Debug, Args, Clone)]
+#[group(required = true, multiple = false)]
+pub(super) struct MulticastInfoArgs {
+    /// Multicast group ID
+    #[arg(long)]
+    group_id: Option<Uuid>,
+    /// Multicast IP address (e.g., 239.1.2.3)
+    #[arg(long)]
+    ip: Option<std::net::IpAddr>,
+    /// Multicast group name
+    #[arg(long)]
+    name: Option<String>,
+}
+
+#[derive(Tabled)]
+#[tabled(rename_all = "SCREAMING_SNAKE_CASE")]
+struct MulticastGroupRow {
+    id: Uuid,
+    name: String,
+    state: MulticastGroupState,
+    multicast_ip: std::net::IpAddr,
+    /// ASM (any-source) or SSM (source-specific) based on IP range
+    range: &'static str,
+    #[tabled(display_with = "display_option_blank")]
+    underlay_ip: Option<std::net::IpAddr>,
+    /// Source IPs union from members ("-" = any source)
+    sources: String,
+    /// Members formatted as "inst_1@sled, ..., inst_n@sled"
+    members: String,
+    vni: u32,
+    #[tabled(display_with = "datetime_rfc3339_concise")]
+    created: DateTime<Utc>,
+}
+
+#[derive(Tabled)]
+#[tabled(rename_all = "SCREAMING_SNAKE_CASE")]
+struct MulticastMemberRow {
+    id: Uuid,
+    group_name: String,
+    parent_id: Uuid,
+    state: MulticastGroupMemberState,
+    multicast_ip: std::net::IpAddr,
+    /// Source IPs for source filtering ("-" = any source)
+    sources: String,
+    #[tabled(display_with = "display_option_blank")]
+    sled_id: Option<SledUuid>,
+    #[tabled(display_with = "datetime_rfc3339_concise")]
+    created: DateTime<Utc>,
+}
+
+#[derive(Tabled)]
+#[tabled(rename_all = "SCREAMING_SNAKE_CASE")]
+struct MulticastInfoMemberRow {
+    id: Uuid,
+    instance: String,
+    state: MulticastGroupMemberState,
+    multicast_ip: std::net::IpAddr,
+    /// Source IPs for source filtering ("-" = any source)
+    sources: String,
+    sled: String,
+    #[tabled(display_with = "datetime_rfc3339_concise")]
+    created: DateTime<Utc>,
+}
+
+// Build output combining pools and ranges
+#[derive(Tabled)]
+#[tabled(rename_all = "SCREAMING_SNAKE_CASE")]
+struct MulticastPoolRow {
+    pool_id: Uuid,
+    pool_name: String,
+    first_address: std::net::IpAddr,
+    last_address: std::net::IpAddr,
+    #[tabled(display_with = "datetime_rfc3339_concise")]
+    created: chrono::DateTime<Utc>,
+}
+
+pub(super) async fn cmd_db_multicast_groups(
+    opctx: &OpContext,
+    datastore: &DataStore,
+    fetch_opts: &DbFetchOptions,
+    args: &MulticastGroupsArgs,
+) -> Result<(), anyhow::Error> {
+    use nexus_db_schema::schema::instance::dsl as instance_dsl;
+    use nexus_db_schema::schema::ip_pool::dsl as pool_dsl;
+    use nexus_db_schema::schema::multicast_group::dsl;
+    use nexus_db_schema::schema::multicast_group_member::dsl as member_dsl;
+    use nexus_db_schema::schema::sled::dsl as sled_dsl;
+    use nexus_db_schema::schema::underlay_multicast_group::dsl as underlay_dsl;
+
+    let conn = datastore.pool_connection_for_tests().await?;
+
+    let mut query = dsl::multicast_group.into_boxed();
+    if !fetch_opts.include_deleted {
+        query = query.filter(dsl::time_deleted.is_null());
+    }
+    if let Some(state) = args.state {
+        query = query.filter(dsl::state.eq(state));
+    }
+    if let Some(ref pool_name) = args.pool {
+        let pool_id: Uuid = pool_dsl::ip_pool
+            .filter(pool_dsl::name.eq(pool_name.clone()))
+            .filter(pool_dsl::time_deleted.is_null())
+            .select(pool_dsl::id)
+            .first_async(&*conn)
+            .await
+            .with_context(|| {
+                format!("no pool found with name '{pool_name}'")
+            })?;
+        query = query.filter(dsl::ip_pool_id.eq(pool_id));
+    }
+
+    let groups: Vec<ExternalMulticastGroup> = query
+        .order_by(dsl::time_created.desc())
+        .limit(i64::from(u32::from(fetch_opts.fetch_limit)))
+        .select(ExternalMulticastGroup::as_select())
+        .get_results_async(&*conn)
+        .await?;
+
+    check_limit(&groups, fetch_opts.fetch_limit, || {
+        String::from("listing multicast groups")
+    });
+
+    // Batch lookup underlay IPs for groups
+    let underlay_ids: Vec<Uuid> =
+        groups.iter().filter_map(|group| group.underlay_group_id).collect();
+    let underlay_map: HashMap<Uuid, ipnetwork::IpNetwork> =
+        if underlay_ids.is_empty() {
+            HashMap::new()
+        } else {
+            underlay_dsl::underlay_multicast_group
+                .filter(underlay_dsl::id.eq_any(underlay_ids))
+                .select((underlay_dsl::id, underlay_dsl::multicast_ip))
+                .get_results_async::<(Uuid, ipnetwork::IpNetwork)>(&*conn)
+                .await
+                .unwrap_or_default()
+                .into_iter()
+                .collect()
+        };
+
+    // Derive source IPs union from members for each group
+    // Source IPs are stored per-member; groups show the union of all member sources
+    let group_uuids: Vec<MulticastGroupUuid> = groups
+        .iter()
+        .map(|group| MulticastGroupUuid::from_untyped_uuid(group.identity.id))
+        .collect();
+    let source_ips_raw = datastore
+        .multicast_groups_source_ips_union(opctx, &group_uuids)
+        .await
+        .context("failed to fetch source IPs union")?;
+    let source_ips_map: HashMap<Uuid, BTreeSet<std::net::IpAddr>> =
+        source_ips_raw
+            .into_iter()
+            .map(|(k, v)| (k, v.into_iter().collect()))
+            .collect();
+
+    // Fetch members for all groups
+    let group_ids: Vec<Uuid> = groups.iter().map(|g| g.identity.id).collect();
+    let members: Vec<MulticastGroupMember> = if group_ids.is_empty() {
+        Vec::new()
+    } else {
+        let mut mq = member_dsl::multicast_group_member
+            .filter(member_dsl::external_group_id.eq_any(group_ids))
+            .into_boxed();
+        if !fetch_opts.include_deleted {
+            mq = mq.filter(member_dsl::time_deleted.is_null());
+        }
+        mq.select(MulticastGroupMember::as_select())
+            .get_results_async(&*conn)
+            .await
+            .unwrap_or_default()
+    };
+
+    // Batch lookup instance names
+    let parent_ids: Vec<Uuid> = members.iter().map(|m| m.parent_id).collect();
+    let instance_names: HashMap<Uuid, String> = if parent_ids.is_empty() {
+        HashMap::new()
+    } else {
+        instance_dsl::instance
+            .filter(instance_dsl::id.eq_any(parent_ids))
+            .select((instance_dsl::id, instance_dsl::name))
+            .get_results_async::<(Uuid, String)>(&*conn)
+            .await
+            .unwrap_or_default()
+            .into_iter()
+            .collect()
+    };
+
+    // Batch lookup sled serials
+    let sled_ids: Vec<Uuid> = members
+        .iter()
+        .filter_map(|m| m.sled_id.map(|s| s.into_untyped_uuid()))
+        .collect();
+    let sled_serials: HashMap<Uuid, String> = if sled_ids.is_empty() {
+        HashMap::new()
+    } else {
+        sled_dsl::sled
+            .filter(sled_dsl::id.eq_any(sled_ids))
+            .select((sled_dsl::id, sled_dsl::serial_number))
+            .get_results_async::<(Uuid, String)>(&*conn)
+            .await
+            .unwrap_or_default()
+            .into_iter()
+            .collect()
+    };
+
+    // Build group_id -> formatted members string
+    let mut members_map: HashMap<Uuid, Vec<String>> = HashMap::new();
+    for member in &members {
+        let inst_name = instance_names
+            .get(&member.parent_id)
+            .cloned()
+            .unwrap_or_else(|| member.parent_id.to_string());
+        let sled_serial = member
+            .sled_id
+            .and_then(|s| sled_serials.get(&s.into_untyped_uuid()).cloned())
+            .unwrap_or_else(|| "-".to_string());
+        let formatted = format!("{inst_name}@{sled_serial}");
+        members_map
+            .entry(member.external_group_id)
+            .or_default()
+            .push(formatted);
+    }
+
+    let rows: Vec<MulticastGroupRow> = groups
+        .into_iter()
+        .map(|group| {
+            let mcast_ip = group.multicast_ip.ip();
+            let range =
+                if is_ssm_address(mcast_ip) { RANGE_SSM } else { RANGE_ASM };
+            // Format source IPs union (derived from members)
+            let sources = source_ips_map
+                .get(&group.identity.id)
+                .filter(|source_ips| !source_ips.is_empty())
+                .map(|source_ips| {
+                    source_ips
+                        .iter()
+                        .map(|ip| ip.to_string())
+                        .collect::<Vec<_>>()
+                        .join(",")
+                })
+                .unwrap_or_else(|| "-".to_string());
+            let underlay_ip = group
+                .underlay_group_id
+                .and_then(|id| underlay_map.get(&id))
+                .map(|ip| ip.ip());
+            let members = members_map
+                .get(&group.identity.id)
+                .map(|v| v.join(", "))
+                .unwrap_or_else(|| "-".to_string());
+            MulticastGroupRow {
+                id: group.identity.id,
+                name: group.identity.name.to_string(),
+                state: group.state,
+                multicast_ip: mcast_ip,
+                range,
+                underlay_ip,
+                sources,
+                members,
+                vni: u32::from(group.vni.0),
+                created: group.identity.time_created,
+            }
+        })
+        .collect();
+
+    let table = tabled::Table::new(rows)
+        .with(tabled::settings::Style::empty())
+        .with(tabled::settings::Padding::new(0, 1, 0, 0))
+        .to_string();
+
+    println!("{table}");
+
+    Ok(())
+}
+
+pub(super) async fn cmd_db_multicast_members(
+    datastore: &DataStore,
+    fetch_opts: &DbFetchOptions,
+    args: &MulticastMembersArgs,
+) -> Result<(), anyhow::Error> {
+    use nexus_db_schema::schema::multicast_group::dsl as group_dsl;
+    use nexus_db_schema::schema::multicast_group_member::dsl;
+
+    let conn = datastore.pool_connection_for_tests().await?;
+
+    // Resolve group_ip or group_name to a group_id if specified
+    let resolved_group_id = match (&args.group_ip, &args.group_name) {
+        (Some(ip), _) => {
+            let group: ExternalMulticastGroup = group_dsl::multicast_group
+                .filter(group_dsl::time_deleted.is_null())
+                .filter(
+                    group_dsl::multicast_ip.eq(ipnetwork::IpNetwork::from(*ip)),
+                )
+                .select(ExternalMulticastGroup::as_select())
+                .first_async(&*conn)
+                .await
+                .with_context(|| format!("no multicast group with IP {ip}"))?;
+            Some(group.id())
+        }
+        (None, Some(name)) => {
+            let group: ExternalMulticastGroup = group_dsl::multicast_group
+                .filter(group_dsl::time_deleted.is_null())
+                .filter(group_dsl::name.eq(name.clone()))
+                .select(ExternalMulticastGroup::as_select())
+                .first_async(&*conn)
+                .await
+                .with_context(|| {
+                    format!("no multicast group with name '{name}'")
+                })?;
+            Some(group.id())
+        }
+        (None, None) => args.group_id,
+    };
+
+    let mut query = dsl::multicast_group_member.into_boxed();
+    if !fetch_opts.include_deleted {
+        query = query.filter(dsl::time_deleted.is_null());
+    }
+    if let Some(group_id) = resolved_group_id {
+        query = query.filter(dsl::external_group_id.eq(group_id));
+    }
+    if let Some(state) = args.state {
+        query = query.filter(dsl::state.eq(state));
+    }
+    if let Some(sled_id) = args.sled_id {
+        query = query.filter(dsl::sled_id.eq(sled_id.into_untyped_uuid()));
+    }
+    if let Some(source_ip) = args.source_ip {
+        let ip_network = ipnetwork::IpNetwork::from(source_ip);
+        query = query.filter(dsl::source_ips.contains(vec![ip_network]));
+    }
+
+    let members: Vec<MulticastGroupMember> = query
+        .order_by(dsl::time_created.desc())
+        .limit(i64::from(u32::from(fetch_opts.fetch_limit)))
+        .select(MulticastGroupMember::as_select())
+        .get_results_async(&*conn)
+        .await?;
+
+    check_limit(&members, fetch_opts.fetch_limit, || {
+        String::from("listing multicast group members")
+    });
+
+    // Batch lookup group names
+    let group_ids: Vec<Uuid> =
+        members.iter().map(|member| member.external_group_id).collect();
+    let group_names: HashMap<Uuid, String> = if group_ids.is_empty() {
+        HashMap::new()
+    } else {
+        group_dsl::multicast_group
+            .filter(group_dsl::id.eq_any(group_ids))
+            .select((group_dsl::id, group_dsl::name))
+            .get_results_async::<(Uuid, String)>(&*conn)
+            .await
+            .unwrap_or_default()
+            .into_iter()
+            .collect()
+    };
+
+    let rows: Vec<MulticastMemberRow> = members
+        .into_iter()
+        .map(|member| {
+            let group_name = group_names
+                .get(&member.external_group_id)
+                .cloned()
+                .unwrap_or_else(|| member.external_group_id.to_string());
+            let sources = Some(&member.source_ips)
+                .filter(|ips| !ips.is_empty())
+                .map(|ips| {
+                    ips.iter()
+                        .map(|ip| ip.ip().to_string())
+                        .collect::<Vec<_>>()
+                        .join(",")
+                })
+                .unwrap_or_else(|| "-".to_string());
+            MulticastMemberRow {
+                id: member.id,
+                group_name,
+                parent_id: member.parent_id,
+                state: member.state,
+                multicast_ip: member.multicast_ip.ip(),
+                sources,
+                sled_id: member.sled_id.map(SledUuid::from),
+                created: member.time_created,
+            }
+        })
+        .collect();
+
+    let table = tabled::Table::new(rows)
+        .with(tabled::settings::Style::empty())
+        .with(tabled::settings::Padding::new(0, 1, 0, 0))
+        .to_string();
+
+    println!("{table}");
+
+    Ok(())
+}
+
+pub(super) async fn cmd_db_multicast_pools(
+    datastore: &DataStore,
+    fetch_opts: &DbFetchOptions,
+) -> Result<(), anyhow::Error> {
+    use nexus_db_schema::schema::ip_pool::dsl as pool_dsl;
+    use nexus_db_schema::schema::ip_pool_range::dsl as range_dsl;
+
+    let conn = datastore.pool_connection_for_tests().await?;
+
+    // Get multicast pools
+    let mut query = pool_dsl::ip_pool.into_boxed();
+    query = query.filter(pool_dsl::pool_type.eq(IpPoolType::Multicast));
+    if !fetch_opts.include_deleted {
+        query = query.filter(pool_dsl::time_deleted.is_null());
+    }
+
+    let pools: Vec<IpPool> = query
+        .order_by(pool_dsl::time_created.desc())
+        .limit(i64::from(u32::from(fetch_opts.fetch_limit)))
+        .select(IpPool::as_select())
+        .get_results_async(&*conn)
+        .await?;
+
+    check_limit(&pools, fetch_opts.fetch_limit, || {
+        String::from("listing multicast pools")
+    });
+
+    if pools.is_empty() {
+        println!("no multicast IP pools found");
+        return Ok(());
+    }
+
+    // Get ranges for each pool
+    let pool_ids: Vec<Uuid> = pools.iter().map(|pool| pool.id()).collect();
+
+    let mut range_query = range_dsl::ip_pool_range.into_boxed();
+    range_query =
+        range_query.filter(range_dsl::ip_pool_id.eq_any(pool_ids.clone()));
+    if !fetch_opts.include_deleted {
+        range_query = range_query.filter(range_dsl::time_deleted.is_null());
+    }
+
+    let ranges: Vec<IpPoolRange> = range_query
+        .order_by(range_dsl::first_address)
+        .select(IpPoolRange::as_select())
+        .get_results_async(&*conn)
+        .await?;
+
+    let pool_map: HashMap<Uuid, &IpPool> =
+        pools.iter().map(|pool| (pool.id(), pool)).collect();
+
+    let rows: Vec<MulticastPoolRow> = ranges
+        .into_iter()
+        .filter_map(|range| {
+            pool_map.get(&range.ip_pool_id).map(|pool| MulticastPoolRow {
+                pool_id: pool.id(),
+                pool_name: pool.name().to_string(),
+                first_address: range.first_address.ip(),
+                last_address: range.last_address.ip(),
+                created: range.time_created,
+            })
+        })
+        .collect();
+
+    if rows.is_empty() {
+        println!("no multicast IP pool ranges found");
+        return Ok(());
+    }
+
+    let table = tabled::Table::new(rows)
+        .with(tabled::settings::Style::empty())
+        .with(tabled::settings::Padding::new(0, 1, 0, 0))
+        .to_string();
+
+    println!("{table}");
+
+    Ok(())
+}
+
+pub(super) async fn cmd_db_multicast_info(
+    opctx: &OpContext,
+    datastore: &DataStore,
+    fetch_opts: &DbFetchOptions,
+    args: &MulticastInfoArgs,
+) -> Result<(), anyhow::Error> {
+    use nexus_db_schema::schema::instance::dsl as instance_dsl;
+    use nexus_db_schema::schema::ip_pool::dsl as pool_dsl;
+    use nexus_db_schema::schema::multicast_group::dsl as group_dsl;
+    use nexus_db_schema::schema::multicast_group_member::dsl as member_dsl;
+    use nexus_db_schema::schema::sled::dsl as sled_dsl;
+    use nexus_db_schema::schema::underlay_multicast_group::dsl as underlay_dsl;
+
+    let conn = datastore.pool_connection_for_tests().await?;
+
+    // Find the group by ID, IP, or name, pairing filter with error message
+    let (mut query, not_found_msg) =
+        match (&args.group_id, &args.ip, &args.name) {
+            (Some(id), _, _) => (
+                group_dsl::multicast_group
+                    .filter(group_dsl::id.eq(*id))
+                    .into_boxed(),
+                format!("no multicast group found with ID {id}"),
+            ),
+            (None, Some(ip), _) => (
+                group_dsl::multicast_group
+                    .filter(
+                        group_dsl::multicast_ip
+                            .eq(ipnetwork::IpNetwork::from(*ip)),
+                    )
+                    .into_boxed(),
+                format!("no multicast group found with IP {ip}"),
+            ),
+            (None, None, Some(name)) => (
+                group_dsl::multicast_group
+                    .filter(group_dsl::name.eq(name.clone()))
+                    .into_boxed(),
+                format!("no multicast group found with name \"{name}\""),
+            ),
+            (None, None, None) => {
+                anyhow::bail!("must specify --group-id, --ip, or --name")
+            }
+        };
+
+    if !fetch_opts.include_deleted {
+        query = query.filter(group_dsl::time_deleted.is_null());
+    }
+
+    // Fetch group with underlay in single query using LEFT JOIN
+    let result: Option<(
+        ExternalMulticastGroup,
+        Option<UnderlayMulticastGroup>,
+    )> =
+        query
+            .left_join(underlay_dsl::underlay_multicast_group.on(
+                underlay_dsl::id.nullable().eq(group_dsl::underlay_group_id),
+            ))
+            .select((
+                ExternalMulticastGroup::as_select(),
+                Option::<UnderlayMulticastGroup>::as_select(),
+            ))
+            .first_async(&*conn)
+            .await
+            .optional()?;
+
+    let (group, underlay) = match result {
+        Some((grp, ulay)) => (grp, ulay),
+        None => {
+            println!("{not_found_msg}");
+            return Ok(());
+        }
+    };
+
+    // Look up the pool name
+    let pool_name: String = pool_dsl::ip_pool
+        .filter(pool_dsl::id.eq(group.ip_pool_id))
+        .select(pool_dsl::name)
+        .first_async(&*conn)
+        .await
+        .unwrap_or_else(|_| "".into());
+
+    // Derive source_ips union from members (source_ips is per-member, not per-group)
+    let group_uuid = MulticastGroupUuid::from_untyped_uuid(group.identity.id);
+    let source_ips_map = datastore
+        .multicast_groups_source_ips_union(opctx, &[group_uuid])
+        .await
+        .context("failed to fetch source IPs union")?;
+    let source_ips_display = source_ips_map
+        .get(&group.identity.id)
+        .filter(|ips| !ips.is_empty())
+        .map(|ips| {
+            ips.iter().map(|ip| ip.to_string()).collect::<Vec<_>>().join(",")
+        })
+        .unwrap_or_else(|| "-".to_string());
+
+    // Print group details
+    println!("MULTICAST GROUP");
+    println!("    id: {}", group.identity.id);
+    println!("    name: {}", group.identity.name);
+    println!("    state: {:?}", group.state);
+    println!("    multicast_ip: {}", group.multicast_ip);
+    println!("    vni: {}", u32::from(group.vni.0));
+    println!("    source_ips: {source_ips_display}");
+    println!("    ip_pool: {pool_name} ({})", group.ip_pool_id);
+    println!("    underlay_group: {:?}", group.underlay_group_id);
+    println!("    tag: {:?}", group.tag);
+    println!("    created: {}", group.identity.time_created);
+    if let Some(deleted) = group.identity.time_deleted {
+        println!("    deleted: {deleted}");
+    }
+
+    // Display underlay group if present
+    if let Some(underlay_group) = underlay {
+        println!("\nUNDERLAY GROUP");
+        println!("    id: {}", underlay_group.id);
+        println!("    multicast_ip: {}", underlay_group.multicast_ip);
+        println!("    tag: {:?}", underlay_group.tag);
+        println!("    created: {}", underlay_group.time_created);
+        if let Some(deleted) = underlay_group.time_deleted {
+            println!("    deleted: {deleted}");
+        }
+    }
+
+    // Find members for this group
+    let mut member_query = member_dsl::multicast_group_member.into_boxed();
+    member_query = member_query
+        .filter(member_dsl::external_group_id.eq(group.identity.id));
+    if !fetch_opts.include_deleted {
+        member_query = member_query.filter(member_dsl::time_deleted.is_null());
+    }
+
+    let members: Vec<MulticastGroupMember> = member_query
+        .order_by(member_dsl::time_created.desc())
+        .select(MulticastGroupMember::as_select())
+        .get_results_async(&*conn)
+        .await?;
+
+    if members.is_empty() {
+        println!("\nMEMBERS: (none)");
+    } else {
+        println!("\nMEMBERS ({}):", members.len());
+
+        // Batch lookup instance names (parent_id references instances)
+        let parent_ids: Vec<Uuid> =
+            members.iter().map(|member| member.parent_id).collect();
+        let instances: Vec<(Uuid, String)> = instance_dsl::instance
+            .filter(instance_dsl::id.eq_any(parent_ids))
+            .select((instance_dsl::id, instance_dsl::name))
+            .get_results_async(&*conn)
+            .await
+            .unwrap_or_default();
+        let instance_map: HashMap<Uuid, String> =
+            instances.into_iter().collect();
+
+        // Batch lookup sled serials
+        let sled_ids: Vec<Uuid> = members
+            .iter()
+            .filter_map(|member| member.sled_id.map(|s| s.into_untyped_uuid()))
+            .collect();
+        let sleds: Vec<(Uuid, String)> = if sled_ids.is_empty() {
+            Vec::new()
+        } else {
+            sled_dsl::sled
+                .filter(sled_dsl::id.eq_any(sled_ids))
+                .select((sled_dsl::id, sled_dsl::serial_number))
+                .get_results_async(&*conn)
+                .await
+                .unwrap_or_default()
+        };
+        let sled_map: HashMap<Uuid, String> = sleds.into_iter().collect();
+
+        let rows: Vec<MulticastInfoMemberRow> = members
+            .into_iter()
+            .map(|member| {
+                let instance_name = instance_map
+                    .get(&member.parent_id)
+                    .cloned()
+                    .unwrap_or_else(|| member.parent_id.to_string());
+                let sled_serial = member
+                    .sled_id
+                    .and_then(|s| sled_map.get(&s.into_untyped_uuid()).cloned())
+                    .unwrap_or_else(|| "-".to_string());
+                let sources = Some(&member.source_ips)
+                    .filter(|ips| !ips.is_empty())
+                    .map(|ips| {
+                        ips.iter()
+                            .map(|ip| ip.ip().to_string())
+                            .collect::<Vec<_>>()
+                            .join(",")
+                    })
+                    .unwrap_or_else(|| "-".to_string());
+                MulticastInfoMemberRow {
+                    id: member.id,
+                    instance: instance_name,
+                    state: member.state,
+                    multicast_ip: member.multicast_ip.ip(),
+                    sources,
+                    sled: sled_serial,
+                    created: member.time_created,
+                }
+            })
+            .collect();
+
+        let table = tabled::Table::new(rows)
+            .with(tabled::settings::Style::empty())
+            .with(tabled::settings::Padding::new(0, 1, 0, 0))
+            .to_string();
+
+        println!("{table}");
+    }
+
+    Ok(())
+}
diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs
index 661e3780c98..9a8c11cd81b 100644
--- a/dev-tools/omdb/src/bin/omdb/nexus.rs
+++ b/dev-tools/omdb/src/bin/omdb/nexus.rs
@@ -60,6 +60,7 @@ use nexus_types::internal_api::background::InstanceReincarnationStatus;
 use nexus_types::internal_api::background::InstanceUpdaterStatus;
 use nexus_types::internal_api::background::InventoryLoadStatus;
 use nexus_types::internal_api::background::LookupRegionPortStatus;
+use nexus_types::internal_api::background::MulticastGroupReconcilerStatus;
 use nexus_types::internal_api::background::ProbeDistributorStatus;
 use nexus_types::internal_api::background::ReadOnlyRegionReplacementStartStatus;
 use nexus_types::internal_api::background::RegionReplacementDriverStatus;
@@ -1193,6 +1194,9 @@ fn print_task_details(bgtask: &BackgroundTask, details: &serde_json::Value) {
         "lookup_region_port" => {
             print_task_lookup_region_port(details);
         }
+        "multicast_reconciler" => {
+            print_task_multicast_reconciler(details);
+        }
         "phantom_disks" => {
             print_task_phantom_disks(details);
         }
@@ -2109,6 +2113,76 @@ fn print_task_lookup_region_port(details: &serde_json::Value) {
     }
 }
 
+fn print_task_multicast_reconciler(details: &serde_json::Value) {
+    let status = match serde_json::from_value::<MulticastGroupReconcilerStatus>(
+        details.clone(),
+    ) {
+        Err(error) => {
+            eprintln!(
+                "warning: failed to interpret task details: {error:?}: {details:?}"
+            );
+            return;
+        }
+        Ok(status) => status,
+    };
+
+    if status.disabled {
+        println!("    multicast feature is disabled");
+        return;
+    }
+
+    const GROUPS_CREATED: &str = "groups created (Creating->Active):";
+    const GROUPS_DELETED: &str = "groups deleted (cleanup):";
+    const GROUPS_VERIFIED: &str = "groups verified (Active):";
+    const EMPTY_GROUPS_MARKED: &str = "empty groups marked for deletion:";
+    const MEMBERS_PROCESSED: &str = "members processed:";
+    const MEMBERS_DELETED: &str = "members deleted:";
+    const WIDTH: usize = const_max_len(&[
+        GROUPS_CREATED,
+        GROUPS_DELETED,
+        GROUPS_VERIFIED,
+        EMPTY_GROUPS_MARKED,
+        MEMBERS_PROCESSED,
+        MEMBERS_DELETED,
+    ]) + 1;
+    const NUM_WIDTH: usize = 3;
+
+    if !status.errors.is_empty() {
+        println!(
+            "    task did not complete successfully! ({} errors)",
+            status.errors.len()
+        );
+        for error in &status.errors {
+            println!("    > {error}");
+        }
+    }
+
+    println!(
+        "    {GROUPS_CREATED:<WIDTH$}{:>NUM_WIDTH$}",
+        status.groups_created
+    );
+    println!(
+        "    {GROUPS_DELETED:<WIDTH$}{:>NUM_WIDTH$}",
+        status.groups_deleted
+    );
+    println!(
+        "    {GROUPS_VERIFIED:<WIDTH$}{:>NUM_WIDTH$}",
+        status.groups_verified
+    );
+    println!(
+        "    {EMPTY_GROUPS_MARKED:<WIDTH$}{:>NUM_WIDTH$}",
+        status.empty_groups_marked
+    );
+    println!(
+        "    {MEMBERS_PROCESSED:<WIDTH$}{:>NUM_WIDTH$}",
+        status.members_processed
+    );
+    println!(
+        "    {MEMBERS_DELETED:<WIDTH$}{:>NUM_WIDTH$}",
+        status.members_deleted
+    );
+}
+
 fn print_task_phantom_disks(details: &serde_json::Value) {
     #[derive(Deserialize)]
     struct TaskSuccess {
diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out
index cd38fea5bb3..ae53a7f14a1 100644
--- a/dev-tools/omdb/tests/successes.out
+++ b/dev-tools/omdb/tests/successes.out
@@ -85,6 +85,36 @@ stderr:
 note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable
 note: database schema version matches expected ()
 =============================================
+EXECUTING COMMAND: omdb ["db", "multicast", "groups"]
+termination: Exited(0)
+---------------------------------------------
+stdout:
+ID NAME STATE MULTICAST_IP RANGE UNDERLAY_IP SOURCES MEMBERS VNI CREATED
+---------------------------------------------
+stderr:
+note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable
+note: database schema version matches expected ()
+=============================================
+EXECUTING COMMAND: omdb ["db", "multicast", "members"]
+termination: Exited(0)
+---------------------------------------------
+stdout:
+ID GROUP_NAME PARENT_ID STATE MULTICAST_IP SOURCES SLED_ID CREATED
+---------------------------------------------
+stderr:
+note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable
+note: database schema version matches expected ()
+=============================================
+EXECUTING COMMAND: omdb ["db", "multicast", "pools"]
+termination: Exited(0)
+---------------------------------------------
+stdout:
+no multicast IP pools found
+---------------------------------------------
+stderr:
+note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable
+note: database schema version matches expected ()
+=============================================
 EXECUTING COMMAND: omdb ["db", "sleds"]
 termination: Exited(0)
 ---------------------------------------------
@@ -713,7 +743,12 @@ task: "multicast_reconciler"
   configured period: every m
   last completed activation: , triggered by
     started at (s ago) and ran for ms
-warning: unknown background task: "multicast_reconciler" (don't know how to interpret details: Object {"disabled": Bool(false), "empty_groups_marked": Number(0), "errors": Array [], "groups_created": Number(0), "groups_deleted": Number(0), "groups_verified": Number(0), "members_deleted": Number(0), "members_processed": Number(0)})
+    groups created (Creating->Active):   0
+    groups deleted (cleanup):            0
+    groups verified (Active):            0
+    empty groups marked for deletion:    0
+    members processed:                   0
+    members deleted:                     0
 
 task: "phantom_disks"
   configured period: every s
@@ -1281,7 +1316,12 @@ task: "multicast_reconciler"
   configured period: every m
   last completed activation: , triggered by
    started at (s ago) and ran for ms
-warning: unknown background task: "multicast_reconciler" (don't know how to interpret details: Object {"disabled": Bool(false), "empty_groups_marked": Number(0),
"errors": Array [], "groups_created": Number(0), "groups_deleted": Number(0), "groups_verified": Number(0), "members_deleted": Number(0), "members_processed": Number(0)}) + groups created (Creating->Active): 0 + groups deleted (cleanup): 0 + groups verified (Active): 0 + empty groups marked for deletion: 0 + members processed: 0 + members deleted: 0 task: "phantom_disks" configured period: every s diff --git a/dev-tools/omdb/tests/test_all_output.rs b/dev-tools/omdb/tests/test_all_output.rs index 1c9104e7f8c..8ce516f6817 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -96,6 +96,7 @@ async fn test_omdb_usage_errors() { &["db", "saga"], &["db", "snapshots"], &["db", "network"], + &["db", "multicast"], &["mgs"], &["nexus"], &["nexus", "background-tasks"], @@ -198,6 +199,9 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { &["db", "dns", "diff", "external", "2"], &["db", "dns", "names", "external", "2"], &["db", "instances"], + &["db", "multicast", "groups"], + &["db", "multicast", "members"], + &["db", "multicast", "pools"], &["db", "sleds"], &["db", "sleds", "-F", "discretionary"], &["mgs", "inventory"], diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index bd28399bc68..3a8bef15cce 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -140,6 +140,7 @@ Commands: instances Alias to `omdb instance list` network Print information about the network migrations Print information about migrations + multicast Print information about multicast groups snapshots Print information about snapshots validate Validate the contents of the database volumes Print information about volumes @@ -205,6 +206,7 @@ Commands: instances Alias to `omdb instance list` network Print information about the network migrations Print information about migrations + multicast Print information about multicast groups snapshots Print information about snapshots validate Validate the contents of the database volumes Print information about volumes @@ -847,6 +849,41 @@ Database Options: --include-deleted whether to include soft-deleted records when enumerating objects that can be soft-deleted +Safety Options: + -w, --destructive Allow potentially-destructive subcommands +============================================= +EXECUTING COMMAND: omdb ["db", "multicast"] +termination: Exited(2) +--------------------------------------------- +stdout: +--------------------------------------------- +stderr: +Print information about multicast groups + +Usage: omdb db multicast [OPTIONS] + +Commands: + groups List all multicast groups + members List all multicast group members + pools List multicast IP pools and their ranges + info Get detailed info for a multicast group + help Print this message or the help of the given subcommand(s) + +Options: + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + --color Color output [default: auto] [possible values: auto, always, never] + -h, --help Print help + +Connection Options: + --db-url URL of the database SQL interface [env: OMDB_DB_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Database Options: + --fetch-limit limit to apply to queries that fetch rows [env: + OMDB_FETCH_LIMIT=] [default: 500] + --include-deleted whether to include soft-deleted records when enumerating objects + that can be soft-deleted + Safety Options: -w, --destructive Allow potentially-destructive subcommands ============================================= 
diff --git a/nexus/db-model/src/multicast_group.rs b/nexus/db-model/src/multicast_group.rs
index 89094e430ba..69a6d4538d5 100644
--- a/nexus/db-model/src/multicast_group.rs
+++ b/nexus/db-model/src/multicast_group.rs
@@ -121,24 +121,90 @@ impl_enum_type!(
     Left => b"left"
 );
 
+impl MulticastGroupState {
+    pub const ALL_STATES: &'static [Self] =
+        &[Self::Creating, Self::Active, Self::Deleting, Self::Deleted];
+
+    pub fn label(&self) -> &'static str {
+        match self {
+            Self::Creating => "Creating",
+            Self::Active => "Active",
+            Self::Deleting => "Deleting",
+            Self::Deleted => "Deleted",
+        }
+    }
+}
+
 impl std::fmt::Display for MulticastGroupState {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.write_str(match self {
-            MulticastGroupState::Creating => "Creating",
-            MulticastGroupState::Active => "Active",
-            MulticastGroupState::Deleting => "Deleting",
-            MulticastGroupState::Deleted => "Deleted",
-        })
+        f.write_str(self.label())
+    }
+}
+
+/// Error returned when parsing a `MulticastGroupState` from a string.
+#[derive(Debug)]
+pub struct MulticastGroupStateParseError(());
+
+impl std::fmt::Display for MulticastGroupStateParseError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "invalid multicast group state")
+    }
+}
+
+impl std::error::Error for MulticastGroupStateParseError {}
+
+impl std::str::FromStr for MulticastGroupState {
+    type Err = MulticastGroupStateParseError;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        for &v in Self::ALL_STATES {
+            if s.eq_ignore_ascii_case(v.label()) {
+                return Ok(v);
+            }
+        }
+        Err(MulticastGroupStateParseError(()))
+    }
+}
+
+impl MulticastGroupMemberState {
+    pub const ALL_STATES: &'static [Self] =
+        &[Self::Joining, Self::Joined, Self::Left];
+
+    pub fn label(&self) -> &'static str {
+        match self {
+            Self::Joining => "Joining",
+            Self::Joined => "Joined",
+            Self::Left => "Left",
+        }
     }
 }
 
 impl std::fmt::Display for MulticastGroupMemberState {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.write_str(match self {
-            MulticastGroupMemberState::Joining => "Joining",
-            MulticastGroupMemberState::Joined => "Joined",
-            MulticastGroupMemberState::Left => "Left",
-        })
+        f.write_str(self.label())
+    }
+}
+
+/// Error returned when parsing a `MulticastGroupMemberState` from a string.
+#[derive(Debug)]
+pub struct MulticastGroupMemberStateParseError(());
+
+impl std::fmt::Display for MulticastGroupMemberStateParseError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "invalid multicast group member state")
+    }
+}
+
+impl std::error::Error for MulticastGroupMemberStateParseError {}
+
+impl std::str::FromStr for MulticastGroupMemberState {
+    type Err = MulticastGroupMemberStateParseError;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        for &v in Self::ALL_STATES {
+            if s.eq_ignore_ascii_case(v.label()) {
+                return Ok(v);
+            }
+        }
+        Err(MulticastGroupMemberStateParseError(()))
     }
 }
diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs
index 2be438e9d32..3f789fac144 100644
--- a/nexus/db-schema/src/schema.rs
+++ b/nexus/db-schema/src/schema.rs
@@ -2817,8 +2817,12 @@ table!
{ } } -// Allow multicast tables to appear together for NOT EXISTS subqueries +// Allow multicast tables to appear together for JOINs and NOT EXISTS subqueries allow_tables_to_appear_in_same_query!(multicast_group, multicast_group_member); +allow_tables_to_appear_in_same_query!( + multicast_group, + underlay_multicast_group +); allow_tables_to_appear_in_same_query!(user_data_export, snapshot, image); diff --git a/nexus/tests/integration_tests/multicast/mod.rs b/nexus/tests/integration_tests/multicast/mod.rs index 563ba039b1e..2e0e043553f 100644 --- a/nexus/tests/integration_tests/multicast/mod.rs +++ b/nexus/tests/integration_tests/multicast/mod.rs @@ -63,6 +63,7 @@ mod failures; mod groups; mod instances; mod networking_integration; +mod omdb; mod pool_selection; // Timeout constants for test operations diff --git a/nexus/tests/integration_tests/multicast/omdb.rs b/nexus/tests/integration_tests/multicast/omdb.rs new file mode 100644 index 00000000000..6727e9c83e0 --- /dev/null +++ b/nexus/tests/integration_tests/multicast/omdb.rs @@ -0,0 +1,609 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Tests for omdb multicast commands with real data. +//! +//! These tests verify that omdb correctly formats multicast data by creating +//! actual multicast pools, groups, and members, then running omdb commands +//! and checking the output. + +use std::net::IpAddr; +use std::path::PathBuf; +use std::process::Command; + +use futures::future::join3; +use nexus_test_utils::http_testing::{AuthnMode, NexusRequest}; +use nexus_test_utils::resource_helpers::{ + create_default_ip_pool, create_project, +}; +use nexus_test_utils_macros::nexus_test; + +use super::*; + +const PROJECT_NAME: &str = "omdb-test-project"; +const TARGET_DIR: &str = "target"; +const OMDB_BIN: &str = "omdb"; +const BUILD_PROFILES: &[&str] = &["debug", "release"]; + +/// Find the omdb binary path. +/// +/// Since omdb is not built as part of omicron-nexus tests, we look for it +/// in the target directory relative to CARGO_MANIFEST_DIR. +fn find_omdb() -> PathBuf { + let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let workspace_root = manifest_dir.parent().unwrap(); + + for profile in BUILD_PROFILES { + let omdb_path = + workspace_root.join(TARGET_DIR).join(profile).join(OMDB_BIN); + if omdb_path.exists() { + return omdb_path; + } + } + + PathBuf::from(OMDB_BIN) +} + +/// Run an omdb command and return its stdout. +fn run_omdb(db_url: &str, args: &[&str]) -> String { + let cmd_path = find_omdb(); + let output = Command::new(&cmd_path) + .env("OMDB_DB_URL", db_url) + .args(args) + .output() + .expect( + "failed to execute `omdb` - ensure `cargo build -p omicron-omdb` has been run", + ); + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + panic!("omdb command failed with args {args:?}:\nstderr: {stderr}"); + } + + String::from_utf8_lossy(&output.stdout).to_string() +} + +/// Test omdb multicast pools command. 
+#[nexus_test] +async fn test_omdb_multicast_pools(cptestctx: &ControlPlaneTestContext) { + let db_url = cptestctx.database.listen_url().to_string(); + let client = &cptestctx.external_client; + + // Before creating any pools, should show "no multicast IP pools found" + let output = run_omdb(&db_url, &["db", "multicast", "pools"]); + assert!( + output.contains("no multicast IP pools found"), + "Expected empty pool message, got: {output}" + ); + + // Create a multicast pool + create_multicast_ip_pool(client, "test-mcast-pool").await; + + // Now should show the pool with all columns + let output = run_omdb(&db_url, &["db", "multicast", "pools"]); + // pool name + assert!( + output.contains("test-mcast-pool"), + "Expected pool name in output, got: {output}" + ); + // first address + assert!( + output.contains("224.2.0.0"), + "Expected first address in output, got: {output}" + ); + // last address + assert!( + output.contains("224.2.255.255"), + "Expected last address in output, got: {output}" + ); +} + +/// Test omdb multicast groups, members, and info commands. +/// +/// This consolidated test verifies all multicast commands work with actual data. +#[nexus_test] +async fn test_omdb_multicast_commands(cptestctx: &ControlPlaneTestContext) { + let db_url = cptestctx.database.listen_url().to_string(); + let client = &cptestctx.external_client; + + // Setup: create pools and project + let (_, _, _) = join3( + create_default_ip_pool(client), + create_project(client, PROJECT_NAME), + create_multicast_ip_pool(client, "test-mcast-pool"), + ) + .await; + + // Create an instance without multicast groups first + let instance = instance_for_multicast_groups( + cptestctx, + PROJECT_NAME, + "test-instance", + false, // don't start + &[], // no multicast groups yet + ) + .await; + + // Add a multicast member via API (this implicitly creates the group) + // Use instance-centric join endpoint: POST /v1/instances/{instance}/multicast-groups/{group} + let join_url = format!( + "/v1/instances/{}/multicast-groups/test-mcast-group?project={PROJECT_NAME}", + instance.identity.id + ); + + put_upsert::<_, MulticastGroupMember>( + client, + &join_url, + &InstanceMulticastGroupJoin { + source_ips: None, // ASM (Any-Source Multicast) + }, + ) + .await; + + // Wait for the group to become "Active" + wait_for_group_active(client, "test-mcast-group").await; + + // Get the group details for later tests + let group_url = mcast_group_url("test-mcast-group"); + let group: MulticastGroup = NexusRequest::object_get(client, &group_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("failed to get group") + .parsed_body() + .expect("failed to parse group"); + + // Test: omdb db multicast groups + let output = run_omdb(&db_url, &["db", "multicast", "groups"]); + // group id + assert!( + output.contains(&group.identity.id.to_string()), + "Expected group id in output, got: {output}" + ); + // group name + assert!( + output.contains("test-mcast-group"), + "Expected group name in output, got: {output}" + ); + // state + assert!( + output.contains("Active"), + "Expected state 'Active' in output, got: {output}" + ); + // multicast ip + assert!( + output.contains(&group.multicast_ip.to_string()), + "Expected multicast ip in output, got: {output}" + ); + // range (ASM for 224.x.x.x) + assert!( + output.contains("ASM"), + "Expected range 'ASM' in output, got: {output}" + ); + // vni (column exists but VNI is internal, not exposed in the external API) + assert!( + output.contains("VNI"), + "Expected VNI column in 
output, got: {output}" + ); + // members (instance@sled format, "-" when not started) + assert!( + output.contains("test-instance@-"), + "Expected member 'test-instance@-' in output, got: {output}" + ); + + // Test: omdb db multicast groups --state active + let output = + run_omdb(&db_url, &["db", "multicast", "groups", "--state", "active"]); + assert!( + output.contains("test-mcast-group"), + "Expected group name with state filter, got: {output}" + ); + + // Test: omdb db multicast groups --pool + let output = run_omdb( + &db_url, + &["db", "multicast", "groups", "--pool", "test-mcast-pool"], + ); + assert!( + output.contains("test-mcast-group"), + "Expected group name with pool filter, got: {output}" + ); + + // Test: omdb db multicast members + let output = run_omdb(&db_url, &["db", "multicast", "members"]); + // group name + assert!( + output.contains("test-mcast-group"), + "Expected group name in members output, got: {output}" + ); + // parent id (instance id) + assert!( + output.contains(&instance.identity.id.to_string()), + "Expected parent id in members output, got: {output}" + ); + // multicast ip + let group_ip = group.multicast_ip.to_string(); + assert!( + output.contains(&group_ip), + "Expected multicast ip in members output, got: {output}" + ); + // sources ("-" for ASM) + assert!( + output.contains("-"), + "Expected sources '-' for ASM in members output, got: {output}" + ); + + // Test: omdb db multicast members --group-name + let output = run_omdb( + &db_url, + &["db", "multicast", "members", "--group-name", "test-mcast-group"], + ); + assert!( + output.contains(&instance.identity.id.to_string()), + "Expected instance ID with group-name filter, got: {output}" + ); + + // Test: omdb db multicast members --group-ip (reuses group_ip from above) + let output = run_omdb( + &db_url, + &["db", "multicast", "members", "--group-ip", &group_ip], + ); + assert!( + output.contains(&instance.identity.id.to_string()), + "Expected instance ID with group-ip filter, got: {output}" + ); + + // Test: omdb db multicast members --group-id + let group_id = group.identity.id.to_string(); + let output = run_omdb( + &db_url, + &["db", "multicast", "members", "--group-id", &group_id], + ); + assert!( + output.contains(&instance.identity.id.to_string()), + "Expected instance ID with group-id filter, got: {output}" + ); + + // Test: omdb db multicast members --state left + // Wait for the RPW reconciler to transition member to "Left" state + // (instance isn't running, so no sled_id assignment) + wait_for_member_state( + cptestctx, + "test-mcast-group", + instance.identity.id, + nexus_db_model::MulticastGroupMemberState::Left, + ) + .await; + let output = + run_omdb(&db_url, &["db", "multicast", "members", "--state", "left"]); + assert!( + output.contains(&instance.identity.id.to_string()), + "Expected instance ID with state=left filter, got: {output}" + ); + + // Test: omdb db multicast members --sled-id + // Create a started instance so the member gets a sled_id + let started_instance = instance_for_multicast_groups( + cptestctx, + PROJECT_NAME, + "started-instance", + true, // start the instance + &[], + ) + .await; + + // Add member to a new group for the started instance + let sled_join_url = format!( + "/v1/instances/{}/multicast-groups/sled-test-group?project={PROJECT_NAME}", + started_instance.identity.id + ); + put_upsert::<_, MulticastGroupMember>( + client, + &sled_join_url, + &InstanceMulticastGroupJoin { source_ips: None }, + ) + .await; + + wait_for_group_active(client, 
"sled-test-group").await; + + // Query members by sled_id - the started instance should be on first_sled + let sled_id = cptestctx.first_sled_id().to_string(); + let output = run_omdb( + &db_url, + &["db", "multicast", "members", "--sled-id", &sled_id], + ); + assert!( + output.contains(&started_instance.identity.id.to_string()), + "Expected started instance ID with sled-id filter, got: {output}" + ); + + // Test: omdb db multicast members --state joined + // Wait for the started instance's member to reach "Joined" state + wait_for_member_state( + cptestctx, + "sled-test-group", + started_instance.identity.id, + nexus_db_model::MulticastGroupMemberState::Joined, + ) + .await; + + // Now test the --state joined filter + let output_joined = + run_omdb(&db_url, &["db", "multicast", "members", "--state", "joined"]); + assert!( + output_joined.contains(&started_instance.identity.id.to_string()), + "Expected started instance in joined state, got: {output_joined}" + ); + // state column shows "Joined" + assert!( + output_joined.contains("Joined"), + "Expected 'Joined' state in members output, got: {output_joined}" + ); + // sled_id column shows the sled UUID + assert!( + output_joined.contains(&sled_id), + "Expected sled_id in members output, got: {output_joined}" + ); + + // Verify info for started instance shows sled serial (not "-") + let output_info = run_omdb( + &db_url, + &["db", "multicast", "info", "--name", "sled-test-group"], + ); + // member instance name + assert!( + output_info.contains("started-instance"), + "Expected 'started-instance' in info members, got: {output_info}" + ); + // underlay group should be present for active group + assert!( + output_info.contains("UNDERLAY GROUP"), + "Expected 'UNDERLAY GROUP' section in info output, got: {output_info}" + ); + + // Verify groups output shows started instance with sled serial (not "-") + let output_groups = run_omdb(&db_url, &["db", "multicast", "groups"]); + // sled-test-group should show "started-instance@" not "started-instance@-" + assert!( + output_groups.contains("started-instance@"), + "Expected 'started-instance@' in groups members column, got: {output_groups}" + ); + // The sled serial should appear (not just "-") + // Note: test sled serial is typically "serial0" or similar + assert!( + !output_groups.contains("started-instance@-"), + "Started instance should have sled serial, not '-', got: {output_groups}" + ); + + // Verify underlay_ip column shows an IP for active groups + // Active groups with joined members should have an underlay group assigned + // The underlay IP is in the ff04::/64 range (admin-scoped IPv6 multicast) + assert!( + output_groups.contains("ff04:"), + "Expected underlay_ip (ff04:*) for active group in groups output, got: {output_groups}" + ); + + // Verify started instance is not in "Left" state + let output_left = + run_omdb(&db_url, &["db", "multicast", "members", "--state", "left"]); + assert!( + !output_left.contains(&started_instance.identity.id.to_string()), + "Started instance should not be in 'Left' state, got: {output_left}" + ); + + // Test: combined filters (--group-name + --state) + // The started instance's member should appear when filtering by both + let output_combined = run_omdb( + &db_url, + &[ + "db", + "multicast", + "members", + "--group-name", + "sled-test-group", + "--state", + "joined", + ], + ); + assert!( + output_combined.contains(&started_instance.identity.id.to_string()), + "Expected started instance with combined filters, got: {output_combined}" + ); + + // Test: 
combined filters that should return empty (wrong group + state)
+    let output_combined_empty = run_omdb(
+        &db_url,
+        &[
+            "db",
+            "multicast",
+            "members",
+            "--group-name",
+            "test-mcast-group",
+            "--state",
+            "joined",
+        ],
+    );
+    // test-mcast-group has a non-started instance, so it should not be in "Joined" state
+    assert!(
+        !output_combined_empty
+            .contains(&started_instance.identity.id.to_string()),
+        "Started instance should not appear in wrong group filter, got: {output_combined_empty}"
+    );
+
+    // Test: omdb db multicast info --name
+    let output = run_omdb(
+        &db_url,
+        &["db", "multicast", "info", "--name", "test-mcast-group"],
+    );
+    // section header
+    assert!(
+        output.contains("MULTICAST GROUP"),
+        "Expected 'MULTICAST GROUP' header in info output, got: {output}"
+    );
+    // id
+    assert!(
+        output.contains(&group.identity.id.to_string()),
+        "Expected group id in info output, got: {output}"
+    );
+    // name
+    assert!(
+        output.contains("test-mcast-group"),
+        "Expected group name in info output, got: {output}"
+    );
+    // state
+    assert!(
+        output.contains("Active"),
+        "Expected state 'Active' in info output, got: {output}"
+    );
+    // multicast ip
+    assert!(
+        output.contains(&group.multicast_ip.to_string()),
+        "Expected multicast ip in info output, got: {output}"
+    );
+    // vni (field exists but VNI is internal, not exposed in the external API)
+    assert!(
+        output.contains("vni:"),
+        "Expected vni field in info output, got: {output}"
+    );
+    // ip pool
+    assert!(
+        output.contains("test-mcast-pool"),
+        "Expected pool name in info output, got: {output}"
+    );
+    // members section
+    assert!(
+        output.contains("MEMBERS"),
+        "Expected 'MEMBERS' section in info output, got: {output}"
+    );
+    // member instance name
+    assert!(
+        output.contains("test-instance"),
+        "Expected instance name in info members, got: {output}"
+    );
+    // member sled ("-" when not started)
+    // Note: info shows sled serial, not sled_id UUID
+    assert!(
+        output.contains("-"),
+        "Expected sled '-' for non-started instance in info members, got: {output}"
+    );
+
+    // Test: omdb db multicast info --ip
+    let output =
+        run_omdb(&db_url, &["db", "multicast", "info", "--ip", &group_ip]);
+    assert!(
+        output.contains("test-mcast-group"),
+        "Expected group name when querying by IP, got: {output}"
+    );
+
+    // Test: omdb db multicast info --group-id (reuses group_id from members test)
+    let output = run_omdb(
+        &db_url,
+        &["db", "multicast", "info", "--group-id", &group_id],
+    );
+    assert!(
+        output.contains("test-mcast-group"),
+        "Expected group name when querying by ID, got: {output}"
+    );
+
+    // Test SSM (Source-Specific Multicast) - group in 232/8 range
+    // SSM range is 232.0.0.0/8 for IPv4, ff3x::/32 for IPv6
+    create_multicast_ip_pool_with_range(
+        client,
+        "test-ssm-pool",
+        (232, 1, 0, 0),   // SSM range start
+        (232, 1, 0, 255), // SSM range end
+    )
+    .await;
+
+    let ssm_instance = instance_for_multicast_groups(
+        cptestctx,
+        PROJECT_NAME,
+        "ssm-instance",
+        false,
+        &[],
+    )
+    .await;
+
+    let ssm_join_url = format!(
+        "/v1/instances/{}/multicast-groups/ssm-group?project={PROJECT_NAME}",
+        ssm_instance.identity.id
+    );
+    put_upsert::<_, MulticastGroupMember>(
+        client,
+        &ssm_join_url,
+        &InstanceMulticastGroupJoin {
+            source_ips: Some(vec![
+                "10.0.0.1".parse::<IpAddr>().unwrap(),
+                "10.0.0.2".parse::<IpAddr>().unwrap(),
+            ]),
+        },
+    )
+    .await;
+
+    wait_for_group_active(client, "ssm-group").await;
+
+    // Verify SSM group shows in groups list with sources
+    let output = run_omdb(&db_url, &["db", "multicast", "groups"]);
+    assert!(
output.contains("ssm-group"), + "Expected SSM group in output, got: {output}" + ); + // Verify SSM is shown in RANGE column (232.x.x.x = SSM range) + assert!( + output.contains("SSM"), + "Expected SSM in range column, got: {output}" + ); + // Verify ASM is shown for 224.x.x.x range (test-mcast-group) + assert!( + output.contains("ASM"), + "Expected ASM in range column, got: {output}" + ); + // Verify SSM source IPs + assert!( + output.contains("10.0.0.1") && output.contains("10.0.0.2"), + "Expected SSM source IPs in output, got: {output}" + ); + + // Verify SSM sources show in info command + let output = + run_omdb(&db_url, &["db", "multicast", "info", "--name", "ssm-group"]); + assert!( + output.contains("10.0.0.1") || output.contains("10.0.0.2"), + "Expected SSM source IPs in info output, got: {output}" + ); + + // Test: omdb db multicast members shows sources per member + let output = run_omdb(&db_url, &["db", "multicast", "members"]); + // SSM member should show its sources + assert!( + output.contains("10.0.0.1") || output.contains("10.0.0.2"), + "Expected SSM member sources in members output, got: {output}" + ); + + // Test: omdb db multicast members --source-ip + // Filter by SSM source IP - should find SSM member + let output = run_omdb( + &db_url, + &["db", "multicast", "members", "--source-ip", "10.0.0.1"], + ); + assert!( + output.contains(&ssm_instance.identity.id.to_string()), + "Expected SSM instance with source-ip filter, got: {output}" + ); + // Members without sources should not appear for any source-ip filter + assert!( + !output.contains(&instance.identity.id.to_string()), + "Member without sources should not appear, got: {output}" + ); + + // Test: --source-ip with non-existent IP returns no members + let output = run_omdb( + &db_url, + &["db", "multicast", "members", "--source-ip", "10.99.99.99"], + ); + assert!( + !output.contains(&ssm_instance.identity.id.to_string()), + "No members should match non-existent source IP, got: {output}" + ); +}
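The RANGE column, and the SSM assertions in the test above, classify 232.0.0.0/8 (IPv4) and ff3x::/32 (IPv6) as source-specific multicast; other multicast addresses such as 224.2.0.0/16 render as ASM. The real check is `omicron_common::address::is_ssm_address`; the sketch below is only an illustration of that classification for the two ranges the tests mention, with `is_ssm_sketch` being a hypothetical stand-in name.

```rust
// Rough sketch of the ASM/SSM classification behind the RANGE column.
// Illustrative only; not the implementation of is_ssm_address.
use std::net::IpAddr;

fn is_ssm_sketch(ip: IpAddr) -> bool {
    match ip {
        // IPv4 source-specific multicast: 232.0.0.0/8.
        IpAddr::V4(v4) => v4.octets()[0] == 232,
        // IPv6 source-specific multicast: ff3x::/32 (flags nibble == 3).
        IpAddr::V6(v6) => {
            let octets = v6.octets();
            octets[0] == 0xff && (octets[1] & 0xf0) == 0x30
        }
    }
}

fn main() {
    assert!(is_ssm_sketch("232.1.0.10".parse().unwrap())); // like ssm-group above
    assert!(!is_ssm_sketch("224.2.0.5".parse().unwrap())); // ASM, like test-mcast-group
}
```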
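For reviewers trying the new `--state` filters: the flags accept the capitalized labels case-insensitively, because clap's `PossibleValuesParser` feeds into the `FromStr` impls added in `nexus/db-model` above. A minimal sketch of that round trip (illustrative only; it assumes `MulticastGroupState` and its new `label()`/`FromStr` API are in scope exactly as in this patch):

```rust
// Illustrative sketch of how `--state active` resolves to a typed state.
use std::str::FromStr;

use nexus_db_model::MulticastGroupState;

fn main() {
    // Labels are the capitalized display strings shown in omdb output...
    assert_eq!(MulticastGroupState::Active.label(), "Active");
    // ...but parsing is case-insensitive, so `--state active` also works.
    assert!(matches!(
        MulticastGroupState::from_str("active"),
        Ok(MulticastGroupState::Active)
    ));
    // Unknown values are rejected with MulticastGroupStateParseError.
    assert!(MulticastGroupState::from_str("bogus").is_err());
}
```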