diff --git a/ci/scenarios/sno-1-bm/README.md b/ci/scenarios/sno-1-bm/README.md
new file mode 100644
index 00000000..655fcded
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/README.md
@@ -0,0 +1,111 @@
+# sno-1-bm Scenario
+
+## Overview
+
+A Single Node OpenShift (SNO) scenario designed to test OpenStack Ironic bare
+metal provisioning with 1 dedicated Ironic node using iPXE network boot. This
+scenario validates the complete OpenStack bare metal lifecycle including node
+enrollment, provisioning, and Tempest testing.
+
+## Architecture
+
+
+```mermaid
+graph TD
+ Internet[("Internet")]
+ Router{{"Neutron
Router"}}
+
+ MachineNet["Machine Network
192.168.32.0/24"]
+ CtlPlane["CtlPlane Network
192.168.122.0/24"]
+ VLANNets["VLAN Trunk Networks
Internal API: 172.17.0.0/24
Storage: 172.18.0.0/24
Tenant: 172.19.0.0/24"]
+ IronicNet["Ironic Network
172.20.1.0/24"]
+
+ Controller["Controller
192.168.32.254
DNS/HAProxy"]
+ Master["SNO Master
192.168.32.10"]
+ IronicNodes["Ironic Node x1
Virtual Baremetal"]
+
+ LVM["TopoLVM
20GB"]
+ CinderVols["Cinder Volumes x3
20GB each"]
+
+ Internet --- Router
+
+ Router --- MachineNet
+ Router --- CtlPlane
+ Router --- VLANNets
+ Router --- IronicNet
+
+ MachineNet --- Controller
+ MachineNet --- Master
+ CtlPlane --- Master
+ VLANNets --- Master
+ IronicNet --- Master
+ IronicNet --- IronicNodes
+
+ Master --- LVM
+ Master --- CinderVols
+
+ style Controller fill:#4A90E2,stroke:#2E5C8A,stroke-width:3px,color:#fff
+ style Master fill:#F5A623,stroke:#C87D0E,stroke-width:3px,color:#fff
+ style IronicNodes fill:#9B59B6,stroke:#6C3A82,stroke-width:2px,color:#fff
+ style Router fill:#27AE60,stroke:#1E8449,stroke-width:3px,color:#fff
+```
+
+
+### Component Details
+
+- **Controller**: Hotstack controller providing DNS, load balancing, and
+ orchestration services
+- **SNO Master**: Single-node OpenShift cluster running the complete OpenStack
+ control plane
+- **Ironic Node**: 1 virtual bare metal node for testing Ironic provisioning workflows
+
+## Networks
+
+- **machine-net**: 192.168.32.0/24 (OpenShift cluster network)
+- **ctlplane-net**: 192.168.122.0/24 (OpenStack control plane)
+- **internal-api-net**: 172.17.0.0/24 (OpenStack internal services)
+- **storage-net**: 172.18.0.0/24 (Storage backend communication)
+- **tenant-net**: 172.19.0.0/24 (Tenant network traffic)
+- **ironic-net**: 172.20.1.0/24 (Bare metal provisioning network)
+
+## OpenStack Services
+
+This scenario deploys a comprehensive OpenStack environment:
+
+### Core Services
+
+- **Keystone**: Identity service with LoadBalancer on Internal API
+- **Nova**: Compute service with Ironic driver for bare metal
+- **Neutron**: Networking service with OVN backend
+- **Glance**: Image service with Swift backend
+- **Swift**: Object storage service
+- **Placement**: Resource placement service
+
+### Bare Metal Services
+
+- **Ironic**: Bare metal provisioning service
+- **Ironic Inspector**: Hardware inspection service
+- **Ironic Neutron Agent**: Network management for bare metal
+
+## Usage
+
+```bash
+# Deploy the scenario
+ansible-playbook -i inventory.yml bootstrap.yml \
+ -e @scenarios/sno-1-bm/bootstrap_vars.yml \
+ -e @~/cloud-secrets.yaml
+
+# Run comprehensive tests
+ansible-playbook -i inventory.yml 06-test-operator.yml \
+ -e @scenarios/sno-1-bm/bootstrap_vars.yml \
+ -e @~/cloud-secrets.yaml
+```
+
+## Configuration Files
+
+- `bootstrap_vars.yml`: Infrastructure and OpenShift configuration.
+- `automation-vars.yml`: Hotloop deployment stages
+- `heat_template_ipxe.yaml`: OpenStack infrastructure template (iPXE network boot)
+- `manifests/control-plane/control-plane.yaml`: OpenStack service configuration
+- `test-operator/automation-vars.yml`: Comprehensive test automation
+- `test-operator/tempest-tests.yml`: Tempest test specifications
diff --git a/ci/scenarios/sno-1-bm/automation-vars.yml b/ci/scenarios/sno-1-bm/automation-vars.yml
new file mode 100644
index 00000000..3eb3e3db
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/automation-vars.yml
@@ -0,0 +1,139 @@
+---
+stages:
+ - name: TopoLVM Dependencies
+ stages: >-
+ {{
+ lookup("ansible.builtin.template",
+ "common/stages/topolvm-deps-stages.yaml.j2")
+ }}
+
+ - name: Dependencies
+ stages: >-
+ {{
+ lookup("ansible.builtin.template",
+ "common/stages/deps-stages.yaml.j2")
+ }}
+
+ - name: Cinder LVM
+ stages: >-
+ {{
+ lookup("ansible.builtin.file",
+ "common/stages/cinder-lvm-label-stages.yaml")
+ }}
+
+ - name: TopoLVM
+ stages: >-
+ {{
+ lookup("ansible.builtin.template",
+ "common/stages/topolvm-stages.yaml.j2")
+ }}
+
+ - name: OLM Openstack
+ stages: >-
+ {{
+ lookup("ansible.builtin.template",
+ "common/stages/olm-openstack-stages.yaml.j2")
+ }}
+
+ - name: NodeNetworkConfigurationPolicy (nncp)
+ documentation: |
+ Apply node network configuration policies to configure host networking.
+ Waits for all policies to be successfully configured.
+ j2_manifest: manifests/control-plane/networking/nncp.yaml.j2
+ wait_conditions:
+ - >-
+ oc wait -n openstack nncp -l osp/nncm-config-type=standard
+ --for jsonpath='{.status.conditions[0].reason}'=SuccessfullyConfigured
+ --timeout=180s
+
+  - name: NetworkAttachmentDefinition (NAD)
+ documentation: |
+ Create network attachment definitions for OpenStack services.
+ Defines additional network interfaces for pods.
+ manifest: manifests/control-plane/networking/nad.yaml
+
+ - name: MetalLB - L2Advertisement and IPAddressPool
+ documentation: |
+ Configure MetalLB load balancer with IP address pools and L2 advertisements.
+ Enables external access to OpenStack services.
+ manifest: manifests/control-plane/networking/metallb.yaml
+
+ - name: OpenstackControlPlane
+ documentation: |
+ Deploy the OpenStack control plane with all core services.
+ Waits for the control plane to be fully ready before proceeding.
+ j2_manifest: manifests/control-plane/control-plane.yaml.j2
+ wait_conditions:
+ - >-
+ oc -n openstack wait openstackcontrolplanes.core.openstack.org controlplane
+ --for condition=OpenStackControlPlaneDNSReadyCondition --timeout=600s
+
+ - name: Extra DNS LoadBalancer on Ironic network
+ documentation: |
+ Deploy additional DNS service on the Ironic network for bare metal provisioning.
+ Provides DNS resolution for ironic nodes during deployment and inspection.
+ manifest: manifests/control-plane/dnsmasq-dns-ironic.yaml
+ wait_conditions:
+ - >-
+ oc wait -n openstack service dnsmasq-dns-ironic
+ --for jsonpath='.status.loadBalancer' --timeout=60s
+
+ - name: Wait for OpenstackControlPlane
+ documentation: |
+ Wait for the OpenStack control plane to be fully ready and operational.
+ Ensures all services are running before proceeding with additional configurations.
+ wait_conditions:
+ - >-
+ oc wait -n openstack openstackcontrolplane controlplane
+ --for condition=Ready --timeout=30m
+
+ - name: Update openstack-operators OLM
+ stages: >-
+ {{
+ lookup('ansible.builtin.template',
+ 'common/stages/openstack-olm-update.yaml.j2')
+ }}
+ run_conditions:
+ - >-
+ {{
+ openstack_operators_update is defined and
+ openstack_operators_update | bool
+ }}
+
+ - name: Wait for condition MinorUpdateAvailable True
+ documentation: |
+ Wait for OpenStack version to indicate a minor update is available.
+ Required before proceeding with version updates.
+ wait_conditions:
+ - >-
+ oc -n openstack wait openstackversions.core.openstack.org controlplane
+ --for=condition=MinorUpdateAvailable=True --timeout=10m
+ run_conditions:
+ - "{{ openstack_update is defined and openstack_update | bool }}"
+
+ - name: "Minor update :: Create OpenStackVersion patch"
+ documentation: |
+ This creates a patch file `{{ manifests_dir }}/patches/openstack_version_patch.yaml`
+ If `openstack_update_custom_images` is defined it will populate the customContainerImages
+      in the OpenStackVersion YAML patch.
+ shell: >-
+ {{
+ lookup('ansible.builtin.template',
+ 'common/scripts/create_openstack_version_patch.sh.j2')
+ }}
+ run_conditions:
+ - "{{ openstack_update is defined and openstack_update | bool }}"
+
+ - name: "Minor update :: Update the target version in the OpenStackVersion custom resource (CR)"
+ documentation: |
+ The `hotstack-openstack-version-patch` script will get the `availableVersion`
+      and use it to replace the string `__TARGET_VERSION__` in the patch file and
+      apply the patch using the `oc patch` command.
+ command: >-
+ hotstack-openstack-version-patch --namespace openstack --name controlplane
+ --file {{ manifests_dir }}/patches/openstack_version_patch.yaml
+ wait_conditions:
+ - oc -n openstack wait openstackversions.core.openstack.org controlplane
+ --for=condition=Ready --timeout=10m
+ run_conditions:
+ - "{{ openstack_update is defined and openstack_update | bool }}"
diff --git a/ci/scenarios/sno-1-bm/bootstrap_vars.yml b/ci/scenarios/sno-1-bm/bootstrap_vars.yml
new file mode 100644
index 00000000..b9853644
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/bootstrap_vars.yml
@@ -0,0 +1,57 @@
+---
+os_cloud: default
+os_floating_network: public
+os_router_external_network: public
+
+scenario: sno-1-bm
+scenario_dir: scenarios
+stack_template_path: "{{ scenario_dir }}/{{ scenario }}/heat_template_ipxe.yaml"
+automation_vars_file: "{{ scenario_dir }}/{{ scenario }}/automation-vars.yml"
+test_operator_automation_vars_file: "{{ scenario_dir }}/{{ scenario }}/test-operator/automation-vars.yml"
+
+openstack_operators_image: quay.io/openstack-k8s-operators/openstack-operator-index:latest
+openstack_operator_channel: alpha
+openstack_operator_starting_csv: null
+
+openshift_version: stable-4.18
+
+ntp_servers: []
+dns_servers:
+ - 172.31.0.129
+
+pull_secret_file: ~/pull-secret.txt
+
+ovn_k8s_gateway_config_host_routing: true
+enable_iscsi: true
+enable_multipath: true
+
+cinder_volume_pvs:
+ - /dev/vdc
+ - /dev/vdd
+ - /dev/vde
+
+# Nova console recorder NFS settings
+nova_console_recorder_nfs_server: controller-0.openstack.lab
+nova_console_recorder_nfs_path: /export/nova-console-recordings
+
+stack_name: "hs-{{ scenario }}-{{ zuul.build[:8] | default('no-zuul') }}"
+stack_parameters:
+ # On misconfigured clouds, uncomment these to avoid issues.
+ # Ref: https://access.redhat.com/solutions/7059376
+ # net_value_specs:
+ # mtu: 1442
+ dns_servers: "{{ dns_servers }}"
+ ntp_servers: "{{ ntp_servers }}"
+ controller_ssh_pub_key: "{{ controller_ssh_pub_key | default('') }}"
+ router_external_network: "{{ os_router_external_network | default('public') }}"
+ floating_ip_network: "{{ os_floating_network | default('public') }}"
+ controller_params:
+ image: hotstack-controller
+ flavor: hotstack.small
+ ocp_master_params:
+ image: ipxe-boot-usb
+ flavor: hotstack.xxlarge
+ ironic_params:
+ image: CentOS-Stream-GenericCloud-9
+ cd_image: sushy-tools-blank-image
+ flavor: hotstack.medium
diff --git a/ci/scenarios/sno-1-bm/heat_template_ipxe.yaml b/ci/scenarios/sno-1-bm/heat_template_ipxe.yaml
new file mode 100644
index 00000000..f17f41b0
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/heat_template_ipxe.yaml
@@ -0,0 +1,681 @@
+---
+heat_template_version: rocky
+
+description: >
+ Heat template to set up infrastructure for SNO with 1 bare metal node (iPXE boot)
+
+parameters:
+ dns_servers:
+ type: comma_delimited_list
+ default:
+ - 8.8.8.8
+ - 8.8.4.4
+ ntp_servers:
+ type: comma_delimited_list
+ default: []
+ controller_ssh_pub_key:
+ type: string
+ dataplane_ssh_pub_key:
+ type: string
+ router_external_network:
+ type: string
+ default: public
+ floating_ip_network:
+ type: string
+ default: public
+ net_value_specs:
+ type: json
+ default: {}
+
+ controller_params:
+ type: json
+ default:
+ image: hotstack-controller
+ flavor: hotstack.small
+ nat64_appliance_params:
+ type: json
+ default:
+ image: nat64-appliance
+ flavor: hotstack.small
+ ocp_master_params:
+ type: json
+ default:
+ image: ipxe-boot-usb
+ flavor: hotstack.xxlarge
+ ocp_worker_params:
+ type: json
+ default:
+ image: ipxe-boot-usb
+ flavor: hotstack.xxlarge
+ compute_params:
+ type: json
+ default:
+ image: CentOS-Stream-GenericCloud-9
+ flavor: hotstack.large
+ networker_params:
+ type: json
+ default:
+ image: CentOS-Stream-GenericCloud-9
+ flavor: hotstack.small
+ bmh_params:
+ type: json
+ default:
+ image: CentOS-Stream-GenericCloud-9
+ cd_image: sushy-tools-blank-image
+ flavor: hotstack.medium
+ ironic_params:
+ type: json
+ default:
+ image: CentOS-Stream-GenericCloud-9
+ cd_image: sushy-tools-blank-image
+ flavor: hotstack.medium
+ cdrom_disk_bus:
+ type: string
+ description: >
+ Disk bus type for CDROM device. 'sata' may be required for older versions
+ of OpenStack. Heat patch https://review.opendev.org/c/openstack/heat/+/966688
+ is needed for 'sata' support.
+ default: scsi
+ constraints:
+ - allowed_values:
+ - sata
+ - scsi
+
+resources:
+ #
+ # Networks
+ #
+ machine-net:
+ type: OS::Neutron::Net
+ properties:
+ port_security_enabled: false
+ value_specs: {get_param: net_value_specs}
+
+ ctlplane-net:
+ type: OS::Neutron::Net
+ properties:
+ port_security_enabled: false
+ value_specs: {get_param: net_value_specs}
+
+ internal-api-net:
+ type: OS::Neutron::Net
+ properties:
+ port_security_enabled: false
+ value_specs: {get_param: net_value_specs}
+
+ storage-net:
+ type: OS::Neutron::Net
+ properties:
+ port_security_enabled: false
+ value_specs: {get_param: net_value_specs}
+
+ tenant-net:
+ type: OS::Neutron::Net
+ properties:
+ port_security_enabled: false
+ value_specs: {get_param: net_value_specs}
+
+ ironic-net:
+ type: OS::Neutron::Net
+ properties:
+ port_security_enabled: false
+ value_specs: {get_param: net_value_specs}
+
+ #
+ # Subnets
+ #
+ machine-subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: {get_resource: machine-net}
+ ip_version: 4
+ cidr: 192.168.32.0/24
+ enable_dhcp: true
+ dns_nameservers:
+ - 192.168.32.254
+
+ ctlplane-subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: {get_resource: ctlplane-net}
+ ip_version: 4
+ cidr: 192.168.122.0/24
+ enable_dhcp: false
+ allocation_pools:
+ - start: 192.168.122.100
+ end: 192.168.122.150
+ dns_nameservers:
+ - 192.168.122.80
+
+ internal-api-subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: {get_resource: internal-api-net}
+ ip_version: 4
+ cidr: 172.17.0.0/24
+ enable_dhcp: false
+ allocation_pools:
+ - start: 172.17.0.100
+ end: 172.17.0.150
+
+ storage-subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: {get_resource: storage-net}
+ ip_version: 4
+ cidr: 172.18.0.0/24
+ enable_dhcp: false
+ allocation_pools:
+ - start: 172.18.0.100
+ end: 172.18.0.150
+
+ tenant-subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: {get_resource: tenant-net}
+ ip_version: 4
+ cidr: 172.19.0.0/24
+ enable_dhcp: false
+ allocation_pools:
+ - start: 172.19.0.100
+ end: 172.19.0.150
+
+ ironic-subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: {get_resource: ironic-net}
+ ip_version: 4
+ cidr: 172.20.1.0/24
+ enable_dhcp: false
+ allocation_pools: [{start: 172.20.1.100, end: 172.20.1.150}]
+
+ #
+ # Routers
+ #
+ router:
+ type: OS::Neutron::Router
+ properties:
+ admin_state_up: true
+ external_gateway_info:
+ network: {get_param: router_external_network}
+ # enable_snat: true
+
+ machine-net-router-interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router: {get_resource: router}
+ subnet: {get_resource: machine-subnet}
+
+ ctlplane-net-router-interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router: {get_resource: router}
+ subnet: {get_resource: ctlplane-subnet}
+
+ ironic-net-router-interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router: {get_resource: router}
+ subnet: {get_resource: ironic-subnet}
+
+ #
+ # Instances
+ #
+ controller_users:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ users:
+ - default
+ - name: zuul
+ gecos: "Zuul user"
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ ssh_authorized_keys:
+ - {get_param: controller_ssh_pub_key}
+
+ controller-write-files:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ write_files:
+ - path: /etc/dnsmasq.conf
+ content: |
+ # dnsmasq service config
+ # Include all files in /etc/dnsmasq.d except RPM backup files
+ conf-dir=/etc/dnsmasq.d,.rpmnew,.rpmsave,.rpmorig
+ no-resolv
+ owner: root:dnsmasq
+ - path: /etc/dnsmasq.d/forwarders.conf
+ content:
+ str_replace:
+ template: |
+ # DNS forwarders records
+ server=$dns1
+ server=$dns2
+ params:
+ $dns1: {get_param: [dns_servers, 0]}
+ $dns2: {get_param: [dns_servers, 1]}
+ owner: root:dnsmasq
+ - path: /etc/dnsmasq.d/host_records.conf
+ content:
+ str_replace:
+ template: |
+ # Host records
+ host-record=controller-0.openstack.lab,$controller0
+ host-record=api.sno.openstack.lab,$master0
+ host-record=api-int.sno.openstack.lab,$master0
+ host-record=master-0.sno.openstack.lab,$master0
+ params:
+ $controller0: {get_attr: [controller-machine-port, fixed_ips, 0, ip_address]}
+ $master0: {get_attr: [master0-machine-port, fixed_ips, 0, ip_address]}
+ owner: root:dnsmasq
+ - path: /etc/dnsmasq.d/wildcard_records.conf
+ content:
+ str_replace:
+ template: |
+ # Wildcard records
+ address=/apps.sno.openstack.lab/$addr
+ params:
+ $addr: {get_attr: [controller-machine-port, fixed_ips, 0, ip_address]}
+ owner: root:dnsmasq
+ - path: /etc/resolv.conf
+ content: |
+              nameserver 127.0.0.1
+ owner: root:root
+ - path: /etc/NetworkManager/conf.d/98-rc-manager.conf
+ content: |
+ [main]
+ rc-manager=unmanaged
+ owner: root:root
+ - path: /etc/haproxy/haproxy.cfg
+ content: |
+ global
+ log 127.0.0.1 local2
+ pidfile /var/run/haproxy.pid
+ maxconn 4000
+ daemon
+ defaults
+ mode http
+ log global
+ option dontlognull
+ option http-server-close
+ option redispatch
+ retries 3
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 1m
+ timeout server 1m
+ timeout http-keep-alive 10s
+ timeout check 10s
+ maxconn 3000
+ listen api-server-6443
+ bind *:6443
+ mode tcp
+ server master-0 master-0.sno.openstack.lab:6443 check inter 1s
+ listen machine-config-server-22623
+ bind *:22623
+ mode tcp
+ server master-0 master-0.sno.openstack.lab:22623 check inter 1s
+ listen ingress-router-443
+ bind *:443
+ mode tcp
+ balance source
+ server master-0 master-0.sno.openstack.lab:443 check inter 1s
+ listen ingress-router-80
+ bind *:80
+ mode tcp
+ balance source
+ server master-0 master-0.sno.openstack.lab:80 check inter 1s
+ owner: root:root
+
+
+ controller-runcmd:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ runcmd:
+ - ['systemctl', 'enable', 'dnsmasq.service']
+ - ['systemctl', 'start', 'dnsmasq.service']
+ - ['setenforce', 'permissive']
+ - ['systemctl', 'enable', 'haproxy.service']
+ - ['systemctl', 'start', 'haproxy.service']
+ - ['sed', '-i', 's/Listen 80/Listen 8081/g', '/etc/httpd/conf/httpd.conf']
+ - ['systemctl', 'enable', 'httpd.service']
+ - ['systemctl', 'start', 'httpd.service']
+
+ controller-init:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: {get_resource: controller_users}
+ - config: {get_resource: controller-write-files}
+ - config: {get_resource: controller-runcmd}
+
+ controller-machine-port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_resource: machine-net}
+ mac_address: "fa:16:9e:81:f6:05"
+ fixed_ips:
+ - ip_address: 192.168.32.254
+
+ controller-floating-ip:
+ depends_on: machine-net-router-interface
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: {get_param: floating_ip_network}
+ port_id: {get_resource: controller-machine-port}
+
+ controller:
+ type: OS::Nova::Server
+ properties:
+ image: {get_param: [controller_params, image]}
+ flavor: {get_param: [controller_params, flavor]}
+ networks:
+ - port: {get_resource: controller-machine-port}
+ user_data_format: RAW
+ user_data: {get_resource: controller-init}
+
+ # OCP Masters
+
+ # DHCP Opts value
+ extra-dhcp-opts-value:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ extra_dhcp_opts:
+ - opt_name: "60"
+ opt_value: "HTTPClient"
+ ip_version: 4
+ - opt_name: "67"
+ opt_value:
+ str_replace:
+ template: http://$server_address:8081/boot-artifacts/agent.x86_64.ipxe
+ params:
+ $server_address: {get_attr: [controller-machine-port, fixed_ips, 0, ip_address]}
+
+
+ master0-machine-port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_resource: machine-net}
+ port_security_enabled: false
+ mac_address: "fa:16:9e:81:f6:10"
+ fixed_ips:
+ - ip_address: 192.168.32.10
+ value_specs: {get_attr: [extra-dhcp-opts-value, value]}
+
+ master0-ctlplane-trunk-parent-port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_resource: ctlplane-net}
+ port_security_enabled: false
+ fixed_ips:
+ - ip_address: 192.168.122.10
+
+ master0-internal-api-port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_resource: internal-api-net}
+ port_security_enabled: false
+ fixed_ips:
+ - ip_address: 172.17.0.10
+
+ master0-storage-port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_resource: storage-net}
+ port_security_enabled: false
+ fixed_ips:
+ - ip_address: 172.18.0.10
+
+ master0-tenant-port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_resource: tenant-net}
+ port_security_enabled: false
+ fixed_ips:
+ - ip_address: 172.19.0.10
+
+ master0-trunk0:
+ type: OS::Neutron::Trunk
+ properties:
+ port: {get_resource: master0-ctlplane-trunk-parent-port}
+ sub_ports:
+ - port: {get_resource: master0-internal-api-port}
+ segmentation_id: 20
+ segmentation_type: vlan
+ - port: {get_resource: master0-storage-port}
+ segmentation_id: 21
+ segmentation_type: vlan
+ - port: {get_resource: master0-tenant-port}
+ segmentation_id: 22
+ segmentation_type: vlan
+
+ master0-ironic-port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_resource: ironic-net}
+ port_security_enabled: false
+ fixed_ips: [{ip_address: 172.20.1.10}]
+
+ master0-lvms-vol0:
+ type: OS::Cinder::Volume
+ properties:
+ size: 20
+
+ master0-cinder-vol0:
+ type: OS::Cinder::Volume
+ properties:
+ size: 20
+
+ master0-cinder-vol1:
+ type: OS::Cinder::Volume
+ properties:
+ size: 20
+
+ master0-cinder-vol2:
+ type: OS::Cinder::Volume
+ properties:
+ size: 20
+
+ master0:
+ type: OS::Nova::Server
+ properties:
+ image: {get_param: [ocp_master_params, image]}
+ flavor: {get_param: [ocp_master_params, flavor]}
+ block_device_mapping_v2:
+ - boot_index: -1
+ device_type: disk
+ volume_id: {get_resource: master0-lvms-vol0}
+ - boot_index: -1
+ device_type: disk
+ volume_id: {get_resource: master0-cinder-vol0}
+ - boot_index: -1
+ device_type: disk
+ volume_id: {get_resource: master0-cinder-vol1}
+ - boot_index: -1
+ device_type: disk
+ volume_id: {get_resource: master0-cinder-vol2}
+ networks:
+ - port: {get_resource: master0-machine-port}
+ - port: {get_attr: [master0-trunk0, port_id]}
+ - port: {get_resource: master0-ironic-port}
+
+ #
+ # Ironics
+ #
+ ironic0-port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_resource: ironic-net}
+ port_security_enabled: false
+
+ ironic0:
+ type: OS::Nova::Server
+ properties:
+ flavor: {get_param: [ironic_params, flavor]}
+ image: {get_param: [ironic_params, image]}
+ networks:
+ - port: {get_resource: ironic0-port}
+
+outputs:
+ controller_floating_ip:
+ description: Controller Floating IP
+ value: {get_attr: [controller-floating-ip, floating_ip_address]}
+
+ ironic_boot_interface:
+ description: Ironic boot interface configured for the redfish nodes
+ value: ipxe
+
+ sushy_emulator_uuids:
+ description: UUIDs of instances to manage with sushy-tools - RedFish virtual BMC
+ value:
+ ironic0: {get_resource: ironic0}
+
+ network_mtu:
+ description: MTU values for all networks
+ value:
+ machine: {get_attr: [machine-net, mtu]}
+ ctlplane: {get_attr: [ctlplane-net, mtu]}
+ internal_api: {get_attr: [internal-api-net, mtu]}
+ storage: {get_attr: [storage-net, mtu]}
+ tenant: {get_attr: [tenant-net, mtu]}
+ ironic: {get_attr: [ironic-net, mtu]}
+
+ ironic_nodes:
+    description: Ironic nodes YAML, used with openstack baremetal create to enroll nodes in OpenStack Ironic
+ value:
+ nodes:
+ - name: ironic0
+ driver: redfish
+ bios_interface: no-bios
+ boot_interface: ipxe
+ driver_info:
+ redfish_address: http://sushy-emulator.apps.sno.openstack.lab
+ redfish_system_id:
+ str_replace:
+ template: "/redfish/v1/Systems/$SYS_ID"
+ params:
+ $SYS_ID: {get_resource: ironic0}
+ redfish_username: admin
+ redfish_password: password
+ properties:
+ capabilities: 'boot_mode:uefi'
+ ports:
+ - address: {get_attr: [ironic0-port, mac_address]}
+ physical_network: ironic
+
+ ocp_install_config:
+ description: OCP install-config.yaml
+ value:
+ apiVersion: v1
+ baseDomain: openstack.lab
+ controlPlane:
+ architecture: amd64
+ hyperthreading: Disabled
+ name: master
+ replicas: 1
+ compute:
+ - architecture: amd64
+ hyperthreading: Disabled
+ name: worker
+ replicas: 0
+ metadata:
+ name: sno
+ networking:
+ clusterNetwork:
+ - cidr: 10.128.0.0/16
+ hostPrefix: 23
+ machineNetwork:
+ - cidr: {get_attr: [machine-subnet, cidr]}
+ serviceNetwork:
+ - 172.30.0.0/16
+ networkType: OVNKubernetes
+ platform:
+ none: {}
+ pullSecret: _replaced_
+ sshKey: {get_param: dataplane_ssh_pub_key}
+
+ ocp_agent_config:
+ description: OCP agent-config.yaml
+ value:
+ apiVersion: v1beta1
+ kind: AgentConfig
+ metadata:
+ name: sno
+ rendezvousIP: {get_attr: [master0-machine-port, fixed_ips, 0, ip_address]}
+ additionalNTPSources: {get_param: ntp_servers}
+ bootArtifactsBaseURL:
+ str_replace:
+ template: http://$server_address:8081/boot-artifacts
+ params:
+ $server_address: {get_attr: [controller-machine-port, fixed_ips, 0, ip_address]}
+ hosts:
+ - hostname: master-0
+ role: master
+ interfaces:
+ - name: eth0
+ macAddress: {get_attr: [master0-machine-port, mac_address]}
+ - name: eth1
+ macAddress: {get_attr: [master0-ctlplane-trunk-parent-port, mac_address]}
+ - name: eth2
+ macAddress: {get_attr: [master0-ironic-port, mac_address]}
+ networkConfig:
+ interfaces:
+ - name: eth0
+ type: ethernet
+ state: up
+ mac-address: {get_attr: [master0-machine-port, mac_address]}
+ ipv4:
+ enabled: true
+ dhcp: true
+ ipv6:
+ enabled: false
+ - name: eth1
+ type: ethernet
+ state: down
+ mac-address: {get_attr: [master0-ctlplane-trunk-parent-port, mac_address]}
+ - name: eth2
+ type: ethernet
+ state: down
+ mac-address: {get_attr: [master0-ironic-port, mac_address]}
+ controller_ansible_host:
+ description: >
+ Controller ansible host, this struct can be passed to the ansible.builtin.add_host module
+ value:
+ name: controller-0
+ ansible_ssh_user: zuul
+ ansible_host: {get_attr: [controller-floating-ip, floating_ip_address]}
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ groups: controllers
+
+ ansible_inventory:
+ description: Ansible inventory
+ value:
+ all:
+ children:
+ controllers:
+ vars:
+ ocps:
+ vars:
+ localhosts:
+ hosts:
+ localhost:
+ ansible_connection: local
+ controllers:
+ hosts:
+ controller0:
+ ansible_host: {get_attr: [controller-machine-port, fixed_ips, 0, ip_address]}
+ ansible_user: zuul
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ ansible_ssh_private_key_file: '~/.ssh/id_rsa'
+ ocps:
+ hosts:
+ master0:
+ ansible_host: {get_attr: [master0-machine-port, fixed_ips, 0, ip_address]}
+ ansible_user: core
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ ansible_ssh_private_key_file: '~/.ssh/id_rsa'
diff --git a/ci/scenarios/sno-1-bm/manifests/control-plane/control-plane.yaml.j2 b/ci/scenarios/sno-1-bm/manifests/control-plane/control-plane.yaml.j2
new file mode 100644
index 00000000..6415eb35
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/manifests/control-plane/control-plane.yaml.j2
@@ -0,0 +1,484 @@
+{% raw %}
+---
+apiVersion: v1
+data:
+ server-ca-passphrase: MTIzNDU2Nzg=
+kind: Secret
+metadata:
+ name: octavia-ca-passphrase
+ namespace: openstack
+type: Opaque
+---
+apiVersion: v1
+data:
+ AdminPassword: MTIzNDU2Nzg=
+ AodhDatabasePassword: MTIzNDU2Nzg=
+ AodhPassword: MTIzNDU2Nzg=
+ BarbicanDatabasePassword: MTIzNDU2Nzg=
+ BarbicanPassword: MTIzNDU2Nzg=
+ BarbicanSimpleCryptoKEK: r0wDZ1zrD5upafX9RDfYqvDkW2LENBWH7Gz9+Tr3NdM=
+ CeilometerPassword: MTIzNDU2Nzg=
+ CinderDatabasePassword: MTIzNDU2Nzg=
+ CinderPassword: MTIzNDU2Nzg=
+ DatabasePassword: MTIzNDU2Nzg=
+ DbRootPassword: MTIzNDU2Nzg=
+ DesignateDatabasePassword: MTIzNDU2Nzg=
+ DesignatePassword: MTIzNDU2Nzg=
+ GlanceDatabasePassword: MTIzNDU2Nzg=
+ GlancePassword: MTIzNDU2Nzg=
+ HeatAuthEncryptionKey: NzY3YzNlZDA1NmNiYWEzYjlkZmVkYjhjNmY4MjViZjA=
+ HeatDatabasePassword: MTIzNDU2Nzg=
+ HeatPassword: MTIzNDU2Nzg=
+ IronicDatabasePassword: MTIzNDU2Nzg=
+ IronicInspectorDatabasePassword: MTIzNDU2Nzg=
+ IronicInspectorPassword: MTIzNDU2Nzg=
+ IronicPassword: MTIzNDU2Nzg=
+ KeystoneDatabasePassword: MTIzNDU2Nzg=
+ ManilaDatabasePassword: MTIzNDU2Nzg=
+ ManilaPassword: MTIzNDU2Nzg=
+ MetadataSecret: MTIzNDU2Nzg0Mg==
+ NeutronDatabasePassword: MTIzNDU2Nzg=
+ NeutronPassword: MTIzNDU2Nzg=
+ NovaAPIDatabasePassword: MTIzNDU2Nzg=
+ NovaCell0DatabasePassword: MTIzNDU2Nzg=
+ NovaCell1DatabasePassword: MTIzNDU2Nzg=
+ NovaPassword: MTIzNDU2Nzg=
+ OctaviaDatabasePassword: MTIzNDU2Nzg=
+ OctaviaHeartbeatKey: MTIzNDU2Nzg=
+ OctaviaPassword: MTIzNDU2Nzg=
+ PlacementDatabasePassword: MTIzNDU2Nzg=
+ PlacementPassword: MTIzNDU2Nzg=
+ SwiftPassword: MTIzNDU2Nzg=
+kind: Secret
+metadata:
+ name: osp-secret
+ namespace: openstack
+type: Opaque
+---
+apiVersion: core.openstack.org/v1beta1
+kind: OpenStackControlPlane
+metadata:
+ name: controlplane
+ namespace: openstack
+spec:
+ barbican:
+ enabled: false
+ ceilometer:
+ enabled: false
+ cinder:
+ apiOverride:
+ route:
+ haproxy.router.openshift.io/timeout: 60s
+ template:
+ cinderAPI:
+ override:
+ service:
+ internal:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: internalapi
+ metallb.universe.tf/allow-shared-ip: internalapi
+ metallb.universe.tf/loadBalancerIPs: 172.17.0.80
+ spec:
+ type: LoadBalancer
+ replicas: 1
+ cinderBackup:
+ customServiceConfig: |
+ [DEFAULT]
+ backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver
+ networkAttachments:
+ - storage
+ replicas: 1
+ cinderScheduler:
+ replicas: 1
+ cinderVolumes:
+ lvm-iscsi:
+ customServiceConfig: |
+ [lvm]
+ image_volume_cache_enabled = false
+ volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
+ volume_group = cinder-volumes
+ target_protocol = iscsi
+ target_helper = lioadm
+ volume_backend_name = lvm_iscsi
+ target_ip_address=172.18.0.10
+ target_secondary_ip_addresses = 172.19.0.10
+ nodeSelector:
+ openstack.org/cinder-lvm: ""
+ replicas: 1
+ customServiceConfig: |
+ # Debug logs by default, jobs can override as needed.
+ [DEFAULT]
+ debug = true
+ databaseInstance: openstack
+ preserveJobs: false
+ secret: osp-secret
+ uniquePodNames: true
+ designate:
+ enabled: false
+ dns:
+ template:
+ options:
+ - key: server
+ values:
+ - 192.168.32.254
+ override:
+ service:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: ctlplane
+ metallb.universe.tf/allow-shared-ip: ctlplane
+ metallb.universe.tf/loadBalancerIPs: 192.168.122.80
+ spec:
+ type: LoadBalancer
+ replicas: 1
+ galera:
+ enabled: true
+ templates:
+ openstack:
+ replicas: 1
+ secret: osp-secret
+ storageRequest: 5G
+ openstack-cell1:
+ replicas: 1
+ secret: osp-secret
+ storageRequest: 5G
+ glance:
+ apiOverrides:
+ default:
+ route:
+ haproxy.router.openshift.io/timeout: 60s
+ template:
+ customServiceConfig: |
+ [DEFAULT]
+ debug = True
+ enabled_backends = default_backend:swift
+
+ [glance_store]
+ default_backend = default_backend
+
+ [default_backend]
+ swift_store_create_container_on_put = True
+ swift_store_auth_version = 3
+ swift_store_auth_address = {{ .KeystoneInternalURL }}
+ swift_store_endpoint_type = internalURL
+ swift_store_user = service:glance
+ swift_store_key = {{ .ServicePassword }}
+ databaseInstance: openstack
+ glanceAPIs:
+ default:
+ networkAttachments:
+ - storage
+ override:
+ service:
+ internal:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: internalapi
+ metallb.universe.tf/allow-shared-ip: internalapi
+ metallb.universe.tf/loadBalancerIPs: 172.17.0.80
+ spec:
+ type: LoadBalancer
+ replicas: 1
+ preserveJobs: false
+ storage:
+ storageClass: lvms-local-storage
+ storageRequest: 10G
+ uniquePodNames: true
+ heat:
+ enabled: false
+ horizon:
+ enabled: false
+ ironic:
+ enabled: true
+ template:
+ databaseInstance: openstack
+ ironicAPI:
+ override:
+ service:
+ internal:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: ironic
+ metallb.universe.tf/allow-shared-ip: ironic
+ metallb.universe.tf/loadBalancerIPs: 172.20.1.80
+ spec:
+ type: LoadBalancer
+ replicas: 1
+ ironicConductors:
+ - customServiceConfig: |
+ [conductor]
+ power_state_change_timeout = 120
+
+ [pxe]
+ kernel_append_params = console=ttyS0 ipa-debug=1
+
+ [redfish]
+ kernel_append_params = console=ttyS0 ipa-debug=1
+
+ [neutron]
+ cleaning_network = provisioning
+ provisioning_network = provisioning
+ rescuing_network = provisioning
+ inspection_network = provisioning
+ networkAttachments:
+ - ironic
+ provisionNetwork: ironic
+ replicas: 1
+ storageRequest: 10G
+ ironicInspector:
+ customServiceConfig: |
+ [capabilities]
+ boot_mode = true
+
+ [processing]
+ update_pxe_enabled = false
+ inspectionNetwork: ironic
+ networkAttachments:
+ - ironic
+ override:
+ service:
+ internal:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: ironic
+ metallb.universe.tf/allow-shared-ip: ironic
+ metallb.universe.tf/loadBalancerIPs: 172.20.1.80
+ spec:
+ type: LoadBalancer
+ preserveJobs: false
+ replicas: 1
+ ironicNeutronAgent:
+ replicas: 1
+ preserveJobs: false
+ rpcTransport: oslo
+ secret: osp-secret
+ keystone:
+ apiOverride:
+ route: {}
+ template:
+ databaseInstance: openstack
+ override:
+ service:
+ internal:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: internalapi
+ metallb.universe.tf/allow-shared-ip: internalapi
+ metallb.universe.tf/loadBalancerIPs: 172.17.0.80
+ spec:
+ type: LoadBalancer
+ preserveJobs: false
+ replicas: 1
+ secret: osp-secret
+ manila:
+ enabled: false
+ memcached:
+ templates:
+ memcached:
+ replicas: 1
+ neutron:
+ apiOverride:
+ route: {}
+ template:
+ customServiceConfig: |
+ [DEFAULT]
+ vlan_transparent = true
+ agent_down_time = 600
+ router_distributed = true
+ router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+ allow_automatic_l3agent_failover = true
+ debug = true
+
+ [agent]
+ report_interval = 300
+
+ [database]
+ max_retries = -1
+ db_max_retries = -1
+
+ [keystone_authtoken]
+ region_name = regionOne
+ memcache_use_advanced_pool = True
+
+ [oslo_messaging_notifications]
+ driver = noop
+
+ [oslo_middleware]
+ enable_proxy_headers_parsing = true
+
+ [oslo_policy]
+ policy_file = /etc/neutron/policy.yaml
+
+ [ovs]
+ igmp_snooping_enable = true
+
+ [ovn]
+ ovsdb_probe_interval = 60000
+ ovn_emit_need_to_frag = true
+
+ [ml2]
+{% endraw %}
+ global_physnet_mtu = {{ stack_outputs.network_mtu.ctlplane }}
+ path_mtu = {{ stack_outputs.network_mtu.tenant }}
+{% raw %}
+ type_drivers = geneve,vxlan,vlan,flat,local
+ tenant_network_types = geneve,flat
+ ml2MechanismDrivers:
+ - ovn
+ - baremetal
+ databaseInstance: openstack
+ networkAttachments:
+ - internalapi
+ override:
+ service:
+ internal:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: internalapi
+ metallb.universe.tf/allow-shared-ip: internalapi
+ metallb.universe.tf/loadBalancerIPs: 172.17.0.80
+ spec:
+ type: LoadBalancer
+ preserveJobs: false
+ replicas: 1
+ secret: osp-secret
+ nova:
+ apiOverride:
+ route: {}
+ template:
+ apiServiceTemplate:
+ override:
+ service:
+ internal:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: internalapi
+ metallb.universe.tf/allow-shared-ip: internalapi
+ metallb.universe.tf/loadBalancerIPs: 172.17.0.80
+ spec:
+ type: LoadBalancer
+ replicas: 1
+ cellTemplates:
+ cell0:
+ cellDatabaseAccount: nova-cell0
+ cellDatabaseInstance: openstack
+ cellMessageBusInstance: rabbitmq
+ hasAPIAccess: true
+ cell1:
+ cellDatabaseAccount: nova-cell1
+ cellDatabaseInstance: openstack-cell1
+ cellMessageBusInstance: rabbitmq-cell1
+ hasAPIAccess: true
+ novaComputeTemplates:
+ compute-ironic:
+ computeDriver: ironic.IronicDriver
+ metadataServiceTemplate:
+ override:
+ service:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: internalapi
+ metallb.universe.tf/allow-shared-ip: internalapi
+ metallb.universe.tf/loadBalancerIPs: 172.17.0.80
+ spec:
+ type: LoadBalancer
+ replicas: 1
+ preserveJobs: false
+ schedulerServiceTemplate:
+ replicas: 1
+ secret: osp-secret
+ octavia:
+ enabled: false
+ ovn:
+ template:
+ ovnController:
+ networkAttachment: tenant
+ nicMappings:
+ datacentre: ocpbr
+ ironic: ironic
+ ovnDBCluster:
+ ovndbcluster-nb:
+ dbType: NB
+ networkAttachment: internalapi
+ replicas: 1
+ storageRequest: 10G
+ ovndbcluster-sb:
+ dbType: SB
+ networkAttachment: internalapi
+ replicas: 1
+ storageRequest: 10G
+ ovnNorthd:
+ logLevel: info
+ nThreads: 1
+ replicas: 1
+ resources: {}
+ tls: {}
+ placement:
+ apiOverride:
+ route: {}
+ template:
+ databaseInstance: openstack
+ override:
+ service:
+ internal:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: internalapi
+ metallb.universe.tf/allow-shared-ip: internalapi
+ metallb.universe.tf/loadBalancerIPs: 172.17.0.80
+ spec:
+ type: LoadBalancer
+ preserveJobs: false
+ replicas: 1
+ secret: osp-secret
+ rabbitmq:
+ templates:
+ rabbitmq:
+ override:
+ service:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: internalapi
+ metallb.universe.tf/loadBalancerIPs: 172.17.0.85
+ spec:
+ type: LoadBalancer
+ replicas: 1
+ rabbitmq-cell1:
+ override:
+ service:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: internalapi
+ metallb.universe.tf/loadBalancerIPs: 172.17.0.86
+ spec:
+ type: LoadBalancer
+ replicas: 1
+ secret: osp-secret
+ storageClass: lvms-local-storage
+ notificationsBus:
+ cluster: rabbitmq
+ swift:
+ enabled: true
+ proxyOverride:
+ route: {}
+ template:
+ swiftProxy:
+ override:
+ service:
+ internal:
+ metadata:
+ annotations:
+ metallb.universe.tf/address-pool: internalapi
+ metallb.universe.tf/allow-shared-ip: internalapi
+ metallb.universe.tf/loadBalancerIPs: 172.17.0.80
+ spec:
+ type: LoadBalancer
+ replicas: 1
+ swiftRing:
+ ringReplicas: 1
+ swiftStorage:
+ replicas: 1
+ telemetry:
+ enabled: false
+{% endraw %}
diff --git a/ci/scenarios/sno-1-bm/manifests/control-plane/dnsmasq-dns-ironic.yaml b/ci/scenarios/sno-1-bm/manifests/control-plane/dnsmasq-dns-ironic.yaml
new file mode 100644
index 00000000..fc4ae709
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/manifests/control-plane/dnsmasq-dns-ironic.yaml
@@ -0,0 +1,27 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ core.openstack.org/ingress_create: "false"
+ metallb.io/ip-allocated-from-pool: ironic
+ metallb.universe.tf/address-pool: ironic
+ metallb.universe.tf/allow-shared-ip: ironic
+ metallb.universe.tf/loadBalancerIPs: 172.20.1.80
+ name: dnsmasq-dns-ironic
+ namespace: openstack
+ labels:
+ service: dnsmasq
+spec:
+ ports:
+ - name: dnsmasq
+ port: 53
+ protocol: UDP
+ targetPort: 5353
+ - name: dnsmasq-tcp
+ port: 53
+ protocol: TCP
+ targetPort: 5353
+ selector:
+ service: dnsmasq
+ type: LoadBalancer
diff --git a/ci/scenarios/sno-1-bm/manifests/control-plane/networking/metallb.yaml b/ci/scenarios/sno-1-bm/manifests/control-plane/networking/metallb.yaml
new file mode 100644
index 00000000..19c3c639
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/manifests/control-plane/networking/metallb.yaml
@@ -0,0 +1,110 @@
+---
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ labels:
+ osp/lb-addresses-type: standard
+ name: ctlplane
+ namespace: metallb-system
+spec:
+ addresses:
+ - 192.168.122.80-192.168.122.90
+---
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ labels:
+ osp/lb-addresses-type: standard
+ name: internalapi
+ namespace: metallb-system
+spec:
+ addresses:
+ - 172.17.0.80-172.17.0.90
+---
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ labels:
+ osp/lb-addresses-type: standard
+ name: storage
+ namespace: metallb-system
+spec:
+ addresses:
+ - 172.18.0.80-172.18.0.90
+---
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ labels:
+ osp/lb-addresses-type: standard
+ name: tenant
+ namespace: metallb-system
+spec:
+ addresses:
+ - 172.19.0.80-172.19.0.90
+---
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ labels:
+ osp/lb-addresses-type: standard
+ name: ironic
+ namespace: metallb-system
+spec:
+ addresses:
+ - 172.20.1.80-172.20.1.90
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: ctlplane
+ namespace: metallb-system
+spec:
+ interfaces:
+ - ospbr
+ ipAddressPools:
+ - ctlplane
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: internalapi
+ namespace: metallb-system
+spec:
+ interfaces:
+ - internalapi
+ ipAddressPools:
+ - internalapi
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: storage
+ namespace: metallb-system
+spec:
+ interfaces:
+ - storage
+ ipAddressPools:
+ - storage
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: tenant
+ namespace: metallb-system
+spec:
+ interfaces:
+ - tenant
+ ipAddressPools:
+ - tenant
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: ironic
+ namespace: metallb-system
+spec:
+ interfaces:
+ - ironic
+ ipAddressPools:
+ - ironic
diff --git a/ci/scenarios/sno-1-bm/manifests/control-plane/networking/nad.yaml b/ci/scenarios/sno-1-bm/manifests/control-plane/networking/nad.yaml
new file mode 100644
index 00000000..88b94f0b
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/manifests/control-plane/networking/nad.yaml
@@ -0,0 +1,133 @@
+---
+apiVersion: k8s.cni.cncf.io/v1
+kind: NetworkAttachmentDefinition
+metadata:
+ labels:
+ osp/net: ctlplane
+ osp/net-attach-def-type: standard
+ name: ctlplane
+ namespace: openstack
+spec:
+ config: |
+ {
+ "cniVersion": "0.3.1",
+ "name": "ctlplane",
+ "type": "macvlan",
+ "master": "ospbr",
+ "ipam": {
+ "type": "whereabouts",
+ "range": "192.168.122.0/24",
+ "range_start": "192.168.122.30",
+ "range_end": "192.168.122.70"
+ }
+ }
+---
+apiVersion: k8s.cni.cncf.io/v1
+kind: NetworkAttachmentDefinition
+metadata:
+ labels:
+ osp/net: ironic
+ osp/net-attach-def-type: standard
+ name: ironic
+ namespace: openstack
+spec:
+ config: |
+ {
+ "cniVersion": "0.3.1",
+ "name": "ironic",
+ "type": "bridge",
+ "bridge": "ironic",
+ "ipam": {
+ "type": "whereabouts",
+ "range": "172.20.1.0/24",
+ "range_start": "172.20.1.30",
+ "range_end": "172.20.1.70"
+ }
+ }
+---
+apiVersion: k8s.cni.cncf.io/v1
+kind: NetworkAttachmentDefinition
+metadata:
+ labels:
+ osp/net: datacentre
+ osp/net-attach-def-type: standard
+ name: datacentre
+ namespace: openstack
+spec:
+ config: |
+ {
+ "cniVersion": "0.3.1",
+ "name": "datacentre",
+ "type": "bridge",
+ "bridge": "ospbr",
+ "ipam": {}
+ }
+---
+apiVersion: k8s.cni.cncf.io/v1
+kind: NetworkAttachmentDefinition
+metadata:
+ labels:
+ osp/net: internalapi
+ osp/net-attach-def-type: standard
+ name: internalapi
+ namespace: openstack
+spec:
+ config: |
+ {
+ "cniVersion": "0.3.1",
+ "name": "internalapi",
+ "type": "macvlan",
+ "master": "internalapi",
+ "ipam": {
+ "type": "whereabouts",
+ "range": "172.17.0.0/24",
+ "range_start": "172.17.0.30",
+ "range_end": "172.17.0.70"
+ }
+ }
+---
+apiVersion: k8s.cni.cncf.io/v1
+kind: NetworkAttachmentDefinition
+metadata:
+ labels:
+ osp/net: storage
+ osp/net-attach-def-type: standard
+ name: storage
+ namespace: openstack
+spec:
+ config: |
+ {
+ "cniVersion": "0.3.1",
+ "name": "storage",
+ "type": "macvlan",
+ "master": "storage",
+ "ipam": {
+ "type": "whereabouts",
+ "range": "172.18.0.0/24",
+ "range_start": "172.18.0.30",
+ "range_end": "172.18.0.70"
+ }
+ }
+---
+apiVersion: k8s.cni.cncf.io/v1
+kind: NetworkAttachmentDefinition
+metadata:
+ labels:
+ osp/net: tenant
+ osp/net-attach-def-type: standard
+ name: tenant
+ namespace: openstack
+spec:
+ config: |
+ {
+ "cniVersion": "0.3.1",
+ "name": "tenant",
+ "type": "macvlan",
+ "master": "tenant",
+ "ipam": {
+ "type": "whereabouts",
+ "range": "172.19.0.0/24",
+ "range_start": "172.19.0.30",
+ "range_end": "172.19.0.70"
+ }
+ }
diff --git a/ci/scenarios/sno-1-bm/manifests/control-plane/networking/nncp.yaml.j2 b/ci/scenarios/sno-1-bm/manifests/control-plane/networking/nncp.yaml.j2
new file mode 100644
index 00000000..065c80cb
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/manifests/control-plane/networking/nncp.yaml.j2
@@ -0,0 +1,108 @@
+---
+apiVersion: nmstate.io/v1
+kind: NodeNetworkConfigurationPolicy
+metadata:
+ labels:
+ osp/nncm-config-type: standard
+ name: master-0
+ namespace: openstack
+spec:
+ desiredState:
+ interfaces:
+ - name: internalapi
+ type: vlan
+ description: internalapi vlan interface
+ ipv4:
+ address:
+ - ip: 172.17.0.10
+ prefix-length: "24"
+ dhcp: false
+ enabled: true
+ ipv6:
+ enabled: false
+ mtu: {{ stack_outputs.network_mtu.internal_api }}
+ state: up
+ vlan:
+ base-iface: eth1
+ id: "20"
+ - name: storage
+ type: vlan
+ description: storage vlan interface
+ ipv4:
+ address:
+ - ip: 172.18.0.10
+ prefix-length: "24"
+ dhcp: false
+ enabled: true
+ ipv6:
+ enabled: false
+ mtu: {{ stack_outputs.network_mtu.storage }}
+ state: up
+ vlan:
+ base-iface: eth1
+ id: "21"
+ - name: tenant
+ type: vlan
+ description: tenant vlan interface
+ ipv4:
+ address:
+ - ip: 172.19.0.10
+ prefix-length: "24"
+ dhcp: false
+ enabled: true
+ ipv6:
+ enabled: false
+ mtu: {{ stack_outputs.network_mtu.tenant }}
+ state: up
+ vlan:
+ base-iface: eth1
+ id: "22"
+ - description: ctlplane interface
+ mtu: {{ stack_outputs.network_mtu.ctlplane }}
+ name: eth1
+ state: up
+ type: ethernet
+ - name: ospbr
+ type: linux-bridge
+ description: linux-bridge over ctlplane interface
+ bridge:
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: eth1
+ vlan: {}
+ ipv4:
+ address:
+ - ip: 192.168.122.10
+ prefix-length: "24"
+ dhcp: false
+ enabled: true
+ ipv6:
+ enabled: false
+ mtu: {{ stack_outputs.network_mtu.ctlplane }}
+ state: up
+ - name: ironic
+ type: linux-bridge
+ description: Ironic bridge
+ bridge:
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: eth2
+ ipv4:
+ address:
+ - ip: 172.20.1.10
+ prefix-length: "24"
+ enabled: true
+ ipv6:
+ enabled: false
+ mtu: {{ stack_outputs.network_mtu.ironic }}
+ route-rules:
+ config: []
+ routes:
+ config: []
+ nodeSelector:
+ kubernetes.io/hostname: master-0
+ node-role.kubernetes.io/worker: ""
diff --git a/ci/scenarios/sno-1-bm/test-operator/README.md b/ci/scenarios/sno-1-bm/test-operator/README.md
new file mode 100644
index 00000000..dc44ea00
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/test-operator/README.md
@@ -0,0 +1,42 @@
+
+# AI generated README
+
+## Tempest Tests Configuration for Test Operator
+
+The YAML file, `tempest-tests.yml`, is a configuration for running Tempest
+tests, which is a validation framework for OpenStack. Here's a breakdown of the
+configuration:
+
+1. **apiVersion, kind, and metadata**: These fields define the API version,
+ kind (type) of resource, and metadata (name and namespace) for the Tempest
+ test job.
+
+2. **spec**: This section contains the configuration for the Tempest test job.
+
+ - **networkAttachments**: This field specifies the network attachment for
+ the test job. In this case, it's set to `ctlplane`.
+ - **storageClass**: This field sets the storage class for the test job to
+ `lvms-local-storage`.
+ - **privileged**: This field is set to `true`, which means the test
+ containers will have elevated privileges.
+ - **workflow**: This section defines the steps to be executed in the test
+ job. There are two steps in this configuration:
+ - **ironic-scenario-testing**: This step runs scenario tests for Ironic,
+ the OpenStack bare-metal provisioning service. The `tempestconfRun`
+ section configures Tempest settings for this step, such as disabling
+ isolated networks, setting the number of available nodes, and specifying
+ the compute flavor and hypervisor type. The `tempestRun` section
+ specifies the concurrency level and the list of tests to include and
+ exclude.
+ - **ironic-api-testing**: This step runs API tests for Ironic. Similar to
+ the previous step, the `tempestconfRun` section configures Tempest
+ settings, and the `tempestRun` section specifies the concurrency level
+ and the list of tests to include and exclude.
+
+In summary, this YAML file configures a Tempest test job to run two types of
+tests for Ironic: scenario tests and API tests. The tests are executed with
+specific configurations and concurrency levels.
diff --git a/ci/scenarios/sno-1-bm/test-operator/automation-vars.yml b/ci/scenarios/sno-1-bm/test-operator/automation-vars.yml
new file mode 100644
index 00000000..979dbfd7
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/test-operator/automation-vars.yml
@@ -0,0 +1,366 @@
+---
+stages:
+ - name: Apply ironic network-attachment-definition
+ documentation: >-
+ Creates a Multus NetworkAttachmentDefinition that allows pods to attach to the ironic
+ provisioning network. This is required for sushy-emulator to communicate with baremetal
+ nodes during provisioning operations via the dedicated provisioning network.
+ manifest: manifests/nad.yaml
+ wait_conditions:
+ - >-
+ oc wait -n sushy-emulator network-attachment-definitions.k8s.cni.cncf.io ironic
+ --for jsonpath='{.metadata.annotations}' --timeout=30s
+
+ - name: Patch RedFish Sushy Emulator Deployment - add network attachment
+ documentation: >-
+ Modifies the sushy-emulator deployment to attach to the ironic provisioning network
+ via CNI network annotation. This enables the BMC simulator to receive Redfish API
+ calls on the correct network segment where baremetal nodes expect their BMC interfaces.
+ shell: |
+ set -xe -o pipefail
+
+ TMP_DIR="$(mktemp -d)"
+ trap 'rm -rf -- "$TMP_DIR"' EXIT
+
+ oc project sushy-emulator
+
+ cat << EOF > ${TMP_DIR}/sushy-emulator-network-annotations-patch.yaml
+ spec:
+ template:
+ metadata:
+ annotations:
+ k8s.v1.cni.cncf.io/networks: '[{"name":"ironic","namespace":"sushy-emulator","interface":"ironic"}]'
+ EOF
+
+ oc patch deployments.apps sushy-emulator --patch-file ${TMP_DIR}/sushy-emulator-network-annotations-patch.yaml
+ wait_conditions:
+ - "oc -n sushy-emulator wait deployments.apps sushy-emulator --for condition=Available --timeout=300s"
+
+ - name: Create public network if needed
+ documentation: >-
+ Establishes the external network that provides floating IP connectivity to instances.
+ This network is mapped to the physical 'datacentre' network and serves as the gateway
+ for external traffic routing. Essential for instances to reach external services and
+ for external clients to access instance services via floating IPs.
+ shell: |
+ set -xe -o pipefail
+ oc project openstack
+
+ oc rsh openstackclient openstack network show public &>/dev/null || \
+ oc rsh openstackclient openstack network create public \
+ --external \
+ --no-share \
+ --default \
+ --provider-network-type flat \
+ --provider-physical-network datacentre
+
+ - name: Create subnet on public network if needed
+ documentation: >-
+ Defines the IP address pool and routing configuration for the public network.
+ The 192.168.122.0/24 range with allocation pool 171-250 provides floating IPs
+ while preserving lower addresses for infrastructure. Gateway 192.168.122.1
+ routes traffic to external networks.
+ shell: |
+ set -xe -o pipefail
+ oc project openstack
+
+ oc rsh openstackclient openstack subnet show public_subnet &>/dev/null || \
+ oc rsh openstackclient openstack subnet create public_subnet \
+ --network public \
+ --subnet-range 192.168.122.0/24 \
+ --allocation-pool start=192.168.122.171,end=192.168.122.250 \
+ --gateway 192.168.122.1 \
+ --dhcp
+
+ - name: Create private network if needed
+ documentation: >-
+ Creates the default tenant network for instance-to-instance communication.
+ This shared network allows multiple tenants to deploy instances that can
+ communicate privately while being isolated from external networks until
+ floating IPs are assigned.
+ shell: |
+ set -xe -o pipefail
+ oc project openstack
+
+ oc rsh openstackclient openstack network show private &>/dev/null || \
+ oc rsh openstackclient openstack network create private --share
+
+ - name: Create subnet on private network if needed
+ documentation: >-
+ Configures the private tenant network with RFC1918 addressing (10.2.0.0/24).
+ This subnet provides DHCP-assigned IP addresses for instances deployed on
+ the private network, enabling inter-instance communication before floating
+ IP assignment for external access.
+ shell: |
+ set -xe -o pipefail
+ oc project openstack
+
+ oc rsh openstackclient openstack subnet show private_subnet &>/dev/null || \
+ oc rsh openstackclient openstack subnet create private_subnet \
+ --network private \
+ --subnet-range 10.2.0.0/24 \
+ --allocation-pool start=10.2.0.10,end=10.2.0.250 \
+ --gateway 10.2.0.1 \
+ --dhcp
+
+ - name: Create network for ironic provisioning if needed
+ documentation: >-
+ Establishes the dedicated baremetal provisioning network mapped to the physical
+ 'ironic' network interface. This isolated network carries PXE boot traffic, DHCP
+ for baremetal nodes, and communication between Ironic services and nodes during
+ deployment operations, keeping provisioning traffic separate from tenant networks.
+ shell: |
+ set -xe -o pipefail
+ oc project openstack
+
+ oc rsh openstackclient openstack network show provisioning &>/dev/null || \
+ oc rsh openstackclient \
+ openstack network create provisioning \
+ --share \
+ --provider-physical-network ironic \
+ --provider-network-type flat
+
+ - name: Create subnet for ironic provisioning if needed
+ documentation: >-
+ Configures IP addressing for the baremetal provisioning network (172.20.1.0/24).
+ DNS server 172.20.1.80 provides name resolution during node deployment.
+ The allocation pool 100-200 reserves addresses for DHCP assignment to baremetal
+ nodes during their provisioning lifecycle.
+ shell: |
+ set -xe -o pipefail
+ oc project openstack
+
+ oc rsh openstackclient openstack subnet show provisioning-subnet &>/dev/null || \
+ oc rsh openstackclient \
+ openstack subnet create provisioning-subnet \
+ --network provisioning \
+ --subnet-range 172.20.1.0/24 \
+ --gateway 172.20.1.1 \
+ --dns-nameserver 172.20.1.80 \
+ --allocation-pool start=172.20.1.100,end=172.20.1.200
+
+ - name: Create baremetal flavor if needed
+ documentation: >-
+ Defines a Nova flavor for baremetal instances with custom resource requirements.
+ Uses CUSTOM_BAREMETAL=1 to match against baremetal node resource classes,
+ while setting standard resources (VCPU, MEMORY_MB, DISK_GB) to 0 since
+ physical resources are managed by Ironic rather than Nova's scheduler.
+ UEFI boot mode ensures compatibility with modern baremetal hardware.
+ shell: |
+ set -xe -o pipefail
+ oc project openstack
+
+ oc rsh openstackclient openstack flavor show baremetal &>/dev/null || \
+ oc rsh openstackclient \
+ openstack flavor create baremetal \
+ --id 123456789-1234-1234-1234-000000000001 \
+ --ram 1024 \
+ --vcpus 1 \
+ --disk 15 \
+ --property resources:VCPU=0 \
+ --property resources:MEMORY_MB=0 \
+ --property resources:DISK_GB=0 \
+ --property resources:CUSTOM_BAREMETAL=1 \
+ --property capabilities:boot_mode=uefi
+
+ - name: Copy ironic_nodes.yaml to the openstackclient pod
+ documentation: >-
+ Transfers the baremetal node definition file containing BMC credentials,
+ hardware specifications, and network configurations from the local filesystem
+ to the OpenStack client pod. This file defines the physical infrastructure
+ that Ironic will manage for baremetal provisioning.
+ shell: |
+ set -xe -o pipefail
+ oc project openstack
+ oc cp ~/data/ironic_nodes.yaml openstackclient:ironic_nodes.yaml
+
+ - name: Enroll nodes in ironic
+ documentation: >-
+ Registers physical baremetal nodes with the Ironic service using the node
+ definitions from ironic_nodes.yaml. This creates Ironic node records with
+ BMC access credentials, hardware profiles, and port configurations, marking
+ the beginning of the node lifecycle management in OpenStack.
+ shell: |
+ set -xe -o pipefail
+ oc project openstack
+ oc rsh openstackclient openstack baremetal create ironic_nodes.yaml
+
+ - name: Wait for ironic nodes to get to state - enroll
+ documentation: >-
+ Monitors node state transition to 'enroll' status, indicating that Ironic
+ has successfully registered the nodes and validated basic BMC connectivity.
+ This is the first state in the baremetal provisioning lifecycle, confirming
+ that nodes are recognized by the system before management operations begin.
+ shell: |
+ oc project openstack
+
+ counter=0
+ max_retries=100
+ node_state=enroll
+ until ! oc rsh openstackclient openstack baremetal node list -f value -c "Provisioning State" | grep -P "^(?!${node_state}).*$"; do
+ ((counter++))
+ if (( counter > max_retries )); then
+ echo "ERROR: Timeout. Nodes did not reach state: enroll"
+ exit 1
+ fi
+ echo "Waiting for nodes to reach state enroll"
+ sleep 10
+ done
+
+ - name: Manage ironic nodes
+ documentation: >-
+ Initiates the transition from 'enroll' to 'manageable' state. During the
+ intermediate 'verifying' step, Ironic validates the BMC (driver) credentials
+ and confirms power management access, preparing the nodes for subsequent
+ cleaning and provisioning operations.
+ shell: |
+ set -xe -o pipefail
+ oc project openstack
+
+ oc rsh openstackclient openstack baremetal node manage ironic0
+
+ - name: Wait for ironic nodes to get to state - manageable
+ documentation: >-
+ Waits for nodes to reach the 'manageable' state. In this state, Ironic has
+ successfully validated BMC credentials and power management functionality,
+ confirming the nodes are ready for cleaning and provisioning operations.
+ This is a prerequisite for making nodes available to tenants.
+ shell: |
+ oc project openstack
+
+ counter=0
+ max_retries=100
+ node_state=manageable
+ until ! oc rsh openstackclient openstack baremetal node list -f value -c "Provisioning State" | grep -P "^(?!${node_state}).*$"; do
+ ((counter++))
+ if (( counter > max_retries )); then
+ echo "ERROR: Timeout. Nodes did not reach state: manageable"
+ exit 1
+ fi
+ echo "Waiting for nodes to reach state manageable"
+ sleep 10
+ done
+
+ - name: Power off the ironic nodes
+ documentation: >-
+ Ensures all baremetal nodes are powered down before configuration changes.
+ This prevents potential issues during capability updates and ensures a clean
+ state before transitioning nodes to 'available'. Power management validation
+ also confirms BMC functionality is working correctly.
+ shell: |
+ set -xe -o pipefail
+ oc project openstack
+
+ oc rsh openstackclient openstack baremetal node power off ironic0
+
+ - name: Ensure ironic nodes are powered off
+ documentation: >-
+ Verifies that power management commands have taken effect and all nodes
+ report 'power off' status. This confirmation is critical before transitioning
+ to 'available' state, as Nova expects baremetal nodes to be powered off
+ when not actively hosting instances.
+ shell: |
+ oc project openstack
+
+ counter=0
+ max_retries=100
+ power_state="off"
+ until ! oc rsh openstackclient openstack baremetal node list -f value -c "Power State" | grep -P "^power.(?!${power_state}).*$"; do
+ ((counter++))
+ if (( counter > max_retries )); then
+ echo "ERROR: Timeout. Nodes did not reach power state: power off"
+ exit 1
+ fi
+ echo "Waiting for nodes to reach power state off"
+ sleep 10
+ done
+
+ - name: Provide ironic nodes
+ documentation: >-
+ Transitions nodes from 'manageable' to 'available' state, making them eligible
+ for tenant provisioning. This triggers automated cleaning processes to ensure
+ nodes are in a pristine state, removing any residual data from previous
+ deployments and preparing them for new instance launches.
+ shell: |
+ set -xe -o pipefail
+ oc project openstack
+
+ oc rsh openstackclient openstack baremetal node provide ironic0
+
+ - name: Wait for ironic nodes to get to state - available
+ documentation: >-
+ Confirms nodes have completed the cleaning process and reached 'available' state.
+ Available nodes appear in Nova's resource inventory and can be allocated to
+ instance requests that match the baremetal flavor. This state indicates the
+ baremetal infrastructure is fully operational and ready for workload deployment.
+ shell: |
+ oc project openstack
+
+ counter=0
+ max_retries=100
+ node_state=available
+ while true; do
+ node_states=$(oc rsh openstackclient openstack baremetal node list -f value -c "Provisioning State")
+
+ # Check if all nodes are in available state
+ if ! echo "$node_states" | grep -P "^(?!${node_state}).*$" > /dev/null; then
+ echo "All nodes have reached state: ${node_state}"
+ break
+ fi
+
+ # Check for failed states and exit immediately if found
+ if echo "$node_states" | grep -q "failed"; then
+ echo "ERROR: One or more nodes are in a failed state:"
+ oc rsh openstackclient openstack baremetal node list
+ exit 1
+ fi
+
+ ((counter++))
+ if (( counter > max_retries )); then
+ echo "ERROR: Timeout. Nodes did not reach state: available"
+ exit 1
+ fi
+
+ echo "Waiting for nodes to reach state: available"
+ sleep 10
+ done
+
+ - name: Wait for expected compute services (OSPRH-10942)
+ documentation: >-
+ Waits for Nova compute services to register and become available in the
+ Nova service registry. The hotstack-nova-discover-hosts utility ensures
+ that Ironic conductor services are properly registered as compute nodes,
+ enabling Nova scheduler to place baremetal instances. References bug
+ OSPRH-10942 related to compute service discovery timing issues.
+ wait_conditions:
+ - >-
+ timeout --foreground 5m hotstack-nova-discover-hosts
+ --namespace openstack --num-computes 1
+
+ - name: Run tempest
+ documentation: >-
+ Executes comprehensive OpenStack validation tests using the Tempest framework.
+ These tests validate API functionality, resource management, and integration
+ between OpenStack services in the baremetal environment. Includes both the
+ test execution phase (workflowStep=0) and log collection phase (workflowStep=1)
+ to ensure complete validation results are captured for analysis.
+ manifest: tempest-tests.yml
+ wait_conditions:
+ - >-
+ oc wait -n openstack tempests.test.openstack.org tempest-tests
+ --for condition=ServiceConfigReady --timeout=120s
+ wait_pod_completion:
+ - namespace: openstack
+ labels:
+ operator: test-operator
+ service: tempest
+ workflowStep: "0"
+ timeout: 900
+ poll_interval: 15
+ - namespace: openstack
+ labels:
+ operator: test-operator
+ service: tempest
+ workflowStep: "1"
+ timeout: 900
+ poll_interval: 15
diff --git a/ci/scenarios/sno-1-bm/test-operator/manifests/nad.yaml b/ci/scenarios/sno-1-bm/test-operator/manifests/nad.yaml
new file mode 100644
index 00000000..bd83450d
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/test-operator/manifests/nad.yaml
@@ -0,0 +1,20 @@
+---
+apiVersion: k8s.cni.cncf.io/v1
+kind: NetworkAttachmentDefinition
+metadata:
+ name: ironic
+ namespace: sushy-emulator
+spec:
+ config: |
+ {
+ "cniVersion": "0.3.1",
+ "name": "ironic",
+ "type": "bridge",
+ "bridge": "ironic",
+ "ipam": {
+ "type": "whereabouts",
+ "range": "172.20.1.0/24",
+ "range_start": "172.20.1.71",
+ "range_end": "172.20.1.75"
+ }
+ }
diff --git a/ci/scenarios/sno-1-bm/test-operator/tempest-tests.yml b/ci/scenarios/sno-1-bm/test-operator/tempest-tests.yml
new file mode 100644
index 00000000..acf184e0
--- /dev/null
+++ b/ci/scenarios/sno-1-bm/test-operator/tempest-tests.yml
@@ -0,0 +1,93 @@
+---
+apiVersion: test.openstack.org/v1beta1
+kind: Tempest
+metadata:
+ name: tempest-tests
+ namespace: openstack
+spec:
+ networkAttachments:
+ - ctlplane
+ privileged: true
+ workflow:
+ - stepName: ironic-scenario-testing
+ storageClass: lvms-local-storage
+ tempestconfRun:
+ create: true
+ overrides: |
+ auth.create_isolated_networks false
+ baremetal.available_nodes 1
+ baremetal.max_microversion 1.82
+ compute-feature-enabled.config_drive true
+ compute-feature-enabled.disk_config false
+ compute-feature-enabled.interface_attach false
+ compute.fixed_network_name provisioning
+ compute.flavor_ref 123456789-1234-1234-1234-000000000001
+ compute.hypervisor_type ironic
+ compute.build_timeout 900
+ network.shared_physical_network true
+ service_available.ironic_inspector false
+ service_available.ironic true
+ service_available.murano false
+ validation.connect_method fixed
+ validation.network_for_ssh provisioning
+ tempestRun:
+ concurrency: 4
+ includeList: |
+ ^ironic_tempest_plugin.tests.scenario.test_baremetal_basic_ops.*
+ excludeList: |
+ ^ironic_tempest_plugin.tests.scenario.test_baremetal_basic_ops.BaremetalBasicOps.test_baremetal_server_ops_partition_image
+
+ - stepName: ironic-api-testing
+ storageClass: lvms-local-storage
+ tempestconfRun:
+ create: true
+ overrides: |
+ baremetal.driver fake-hardware
+ baremetal.max_microversion 1.82
+ service_available.ironic_inspector true
+ service_available.ironic true
+ service_available.murano false
+ tempestRun:
+ concurrency: 8
+ includeList: |
+ ^ironic_tempest_plugin.tests.api.*
+ excludeList: |
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_create_allocation_candidate_node
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_create_allocation_node_mismatch
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_create_allocation_resource_class_mismatch
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_create_allocation_traits_mismatch
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_create_allocation_with_traits
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_create_show_allocation
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_delete_allocation
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_delete_allocation_by_name
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_list_allocations
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_list_allocations_by_state
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_show_by_name
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestBackfill.test_backfill_allocation
+ ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestBackfill.test_backfill_without_resource_class
+ ^ironic_tempest_plugin.tests.api.admin.test_nodestates.TestNodeStatesV1_11.test_set_node_provision_state
+ ^ironic_tempest_plugin.tests.api.admin.test_nodestates.TestNodeStatesV1_1.test_set_node_provision_state
+ ^ironic_tempest_plugin.tests.api.admin.test_nodestates.TestNodeStatesV1_2.test_set_node_provision_state
+ ^ironic_tempest_plugin.tests.api.admin.test_nodestates.TestNodeStatesV1_4.test_set_node_provision_state
+ ^ironic_tempest_plugin.tests.api.admin.test_nodestates.TestNodeStatesV1_6.test_set_node_provision_state
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestHardwareInterfaces.test_reset_interfaces
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodeProtected.test_node_protected
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodeProtected.test_node_protected_from_deletion
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodeProtected.test_node_protected_negative
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodeProtected.test_node_protected_set_unset
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesProtectedOldApi.test_node_protected_old_api
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_already_attached_on_internal_info
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_already_attached_with_portgroups
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_already_set
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_attach_no_args
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_attach_no_free_port
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_attach_no_port
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_attach_port_not_in_portgroup
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_attach_with_empty_portgroup
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_detach_not_existing
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_on_port
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_on_portgroup
+ ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestResetInterfaces.test_reset_interfaces
+ ^ironic_tempest_plugin.tests.api.rbac_defaults.test_nodes.TestNodeProjectReader.test_reader_cannot_update_owner_provisioned
+ ^ironic_tempest_plugin.tests.api.rbac_defaults.test_nodes.TestNodeSystemReader.test_reader_cannot_update_owner_provisioned
+ ^ironic_tempest_plugin.tests.api.rbac_defaults.test_nodes.TestNodeSystemReader.*
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
new file mode 100644
index 00000000..d5097fb7
--- /dev/null
+++ b/zuul.d/jobs.yaml
@@ -0,0 +1,27 @@
+- job:
+ name: hotstack-sno-1-bm
+ parent: base-hotstack-vexxhost
+ description: |
+ Hotstack scenario: sno-1-bm
+ timeout: 5400
+ attempts: 1
+ required-projects:
+ - openstack-k8s-operators/hotstack
+ - openstack-k8s-operators/ci-framework
+ dependencies:
+ - openstack-k8s-operators-content-provider
+ vars:
+ scenario: sno-1-bm
+ scenario_dir: >-
+ {{
+ [ansible_user_dir, zuul.project.src_dir, 'ci', 'scenarios']
+ | ansible.builtin.path_join
+ }}
+ openstack_operators_image: >-
+ {{
+ cifmw_operator_build_output.operators['openstack-operator'].image_catalog
+ if (cifmw_operator_build_output is defined and
+ cifmw_operator_build_output.operators is defined and
+ 'openstack-operator' in cifmw_operator_build_output.operators)
+ else 'quay.io/openstack-k8s-operators/openstack-operator-index:latest'
+ }}
diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml
index aacc1f13..f815a7d0 100644
--- a/zuul.d/projects.yaml
+++ b/zuul.d/projects.yaml
@@ -8,3 +8,4 @@
- openstack-k8s-operators-content-provider:
vars:
cifmw_install_yamls_sdk_version: v1.41.1
+ - hotstack-sno-1-bm