From 1676b950a35c1a1d9557296aa6031a341c1efbee Mon Sep 17 00:00:00 2001
From: Daniel Pawlik
Date: Thu, 20 Nov 2025 14:59:05 +0100
Subject: [PATCH 1/5] [multiple] Prepare cifmw to run with Zironic

The Zironic tool pre-deploys the OCP cluster before the CI job "picks"
the baremetal host.

Signed-off-by: Daniel Pawlik
---
 reproducer.yml                          | 124 +++++++++++++-----------
 roles/devscripts/tasks/131_packages.yml |   3 +-
 roles/reproducer/tasks/push_code.yml    |  16 +++
 3 files changed, 87 insertions(+), 56 deletions(-)

diff --git a/reproducer.yml b/reproducer.yml
index 75a92922a1..5dee7c8e30 100644
--- a/reproducer.yml
+++ b/reproducer.yml
@@ -5,64 +5,69 @@
 - name: Reproducer prepare play
   hosts: "{{ cifmw_target_host | default('localhost') }}"
   gather_facts: true
-  pre_tasks:
-    - name: Inherit from parent scenarios if needed
-      ansible.builtin.include_tasks:
-        file: "ci/playbooks/tasks/inherit_parent_scenario.yml"
-
-    - name: Include common architecture parameter file
+  tasks:
+    - name: Run tasks - reproducer prepare
       when:
-        - cifmw_architecture_scenario is defined
-        - cifmw_architecture_scenario | length > 0
-      ansible.builtin.include_vars:
-        file: "scenarios/reproducers/va-common.yml"
+        - cifmw_deploy_reproducer_env | default(true) | bool
+      block:
+        - name: Inherit from parent scenarios if needed
+          ansible.builtin.include_tasks:
+            file: "ci/playbooks/tasks/inherit_parent_scenario.yml"

-    - name: Run reproducer validations
-      ansible.builtin.import_role:
-        name: reproducer
-        tasks_from: validations.yml
+        - name: Include common architecture parameter file
+          when:
+            - cifmw_architecture_scenario is defined
+            - cifmw_architecture_scenario | length > 0
+          ansible.builtin.include_vars:
+            file: "scenarios/reproducers/va-common.yml"

-    - name: Gather OS facts
-      ansible.builtin.setup:
-        gather_subset:
-          - "!all"
-          - "!min"
-          - "distribution"
+        - name: Run reproducer validations
+          ansible.builtin.import_role:
+            name: reproducer
+            tasks_from: validations.yml

-    - name: Tweak dnf configuration
-      become: true
-      community.general.ini_file:
-        no_extra_spaces: true
-        option: "{{ config.option }}"
-        path: "/etc/dnf/dnf.conf"
-        section: "{{ config.section | default('main') }}"
-        state: "{{ config.state | default(omit) }}"
-        value: "{{ config.value | default(omit) }}"
-        mode: "0644"
-      loop: "{{ cifmw_reproducer_dnf_tweaks }}"
-      loop_control:
-        label: "{{ config.option }}"
-        loop_var: 'config'
-
-    - name: Install custom CA if needed
-      ansible.builtin.import_role:
-        name: install_ca
+        - name: Gather OS facts
+          ansible.builtin.setup:
+            gather_subset:
+              - "!all"
+              - "!min"
+              - "distribution"

-    - name: Setup repositories via rhos-release if needed
-      tags:
-        - packages
-      when:
-        - ansible_facts['distribution'] == 'RedHat'
-        - cifmw_reproducer_hp_rhos_release | bool
-      vars:
-        cifmw_repo_setup_output: /etc/yum.repos.d
-        cifmw_repo_setup_rhos_release_args: "rhel"
-      ansible.builtin.import_role:
-        name: repo_setup
-        tasks_from: rhos_release.yml
+        - name: Tweak dnf configuration
+          become: true
+          community.general.ini_file:
+            no_extra_spaces: true
+            option: "{{ config.option }}"
+            path: "/etc/dnf/dnf.conf"
+            section: "{{ config.section | default('main') }}"
+            state: "{{ config.state | default(omit) }}"
+            value: "{{ config.value | default(omit) }}"
+            mode: "0644"
+          loop: "{{ cifmw_reproducer_dnf_tweaks }}"
+          loop_control:
+            label: "{{ config.option }}"
+            loop_var: 'config'

-  roles:
-    - role: ci_setup
+        - name: Install custom CA if needed
+          ansible.builtin.import_role:
+            name: install_ca
+
+        - name: Setup repositories via rhos-release if needed
+          tags:
+            - packages
+          when:
+            - ansible_facts['distribution'] == 'RedHat'
+            - cifmw_reproducer_hp_rhos_release | bool
+          vars:
+            cifmw_repo_setup_output: /etc/yum.repos.d
+            cifmw_repo_setup_rhos_release_args: "rhel"
+          ansible.builtin.import_role:
+            name: repo_setup
+            tasks_from: rhos_release.yml
+
+        - name: Run ci_setup
+          ansible.builtin.import_role:
+            name: ci_setup

 - name: Prepare switches
   vars:
@@ -72,9 +77,18 @@
 - name: Reproducer run
   hosts: "{{ cifmw_target_host | default('localhost') }}"
   gather_facts: false
-  roles:
-    - role: reproducer
-  post_tasks:
+  tasks:
+    - name: Run reproducer role
+      when:
+        - cifmw_deploy_reproducer_env | default(true) | bool
+      ansible.builtin.import_role:
+        name: reproducer
+
+    - name: Finish playbook if prepare environment executed
+      when:
+        - cifmw_deploy_reproducer_env | default(false) | bool
+      ansible.builtin.meta: end_play
+
     - name: Allow traffic from OSP VMs to OSP API (needed for shiftstack)
       become: true
       when: cifmw_allow_vms_to_reach_osp_api | default (false) | bool
diff --git a/roles/devscripts/tasks/131_packages.yml b/roles/devscripts/tasks/131_packages.yml
index cd41d6c6f2..381aaae30f 100644
--- a/roles/devscripts/tasks/131_packages.yml
+++ b/roles/devscripts/tasks/131_packages.yml
@@ -29,7 +29,8 @@
     name: ci_setup
     tasks_from: epel.yml
-- name: Ensure conflicting package does not exist.
+- name: Ensure conflicting package does not exist
+  when: not cifmw_devscripts_zironic_deployment_only_ocp
   become: true
   ansible.builtin.package:
     name: "ansible-core"
     state: absent
diff --git a/roles/reproducer/tasks/push_code.yml b/roles/reproducer/tasks/push_code.yml
index c32ef15f6a..89ac7f3d7b 100644
--- a/roles/reproducer/tasks/push_code.yml
+++ b/roles/reproducer/tasks/push_code.yml
@@ -1,4 +1,20 @@
 ---
+# FIXME: Normally this part should not be needed, but if not all of the
+# "required" projects are available in the Zuul var, we will later hit
+# an error such as:
+#
+# TASK [repo_setup : Make sure git-core package is installed] ********************
+# fatal: [hypervisor -> controller-0(controller-0.hypervisor)]: FAILED! =>
+# fact that ''no_log: true'' was specified
+#
+# OR
+#
+# TASK [repo_setup : Make sure git-core package is installed] ********************
+# ok: [hypervisor -> controller-0(controller-0.hypervisor)]
+# TASK [repo_setup : Get repo-setup repository] **********************************
+# fatal: [hypervisor -> controller-0(controller-0.hypervisor)]: FAILED! =>
+# fact that ''no_log: true'' was specified
+#
 - name: Sync zuul content if available
   when:
     - (zuul is defined) or (zuul_vars.zuul is defined)

From 25b5cfcd3a27108d8d71b64d5fc8919cfea3c5d9 Mon Sep 17 00:00:00 2001
From: Daniel Pawlik
Date: Thu, 11 Dec 2025 11:49:45 +0100
Subject: [PATCH 2/5] [libvirt_manager] Always gather VM facts

Without gathering facts here, when the reproducer playbook is executed
via Zironic or locally, /etc/ci/env/networking-environment-definition.yml
will later be missing the "hostname" and "interface_name" keys, which
makes generating the network-values with ci_gen_kustomize_values fail.

Signed-off-by: Daniel Pawlik
---
 roles/reproducer/tasks/configure_controller.yml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/roles/reproducer/tasks/configure_controller.yml b/roles/reproducer/tasks/configure_controller.yml
index 81db115a17..d794811d56 100644
--- a/roles/reproducer/tasks/configure_controller.yml
+++ b/roles/reproducer/tasks/configure_controller.yml
@@ -474,6 +474,11 @@
     cifmw_networking_mapper_network_name: >-
       {{ _cifmw_libvirt_manager_layout.vms.controller.nets.1 }}
     cifmw_networking_mapper_basedir: "{{ cifmw_reproducer_controller_basedir }}"
+    # NOTE(dpawlik): Without gathering facts here, when the reproducer playbook
+    # is executed via Zironic or locally, /etc/ci/env/networking-environment-definition.yml
+    # will later be missing the "hostname" and "interface_name" keys, which
+    # breaks generating network-values with ci_gen_kustomize_values.
+    cifmw_networking_mapper_gather_facts: true
   ansible.builtin.import_role:
     name: networking_mapper

From ce2f213878c148e386927be93a11c789543bc3ff Mon Sep 17 00:00:00 2001
From: Daniel Pawlik
Date: Wed, 17 Dec 2025 11:20:30 +0100
Subject: [PATCH 3/5] [libvirt_manager] Generate VM inventory files

libvirt_manager adds the VMs to the inventory during the playbook
execution. This means that when Zironic bootstraps the host, the CI job
that later continues on it would not be able to reach the VMs, such as
controller-0.

Add a feature that dumps the VM groups into an inventory file.

Signed-off-by: Daniel Pawlik
---
 roles/libvirt_manager/defaults/main.yml |  3 ++
 .../tasks/add_vm_to_inventory.yml       | 24 ++++++++++++++
 .../tasks/generate_networking_data.yml  | 31 +++++++++++++++----
 3 files changed, 52 insertions(+), 6 deletions(-)
 create mode 100644 roles/libvirt_manager/tasks/add_vm_to_inventory.yml

diff --git a/roles/libvirt_manager/defaults/main.yml b/roles/libvirt_manager/defaults/main.yml
index 5a9fdc9155..e797fd062d 100644
--- a/roles/libvirt_manager/defaults/main.yml
+++ b/roles/libvirt_manager/defaults/main.yml
@@ -89,3 +89,6 @@
 cifmw_libvirt_manager_extra_network_configuration: {}
 cifmw_libvirt_manager_vm_users: []
 cifmw_libvirt_manager_radvd_networks: []
+
+cifmw_libvirt_manager_tmp_inv_file: "{{ ansible_user_dir }}/libvirt_inventory.ini"
+cifmw_libvirt_manager_dst_inv_file: "{{ ansible_user_dir }}/libvirt_inventory.yaml"
diff --git a/roles/libvirt_manager/tasks/add_vm_to_inventory.yml b/roles/libvirt_manager/tasks/add_vm_to_inventory.yml
new file mode 100644
index 0000000000..b5642f9038
--- /dev/null
+++ b/roles/libvirt_manager/tasks/add_vm_to_inventory.yml
@@ -0,0 +1,24 @@
+---
+- name: Add host to runtime inventory
+  ansible.builtin.add_host:
+    name: "{{ _full_host_name }}"
+    groups: "{{ _group }}s"
+    ansible_ssh_user: "{{ _ssh_user }}"
+    ansible_host: "{{ _add_ansible_host | ternary(_ansible_host, omit) }}"
+    vm_type: "{{ _group }}"
+
+- name: Ensure group section exists
+  ansible.builtin.lineinfile:
+    path: "{{ cifmw_libvirt_manager_tmp_inv_file }}"
+    create: true
+    line: "[{{ _group }}s]"
+    regexp: "^\\[{{ _group }}s\\]$"
+    state: present
+    mode: "0644"
+
+- name: Append host under proper group
+  ansible.builtin.lineinfile:
+    path: "{{ cifmw_libvirt_manager_tmp_inv_file }}"
+    insertafter: "^\\[{{ _group }}s\\]$"
+    line: "{{ _ini_line }}"
+    regexp: "^{{ _full_host_name | regex_escape() }} "
diff --git a/roles/libvirt_manager/tasks/generate_networking_data.yml b/roles/libvirt_manager/tasks/generate_networking_data.yml
index 3f69c76cb1..fdca0d64e4 100644
--- a/roles/libvirt_manager/tasks/generate_networking_data.yml
+++ b/roles/libvirt_manager/tasks/generate_networking_data.yml
@@ -118,16 +118,35 @@
         _cifmw_libvirt_manager_layout.vms[_vm_type].disk_file_name |
         default(_vm_type is not none) != 'blank' }}
     _ansible_host: "{{ _hostname }}.{{ inventory_hostname }}"
-  ansible.builtin.add_host:
-    name: "{{ item.key | replace('_', '-') }}"
-    groups: "{{ _group }}s"
-    ansible_ssh_user: "{{ _ssh_user }}"
-    ansible_host: "{{ _add_ansible_host | ternary(_ansible_host, omit) }}"
-    vm_type: "{{ _group }}"
+    _full_host_name: "{{ item.key | replace('_', '-') }}"
+    _ini_line: >-
+      {{ _full_host_name }}
+      {% if _add_ansible_host %} ansible_host={{ _ansible_host }}{% endif %}
+      ansible_user={{ _ssh_user }}
+      vm_type={{ _group }}
+  ansible.builtin.include_tasks: add_vm_to_inventory.yml
   loop: "{{ cifmw_libvirt_manager_mac_map | dict2items }}"
   loop_control:
     label: "Adding {{ item.key }} to {{ _group }}s"

+- name: Check if ini inventory file exists
+  ansible.builtin.stat:
+    path: "{{ cifmw_libvirt_manager_tmp_inv_file }}"
+  register: _libvirt_inv_file
+
+- name: Convert ini inventory file into yaml
+  when: _libvirt_inv_file.stat.exists
+  ansible.builtin.shell: >
+    ansible-inventory
+    -i {{ cifmw_libvirt_manager_tmp_inv_file }}
+    -y --list > {{ cifmw_libvirt_manager_dst_inv_file }}
+
+- name: Remove ini file after conversion
+  when: _libvirt_inv_file.stat.exists
+  ansible.builtin.file:
+    path: "{{ cifmw_libvirt_manager_tmp_inv_file }}"
+    state: absent
+
 - name: Generate all IPs based on MAC and networks
   vars:
     cifmw_networking_mapper_ifaces_info: >-

From 7d6c1f3a0c149c524b7c48b38814c5eb2ccef336 Mon Sep 17 00:00:00 2001
From: Daniel Pawlik
Date: Wed, 17 Dec 2025 11:25:24 +0100
Subject: [PATCH 4/5] [devscripts] Do not remove ansible-core

By moving the bootstrap procedure from the job execution to Zironic, we
changed what configures the baremetal host (BM). Normally, during CI job
execution Zuul spawns a controller, the controller configures the
baremetal host, and the rest of the job runs on controller-0 via the
baremetal host. With Zironic, to avoid creating an additional VM (the
controller), we moved the procedure to run directly on the baremetal
host. This means the ansible-core package needs to stay available,
otherwise the bootstrap procedure would fail because of missing modules,
libraries etc.

Signed-off-by: Daniel Pawlik
---
 roles/devscripts/tasks/131_packages.yml | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/roles/devscripts/tasks/131_packages.yml b/roles/devscripts/tasks/131_packages.yml
index 381aaae30f..324465108c 100644
--- a/roles/devscripts/tasks/131_packages.yml
+++ b/roles/devscripts/tasks/131_packages.yml
@@ -28,10 +28,3 @@
   ansible.builtin.import_role:
     name: ci_setup
     tasks_from: epel.yml
-
-- name: Ensure conflicting package does not exist
-  when: not cifmw_devscripts_zironic_deployment_only_ocp
-  become: true
-  ansible.builtin.package:
-    name: "ansible-core"
-    state: absent

From ae7880156e3f7dbc470f865fe85ad19a6ebf8dbe Mon Sep 17 00:00:00 2001
From: Daniel Pawlik
Date: Wed, 17 Dec 2025 11:29:24 +0100
Subject: [PATCH 5/5] [dnsmasq] Retry restarting dnsmasq

On a busy host, Ansible does not wait long enough after performing the
restart before it continues. Sometimes the service only becomes ready a
few seconds after the Ansible task that requires it has already run.

Retry and wait for dnsmasq to be restarted before continuing.

Signed-off-by: Daniel Pawlik
---
 roles/dnsmasq/handlers/main.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/roles/dnsmasq/handlers/main.yml b/roles/dnsmasq/handlers/main.yml
index d359863ad9..6498f88210 100644
--- a/roles/dnsmasq/handlers/main.yml
+++ b/roles/dnsmasq/handlers/main.yml
@@ -24,6 +24,9 @@
     - _dnsmasq.msg is defined
     - _dnsmasq.msg is not
       match('Could not find the requested service cifmw-dnsmasq.service')
+  retries: 5
+  delay: 15
+  until: _dnsmasq is succeeded

 - name: Reload dnsmasq
   become: true
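
A usage sketch, not part of the patch series itself: assuming the default
cifmw_libvirt_manager_dst_inv_file value from roles/libvirt_manager/defaults/main.yml
(~/libvirt_inventory.yaml) and a VM named controller-0, the inventory produced by the
new libvirt_manager tasks can be sanity-checked with the stock ansible-inventory CLI
before a Zironic-prepared host hands it over to the CI job:

  # Show the generated groups (controllers, computes, ...) and their hosts.
  ansible-inventory -i ~/libvirt_inventory.yaml --graph

  # Dump the variables recorded for a single VM, e.g. controller-0.
  ansible-inventory -i ~/libvirt_inventory.yaml --host controller-0

Both commands only read the file, so they can be run on the hypervisor at any point
after the reproducer play has finished.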