diff --git a/.ansible-lint b/.ansible-lint
index 40b321754..fc315df23 100644
--- a/.ansible-lint
+++ b/.ansible-lint
@@ -15,16 +15,22 @@ exclude_paths:
#- roles/sap_hana_preconfigure
- roles/sap_hostagent
- roles/sap_hypervisor_node_preconfigure
- - roles/sap_install_media_detect
+ #- roles/sap_install_media_detect
#- roles/sap_netweaver_preconfigure
#- roles/sap_storage_setup
#- roles/sap_swpm
- roles/sap_vm_preconfigure
-
+ - tests/
enable_list:
- yaml
skip_list:
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
- experimental
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/.github/workflows/ansible-lint-sap_install_media_detect.yml b/.github/workflows/ansible-lint-sap_install_media_detect.yml
new file mode 100644
index 000000000..e8cb49a8a
--- /dev/null
+++ b/.github/workflows/ansible-lint-sap_install_media_detect.yml
@@ -0,0 +1,43 @@
+---
+
+# Workflow for ansible-lint of a role
+
+name: ansible-lint of the role sap_install_media_detect
+
+on:
+ push:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_install_media_detect/**'
+ pull_request:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_install_media_detect/**'
+
+jobs:
+ ansible-lint:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out the code
+ uses: actions/checkout@main
+
+ - name: Set up Python 3
+ uses: actions/setup-python@main
+ with:
+ python-version: '3.9'
+
+ - name: Install test dependencies
+ run: |
+ pip3 install ansible==7.5.0
+ pip3 install ansible-compat==3.0.2
+ pip3 install ansible-core==2.14.5
+ pip3 install ansible-lint==6.8.6
+
+ - name: Run ansible-lint
+ working-directory: /home/runner/work/community.sap_install/community.sap_install/roles/sap_install_media_detect
+ run: ansible-lint
diff --git a/.github/workflows/ansible-lint-sap_maintain_etc_hosts.yml b/.github/workflows/ansible-lint-sap_maintain_etc_hosts.yml
new file mode 100644
index 000000000..d0c3271ec
--- /dev/null
+++ b/.github/workflows/ansible-lint-sap_maintain_etc_hosts.yml
@@ -0,0 +1,43 @@
+---
+
+# Workflow for ansible-lint of a role
+
+name: ansible-lint of the role sap_maintain_etc_hosts
+
+on:
+ push:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_maintain_etc_hosts/**'
+ pull_request:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_maintain_etc_hosts/**'
+
+jobs:
+ ansible-lint:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out the code
+ uses: actions/checkout@main
+
+ - name: Set up Python 3
+ uses: actions/setup-python@main
+ with:
+ python-version: '3.9'
+
+ - name: Install test dependencies
+ run: |
+ pip3 install ansible==7.5.0
+ pip3 install ansible-compat==3.0.2
+ pip3 install ansible-core==2.14.5
+ pip3 install ansible-lint==6.8.6
+
+ - name: Run ansible-lint
+ working-directory: /home/runner/work/community.sap_install/community.sap_install/roles/sap_maintain_etc_hosts
+ run: ansible-lint
diff --git a/.github/workflows/ansible-lint-sap_vm_preconfigure.yml b/.github/workflows/ansible-lint-sap_vm_preconfigure.yml
index 4b6b38747..05d84672a 100644
--- a/.github/workflows/ansible-lint-sap_vm_preconfigure.yml
+++ b/.github/workflows/ansible-lint-sap_vm_preconfigure.yml
@@ -29,14 +29,14 @@ jobs:
- name: Set up Python 3
uses: actions/setup-python@main
with:
- python-version: '3.9'
+ python-version: '3.12'
- name: Install test dependencies
run: |
- pip3 install ansible==7.5.0
- pip3 install ansible-compat==3.0.2
- pip3 install ansible-core==2.14.5
- pip3 install ansible-lint==6.8.6
+ pip3 install ansible==9.1.0
+ pip3 install ansible-compat==4.1.10
+ pip3 install ansible-core==2.16.2
+ pip3 install ansible-lint==6.22.1
- name: Run ansible-lint
working-directory: /home/runner/work/community.sap_install/community.sap_install/roles/sap_vm_preconfigure
diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml
index 48249fc4c..b97f0a635 100644
--- a/.github/workflows/ansible-lint.yml
+++ b/.github/workflows/ansible-lint.yml
@@ -21,14 +21,14 @@ jobs:
- name: Set up Python 3
uses: actions/setup-python@main
with:
- python-version: '3.9'
+ python-version: '3.12'
- name: Install test dependencies
run: |
- pip3 install ansible==7.5.0
- pip3 install ansible-compat==3.0.2
- pip3 install ansible-core==2.14.5
- pip3 install ansible-lint==6.8.6
+ pip3 install ansible==9.1.0
+ pip3 install ansible-compat==4.1.10
+ pip3 install ansible-core==2.16.2
+ pip3 install ansible-lint==6.22.1
# - name: Install collection dependencies
# run: ansible-galaxy collection install community.general
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
new file mode 100644
index 000000000..24d07bcd4
--- /dev/null
+++ b/.github/workflows/codespell.yml
@@ -0,0 +1,20 @@
+name: CodeSpell
+
+on:
+ push:
+ branches:
+ - dev
+ pull_request:
+ branches:
+ - dev
+
+jobs:
+ codespell:
+ name: Check for spelling errors
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - uses: codespell-project/actions-codespell@master
+ with:
+ ignore_words_list: aas,hsa,te,chage,addopt,sybsystem,uptodate
diff --git a/.github/workflows/yamllint-sap_hypervisor_node_preconfigure.yml b/.github/workflows/yamllint-sap_hypervisor_node_preconfigure.yml
deleted file mode 100644
index 603fd3617..000000000
--- a/.github/workflows/yamllint-sap_hypervisor_node_preconfigure.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-name: yamllint-sap_hypervisor_node_preconfigure
-
-on:
- push:
- branches: [ main ]
- paths:
- - 'roles/sap_hypervisor_node_preconfigure/**'
- pull_request:
- branches: [ main ]
- paths:
- - 'roles/sap_hypervisor_node_preconfigure/**'
-
- workflow_dispatch:
-
-jobs:
- # This workflow contains a single job called "build"
- yamllint:
- # The type of runner that the job will run on
- runs-on: ubuntu-latest
-
- # Steps represent a sequence of tasks that will be executed as part of the job
- steps:
- # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- - uses: actions/checkout@v2
-
- # Runs a single command using the runners shell
- - name: Install dependencies
- run: pip install yamllint
-
- # Runs a set of commands using the runners shell
- - name: yamllint
- run: yamllint ./roles/sap_hypervisor_node_preconfigure
diff --git a/.github/workflows/yamllint-sap_vm_preconfigure.yml b/.github/workflows/yamllint-sap_vm_preconfigure.yml
deleted file mode 100644
index e7d301518..000000000
--- a/.github/workflows/yamllint-sap_vm_preconfigure.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-name: yamllint-sap_vm_preconfigure
-
-on:
- push:
- branches: [ main ]
- paths:
- - 'roles/sap_vm_preconfigure/**'
- pull_request:
- branches: [ main ]
- paths:
- - 'roles/sap_vm_preconfigure/**'
-
- workflow_dispatch:
-
-jobs:
- # This workflow contains a single job called "build"
- yamllint:
- # The type of runner that the job will run on
- runs-on: ubuntu-latest
-
- # Steps represent a sequence of tasks that will be executed as part of the job
- steps:
- # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- - uses: actions/checkout@v2
-
- # Runs a single command using the runners shell
- - name: Install dependencies
- run: pip install yamllint
-
- # Runs a set of commands using the runners shell
- - name: yamllint
- run: yamllint ./roles/sap_vm_preconfigure
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 13541e313..cdf88a704 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -33,7 +33,7 @@ repos:
# types: [file, yaml]
# entry: yamllint --strict
- repo: https://github.com/ansible-community/ansible-lint.git
- rev: v6.8.6 # put latest release tag from https://github.com/ansible-community/ansible-lint/releases/
+ rev: v6.22.1 # put latest release tag from https://github.com/ansible-community/ansible-lint/releases/
hooks:
- id: ansible-lint
files: \.(yaml|yml)$
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index db32a0bb9..a1fbceb70 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -5,6 +5,30 @@ community.sap_install Release Notes
.. contents:: Topics
+v1.3.4
+======
+
+Release Summary
+---------------
+
+| Release Date: 2024-01-15
+| collection: Feature add for CodeSpell in git repository
+| collection: Bug fix for ansible-lint of each Ansible Role within Ansible Collection
+| collection: Bug Fix for Ansible Core minimum version update to 2.12.0 for import compliance with Ansible Galaxy
+| collection: Bug Fix for Ansible CVE-2023-5764
+| sap_general_preconfigure: Feature add for additional RHEL for SAP 8.8 and 9.2 release compatibility
+| sap_hana_preconfigure: Feature add for compatibility with SLES using sapconf and SLES for SAP using saptune
+| sap_hana_preconfigure: Feature add for additional RHEL for SAP 8.8 and 9.2 release compatibility
+| sap_hana_preconfigure: Feature add to reduce restrictions on new OS versions which are not yet supported by SAP
+| sap_netweaver_preconfigure: Feature add for compatibility with SLES using sapconf and SLES for SAP using saptune
+| sap_ha_pacemaker_cluster: Feature add for Virtual IP and Constraints logic with Cloud Hyperscaler vendors
+| sap_hypervisor_node_preconfigure: Feature add for preconfiguration of KubeVirt (OpenShift Virtualization) hypervisor nodes
+| sap_hypervisor_node_preconfigure: Bug fix for preconfiguration code structure of KVM (Red Hat Enterprise Virtualization) hypervisor nodes
+| sap_install_media_detect: Bug Fix for existing files
+| sap_maintain_etc_hosts: Feature add for maintaining the /etc/hosts file of an SAP software host
+| sap_swpm: Bug fix for runtime missing dependency python3-pip and advanced execution mode skipped tasks during certain installations
+| sap_swpm: Feature add for basic System Copy executions in default mode
+
v1.3.3
======
diff --git a/README.md b/README.md
index d714954d4..62e8b3ec7 100644
--- a/README.md
+++ b/README.md
@@ -2,167 +2,45 @@
![Ansible Lint](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint.yml/badge.svg?branch=main)
-This Ansible Collection executes various SAP Software installations and configuration tasks for running SAP software on Linux operating systems; with handlers for SAP HANA database lifecycle manager (HDBLCM) and SAP Software Provisioning Manager (SWPM) for programmatic deployment of any SAP solution scenario.
+This Ansible Collection executes various SAP Software installation and configuration tasks for running different SAP solutions and deployment scenarios on Linux operating systems (RHEL or SLES).
-This can be combined with other Ansible Collections to provide end-to-end automation, from download of SAP software installation media through to technical configuration and burstable SAP NetWeaver application servers (start/stop).
+This includes handlers for SAP HANA database lifecycle manager (HDBLCM) and SAP Software Provisioning Manager (SWPM), and can be combined with other Ansible Collections to provide end-to-end automation _(e.g. provision, download, install, operations)_.
-## Functionality
-This Ansible Collection executes various SAP Software installations for different SAP solution scenarios, including:
+**Examples of verified installations include:**
-- **SAP HANA installations via SAP HANA database lifecycle manager (HDBLCM)**
+- SAP S/4HANA AnyPremise (1809, 1909, 2020, 2021, 2022, 2023) with setup as Standard, Distributed, High Availability and optional Maintenance Planner or Restore System Copy
+- SAP Business Suite (ECC) on HANA and SAP Business Suite (ECC) with SAP AnyDB - SAP ASE, SAP MaxDB, IBM Db2, Oracle DB
+- SAP BW/4HANA (2021, 2023) with setup as Standard or Scale-Out
+- SAP HANA 2.0 (SPS04+) with setup as Scale-Up, Scale-Out, High Availability
+- Other SAP installation activities; such as System Rename, System Copy Export, SAP Solution Manager and SAP Web Dispatcher
- - Install SAP HANA database server, with any SAP HANA Component (e.g. Live Cache Apps, Application Function Library etc.)
- - Configure Firewall rules and Hosts file for SAP HANA database server instance/s
- - Apply license to SAP HANA
- - Configure storage layout for SAP HANA mount points (i.e. /hana/data, /hana/log, /hana/shared)
- - Install SAP Host Agent
- - Install Linux Pacemaker, configure Pacemaker Fencing Agents and Pacemaker Resource Agents
- - Install SAP HANA System Replication
- - Set HA/DR for SAP HANA System Replication
-- **Every SAP Software installation via SAP Software Provisioning Manager (SWPM)**
- - Run software install tasks using easy Ansible Variable to generate SWPM Unattended installations _(sap_swpm Ansible Role default mode)_.
- - Optional use of templating definitions for repeated installations _(sap_swpm Ansible Role default templates mode)_.
- - Run software install tasks with Ansible Variables one-to-one matched to SWPM Unattended Inifile parameters to generate bespoke SWPM Unattended installations _(sap_swpm Ansible Role advanced mode)_.
- - Optional use of templating definitions for repeated installations _(sap_swpm Ansible Role advanced templates mode)_.
- - Run previously-defined installations with an existing SWPM Unattended inifile.params _(sap_swpm Ansible Role inifile_reuse mode)_
- - Install Linux Pacemaker, configure Pacemaker Fencing Agents and Pacemaker Resource Agents
- - Set HA/DR with distributed SAP System installations (i.e. ERS)
+**Please read the [full documentation](/docs#readme) for how-to guidance, requirements, and all other details. Summary documentation is below:**
-### Note
-
-Starting with `ansible-core` versions 2.16.1, 2.15.8, and 2.14.12, templating operations inside the `that` statement of `assert` tasks
-are no longer allowed.
-
-A typical error message is:
-```
-fatal: [host01]: FAILED! =>
- msg: 'The conditional check ''13 <= 128'' failed. The error was: Conditional is marked as unsafe, and cannot be evaluated.'
-```
-
-This version of the collection ensures the compatibility with the above mentioned versions of `ansible-core` for the following roles:
-- sap_general_preconfigure
-- sap_netweaver_preconfigure
-- sap_hana_preconfigure
-- sap_hana_install
-
-When running the preconfigure roles with the above mentioned versions of `ansible-core` and with the parameters
-`sap_general_preconfigure_assert`, `sap_netweaver_preconfigure_assert`, or `sap_hana_preconfigure_assert`, the roles will abort
-in the first `assert` task which contains a templating operation.
## Contents
-An Ansible Playbook can call either an Ansible Role, or the individual Ansible Modules:
-
-- **Ansible Roles** (runs multiple Ansible Modules)
-- **Ansible Modules** (and adjoining Python/Bash Functions)
-
-For further information regarding the development, code structure and execution workflow please read the [Development documentation](./docs/DEVELOPMENT.md).
-
Within this Ansible Collection, there are various Ansible Roles and no custom Ansible Modules.
### Ansible Roles
-| Name | Summary |
+| Name | Summary |
| :--- | :--- |
-| [sap_anydb_install_oracle](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_anydb_install_oracle) | install Oracle DB 19.x for SAP |
-| [sap_general_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_general_preconfigure) | configure general OS settings for SAP software |
-| [sap_ha_install_hana_hsr](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_ha_install_hana_hsr) | install SAP HANA System Replication |
-| [sap_ha_pacemaker_cluster](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_ha_pacemaker_cluster) | install and configure pacemaker and SAP resources |
-| [sap_hana_install](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hana_install) | install SAP HANA via HDBLCM |
-| [sap_hana_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hana_preconfigure) | configure settings for SAP HANA database server |
-| [sap_hostagent](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hostagent) | install SAP Host Agent |
-| [sap_hypervisor_node_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hypervisor_node_preconfigure) | configure a hypervisor running VMs for SAP HANA |
-| [sap_install_media_detect](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_install_media_detect) | detect and extract SAP Software installation media |
-| [sap_netweaver_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_netweaver_preconfigure) | configure settings for SAP NetWeaver application server |
-| [sap_storage_setup](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_storage_setup) | configure storage for SAP HANA, with LVM partitions and XFS filesystem |
-| [sap_swpm](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_swpm) | install SAP Software via SWPM |
-| [sap_vm_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_vm_preconfigure) | configure settings for a guest (VM) running on RHV/KVM for SAP HANA |
-
-**_Notes_**:
-
-In general the "preconfigure" and "prepare" roles are prerequisites for the corresponding installation roles.
-The logic has been separated to support a flexible execution of the different steps.
-
-### Ansible Roles Lint Status
-
-| Role Name | Ansible Lint Status |
-| :--- | :--- |
-| [sap_anydb_install_oracle](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_anydb_install_oracle) | N/A |
-| [sap_general_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_general_preconfigure) | [![Ansible Lint for sap_general_preconfigure](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_general_preconfigure.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_general_preconfigure.yml) |
-| [sap_ha_install_hana_hsr](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_ha_install_hana_hsr) | [![Ansible Lint for sap_ha_install_hana_hsr](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_ha_install_hana_hsr.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_ha_install_hana_hsr.yml) |
-| [sap_ha_pacemaker_cluster](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_ha_pacemaker_cluster) | [![Ansible Lint for sap_ha_pacemaker_cluster](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_ha_pacemaker_cluster.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_ha_pacemaker_cluster.yml) |
-| [sap_hana_install](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hana_install) | [![Ansible Lint for sap_hana_install](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_hana_install.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_hana_install.yml) |
-| [sap_hana_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hana_preconfigure) | [![Ansible Lint for sap_hana_preconfigure](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_hana_preconfigure.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_hana_preconfigure.yml) |
-| [sap_hostagent](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hostagent) | N/A |
-| [sap_hypervisor_node_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hypervisor_node_preconfigure) | N/A |
-| [sap_install_media_detect](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_install_media_detect) | N/A |
-| [sap_netweaver_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_netweaver_preconfigure) | [![Ansible Lint for sap_netweaver_preconfigure](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_netweaver_preconfigure.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_netweaver_preconfigure.yml) |
-| [sap_storage_setup](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_storage_setup) | N/A |
-| [sap_swpm](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_swpm) | [![Ansible Lint for sap_swpm](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_swpm.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_swpm.yml) |
-| [sap_vm_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_vm_preconfigure) | N/A |
-
-**_Notes:_**
-
-- Ansible Playbook localhost executions may have limitations on SAP Software installations
-- Ansible Roles for HA/DR are all designed for execution with Terraform
-
-## Execution examples
-
-There are various methods to execute the Ansible Collection, dependant on the use case. For more information, see [Execution examples with code samples](./docs/getting_started) and the summary below:
-
-| Execution Scenario | Use Case | Target |
-| ---- | ---- | ---- |
-| Ansible Playbook
-> source Ansible Collection
-> execute Ansible Task
--> run Ansible Role
---> run Ansible Module for Shell (built-in)
---> ... | Complex executions with various interlinked activities;
run in parallel or sequentially | Localhost or Remote |
-
-## Testing, Requirements and Dependencies
-
-### Testing
-
-Various SAP Software solutions have been extensively tested:
-
-- SAP HANA
- - Scale-Up
- - Scale-Out
- - High Availability
-- SAP NetWeaver AS (ABAP or JAVA) and additional addons (e.g. GRC, ADS)
-- SAP S/4HANA AnyPremise (1809, 1909, 2020, 2021, 2022)
- - Sandbox (One Host) installation
- - Standard (Dual Host) installation
- - Distributed installation
- - High Availability installation
- - System Copy (Homogeneous with SAP HANA Backup / Recovery) installation
- - Maintenance Planner installation
- - System Rename
-- SAP BW/4HANA
-- SAP Business Suite on HANA (SoH, i.e. SAP ECC on HANA)
-- SAP Business Suite (i.e. SAP ECC with SAP AnyDB - SAP ASE, SAP MaxDB, IBM Db2, Oracle DB)
-- SAP Solution Manager 7.2
-- SAP Web Dispatcher
-
-### Target host - Operating System requirements
-
-Designed for Linux operating systems, e.g. RHEL (7.x and 8.x) and SLES (15.x).
-
-This Ansible Collection has not been tested and amended for SAP NetWeaver Application Server instantiations on IBM AIX or Windows Server.
-
-Assumptions for executing the Ansible Roles from this Ansible Collection include:
-
-- Registered OS
-- OS Package repositories are available (from the relevant content delivery network of the OS vendor)
-
-N.B. The Ansible Collection works with SLES from version 15 SP3 and upwards, for the following reasons:
-
-- firewalld is used within the Ansible Collection. In SLES 15 SP3, firewalld became the replacement for nftables. See changelog [SLE-16300](https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15-SP3/index.html#jsc-SLE-16300)
-- SELinux is used within the Ansible Collection. While introduced earlier with community support, full support for SELinux was provided as of SLES 15 SP3. See changelog [SLE-17307](https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15-SP3/index.html#jsc-SLE-17307)
-
-### Execution/Controller host - Operating System requirements
-
-Execution of Ansible Playbooks using this Ansible Collection have been tested with:
-- Python 3.9.7 and above (i.e. CPython distribution)
-- Ansible Core 2.11.5 and above _(included with optional installation of Ansible Community Edition 4.0 and above)_
-- OS: macOS with Homebrew, RHEL, SLES, and containers in Task Runners (e.g. Azure DevOps)
+| [sap_anydb_install_oracle](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_anydb_install_oracle) | install Oracle DB 19.x for SAP |
+| [sap_general_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_general_preconfigure) | configure general OS settings for SAP software |
+| [sap_ha_install_hana_hsr](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_ha_install_hana_hsr) | install SAP HANA System Replication |
+| [sap_ha_pacemaker_cluster](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_ha_pacemaker_cluster) | install and configure pacemaker and SAP resources |
+| [sap_hana_install](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hana_install) | install SAP HANA via HDBLCM |
+| [sap_hana_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hana_preconfigure) | configure settings for SAP HANA database server |
+| [sap_hostagent](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hostagent) | install SAP Host Agent |
+| [sap_hypervisor_node_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hypervisor_node_preconfigure) | configure a hypervisor running VMs for SAP HANA |
+| [sap_install_media_detect](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_install_media_detect) | detect and extract SAP Software installation media |
+| [sap_maintain_etc_hosts](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_maintain_etc_hosts) | maintain the /etc/hosts file of an SAP software host |
+| [sap_netweaver_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_netweaver_preconfigure) | configure settings for SAP NetWeaver application server |
+| [sap_storage_setup](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_storage_setup) | configure storage for SAP HANA, with LVM partitions and XFS filesystem |
+| [sap_swpm](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_swpm) | install SAP Software via SWPM |
+| [sap_vm_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_vm_preconfigure) | configure settings for a guest (VM) running on RHV/KVM for SAP HANA |
## License
diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml
index 3c47209ba..cd27c4a59 100644
--- a/changelogs/changelog.yaml
+++ b/changelogs/changelog.yaml
@@ -126,3 +126,26 @@ releases:
'
release_date: '2023-12-22'
+ 1.3.4:
+ changes:
+ release_summary: '| Release Date: 2024-01-15
+
+ | collection: Feature add for CodeSpell in git repository
+ | collection: Bug fix for ansible-lint of each Ansible Role within Ansible Collection
+ | collection: Bug Fix for Ansible Core minimum version update to 2.12.0 for import compliance with Ansible Galaxy
+ | collection: Bug Fix for Ansible CVE-2023-5764
+ | sap_general_preconfigure: Feature add for additional RHEL for SAP 8.8 and 9.2 release compatibility
+ | sap_hana_preconfigure: Feature add for compatibility with SLES using sapconf and SLES for SAP using saptune
+ | sap_hana_preconfigure: Feature add for additional RHEL for SAP 8.8 and 9.2 release compatibility
+ | sap_hana_preconfigure: Feature add to reduce restrictions on new OS versions which are not yet supported by SAP
+ | sap_netweaver_preconfigure: Feature add for compatibility with SLES using sapconf and SLES for SAP using saptune
+ | sap_ha_pacemaker_cluster: Feature add for Virtual IP and Constraints logic with Cloud Hyperscaler vendors
+ | sap_hypervisor_node_preconfigure: Feature add for preconfiguration of KubeVirt (OpenShift Virtualization) hypervisor nodes
+ | sap_hypervisor_node_preconfigure: Bug fix for preconfiguration code structure of KVM (Red Hat Enterprise Virtualization) hypervisor nodes
+ | sap_install_media_detect: Bug Fix for existing files
+ | sap_maintain_etc_hosts: Feature add for maintaining the /etc/hosts file of an SAP software host
+ | sap_swpm: Bug fix for runtime missing dependency python3-pip and advanced execution mode skipped tasks during certain installations
+ | sap_swpm: Feature add for basic System Copy executions in default mode
+
+ '
+ release_date: '2024-01-15'
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 000000000..2cc867f9b
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,137 @@
+# Documentation of community.sap_install Ansible Collection
+
+## Introduction
+
+The `sap_install` Ansible Collection provides a variety of automated tasks for the configuration and installation of SAP Software.
+
+Each Ansible Role contained within this Ansible Collection performs a distinct set of tasks and is designed to be run independently or cohesively, depending on the outcome desired by an end-user.
+
+
+## Functionality
+
+This Ansible Collection executes various SAP Software installations for different SAP solution scenarios. The code structure and logic have been separated to support a flexible execution of the different steps for various scenarios.
+
+Any Ansible Roles labelled "preconfigure" and "prepare" are prerequisites, executed before the corresponding installation Ansible Roles (such as `sap_hana_install` or `sap_swpm`).
+
+At a high-level, the key installation functionality of this Ansible Collection includes:
+
+1. **OS Preparation activities for SAP HANA Database Server, SAP AnyDB Database Server or SAP NetWeaver Application Server**
+
+2. **SAP HANA installations via SAP HANA database lifecycle manager (HDBLCM)**
+ - Configure Firewall rules and Hosts file for SAP HANA database server instance/s
+ - Install SAP Host Agent
+ - Install SAP HANA database server, with any SAP HANA Component (e.g. Live Cache Apps, Application Function Library etc.)
+ - Apply license to SAP HANA
+
+3. **SAP HANA High Availability tasks**
+ - Install SAP HANA System Replication
+ - Install Linux Pacemaker, configure Pacemaker Fencing Agents for a given Infrastructure Platform
+ - Configure Linux Pacemaker Resource Agents for SAP HANA
+
+4. **Every SAP Software installation via SAP Software Provisioning Manager (SWPM)** _(see the variable sketch after this list)_
+ - Execute SAP SWPM Unattended installation
+ - Using on-the-fly generated inifile.params from Ansible Variables
+ - Using a list of inifile parameters in an Ansible Dictionary
+ - Re-using an existing inifile.params
+
+5. **SAP NetWeaver High Availability tasks**
+ - Install Linux Pacemaker, configure Pacemaker Fencing Agents for a given Infrastructure Platform
+ - Configure Linux Pacemaker Resource Agents for SAP NetWeaver ASCS/ERS
+
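+A minimal, hedged sketch of item 4 above (the `sap_swpm` default mode variables) is shown below. The mode, product catalog ID and software path variable names appear in the sample files under `/playbooks/vars`; the remaining values (SID, master password) are placeholder assumptions and must be replaced with values valid for the target landscape.
+
+```yaml
+# Hedged sketch only: the concrete values below are placeholder assumptions.
+sap_swpm_ansible_role_mode: default
+
+# SWPM Product Catalog ID for the desired installation scenario
+sap_swpm_product_catalog_id: NW_ABAP_OneHost:S4HANA2020.CORE.HDB.ABAP
+
+# Paths to SAPCAR, SWPM and the downloaded software (assumed directory layout)
+sap_swpm_sapcar_path: /software/sapcar
+sap_swpm_swpm_path: /software/sap_swpm
+sap_swpm_software_path: /software/sap_download_basket
+
+# Placeholder system identity and credentials (assumptions)
+sap_swpm_sid: S01
+sap_swpm_master_password: "NewPass$321"
+```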
+
+## Execution
+
+An Ansible Playbook is the file created and executed by an end-user, which imports from Ansible Collections to perform various activities on the target hosts.
+
+The Ansible Playbook can call either an Ansible Role, or directly call the individual Ansible Modules:
+
+- **Ansible Roles** (runs multiple Ansible Modules)
+- **Ansible Modules** (and adjoining Python/Bash Functions)
+
+It is strongly recommended to execute these Ansible Roles in accordance with best practice Ansible usage, where an Ansible Playbook is executed from a control host and Ansible logs in to a target host to perform the activities.
+
+> If an Ansible Playbook is executed from the target host itself (similar to logging in and running a shell script), this is known as an Ansible Playbook 'localhost execution' and is not recommended as it has limitations on SAP Software installations (particularly installations across multiple hosts).
+
+At a high-level, complex executions with various interlinked activities are run in parallel or sequentially using the following execution structure:
+
+```
+Ansible Playbook
+-> source Ansible Collection
+-> execute Ansible Task
+---> run Ansible Role
+-----> run Ansible Module (e.g. built-in Ansible Module for Shell)
+```
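+
+A minimal sketch of such an Ansible Playbook is shown below, combining the "preconfigure" prerequisite Ansible Roles with the corresponding installation Ansible Role. The host group name and the variable values are illustrative assumptions only; real values come from your inventory and the sample files in `/playbooks`.
+
+```yaml
+---
+# Hedged sketch: the group name 'hanas' and the variable values are assumptions.
+- name: Prepare the OS and install SAP HANA
+  hosts: hanas
+  become: true
+  vars:
+    sap_hana_sid: "H01"                               # assumed SAP HANA System ID
+    sap_hana_install_master_password: "NewPass$321"   # assumed master password
+  tasks:
+
+    - name: Run the prerequisite preconfigure Ansible Roles
+      ansible.builtin.include_role:
+        name: "{{ item }}"
+      loop:
+        - community.sap_install.sap_general_preconfigure
+        - community.sap_install.sap_hana_preconfigure
+
+    - name: Install SAP HANA via HDBLCM
+      ansible.builtin.include_role:
+        name: community.sap_install.sap_hana_install
+```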
+
+### Execution examples
+
+There are various methods to execute the Ansible Collection, dependent on the use case.
+
+For more information, see [Getting started](./getting_started#readme) and edit the [sample Ansible Playbooks in `/playbooks`](../playbooks/).
+
+
+## Requirements and Dependencies
+
+### Target host - Operating System requirements
+
+Designed for Linux operating systems, e.g. RHEL (7.x, 8.x, 9.x) and SLES (15 SPx).
+
+This Ansible Collection has not been tested and amended for SAP NetWeaver Application Server instantiations on IBM AIX or Windows Server.
+
+Assumptions for executing the Ansible Roles from this Ansible Collection include:
+
+- Registered OS
+- OS Package repositories are available (from the relevant content delivery network of the OS vendor)
+
+N.B. The Ansible Collection works with SLES from version 15 SP3 and upwards, for the following reasons:
+
+- firewalld is used within the Ansible Collection. In SLES 15 SP3, firewalld became the replacement for nftables. See changelog [SLE-16300](https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15-SP3/index.html#jsc-SLE-16300)
+- SELinux is used within the Ansible Collection. While introduced earlier with community support, full support for SELinux was provided as of SLES 15 SP3. See changelog [SLE-17307](https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15-SP3/index.html#jsc-SLE-17307)
+
+### Execution/Controller host - Operating System requirements
+
+Execution of Ansible Playbooks using this Ansible Collection has been tested with:
+- Python 3.9.7 and above (i.e. CPython distribution)
+- Ansible Core 2.12.0 and above _(included with optional installation of Ansible Community Edition 5.0 and above)_
+- OS: macOS with Homebrew, RHEL, SLES, and containers in Task Runners (e.g. Azure DevOps)
+
+#### Ansible Core version
+
+This Ansible Collection was designed for maximum backwards compatibility, with full compatibility from Ansible Core 2.12.0 onwards.
+
+**Note 1:** Ansible 2.9 was the last release before the Ansible project was split into Ansible Core and Ansible Community Edition, and was before Ansible Collections functionality was introduced. This Ansible Collection should execute when Ansible 2.9 is used, but it is not recommended and errors should be expected (and will not be resolved).
+
+**Note 2:** Ansible Core versions 2.14.12, 2.15.8, and 2.16.1 introduced the security fix for `CVE-2023-5764`, which disallows templating inside the `that` statement of `assert` Ansible Tasks. These Ansible Core versions are supported from `v1.3.4` of this Ansible Collection onwards; with earlier releases of this Ansible Collection, an error similar to the following will occur:
+
+```
+fatal: [host01]: FAILED! =>
+ msg: 'The conditional check ''13 <= 128'' failed. The error was: Conditional is marked as unsafe, and cannot be evaluated.'
+```
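+
+For illustration, the error is typically raised by `assert` Ansible Tasks whose `that` condition embeds a Jinja2 template. The following is a hedged sketch with an assumed variable name (`__example_limit`), not code taken from this Ansible Collection:
+
+```yaml
+---
+# Hedged sketch with an assumed variable name; not taken from this collection.
+- name: Show the assert pattern affected by CVE-2023-5764
+  hosts: localhost
+  gather_facts: false
+  vars:
+    __example_limit: 128
+  tasks:
+
+    # On ansible-core 2.14.12+, 2.15.8+ and 2.16.1+ this task fails with the
+    # "Conditional is marked as unsafe" error shown above.
+    - name: Assert with a templated condition
+      ansible.builtin.assert:
+        that: "13 <= {{ __example_limit }}"
+```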
+
+
+## Testing
+
+Various SAP Software solutions have been extensively tested.
+
+Prior to each release, basic scenarios are executed to confirm functionality is working as expected, including SAP S/4HANA installation.
+
+Important note: it is not possible for the project maintainers to test every SAP Software installation and solution scenario for each OS hosted on each Infrastructure Platform. If an error is identified, please raise a [GitHub Issue](/../../issues/).
+
+
+### Ansible Roles Lint Status
+
+| Role Name | Ansible Lint Status |
+| :--- | :--- |
+| [sap_anydb_install_oracle](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_anydb_install_oracle) | N/A |
+| [sap_general_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_general_preconfigure) | [![Ansible Lint for sap_general_preconfigure](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_general_preconfigure.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_general_preconfigure.yml) |
+| [sap_ha_install_hana_hsr](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_ha_install_hana_hsr) | [![Ansible Lint for sap_ha_install_hana_hsr](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_ha_install_hana_hsr.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_ha_install_hana_hsr.yml) |
+| [sap_ha_pacemaker_cluster](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_ha_pacemaker_cluster) | [![Ansible Lint for sap_ha_pacemaker_cluster](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_ha_pacemaker_cluster.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_ha_pacemaker_cluster.yml) |
+| [sap_hana_install](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hana_install) | [![Ansible Lint for sap_hana_install](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_hana_install.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_hana_install.yml) |
+| [sap_hana_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hana_preconfigure) | [![Ansible Lint for sap_hana_preconfigure](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_hana_preconfigure.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_hana_preconfigure.yml) |
+| [sap_hostagent](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hostagent) | N/A |
+| [sap_hypervisor_node_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_hypervisor_node_preconfigure) | [![Ansible Lint for sap_hypervisor_node_preconfigure](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_hypervisor_node_preconfigure.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_hypervisor_node_preconfigure.yml) |
+| [sap_install_media_detect](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_install_media_detect) | N/A |
+| [sap_maintain_etc_hosts](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_maintain_etc_hosts) | [![Ansible Lint for sap_maintain_etc_hosts](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_maintain_etc_hosts.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_maintain_etc_hosts.yml) |
+| [sap_netweaver_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_netweaver_preconfigure) | [![Ansible Lint for sap_netweaver_preconfigure](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_netweaver_preconfigure.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_netweaver_preconfigure.yml) |
+| [sap_storage_setup](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_storage_setup) | N/A |
+| [sap_swpm](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_swpm) | [![Ansible Lint for sap_swpm](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_swpm.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_install/actions/workflows/ansible-lint-sap_swpm.yml) |
+| [sap_vm_preconfigure](https://github.com/sap-linuxlab/community.sap_install/tree/main/roles/sap_vm_preconfigure) | N/A |
diff --git a/docs/developer_notes/sap_swmp_dev.md b/docs/developer_notes/sap_swmp_dev.md
index 3386af300..8e562eb29 100644
--- a/docs/developer_notes/sap_swmp_dev.md
+++ b/docs/developer_notes/sap_swmp_dev.md
@@ -2,7 +2,7 @@
## Errors with missing signature files for installation media
-After SWPM 1.0 SP22 and SAP SWPM 2.0 SP00, all SAP Software installation media requires a seperate signature file (SIGNATURE.SMF). The signature file is missing in older installation media.
+After SWPM 1.0 SP22 and SAP SWPM 2.0 SP00, all SAP Software installation media requires a separate signature file (SIGNATURE.SMF). The signature file is missing in older installation media.
For example, IDES for SAP ECC 6.0 EhP8. See the following error message and SAP Note 2622019 - "EXPORT_1 is not signed" error during IDES installation.
```shell
diff --git a/docs/getting_started/README.md b/docs/getting_started/README.md
index d66abf74e..7788e3c4d 100644
--- a/docs/getting_started/README.md
+++ b/docs/getting_started/README.md
@@ -1,12 +1,11 @@
-# Examples and Tips
+# Getting started
-In this folder you find sample files, a few additional tips for using the provided ansible roles, as well as references to further information.
+In this folder you will find sample files, a few additional tips for using the provided Ansible Roles, and references to further information.
-- [Examples and Tips](#examples-and-tips)
- - [How to run playbooks](#how-to-run-playbooks)
- - [Inventory and variable parameters](#inventory-and-variable-parameters)
- - [Security parameters](#security-parameters)
- - [Other useful options](#other-useful-options)
+- [How to run playbooks](#how-to-run-playbooks)
+ - [Inventory and variable parameters](#inventory-and-variable-parameters)
+ - [Security parameters](#security-parameters)
+ - [Other useful options](#other-useful-options)
## How to run playbooks
diff --git a/galaxy.yml b/galaxy.yml
index 2a2ddd20e..e00bfe582 100644
--- a/galaxy.yml
+++ b/galaxy.yml
@@ -10,7 +10,7 @@ namespace: community
name: sap_install
# The version of the collection. Must be compatible with semantic versioning
-version: 1.3.3
+version: 1.3.4
# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
readme: README.md
@@ -24,6 +24,7 @@ authors:
- Markus Moster
- Janine Fuchs
- Steven Stringer
+ - Marcel Mamula
### OPTIONAL but strongly recommended
# A short summary description of the collection
diff --git a/meta/runtime.yml b/meta/runtime.yml
index 2ee3c9fa9..c2ea65887 100644
--- a/meta/runtime.yml
+++ b/meta/runtime.yml
@@ -1,2 +1,2 @@
---
-requires_ansible: '>=2.9.10'
+requires_ansible: '>=2.12.0'
diff --git a/playbooks/README.md b/playbooks/README.md
new file mode 100644
index 000000000..7362b9535
--- /dev/null
+++ b/playbooks/README.md
@@ -0,0 +1,8 @@
+
+# List of playbooks
+
+- prepare-for-hana
+- prepare-for-netweaver
+- install-sap-hana
+- install-sap-hana-cluster
+- install-sap-hana-s4
diff --git a/playbooks/sample-sap-hypervisor-redhat_ocp_virt-preconfigure.yml b/playbooks/sample-sap-hypervisor-redhat_ocp_virt-preconfigure.yml
new file mode 100644
index 000000000..285e60488
--- /dev/null
+++ b/playbooks/sample-sap-hypervisor-redhat_ocp_virt-preconfigure.yml
@@ -0,0 +1,11 @@
+---
+- hosts: all
+ gather_facts: true
+ serial: 1
+ vars:
+ sap_hypervisor_node_platform: redhat_ocp_virt
+
+ tasks:
+ - name: Include Role
+ ansible.builtin.include_role:
+ name: sap_hypervisor_node_preconfigure
diff --git a/playbooks/vars/sample-sap-swpm-inifile-reuse-mode-sample.inifile.params b/playbooks/vars/sample-sap-swpm-inifile-reuse-mode-sample.inifile.params
index 5d3466963..49ce43cba 100644
--- a/playbooks/vars/sample-sap-swpm-inifile-reuse-mode-sample.inifile.params
+++ b/playbooks/vars/sample-sap-swpm-inifile-reuse-mode-sample.inifile.params
@@ -123,7 +123,7 @@ NW_DDIC_Password.needDDICPasswords = false
# Master password
NW_GetMasterPassword.masterPwd = NewPass$321
-# Human readable form of the default login language to be preselected in SAPGUI. This Parameter is potentialy prompted in addition in the screen that also asks for the . It is only prompted in systems that have an ABAP stack. It is prompted for installation but not for system copy. It is asked in those installations, that perform the ABAP load. That could be the database load installation in case of a distributed system szenario, or in case of a standard system installation with all instances on one host. This Parameter is saved in the 'DEFAULT' profile. It is has no influence on language settings in a Java stack. Valid names are stored in a table of subcomponent 'NW_languagesInLoadChecks'. The available languages must be declaired in the 'LANGUAGES_IN_LOAD' parameter of the 'product.xml' file . In this file, the one-character representation of the languages is used. Check the same table in subcomponent 'NW_languagesInLoadChecks'.
+# Human readable form of the default login language to be preselected in SAPGUI. This Parameter is potentially prompted in addition in the screen that also asks for the . It is only prompted in systems that have an ABAP stack. It is prompted for installation but not for system copy. It is asked in those installations that perform the ABAP load. That could be the database load installation in case of a distributed system scenario, or in case of a standard system installation with all instances on one host. This Parameter is saved in the 'DEFAULT' profile. It has no influence on language settings in a Java stack. Valid names are stored in a table of subcomponent 'NW_languagesInLoadChecks'. The available languages must be declared in the 'LANGUAGES_IN_LOAD' parameter of the 'product.xml' file. In this file, the one-character representation of the languages is used. Check the same table in subcomponent 'NW_languagesInLoadChecks'.
# NW_GetSidNoProfiles.SAP_GUI_DEFAULT_LANGUAGE =
# Windows only: The drive to use
diff --git a/playbooks/vars/sample-variables-sap-hypervisor-node-preconfigure-rh_ocp_virt.yml b/playbooks/vars/sample-variables-sap-hypervisor-node-preconfigure-rh_ocp_virt.yml
new file mode 100644
index 000000000..6f7f9af81
--- /dev/null
+++ b/playbooks/vars/sample-variables-sap-hypervisor-node-preconfigure-rh_ocp_virt.yml
@@ -0,0 +1,92 @@
+sap_hypervisor_node_preconfigure_cluster_config:
+
+ # URL under which the OCP cluster is reachable
+ cluster_url: ocpcluster.domain.org
+
+ # namespace under which the VMs are created, note this has to be
+ # openshift-sriov-network-operator in case of using SRIOV network
+ # devices
+ vm_namespace: sap
+
+ # Optional, configuration for trident driver for Netapp NFS filer
+ trident:
+ management: management.domain.org
+ data: datalif.netapp.domain.org
+ svm: sap_svm
+ backend: nas_backend
+ aggregate: aggregate_Name
+ username: admin
+ password: xxxxx
+ storage_driver: ontap-nas
+ storage_prefix: ocpv_sap_
+
+ # CPU cores which will be reserved for kubernetes
+ worker_kubernetes_reserved_cpus: "0,1"
+
+ # Storage device used for host path provisioner as local storage.
+ worker_localstorage_device: /dev/vdb
+
+ # detailed configuration for every worker that should be configured
+ #
+ workers:
+ - name: worker-0 # name must match the node name
+ networks: # Example network config
+ - name: sapbridge # using a bridge
+ description: SAP bridge
+ state: up
+ type: linux-bridge
+ ipv4:
+ enabled: false
+ auto-gateway: false
+ auto-dns: false
+ bridge:
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens1f0 # network IF name
+ - name: storage # an SRIOV device
+ interface: ens2f0 # network IF name
+ type: sriov
+
+ - bridge: # another bridge
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens2f0 # network IF name
+ description: storage
+ mtu: 9000
+ ipv4:
+ address:
+ - ip: 192.168.1.51 # IP config
+ prefix-length: 24
+ auto-dns: false
+ auto-gateway: false
+ enabled: true
+ name: storagebridge
+ state: up
+ type: linux-bridge
+ - name: multi # another SRIOV device
+ interface: ens2f1 # network IF name
+ type: sriov
+
+ - name: worker-1 # second worker configuration
+ networks: # Example network config
+ - name: sapbridge # using a bridge
+ description: SAP bridge
+ state: up
+ type: linux-bridge
+ ipv4:
+ enabled: false
+ auto-gateway: false
+ auto-dns: false
+ bridge:
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens1f0 # network IF name
+ - name: storage # an SRIOV device
+ interface: ens2f0 # network IF name
+ type: sriov
diff --git a/playbooks/vars/sample-variables-sap-swpm-advanced-mode-s4hana-onehost-install.yml b/playbooks/vars/sample-variables-sap-swpm-advanced-mode-s4hana-onehost-install.yml
index 74511ff45..966068b08 100644
--- a/playbooks/vars/sample-variables-sap-swpm-advanced-mode-s4hana-onehost-install.yml
+++ b/playbooks/vars/sample-variables-sap-swpm-advanced-mode-s4hana-onehost-install.yml
@@ -10,12 +10,14 @@ sap_install_media_detect_kernel: true
sap_install_media_detect_webdisp: false
sap_install_media_detect_db: "saphana"
+# Manually set critical software paths
+# sap_swpm_sapcar_path: /software/sapcar
+# sap_swpm_swpm_path: /software/sap_swpm
+# sap_swpm_software_path: /software/sap_download_basket
# NOTE: Values in Dictionary Keys for instance numbers must be string using '01' single quote, otherwise SAP SWPM will crash
sap_swpm_ansible_role_mode: "advanced"
-sap_swpm_sapcar_path: /software/sap_downloads
-sap_swpm_swpm_path: /software/sap_downloads
sap_swpm_product_catalog_id: NW_ABAP_OneHost:S4HANA2020.CORE.HDB.ABAP
diff --git a/playbooks/vars/sample-variables-sap-swpm-advanced-templates-mode.yml b/playbooks/vars/sample-variables-sap-swpm-advanced-templates-mode.yml
index 3a78f204b..fab324279 100644
--- a/playbooks/vars/sample-variables-sap-swpm-advanced-templates-mode.yml
+++ b/playbooks/vars/sample-variables-sap-swpm-advanced-templates-mode.yml
@@ -11,9 +11,15 @@ sap_install_media_detect_source: local_dir
#sap_install_media_detect_webdisp: false
#sap_install_media_detect_db: "saphana"
+# Manually set critical software paths
+# sap_swpm_sapcar_path: /software/sapcar
+# sap_swpm_swpm_path: /software/sap_swpm
+# sap_swpm_software_path: /software/sap_download_basket
# NOTE: Values in Dictionary Keys for instance numbers must be string using '01' single quote, otherwise SAP SWPM will crash
+sap_swpm_ansible_role_mode: advanced_templates
+
sap_swpm_templates_install_dictionary:
sap_system_rename:
diff --git a/roles/sap_anydb_install_oracle/.ansible-lint b/roles/sap_anydb_install_oracle/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_anydb_install_oracle/.ansible-lint
+++ b/roles/sap_anydb_install_oracle/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_anydb_install_oracle/tasks/oracledb_installer_minimal.yml b/roles/sap_anydb_install_oracle/tasks/oracledb_installer_minimal.yml
index df2a59a44..2ec77bdf6 100644
--- a/roles/sap_anydb_install_oracle/tasks/oracledb_installer_minimal.yml
+++ b/roles/sap_anydb_install_oracle/tasks/oracledb_installer_minimal.yml
@@ -5,7 +5,7 @@
## DISPLAY env var, not required for when Silent installation (-silent operator for RUNINSTALLER)
## DB_SID env var / -db_sid option. If unset then error "[FATAL] [INS-35072] The SID cannot be left blank."
## ORACLE_BASE env var / -oracle_base option, Oracle Base /oracle. If unset then error "[FATAL] [INS-32013] The Oracle base location is empty." If option -oracle_base unused, may cause "Using new ORACLE_BASE=/oracle/ instead of ORACLE_BASE=/oracle" which will cause directory lookup failure and error "[FATAL] [INS-35072] The SID cannot be left blank."
-## ORACLE_HOME env var / -install_home opiton, Oracle Home - Installation
+## ORACLE_HOME env var / -install_home option, Oracle Home - Installation
## ORACLE_HOME_NAME env var / -oracle_home_name option, Oracle Home - Installation name in Oracle Inventory. Uses a naming convention - see https://docs.oracle.com/cd/E26854_01/em.121/e37799/ch2_manage_oh.htm#OUICG137
## OHRDBMS env var / -runtime_home, Oracle Home - Runtime
## ORA_GOLD_IMAGE_FILE_L env var / -oracle_stage option, Oracle home image file path. If unset then error "(ERROR ) - Error during unzip occurred" where no file is given to extract
diff --git a/roles/sap_anydb_install_oracle/templates/oracledb_rsp_assistants_dbca.j2 b/roles/sap_anydb_install_oracle/templates/oracledb_rsp_assistants_dbca.j2
index e656818e7..108b8e5ae 100644
--- a/roles/sap_anydb_install_oracle/templates/oracledb_rsp_assistants_dbca.j2
+++ b/roles/sap_anydb_install_oracle/templates/oracledb_rsp_assistants_dbca.j2
@@ -546,7 +546,7 @@ initParams=
# Datatype : Boolean
# Description : Specifies whether or not to add the Sample Schemas to your database
# Valid values : TRUE \ FALSE
-# Default value : FASLE
+# Default value : FALSE
# Mandatory : No
#-----------------------------------------------------------------------------
sampleSchema=
diff --git a/roles/sap_anydb_install_oracle/templates/oracledb_rsp_inventory_oracleserver_ee.j2 b/roles/sap_anydb_install_oracle/templates/oracledb_rsp_inventory_oracleserver_ee.j2
index 504164fa0..e51f71bda 100644
--- a/roles/sap_anydb_install_oracle/templates/oracledb_rsp_inventory_oracleserver_ee.j2
+++ b/roles/sap_anydb_install_oracle/templates/oracledb_rsp_inventory_oracleserver_ee.j2
@@ -188,7 +188,7 @@ DEINSTALL_LIST={"oracle.server","19.0.0.0.0"}
#-------------------------------------------------------------------------------
#Name : SHOW_DEINSTALL_CONFIRMATION
#Datatype : Boolean
-#Description: Set to true if deinstall confimation is needed during a deinstall session.
+#Description: Set to true if deinstall confirmation is needed during a deinstall session.
#Example: SHOW_DEINSTALL_CONFIRMATION = true
#-------------------------------------------------------------------------------
SHOW_DEINSTALL_CONFIRMATION=true
diff --git a/roles/sap_general_preconfigure/.ansible-lint b/roles/sap_general_preconfigure/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_general_preconfigure/.ansible-lint
+++ b/roles/sap_general_preconfigure/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_general_preconfigure/README.md b/roles/sap_general_preconfigure/README.md
index 636ba7660..c1e228f53 100644
--- a/roles/sap_general_preconfigure/README.md
+++ b/roles/sap_general_preconfigure/README.md
@@ -14,6 +14,7 @@ make sure that the required collections are installed, for example by using the
To use this role, your system needs to be installed according to:
- RHEL 7: SAP note 2002167, Red Hat Enterprise Linux 7.x: Installation and Upgrade, section "Installing Red Hat Enterprise Linux 7"
- RHEL 8: SAP note 2772999, Red Hat Enterprise Linux 8.x: Installation and Configuration, section "Installing Red Hat Enterprise Linux 8".
+- RHEL 9: SAP note 3108316, Red Hat Enterprise Linux 9.x: Installation and Configuration, section "Installing Red Hat Enterprise Linux 9".
Note
----
@@ -256,7 +257,7 @@ List of SAP directories to be created.
- _Type:_ `bool`
- _Default:_ `true`
-Set to `false` if you do not want to modify the SELinux labels for the SAP directores set
+Set to `false` if you do not want to modify the SELinux labels for the SAP directories set
in variable `sap_general_preconfigure_sap_directories`.
### sap_general_preconfigure_size_of_tmpfs_gb
diff --git a/roles/sap_general_preconfigure/defaults/main.yml b/roles/sap_general_preconfigure/defaults/main.yml
index 9687a15ac..9bda53ad4 100644
--- a/roles/sap_general_preconfigure/defaults/main.yml
+++ b/roles/sap_general_preconfigure/defaults/main.yml
@@ -125,7 +125,7 @@ sap_general_preconfigure_sap_directories:
# List of SAP directories to be created.
sap_general_preconfigure_modify_selinux_labels: true
-# Set to `false` if you do not want to modify the SELinux labels for the SAP directores set
+# Set to `false` if you do not want to modify the SELinux labels for the SAP directories set
# in variable `sap_general_preconfigure_sap_directories`.
sap_general_preconfigure_size_of_tmpfs_gb: "{{ ((0.75 * (ansible_memtotal_mb + ansible_swaptotal_mb)) / 1024) | round | int }}"
@@ -145,13 +145,16 @@ sap_general_preconfigure_kernel_parameters: "{{ __sap_general_preconfigure_kerne
sap_general_preconfigure_max_hostname_length: '13'
# The maximum length of the hostname. See SAP note 611361.
-sap_hostname: "{{ ansible_hostname }}"
+# Reason for noqa: A separate role is planned to replace the code which uses this variable.
+sap_hostname: "{{ ansible_hostname }}" # noqa var-naming[no-role-prefix]
# The hostname to be used for updating or checking `/etc/hosts` entries.
-sap_domain: "{{ ansible_domain }}"
+# Reason for noqa: A separate role is planned to replace the code which uses this variable.
+sap_domain: "{{ ansible_domain }}" # noqa var-naming[no-role-prefix]
# The DNS domain name to be used for updating or checking `/etc/hosts` entries.
-sap_ip: "{{ ansible_default_ipv4.address }}"
+# Reason for noqa: A separate role is planned to replace the code which uses this variable.
+sap_ip: "{{ ansible_default_ipv4.address }}" # noqa var-naming[no-role-prefix]
# The IPV4 address to be used for updating or checking `/etc/hosts` entries.
# sap_general_preconfigure_db_group_name: (not defined by default)
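As a usage sketch (the host group and values are assumptions, not part of the patch), the three legacy variables kept with `noqa var-naming[no-role-prefix]` can still be overridden by the calling playbook:

```yaml
# Illustrative playbook; overrides fall back to the facts shown in defaults/main.yml.
- hosts: sap_servers                       # assumed inventory group
  become: true
  vars:
    sap_hostname: sap01                    # default: ansible_hostname
    sap_domain: example.com                # default: ansible_domain
    sap_ip: 192.0.2.10                     # default: ansible_default_ipv4.address
  roles:
    - community.sap_install.sap_general_preconfigure
```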
diff --git a/roles/sap_general_preconfigure/handlers/main.yml b/roles/sap_general_preconfigure/handlers/main.yml
index 0c28d1bc7..533d3f49a 100644
--- a/roles/sap_general_preconfigure/handlers/main.yml
+++ b/roles/sap_general_preconfigure/handlers/main.yml
@@ -24,14 +24,17 @@
- not sap_general_preconfigure_fail_if_reboot_required|d(true)
- not sap_general_preconfigure_reboot_ok|d(false)
-# Reason for noqa: We want to avoid non-ansible.builtin modules where possible
-- name: Remount /dev/shm # noqa command-instead-of-module
+# Reasons for noqa:
+# - command-instead-of-module: We want to avoid non-ansible.builtin modules where possible
+# - no-changed-when: Remounting does not do any harm and does not affect idempotency.
+- name: Remount /dev/shm # noqa command-instead-of-module no-changed-when
ansible.builtin.command: mount -o remount /dev/shm
listen: __sap_general_preconfigure_mount_tmpfs_handler
- name: Check if /dev/shm is available
ansible.builtin.command: df -h /dev/shm
register: __sap_general_preconfigure_command_df_shm_result
+ changed_when: false
listen: __sap_general_preconfigure_mount_tmpfs_handler
- name: Show the result of df -h /dev/shm
diff --git a/roles/sap_general_preconfigure/meta/argument_specs.yml b/roles/sap_general_preconfigure/meta/argument_specs.yml
index 2d5993d95..5621e5b28 100644
--- a/roles/sap_general_preconfigure/meta/argument_specs.yml
+++ b/roles/sap_general_preconfigure/meta/argument_specs.yml
@@ -261,7 +261,7 @@ argument_specs:
sap_general_preconfigure_modify_selinux_labels:
default: true
description:
- - Set to `false` if you do not want to modify the SELinux labels for the SAP directores set
+ - Set to `false` if you do not want to modify the SELinux labels for the SAP directories set
- in variable `sap_general_preconfigure_sap_directories`.
required: false
type: bool
diff --git a/roles/sap_general_preconfigure/meta/runtime.yml b/roles/sap_general_preconfigure/meta/runtime.yml
deleted file mode 100644
index 2ee3c9fa9..000000000
--- a/roles/sap_general_preconfigure/meta/runtime.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-requires_ansible: '>=2.9.10'
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/assert-installation.yml b/roles/sap_general_preconfigure/tasks/RedHat/assert-installation.yml
index e6edbefd7..a08b3e47a 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/assert-installation.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/assert-installation.yml
@@ -52,7 +52,7 @@
- name: Assert that all required repos are enabled
ansible.builtin.assert:
- that: "'{{ line_item }}' in __sap_general_preconfigure_register_enabled_repos_assert.stdout_lines"
+ that: line_item in __sap_general_preconfigure_register_enabled_repos_assert.stdout_lines
fail_msg: "FAIL: Repository '{{ line_item }}' is not enabled!"
success_msg: "PASS: Repository '{{ line_item }}' is enabled."
with_items:
@@ -73,7 +73,7 @@
- name: Assert that the RHEL release is locked correctly
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_subscription_manager_release_assert.stdout == '{{ ansible_distribution_version }}'"
+ that: __sap_general_preconfigure_register_subscription_manager_release_assert.stdout == ansible_distribution_version
fail_msg: "FAIL: The RHEL release lock status is '{{ __sap_general_preconfigure_register_subscription_manager_release_assert.stdout }}'
but the expected value is '{{ ansible_distribution_version }}'!"
success_msg: "PASS: The RHEL release is correctly locked to '{{ ansible_distribution_version }}'."
@@ -107,7 +107,7 @@
- name: Assert that all required RHEL 7 package groups are installed
ansible.builtin.assert:
- that: "'{{ line_item }}' in __sap_general_preconfigure_register_yum_group_assert.stdout_lines"
+ that: line_item in __sap_general_preconfigure_register_yum_group_assert.stdout_lines
fail_msg: "FAIL: Package group '{{ line_item }}' is not installed!"
success_msg: "PASS: Package group '{{ line_item }}' is installed."
loop: "{{ sap_general_preconfigure_packagegroups | map('replace', '@', '') | list }}"
@@ -136,7 +136,7 @@
- name: Assert that all required RHEL 8 environment groups are installed
ansible.builtin.assert:
- that: "'{{ line_item }}' in __sap_general_preconfigure_register_yum_envgroup_assert.stdout_lines"
+ that: line_item in __sap_general_preconfigure_register_yum_envgroup_assert.stdout_lines
fail_msg: "FAIL: Environment group '{{ line_item }}' is not installed!"
success_msg: "PASS: Environment group '{{ line_item }}' is installed."
with_items:
@@ -147,7 +147,7 @@
- name: Assert that all required packages are installed
ansible.builtin.assert:
- that: "'{{ line_item }}' in ansible_facts.packages"
+ that: line_item in ansible_facts.packages
fail_msg: "FAIL: Package '{{ line_item }}' is not installed!"
success_msg: "PASS: Package '{{ line_item }}' is installed."
with_items:
@@ -163,10 +163,11 @@
register: __sap_general_preconfigure_register_required_ppc64le_packages_assert
changed_when: no
when: ansible_architecture == "ppc64le"
+ ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
- name: Assert that all required IBM packages are installed
ansible.builtin.assert:
- that: "'{{ line_item }}' in __sap_general_preconfigure_register_required_ppc64le_packages_assert.stdout_lines"
+ that: line_item in __sap_general_preconfigure_register_required_ppc64le_packages_assert.stdout_lines
fail_msg: "FAIL: Package '{{ line_item }}' is not installed!"
success_msg: "PASS: Package '{{ line_item }}' is installed."
with_items:
@@ -181,7 +182,8 @@
- sap_general_preconfigure_min_package_check|bool
- __sap_general_preconfigure_min_pkgs | d([])
block:
- - name: Assert - Create a list of minimum required package versions to be installed
+# Reason for noqa: We can safely fail at the last command in the pipeline.
+ - name: Assert - Create a list of minimum required package versions to be installed # noqa risky-shell-pipe
# How does it work?
# 1 - Print the required package name and version with a prefix "1" followed by a space.
# 2 - In the same output sequence, list all installed versions of this package with a prefix "2" followed by a space.
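A brief sketch of why the `that:` conditions above were rewritten (illustrative, not part of the patch): `assert.that` is already evaluated as a Jinja2 expression, so wrapping variables in `{{ }}` is redundant templating that ansible-lint flags, while `fail_msg`/`success_msg` remain templated as usual:

```yaml
# Before (flagged):  that: "'{{ line_item }}' in ansible_facts.packages"
# After (bare expression, same result):
- name: Assert that a package is installed (illustrative)
  ansible.builtin.assert:
    that: line_item in ansible_facts.packages
    fail_msg: "FAIL: Package '{{ line_item }}' is not installed!"
  vars:
    line_item: util-linux                  # assumed package name for the example
  # requires a prior ansible.builtin.package_facts task to populate ansible_facts.packages
```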
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-dns-name-resolution.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-dns-name-resolution.yml
index 6d83943af..c7df9b1b3 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-dns-name-resolution.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-dns-name-resolution.yml
@@ -37,7 +37,7 @@
- name: Assert that sap_ip is set
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_dig_short_assert.stdout == '{{ sap_ip }}'"
+ that: __sap_general_preconfigure_register_dig_short_assert.stdout == sap_ip
fail_msg: "FAIL: The variable 'sap_ip' is not set!"
success_msg: "PASS: The variable 'sap_ip' is set."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
@@ -53,7 +53,7 @@
- name: Assert that the IP address for sap_hostname is resolved correctly
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_dig_search_short_assert.stdout == '{{ sap_ip }}'"
+ that: __sap_general_preconfigure_register_dig_search_short_assert.stdout == sap_ip
fail_msg: "FAIL: The IP address for 'sap_hostname' could not be resolved!"
success_msg: "PASS: The IP address for 'sap_hostname' was resolved."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(true) }}"
@@ -68,7 +68,7 @@
- name: Assert that the reverse name resolution is correct
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_dig_reverse_assert.stdout == '{{ sap_hostname }}.{{ sap_domain }}.'"
+ that: __sap_general_preconfigure_register_dig_reverse_assert.stdout == (sap_hostname + '.' + sap_domain + '.')
fail_msg: "FAIL: The reverse name resolution of 'sap_ip' was not successful!"
success_msg: "PASS: The reverse name resolution of 'sap_ip' was successful."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(true) }}"
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-etc-hosts.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-etc-hosts.yml
index 1de70c990..48b301f38 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-etc-hosts.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-etc-hosts.yml
@@ -31,7 +31,7 @@
- name: Assert that ipv4 address, FQDN, and hostname are once in /etc/hosts
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_ipv4_fqdn_sap_hostname_once_assert.stdout == '1'"
+ that: __sap_general_preconfigure_register_ipv4_fqdn_sap_hostname_once_assert.stdout == '1'
fail_msg: "FAIL: The line '{{ sap_ip }} {{ sap_hostname }}.{{ sap_domain }} {{ sap_hostname }}' needs to be once in /etc/hosts!"
success_msg: "PASS: The line '{{ sap_ip }} {{ sap_hostname }}.{{ sap_domain }} {{ sap_hostname }}' is once in /etc/hosts."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
@@ -52,7 +52,7 @@
- name: Assert that there is just one line containing {{ sap_ip }} in /etc/hosts
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_sap_ip_once_assert.stdout == '1'"
+ that: __sap_general_preconfigure_register_sap_ip_once_assert.stdout == '1'
fail_msg: "FAIL: There is no line, or more than one line, containing '{{ sap_ip }}' in /etc/hosts!"
success_msg: "PASS: There is only one line containing '{{ sap_ip }}' in /etc/hosts."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
@@ -67,7 +67,7 @@
- name: Assert that there is just one line containing {{ sap_hostname }}.{{ sap_domain }} in /etc/hosts
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_fqdn_once_assert.stdout == '1'"
+ that: __sap_general_preconfigure_register_fqdn_once_assert.stdout == '1'
fail_msg: "FAIL: There is no line, or more than one line, containing '{{ sap_hostname }}.{{ sap_domain }}' in /etc/hosts!"
success_msg: "PASS: There is only one line containing '{{ sap_hostname }}.{{ sap_domain }}' in /etc/hosts."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
@@ -82,7 +82,7 @@
- name: Assert that there is just one line containing {{ sap_hostname }} in /etc/hosts
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_sap_hostname_once_assert.stdout == '1'"
+ that: __sap_general_preconfigure_register_sap_hostname_once_assert.stdout == '1'
fail_msg: "FAIL: There is no line, or more than one line, containing '{{ sap_hostname }}' in /etc/hosts!"
success_msg: "PASS: There is only one line containing '{{ sap_hostname }}' in /etc/hosts."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-hostname.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-hostname.yml
index 64957c5dc..0ee9b8972 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-hostname.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-hostname.yml
@@ -8,14 +8,14 @@
- name: Assert that the output of hostname matches the content of variable sap_hostname
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_hostname_assert.stdout == '{{ sap_hostname }}'"
+ that: __sap_general_preconfigure_register_hostname_assert.stdout == sap_hostname
fail_msg: "FAIL: The output of 'hostname' does not match the content of variable 'sap_hostname'!"
success_msg: "PASS: The output of 'hostname' matches the content of variable 'sap_hostname'."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
- name: "Assert that the length of the hostname is not longer than 'sap_general_preconfigure_max_hostname_length'"
ansible.builtin.assert:
- that: "{{ sap_hostname | length | int }} <= {{ sap_general_preconfigure_max_hostname_length | int }}"
+ that: (sap_hostname | length | int) <= (sap_general_preconfigure_max_hostname_length | int)
fail_msg: "FAIL: The length of the hostname is {{ sap_hostname | length | int }} but must be less or equal to {{ sap_general_preconfigure_max_hostname_length }} (variable 'sap_general_preconfigure_max_hostname_length')!"
success_msg: "PASS: The length of the hostname is {{ sap_hostname | length | int }}, which is less or equal to {{ sap_general_preconfigure_max_hostname_length }} (variable 'sap_general_preconfigure_max_hostname_length')."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-kernel-parameters-loop-block.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-kernel-parameters-loop-block.yml
index d3b90e0ae..c87db29b0 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-kernel-parameters-loop-block.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-kernel-parameters-loop-block.yml
@@ -11,7 +11,7 @@
- name: Assert that {{ line_item.name }} is set correctly in {{ sap_general_preconfigure_etc_sysctl_sap_conf }}
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_sysctl_sap_conf_kernel_parameter_assert.stdout == '{{ line_item.value }}'"
+ that: __sap_general_preconfigure_register_sysctl_sap_conf_kernel_parameter_assert.stdout == line_item.value
fail_msg: "FAIL: The value of '{{ line_item.name }}' in '{{ sap_general_preconfigure_etc_sysctl_sap_conf }}' is
'{{ __sap_general_preconfigure_register_sysctl_sap_conf_kernel_parameter_assert.stdout }}' but the expected value is '{{ line_item.value }}'!"
success_msg: "PASS: The value of '{{ line_item.name }}' in '{{ sap_general_preconfigure_etc_sysctl_sap_conf }}' is
@@ -27,7 +27,7 @@
- name: Assert that {{ line_item.name }} is set correctly as per sysctl
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_sysctl_kernel_parameter_assert.stdout == '{{ line_item.value }}'"
+ that: __sap_general_preconfigure_register_sysctl_kernel_parameter_assert.stdout == line_item.value
fail_msg: "FAIL: The current value of '{{ line_item.name }}' as per sysctl is
'{{ __sap_general_preconfigure_register_sysctl_kernel_parameter_assert.stdout }}' but the expected value is '{{ line_item.value }}'!"
success_msg: "PASS: The current value of '{{ line_item.name }}' as per sysctl is
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-nofile-limits.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-nofile-limits.yml
index c915bd3e4..6d549b96d 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-nofile-limits.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-nofile-limits.yml
@@ -11,7 +11,7 @@
- name: Assert that the hard limit of nofile for group sapsys is 1048576
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_limits_sap_conf_nofile_hard_assert.stdout == '1048576'"
+ that: __sap_general_preconfigure_register_limits_sap_conf_nofile_hard_assert.stdout == '1048576'
fail_msg: "FAIL: The hard limit of nofile for group 'sapsys' in /etc/security/limits.d/99-sap.conf is
'{{ __sap_general_preconfigure_register_limits_sap_conf_nofile_hard_assert.stdout }}' but the expected value is 1048576 !"
success_msg: "PASS: The hard limit of nofile for group 'sapsys' in /etc/security/limits.d/99-sap.conf is
@@ -32,7 +32,7 @@
- name: Assert that the soft limit of nofile for group sapsys is 1048576
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_limits_sap_conf_nofile_soft_assert.stdout == '1048576'"
+ that: __sap_general_preconfigure_register_limits_sap_conf_nofile_soft_assert.stdout == '1048576'
fail_msg: "FAIL: The soft limit of nofile for group 'sapsys' in /etc/security/limits.d/99-sap.conf is
'{{ __sap_general_preconfigure_register_limits_sap_conf_nofile_soft_assert.stdout }}' but the expected value is 1048576 !"
success_msg: "PASS: The soft limit of nofile for group 'sapsys' in /etc/security/limits.d/99-sap.conf is
@@ -54,7 +54,7 @@
- name: Assert that the hard limit of nofile for the database group is 1048576
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_limits_sap_conf_db_group_nofile_hard_assert.stdout == '1048576'"
+ that: __sap_general_preconfigure_register_limits_sap_conf_db_group_nofile_hard_assert.stdout == '1048576'
fail_msg: "FAIL: The hard limit of nofile for group '{{ sap_general_preconfigure_db_group_name }}' is not set to '1048576' in /etc/security/limits.d/99-sap.conf!"
success_msg: "PASS: The hard limit of nofile for group '{{ sap_general_preconfigure_db_group_name }}' is set to '1048576' in /etc/security/limits.d/99-sap.conf."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
@@ -75,7 +75,7 @@
- name: Assert that the soft limit of nofile for the database group is 1048576
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_limits_sap_conf_db_group_nofile_soft_assert.stdout == '1048576'"
+ that: __sap_general_preconfigure_register_limits_sap_conf_db_group_nofile_soft_assert.stdout == '1048576'
fail_msg: "FAIL: The soft limit of nofile for group '{{ sap_general_preconfigure_db_group_name }}' is not set to '1048576' in /etc/security/limits.d/99-sap.conf!"
success_msg: "PASS: The soft limit of nofile for group '{{ sap_general_preconfigure_db_group_name }}' is set to '1048576' in /etc/security/limits.d/99-sap.conf."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-nproc-limits.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-nproc-limits.yml
index 9edf3e320..0a24bcd15 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-nproc-limits.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-nproc-limits.yml
@@ -11,7 +11,7 @@
- name: Assert that the hard limit of nproc for group sapsys is unlimited
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_limits_sap_conf_nproc_hard_assert.stdout == 'unlimited'"
+ that: __sap_general_preconfigure_register_limits_sap_conf_nproc_hard_assert.stdout == 'unlimited'
fail_msg: "FAIL: The hard limit of nproc for group 'sapsys' in /etc/security/limits.d/99-sap.conf is
'{{ __sap_general_preconfigure_register_limits_sap_conf_nproc_hard_assert.stdout }}' but the expected value is 'unlimited'!"
success_msg: "PASS: The hard limit of nproc for group 'sapsys' in /etc/security/limits.d/99-sap.conf is
@@ -32,7 +32,7 @@
- name: Assert that the soft limit of nproc for group sapsys is unlimited
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_limits_sap_conf_nproc_soft_assert.stdout == 'unlimited'"
+ that: __sap_general_preconfigure_register_limits_sap_conf_nproc_soft_assert.stdout == 'unlimited'
fail_msg: "FAIL: The soft limit of nproc for group 'sapsys' in /etc/security/limits.d/99-sap.conf is
'{{ __sap_general_preconfigure_register_limits_sap_conf_nproc_hard_assert.stdout }}' but the expected value is 'unlimited'!"
success_msg: "PASS: The soft limit of nproc for group 'sapsys' in /etc/security/limits.d/99-sap.conf is
@@ -54,7 +54,7 @@
- name: Assert that the hard limit of nproc for the database group is unlimited
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_limits_sap_conf_db_group_nproc_hard_assert.stdout == 'unlimited'"
+ that: __sap_general_preconfigure_register_limits_sap_conf_db_group_nproc_hard_assert.stdout == 'unlimited'
fail_msg: "FAIL: The hard limit of nproc for group '{{ sap_general_preconfigure_db_group_name }}' in /etc/security/limits.d/99-sap.conf is
'{{ __sap_general_preconfigure_register_limits_sap_conf_db_group_nproc_hard_assert.stdout }}' but the expected value is 'unlimited'!"
success_msg: "PASS: The hard limit of nproc for group '{{ sap_general_preconfigure_db_group_name }}' in /etc/security/limits.d/99-sap.conf is
@@ -77,7 +77,7 @@
- name: Assert that the soft limit of nproc for the database group is unlimited
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_limits_sap_conf_db_group_nproc_soft_assert.stdout == 'unlimited'"
+ that: __sap_general_preconfigure_register_limits_sap_conf_db_group_nproc_soft_assert.stdout == 'unlimited'
fail_msg: "FAIL: The soft limit of nproc for group '{{ sap_general_preconfigure_db_group_name }}' in /etc/security/limits.d/99-sap.conf is
'{{ __sap_general_preconfigure_register_limits_sap_conf_db_group_nproc_soft_assert.stdout }}' but the expected value is 'unlimited'!"
success_msg: "PASS: The soft limit of nproc for group '{{ sap_general_preconfigure_db_group_name }}' in /etc/security/limits.d/99-sap.conf is
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-selinux.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-selinux.yml
index 7a41cf16a..bf9cf759a 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-selinux.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-selinux.yml
@@ -34,7 +34,7 @@
- name: "Assert that the permanent configuration of the SELinux state is set to '{{ sap_general_preconfigure_selinux_state }}'"
ansible.builtin.assert:
- that: __sap_general_preconfigure_register_selinux_conf_assert.stdout == "{{ sap_general_preconfigure_selinux_state }}"
+ that: __sap_general_preconfigure_register_selinux_conf_assert.stdout == sap_general_preconfigure_selinux_state
fail_msg: "FAIL: The system is not configured for the SELinux state of '{{ sap_general_preconfigure_selinux_state }}'.
Current configuration: '{{ __sap_general_preconfigure_register_selinux_conf_assert.stdout }}'."
success_msg: "PASS: The system is configured for the SELinux state of '{{ sap_general_preconfigure_selinux_state }}'"
@@ -48,7 +48,7 @@
- name: Assert that the SELinux state is set correctly
ansible.builtin.assert:
- that: __sap_general_preconfigure_register_getenforce_assert.stdout | lower == "{{ sap_general_preconfigure_selinux_state }}"
+ that: (__sap_general_preconfigure_register_getenforce_assert.stdout | lower) == sap_general_preconfigure_selinux_state
fail_msg: "FAIL: SELinux is currently not '{{ sap_general_preconfigure_selinux_state }}'!
The current SELinux state is: '{{ __sap_general_preconfigure_register_getenforce_assert.stdout | lower }}'."
success_msg: "PASS: SELinux is currently {{ sap_general_preconfigure_selinux_state }}."
@@ -89,7 +89,7 @@
- name: Assert that there is an entry for '/usr/sap' in the SELinux configuration database
ansible.builtin.assert:
- that: __sap_general_preconfigure_register_semanage_fcontext_usr_sap.stdout | int != 0
+ that: (__sap_general_preconfigure_register_semanage_fcontext_usr_sap.stdout | int) != 0
fail_msg: "FAIL: There is no entry for '/usr/sap' in the SELinux configuration database!"
success_msg: "PASS: There is an entry for '/usr/sap' in the SELinux configuration database."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
@@ -107,7 +107,7 @@
- name: Assert that all files in '/usr/sap' and below have the 'usr_t' file context
ansible.builtin.assert:
- that: __sap_general_preconfigure_register_ls_z_usr_sap.stdout | int == 0
+ that: (__sap_general_preconfigure_register_ls_z_usr_sap.stdout | int) == 0
fail_msg: "FAIL: There is at least one file in '/usr/sap' or below without the 'usr_t' file context!"
success_msg: "PASS: All files in '/usr/sap' and below have the 'usr_t' file context."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-tmpfs.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-tmpfs.yml
index 748e31be9..81e5e175c 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-tmpfs.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-tmpfs.yml
@@ -8,14 +8,14 @@
- name: Assert that there is an entry for tmpfs in /etc/fstab
ansible.builtin.assert:
- that: __sap_general_preconfigure_register_fstab_tmpfs_size_gb_assert.stdout | length > 0
+ that: (__sap_general_preconfigure_register_fstab_tmpfs_size_gb_assert.stdout | length) > 0
fail_msg: "FAIL: There is no entry for 'tmpfs' in /etc/fstab!"
success_msg: "PASS: An entry for 'tmpfs' in /etc/fstab exists."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
- name: Assert that the size of tmpfs is large enough as per /etc/fstab
ansible.builtin.assert:
- that: "'{{ sap_general_preconfigure_size_of_tmpfs_gb }}G' in __sap_general_preconfigure_register_fstab_tmpfs_size_gb_assert.stdout"
+ that: (sap_general_preconfigure_size_of_tmpfs_gb + 'G') in __sap_general_preconfigure_register_fstab_tmpfs_size_gb_assert.stdout
fail_msg: "FAIL: The size of tmpfs in /etc/fstab is '{{ __sap_general_preconfigure_register_fstab_tmpfs_size_gb_assert.stdout }}'
but the expected size is '{{ sap_general_preconfigure_size_of_tmpfs_gb }}G'!"
success_msg: "PASS: The size of tmpfs in /etc/fstab is '{{ __sap_general_preconfigure_register_fstab_tmpfs_size_gb_assert.stdout }}'."
@@ -30,7 +30,7 @@
- name: Assert that the current size of tmpfs is large enough as per df output
ansible.builtin.assert:
- that: "__sap_general_preconfigure_register_df_shm_assert.stdout == '{{ sap_general_preconfigure_size_of_tmpfs_gb }}G'"
+ that: __sap_general_preconfigure_register_df_shm_assert.stdout == (sap_general_preconfigure_size_of_tmpfs_gb + 'G')
fail_msg: "FAIL: The current size of tmpfs is '{{ __sap_general_preconfigure_register_df_shm_assert.stdout }}'
but the expected size is '{{ sap_general_preconfigure_size_of_tmpfs_gb }}G'!"
success_msg: "PASS: The current size of tmpfs is '{{ __sap_general_preconfigure_register_df_shm_assert.stdout }}'."
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-uuidd.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-uuidd.yml
index 0951483f9..cd78d9b42 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-uuidd.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/assert-uuidd.yml
@@ -35,8 +35,8 @@
- name: Assert that uuidd.socket is active
ansible.builtin.assert:
- that: "'active (running)' in __sap_general_preconfigure_register_uuidd_socket_status_assert.stdout or 'active (listening)'
- in __sap_general_preconfigure_register_uuidd_socket_status_assert.stdout"
+ that: "'active (running)' in __sap_general_preconfigure_register_uuidd_socket_status_assert.stdout or
+ 'active (listening)' in __sap_general_preconfigure_register_uuidd_socket_status_assert.stdout"
fail_msg: "FAIL: Service 'uuidd.socket' is not active!"
success_msg: "PASS: Service 'uuidd.socket' is active."
ignore_errors: "{{ sap_general_preconfigure_assert_ignore_errors | d(false) }}"
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/check-dns-name-resolution.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/check-dns-name-resolution.yml
index 994c3998b..42ca53351 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/check-dns-name-resolution.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/check-dns-name-resolution.yml
@@ -1,19 +1,5 @@
---
-# moved to configure-etc-hosts.yml:
-#- name: Verify that the DNS domain is set
-# assert:
-# that:
-# not( (sap_domain is undefined) or (sap_domain is none) or (sap_domain | trim == '') )
-# msg: "You need to define the variable 'sap_domain' in defaults/main.yml."
-
-### DNS is not that necessary and as such the errors are ignored
-### for production it is strongly recommended to have proper DNS setup
-# - name: Ensure dig command is installed
-# package:
-# name: bind-utils
-# state: present
-
- name: Check dns forwarding settings
ansible.builtin.shell: test "$(dig {{ sap_hostname }}.{{ sap_domain }} +short)" = "{{ sap_ip }}"
changed_when: false
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/configure-selinux.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/configure-selinux.yml
index 19a406c90..09dc818d7 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/configure-selinux.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/configure-selinux.yml
@@ -39,6 +39,7 @@
- name: SELinux - Call Reboot handler if necessary
ansible.builtin.command: /bin/true
notify: __sap_general_preconfigure_reboot_handler
+ changed_when: true
when: __sap_general_preconfigure_fact_selinux_mode != sap_general_preconfigure_selinux_state
- name: Set or unset SELinux kernel parameter, RHEL 8 and RHEL 9
@@ -50,7 +51,7 @@
block:
- name: SELinux - Examine grub entries
- ansible.builtin.shell: grubby --info=ALL | awk 'BEGIN{a=0;b=0}/^args/{a++}/selinux=0/{b++}END{print a, b}'
+ ansible.builtin.shell: set -o pipefail && grubby --info=ALL | awk 'BEGIN{a=0;b=0}/^args/{a++}/selinux=0/{b++}END{print a, b}'
register: __sap_general_preconfigure_register_grubby_info_all_selinux
check_mode: no
changed_when: false
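The patch uses two different remedies for shell pipelines, shown here as a minimal sketch (commands and register names are illustrative):

```yaml
# Option 1: fail on any stage of the pipeline, as added above for the grubby | awk call.
- name: Count kernel boot entries (illustrative)
  ansible.builtin.shell: set -o pipefail && grubby --info=ALL | awk '/^kernel/ {n++} END {print n+0}'
  register: __example_kernel_count        # hypothetical register name
  changed_when: false

# Option 2: document why failing only at the last stage is safe and skip the rule,
# as done for the package-version list tasks.
# - name: Build a sorted package list  # noqa risky-shell-pipe
#   ansible.builtin.shell: rpm -qa --qf '%{NAME}\n' | sort -u
```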
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/configure-tmpfs.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/configure-tmpfs.yml
index b18eec512..ebda944a4 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/configure-tmpfs.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/configure-tmpfs.yml
@@ -20,4 +20,5 @@
- name: Trigger remounting if /dev/shm has not the expected size
ansible.builtin.command: /bin/true
notify: __sap_general_preconfigure_mount_tmpfs_handler
+ changed_when: true
when: __sap_general_preconfigure_register_df_shm.stdout != sap_general_preconfigure_size_of_tmpfs_gb
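One note on this trigger pattern (reasoning, not role code): handlers only run when the notifying task reports a change, so a task whose sole purpose is to notify must report `changed`; the pairing looks like this:

```yaml
# Illustrative trigger task plus the handler it notifies (the handler is the one
# patched earlier in handlers/main.yml).
- name: Trigger remounting of /dev/shm (illustrative)
  ansible.builtin.command: /bin/true
  notify: __sap_general_preconfigure_mount_tmpfs_handler
  changed_when: true        # with changed_when: false the handler would never fire

# handlers/main.yml:
# - name: Remount /dev/shm
#   ansible.builtin.command: mount -o remount /dev/shm
#   listen: __sap_general_preconfigure_mount_tmpfs_handler
```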
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/increase-nofile-limits.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/increase-nofile-limits.yml
index a96d6a84d..43788f3e5 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/increase-nofile-limits.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/increase-nofile-limits.yml
@@ -2,7 +2,7 @@
# Reasons for noqa: 1. Tabs can increase readability;
# 2. The example in man limits.conf is tab formatted;
-# 3. It is difficult to replace tabs by spaces for entries for which their lenghts are not known
+# 3. It is difficult to replace tabs by spaces for entries for which their lengths are not known
- name: Set the hard and soft limit for the max number of open files per process (nofile) to 1048576
for group 'sapsys' # noqa no-tabs
ansible.builtin.lineinfile:
@@ -19,7 +19,7 @@
# Reasons for noqa: 1. Tabs can increase readability;
# 2. The example in man limits.conf is tab formatted;
-# 3. It is difficult to replace tabs by spaces for entries for which their lenghts are not known
+# 3. It is difficult to replace tabs by spaces for entries for which their lengths are not known
- name: Set the hard and soft limit for the max number of open files per process (nofile) to 1048576
for group '{{ sap_general_preconfigure_db_group_name }}' # noqa no-tabs
ansible.builtin.lineinfile:
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/generic/increase-nproc-limits.yml b/roles/sap_general_preconfigure/tasks/RedHat/generic/increase-nproc-limits.yml
index a9b25a495..81537d6ce 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/generic/increase-nproc-limits.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/generic/increase-nproc-limits.yml
@@ -2,7 +2,7 @@
# Reasons for noqa: 1. Tabs can increase readability;
# 2. The example in man limits.conf is tab formatted;
-# 3. It is difficult to replace tabs by spaces for entries for which their lenghts are not known
+# 3. It is difficult to replace tabs by spaces for entries for which their lengths are not known
- name: Set the hard and soft limit for the max number of processes per user (nproc) to unlimited
for group 'sapsys' # noqa no-tabs
ansible.builtin.lineinfile:
@@ -19,7 +19,7 @@
# Reasons for noqa: 1. Tabs can increase readability;
# 2. The example in man limits.conf is tab formatted;
-# 3. It is difficult to replace tabs by spaces for entries for which their lenghts are not known
+# 3. It is difficult to replace tabs by spaces for entries for which their lengths are not known
- name: Set the hard and soft limit for the max number of processes per user (nproc) to unlimited
for group '{{ sap_general_preconfigure_db_group_name }}' # noqa no-tabs
ansible.builtin.lineinfile:
diff --git a/roles/sap_general_preconfigure/tasks/RedHat/installation.yml b/roles/sap_general_preconfigure/tasks/RedHat/installation.yml
index 655cd4e8b..b86af1c69 100644
--- a/roles/sap_general_preconfigure/tasks/RedHat/installation.yml
+++ b/roles/sap_general_preconfigure/tasks/RedHat/installation.yml
@@ -97,7 +97,8 @@
- sap_general_preconfigure_set_minor_release
- __sap_general_preconfigure_register_subscription_manager_release.stdout == ansible_distribution_version
-- name: Set the minor RHEL release
+# Reason for noqa: Finding out if the minor release has already been set would require one more task.
+- name: Set the minor RHEL release # noqa no-changed-when
ansible.builtin.command: subscription-manager release --set="{{ ansible_distribution_version }}"
when:
- sap_general_preconfigure_set_minor_release
@@ -113,7 +114,8 @@
# Because the installation of an environment or package group is not guaranteed to avoid package updates,
# and because of bug 2011426 (for which the fix is not available in the RHEL 8.1 ISO image), a RHEL 8.1
# system might not boot after installing environment group Server.
-- name: Ensure that the required package groups are installed, RHEL 8 and RHEL 9 # noqa command-instead-of-module
+# Reason for noqa: Finding out if packages already are installed would require one more task.
+- name: Ensure that the required package groups are installed, RHEL 8 and RHEL 9 # noqa command-instead-of-module no-changed-when
ansible.builtin.command: "yum install {{ sap_general_preconfigure_packagegroups | join(' ') }} --nobest --exclude=kernel* -y"
register: __sap_general_preconfigure_register_yum_group_install
when: ansible_distribution_major_version == '8' or ansible_distribution_major_version == '9'
@@ -144,6 +146,7 @@
- name: Accept the license for the IBM Service and Productivity Tools
ansible.builtin.shell: LESS=+q /opt/ibm/lop/configure <<<'y'
+ changed_when: true
when:
- ansible_architecture == "ppc64le"
- sap_general_preconfigure_install_ibm_power_tools | d(true)
@@ -164,7 +167,8 @@
- __sap_general_preconfigure_min_pkgs | d([])
block:
- - name: Create a list of minimum required package versions to be installed
+# Reason for noqa: We can safely fail at the last command in the pipeline.
+ - name: Create a list of minimum required package versions to be installed # noqa risky-shell-pipe
# How does it work?
# 1 - Print the required package name and version with a prefix "1" followed by a space.
# 2 - In the same output sequence, list all installed versions of this package with a prefix "2" followed by a space.
@@ -275,4 +279,5 @@
- name: Call Reboot handler if necessary
ansible.builtin.command: /bin/true
notify: __sap_general_preconfigure_reboot_handler
+ changed_when: true
when: __sap_general_preconfigure_register_needs_restarting is failed
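An illustrative alternative to the `no-changed-when` skip on the release-lock task (task layout and register name are assumptions): query the current lock first and only change it when needed, at the cost of the extra task mentioned in the noqa reason:

```yaml
- name: Show the currently locked minor release (illustrative)
  ansible.builtin.command: subscription-manager release
  register: __example_release_lock        # hypothetical register name
  changed_when: false

- name: Set the minor RHEL release only if it differs (illustrative)
  ansible.builtin.command: subscription-manager release --set={{ ansible_distribution_version }}
  when: ansible_distribution_version not in __example_release_lock.stdout
  changed_when: true
```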
diff --git a/roles/sap_general_preconfigure/tasks/SLES/assert-installation.yml b/roles/sap_general_preconfigure/tasks/SLES/assert-installation.yml
index 941ce6625..4b5e489e2 100644
--- a/roles/sap_general_preconfigure/tasks/SLES/assert-installation.yml
+++ b/roles/sap_general_preconfigure/tasks/SLES/assert-installation.yml
@@ -2,7 +2,7 @@
- name: Assert that all required packages are installed
ansible.builtin.assert:
- that: "'{{ line_item }}' in ansible_facts.packages"
+ that: line_item in ansible_facts.packages
fail_msg: "FAIL: Package '{{ line_item }}' is not installed!"
success_msg: "PASS: Package '{{ line_item }}' is installed."
with_items:
@@ -17,7 +17,8 @@
- __sap_general_preconfigure_min_pkgs | d([])
block:
- - name: Assert - Create a list of minimum required package versions to be installed
+# Reason for noqa: We can safely fail at the last command in the pipeline.
+ - name: Assert - Create a list of minimum required package versions to be installed # noqa risky-shell-pipe
# How does it work?
# 1 - Print the required package name and version with a prefix "1" followed by a space.
# 2 - In the same output sequence, list all installed versions of this package with a prefix "2" followed by a space.
diff --git a/roles/sap_general_preconfigure/tasks/SLES/installation.yml b/roles/sap_general_preconfigure/tasks/SLES/installation.yml
index 305da9953..5f78c2e5d 100644
--- a/roles/sap_general_preconfigure/tasks/SLES/installation.yml
+++ b/roles/sap_general_preconfigure/tasks/SLES/installation.yml
@@ -11,7 +11,8 @@
- __sap_general_preconfigure_min_pkgs|d([])
block:
- - name: Create a list of minimum required package versions to be installed
+# Reason for noqa: We can safely fail at the last command in the pipeline.
+ - name: Create a list of minimum required package versions to be installed # noqa risky-shell-pipe
# How does it work?
# 1 - Print the required package name and version with a prefix "1" followed by a space.
# 2 - In the same output sequence, list all installed versions of this package with a prefix "2" followed by a space.
@@ -98,4 +99,5 @@
- name: Call Reboot handler if necessary
ansible.builtin.command: /bin/true
notify: __sap_general_preconfigure_reboot_handler
+ changed_when: true
when: __sap_general_preconfigure_register_needs_restarting is failed
diff --git a/roles/sap_general_preconfigure/tasks/sapnote/1771258.yml b/roles/sap_general_preconfigure/tasks/sapnote/1771258.yml
index 6d6a290b9..da081396e 100644
--- a/roles/sap_general_preconfigure/tasks/sapnote/1771258.yml
+++ b/roles/sap_general_preconfigure/tasks/sapnote/1771258.yml
@@ -4,7 +4,7 @@
- name: Configure - Display SAP note number 1771258 and its version
ansible.builtin.debug:
msg: "SAP note {{ (__sap_general_preconfigure_sapnotes_versions | selectattr('number', 'match', '^1771258$') | first).number }}
- (version {{ (__sap_general_preconfigure_sapnotes_versions | selectattr('number', 'match', '^1771258$') | first).version }}): User and system resouce limits"
+ (version {{ (__sap_general_preconfigure_sapnotes_versions | selectattr('number', 'match', '^1771258$') | first).version }}): User and system resource limits"
- name: Import tasks from '../RedHat/generic/increase-nofile-limits.yml'
ansible.builtin.import_tasks: ../RedHat/generic/increase-nofile-limits.yml
diff --git a/roles/sap_general_preconfigure/tasks/sapnote/assert-1771258.yml b/roles/sap_general_preconfigure/tasks/sapnote/assert-1771258.yml
index a5b82dc82..ac06c3a6e 100644
--- a/roles/sap_general_preconfigure/tasks/sapnote/assert-1771258.yml
+++ b/roles/sap_general_preconfigure/tasks/sapnote/assert-1771258.yml
@@ -4,7 +4,7 @@
- name: Assert - Display SAP note number 1771258 and its version
ansible.builtin.debug:
msg: "SAP note {{ (__sap_general_preconfigure_sapnotes_versions | selectattr('number', 'match', '^1771258$') | first).number }}
- (version {{ (__sap_general_preconfigure_sapnotes_versions | selectattr('number', 'match', '^1771258$') | first).version }}): User and system resouce limits"
+ (version {{ (__sap_general_preconfigure_sapnotes_versions | selectattr('number', 'match', '^1771258$') | first).version }}): User and system resource limits"
- name: Import tasks from '../RedHat/generic/assert-nofile-limits.yml'
ansible.builtin.import_tasks: ../RedHat/generic/assert-nofile-limits.yml
diff --git a/roles/sap_ha_install_hana_hsr/.ansible-lint b/roles/sap_ha_install_hana_hsr/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_ha_install_hana_hsr/.ansible-lint
+++ b/roles/sap_ha_install_hana_hsr/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_ha_install_hana_hsr/defaults/main.yml b/roles/sap_ha_install_hana_hsr/defaults/main.yml
index bc3f8ca4c..5aed4c99f 100644
--- a/roles/sap_ha_install_hana_hsr/defaults/main.yml
+++ b/roles/sap_ha_install_hana_hsr/defaults/main.yml
@@ -9,4 +9,4 @@ sap_ha_install_hana_hsr_fqdn: "{{ sap_domain }}"
sap_ha_install_hana_hsr_rep_mode: sync
sap_ha_install_hana_hsr_oper_mode: logreplay
-sap_ha_install_hana_hsr_update_etchosts: yes
+sap_ha_install_hana_hsr_update_etchosts: true
diff --git a/roles/sap_ha_install_hana_hsr/meta/runtime.yml b/roles/sap_ha_install_hana_hsr/meta/runtime.yml
deleted file mode 100644
index 2ee3c9fa9..000000000
--- a/roles/sap_ha_install_hana_hsr/meta/runtime.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-requires_ansible: '>=2.9.10'
diff --git a/roles/sap_ha_install_hana_hsr/tasks/configure_hsr.yml b/roles/sap_ha_install_hana_hsr/tasks/configure_hsr.yml
index 688d93653..04c108471 100644
--- a/roles/sap_ha_install_hana_hsr/tasks/configure_hsr.yml
+++ b/roles/sap_ha_install_hana_hsr/tasks/configure_hsr.yml
@@ -49,6 +49,7 @@
loop: "{{ sap_ha_install_hana_hsr_cluster_nodes }}"
loop_control:
label: "{{ item.node_name }}"
+ changed_when: true
- name: "SAP HSR - Start HANA instance on secondary"
ansible.builtin.shell: |
diff --git a/roles/sap_ha_install_hana_hsr/tasks/hdbuserstore.yml b/roles/sap_ha_install_hana_hsr/tasks/hdbuserstore.yml
index 696eaffdf..290a8689c 100644
--- a/roles/sap_ha_install_hana_hsr/tasks/hdbuserstore.yml
+++ b/roles/sap_ha_install_hana_hsr/tasks/hdbuserstore.yml
@@ -2,6 +2,7 @@
# ansible-lint:
# become_user string is deduced from a variable + suffix with no spaces
- name: "SAP HSR - Check if hdbuserstore exists"
+ become: true
become_user: "{{ sap_ha_install_hana_hsr_sid | lower }}adm"
ansible.builtin.command: |
/usr/sap/{{ sap_ha_install_hana_hsr_sid }}/SYS/exe/hdb/hdbuserstore \
@@ -15,6 +16,7 @@
# ansible-lint:
# become_user string is deduced from a variable + suffix with no spaces
- name: "SAP HSR - Create and Store Connection Info in hdbuserstore"
+ become: true
become_user: "{{ sap_ha_install_hana_hsr_sid | lower }}adm"
ansible.builtin.command: |
/usr/sap/{{ sap_ha_install_hana_hsr_sid }}/SYS/exe/hdb/hdbuserstore \
@@ -22,3 +24,4 @@
{{ ansible_hostname }}:3{{ sap_ha_install_hana_hsr_instance_number }}13 \
SYSTEM '{{ sap_ha_install_hana_hsr_db_system_password }}'
when: sap_ha_install_hana_hsr_hdbuserstore.rc != '0'
+ changed_when: true
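For context (a sketch, not part of the patch): `become_user` on its own does not switch the user, which is what ansible-lint's `partial-become` rule reports; adding `become: true` next to it, as both hunks above do, makes the privilege escalation take effect:

```yaml
# Minimal sketch of the corrected pattern with an illustrative command.
- name: Run a command as the <sid>adm user (illustrative)
  become: true
  become_user: "{{ sap_ha_install_hana_hsr_sid | lower }}adm"
  ansible.builtin.command: whoami
  register: __example_whoami              # hypothetical register name
  changed_when: false
```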
diff --git a/roles/sap_ha_install_hana_hsr/tasks/log_mode.yml b/roles/sap_ha_install_hana_hsr/tasks/log_mode.yml
index 0b007b2a2..952abfa69 100644
--- a/roles/sap_ha_install_hana_hsr/tasks/log_mode.yml
+++ b/roles/sap_ha_install_hana_hsr/tasks/log_mode.yml
@@ -24,3 +24,4 @@
EOF
ignore_errors: true
when: sap_ha_install_hana_hsr_log_mode.rc != '0'
+ changed_when: true
diff --git a/roles/sap_ha_install_hana_hsr/tasks/main.yml b/roles/sap_ha_install_hana_hsr/tasks/main.yml
index 7d33bf2bf..8af8c44ea 100644
--- a/roles/sap_ha_install_hana_hsr/tasks/main.yml
+++ b/roles/sap_ha_install_hana_hsr/tasks/main.yml
@@ -31,8 +31,8 @@
that:
- __sap_ha_install_hana_hsr_connection.node_ip is defined
- __sap_ha_install_hana_hsr_connection.node_ip != ""
- fail_msg: "The IP adress configured for HSR does not exist on this host"
- success_msg: "The IP adress for HSR is configured on this host"
+ fail_msg: "The IP address configured for HSR does not exist on this host"
+ success_msg: "The IP address for HSR is configured on this host"
tags: always
- name: "SAP HSR - Pick up primary node name from definition"
diff --git a/roles/sap_ha_install_hana_hsr/tasks/pki_files.yml b/roles/sap_ha_install_hana_hsr/tasks/pki_files.yml
index 3031fda73..f93be1c3c 100644
--- a/roles/sap_ha_install_hana_hsr/tasks/pki_files.yml
+++ b/roles/sap_ha_install_hana_hsr/tasks/pki_files.yml
@@ -5,6 +5,8 @@
# control node.
- name: "SAP HSR - Direct connection handling to primary"
+ become: true
+ become_user: "{{ sap_ha_install_hana_hsr_sid | lower }}adm"
block:
- name: "SAP HSR - Create .ssh if missing"
@@ -35,8 +37,8 @@
- name: "SAP HSR - Authorize pub key on primary node"
ansible.builtin.lineinfile:
- backup: yes
- create: yes
+ backup: true
+ create: true
line: "{{ __sap_ha_install_hana_hsr_pubkey.stdout }}"
mode: "0600"
path: ~/.ssh/authorized_keys
@@ -123,6 +125,3 @@
- __sap_ha_install_hana_hsr_create_ssh_prim.changed is defined
- __sap_ha_install_hana_hsr_create_ssh_prim.changed
delegate_to: "{{ __sap_ha_install_hana_hsr_primary_node }}"
-
- become: true
- become_user: "{{ sap_ha_install_hana_hsr_sid | lower }}adm"
diff --git a/roles/sap_ha_install_hana_hsr/tasks/update_etchosts.yml b/roles/sap_ha_install_hana_hsr/tasks/update_etchosts.yml
index bcb2c5db0..bad7b39da 100644
--- a/roles/sap_ha_install_hana_hsr/tasks/update_etchosts.yml
+++ b/roles/sap_ha_install_hana_hsr/tasks/update_etchosts.yml
@@ -1,7 +1,9 @@
---
- name: "SAP HSR - Check /etc/hosts for conflicting entries"
ansible.builtin.shell: |
- awk '(/{{ "( |\t)" + item.node_name + "($| |\t)" }}/ && !/^{{ item.node_ip + "( |\t)" }}/) || (/^{{ item.node_ip + "( |\t)" }}/ && !/{{ "( |\t)" + item.node_name + "($| |\t)" }}/)' /etc/hosts
+ awk '(/{{ "( |\t)" + item.node_name + "($| |\t)" }}/ && !/^{{ item.node_ip + "( |\t)" }}/) \
+ || (/^{{ item.node_ip + "( |\t)" }}/ \
+ && !/{{ "( |\t)" + item.node_name + "($| |\t)" }}/)' /etc/hosts
register: etchosts_conflict
changed_when: false
failed_when: etchosts_conflict.stdout != ''
@@ -17,7 +19,7 @@
create: true
mode: '0644'
state: present
- backup: yes
+ backup: true
line: "{{ item.node_ip }}\t{{ item.node_name.split('.')[0] }}.{{ item.node_name.split('.')[1:] | join('.') or sap_ha_install_hana_hsr_fqdn }}\t{{ item.node_name.split('.')[0] }}"
regexp: (?i)^\s*{{ item.node_ip }}\s+{{ item.node_name.split('.')[0] }}
loop: "{{ sap_ha_install_hana_hsr_cluster_nodes }}"
diff --git a/roles/sap_ha_pacemaker_cluster/.ansible-lint b/roles/sap_ha_pacemaker_cluster/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_ha_pacemaker_cluster/.ansible-lint
+++ b/roles/sap_ha_pacemaker_cluster/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_ha_pacemaker_cluster/README.md b/roles/sap_ha_pacemaker_cluster/README.md
index 9a65edec6..30f194cf1 100644
--- a/roles/sap_ha_pacemaker_cluster/README.md
+++ b/roles/sap_ha_pacemaker_cluster/README.md
@@ -84,7 +84,7 @@ In addition, the following network ports must be available:
| **SAP Technical Application and Component** | **Port** |
| --- | --- |
-| **_SAP HANA Sytem Replication_** | |
+| **_SAP HANA System Replication_** | |
| hdbnameserver<br>used for log and data shipping from a primary site to a secondary site.<br>System DB port number plus 10,000 | 4``01 |
| hdbnameserver<br>unencrypted metadata communication between sites.<br>System DB port number plus 10,000 | 4``02 |
| hdbnameserver<br>used for encrypted metadata communication between sites.<br>System DB port number plus 10,000 | 4``06 |
@@ -95,7 +95,7 @@ In addition, the following network ports must be available:
| **_Linux Pacemaker_** | |
| pcsd<br>cluster nodes requirement for node-to-node communication | 2224 (TCP)|
| pacemaker<br>cluster nodes requirement for Pacemaker Remote service daemon | 3121 (TCP) |
-| corosync<br>cluster nodes requirement for node-to-node communcation | 5404-5412 (UDP) |
+| corosync<br>cluster nodes requirement for node-to-node communication | 5404-5412 (UDP) |
## Execution Flow
@@ -269,25 +269,6 @@ The minimal set of fence agent packages that will be installed.
Additional fence agent packages to be installed.
This is automatically combined with `sap_ha_pacemaker_cluster_fence_agent_minimal_packages`.
-### sap_ha_pacemaker_cluster_fence_options
-
-- _Type:_ `dict`
-- _Default:_ `{'pcmk_reboot_retries': 4, 'pcmk_reboot_timeout': 400, 'power_timeout': 240}`
-
-STONITH resource common parameters that apply to most fencing agents.
-These options are applied to fencing resources this role uses automatically for pre-defined platforms (like AWS EC2 VS, IBM Cloud VS).
-The listed options are set by default.
-Additional options can be added by defining this parameter in dictionary format and adding the defaults plus any valid stonith resource key-value pair.
-
-Example:
-
-```yaml
-sap_ha_pacemaker_cluster_fence_options:
- pcmk_reboot_retries: 4
- pcmk_reboot_timeout: 400
- power_timeout: 240
-```
-
### sap_ha_pacemaker_cluster_gcp_project
- _Type:_ `string`
@@ -421,7 +402,7 @@ Mandatory for the cluster setup on IBM Cloud Virtual Server instances or IBM Pow
- _Type:_ `string`
IBM Power Virtual Server API Endpoint type (public or private) dependent on network interface attachments for the target instances.
-['Mandatory for the cluster setup on IBM Power Virtual Server from IBM Cloud.']
+Mandatory for the cluster setup on IBM Power Virtual Server from IBM Cloud.
### sap_ha_pacemaker_cluster_ibmcloud_powervs_forward_proxy_url
@@ -791,6 +772,13 @@ Name of the SAPInstance resource for NetWeaver AAS.
Virtual IP of the NetWeaver ASCS instance.
Mandatory for NetWeaver ASCS/ERS cluster setup.
+### sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_group_name
+
+- _Type:_ `string`
+- _Default:_ `_ASCS_group`
+
+Name of the NetWeaver ASCS resource group.
+
### sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_name
- _Type:_ `string`
@@ -805,6 +793,13 @@ Name of the SAPInstance resource for NetWeaver ASCS.
Virtual IP of the NetWeaver ERS instance.
Mandatory for NetWeaver ASCS/ERS cluster setup.
+### sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_group_name
+
+- _Type:_ `string`
+- _Default:_ `_ERS_group`
+
+Name of the NetWeaver ERS resource group.
+
### sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_name
- _Type:_ `string`
diff --git a/roles/sap_ha_pacemaker_cluster/defaults/main.yml b/roles/sap_ha_pacemaker_cluster/defaults/main.yml
index b4652cb72..88df2160d 100644
--- a/roles/sap_ha_pacemaker_cluster/defaults/main.yml
+++ b/roles/sap_ha_pacemaker_cluster/defaults/main.yml
@@ -19,7 +19,7 @@ sap_ha_pacemaker_cluster_system_roles_collection: 'fedora.linux_system_roles'
# Optional: write all cluster configuration (including unencrypted credentials!) into a yaml
# config file.
-# Useful for parameter review or re-use with the 'ha_cluster' LSR.
+# Useful for parameter review or reuse with the 'ha_cluster' LSR.
sap_ha_pacemaker_cluster_create_config_varfile: false
sap_ha_pacemaker_cluster_create_config_dest: "review_resource_config.yml"
@@ -56,20 +56,7 @@ sap_ha_pacemaker_cluster_resource_defaults: {}
# TODO: review with testers, updated arg specs now require it to be a list from the start
sap_ha_pacemaker_cluster_host_type: "{{ sap_host_type | default(['hana_scaleup_perf']) }}"
-# Currently unused parameter. Keeping for future functionality.
-#sap_ha_pacemaker_cluster_replication_type: none
-
-### stonith resource parameter defaults
-sap_ha_pacemaker_cluster_fence_options:
- pcmk_reboot_retries: 4
- pcmk_reboot_timeout: 400
- power_timeout: 240
-
### VIP resource default patterns
-# Currently there is no task for a different default VIP resource agent.
-# Leaving out of meta/argument_specs.yml.
-# Platform specific defaults are defined separately.
-sap_ha_pacemaker_cluster_vip_resource_agent: "ocf:heartbeat:IPaddr2"
sap_ha_pacemaker_cluster_vip_client_interface: ''
## A custom stonith definition that takes precedence over platform defaults.
@@ -79,7 +66,7 @@ sap_ha_pacemaker_cluster_vip_client_interface: ''
# options:
# pcmk_host_list: ""
-#sap_ha_pacemaker_cluster_stonith_custom: []
+# sap_ha_pacemaker_cluster_stonith_custom: []
# Simpler definition format here which gets transformed into the 'ha_cluster' LSR native
# 'ha_cluster_cluster_properties' parameter.
@@ -88,13 +75,17 @@ sap_ha_pacemaker_cluster_cluster_properties:
stonith-timeout: 900
concurrent-fencing: true
+### Constraints:
+# score is dynamic and automatically increased for groups
+sap_ha_pacemaker_cluster_constraint_colo_base_score: 2000
+
################################################################################
# Inherit from 'ha_cluster' Linux System Role parameters when defined
################################################################################
# Optional without a default. The 'ha_cluster' LSR defaults will apply when not defined.
-#sap_ha_pacemaker_cluster_ha_cluster:
-#sap_ha_pacemaker_cluster_cluster_name:
+# sap_ha_pacemaker_cluster_ha_cluster:
+# sap_ha_pacemaker_cluster_cluster_name:
# Optional. Set a default here and not in the code.
sap_ha_pacemaker_cluster_extra_packages: []
@@ -114,7 +105,9 @@ sap_ha_pacemaker_cluster_hacluster_user_password: "{{ ha_cluster_hacluster_passw
sap_ha_pacemaker_cluster_hana_sid: "{{ sap_hana_sid | default('') }}"
# Keeping 'sap_ha_pacemaker_cluster_hana_instance_number' for the time being for backwards compatibility.
-sap_ha_pacemaker_cluster_hana_instance_nr: "{{ sap_ha_pacemaker_cluster_hana_instance_number | default(sap_hana_instance_number) | default('') }}"
+sap_ha_pacemaker_cluster_hana_instance_nr: >-
+ {{ sap_ha_pacemaker_cluster_hana_instance_number
+ | default(sap_hana_instance_number) | default('') }}
# Optional parameters to customize SAPHana resources
# AUTOMATED_REGISTER
@@ -124,12 +117,15 @@ sap_ha_pacemaker_cluster_hana_duplicate_primary_timeout: 900
# PREFER_SITE_TAKEOVER
sap_ha_pacemaker_cluster_hana_prefer_site_takeover: true
-
# SAP HANA - Resource IDs (names) as convenience parameters.
-sap_ha_pacemaker_cluster_hana_resource_name: "SAPHana_{{ sap_ha_pacemaker_cluster_hana_sid }}_{{ sap_ha_pacemaker_cluster_hana_instance_nr }}"
-sap_ha_pacemaker_cluster_hana_resource_clone_name: "{{ sap_ha_pacemaker_cluster_hana_resource_name }}-clone"
-sap_ha_pacemaker_cluster_hana_topology_resource_name: "SAPHanaTopology_{{ sap_ha_pacemaker_cluster_hana_sid }}_{{ sap_ha_pacemaker_cluster_hana_instance_nr }}"
-sap_ha_pacemaker_cluster_hana_topology_resource_clone_name: "{{ sap_ha_pacemaker_cluster_hana_topology_resource_name }}-clone"
+sap_ha_pacemaker_cluster_hana_resource_name: >-
+ SAPHana_{{ sap_ha_pacemaker_cluster_hana_sid }}_{{ sap_ha_pacemaker_cluster_hana_instance_nr }}
+sap_ha_pacemaker_cluster_hana_resource_clone_name: >-
+ {{ sap_ha_pacemaker_cluster_hana_resource_name }}-clone
+sap_ha_pacemaker_cluster_hana_topology_resource_name: >-
+ SAPHanaTopology_{{ sap_ha_pacemaker_cluster_hana_sid }}_{{ sap_ha_pacemaker_cluster_hana_instance_nr }}
+sap_ha_pacemaker_cluster_hana_topology_resource_clone_name: >-
+ {{ sap_ha_pacemaker_cluster_hana_topology_resource_name }}-clone
# Multiple VIP parameters can be defined and will be combined.
@@ -137,9 +133,18 @@ sap_ha_pacemaker_cluster_hana_topology_resource_clone_name: "{{ sap_ha_pacemaker
#
# Mandatory: primary VIP address definition in HANA scale-up clusters
sap_ha_pacemaker_cluster_vip_hana_primary_ip_address: ''
-sap_ha_pacemaker_cluster_vip_hana_primary_resource_name: "vip_{{ sap_ha_pacemaker_cluster_hana_sid }}_{{ sap_ha_pacemaker_cluster_hana_instance_nr }}_primary"
+sap_ha_pacemaker_cluster_vip_hana_primary_resource_name: >-
+ vip_{{ sap_ha_pacemaker_cluster_hana_sid }}_{{ sap_ha_pacemaker_cluster_hana_instance_nr }}_primary
sap_ha_pacemaker_cluster_vip_hana_secondary_ip_address: ''
-sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name: "vip_{{ sap_ha_pacemaker_cluster_hana_sid }}_{{ sap_ha_pacemaker_cluster_hana_instance_nr }}_readonly"
+sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name: >-
+ vip_{{ sap_ha_pacemaker_cluster_hana_sid }}_{{ sap_ha_pacemaker_cluster_hana_instance_nr }}_readonly
+
+sap_ha_pacemaker_cluster_healthcheck_hana_primary_id: "{{ sap_ha_pacemaker_cluster_hana_sid + 'prim' }}"
+sap_ha_pacemaker_cluster_healthcheck_hana_secondary_id: "{{ sap_ha_pacemaker_cluster_hana_sid + 'ro' }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_id: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid + 'ascs' }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_id: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid + 'ers' }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_id: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid + 'pas' }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_id: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid + 'aas' }}"
################################################################################
@@ -161,8 +166,8 @@ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr: "{{ sap_swpm_ers_instance_nr
sap_ha_pacemaker_cluster_nwas_abap_pas_instance_nr: "{{ sap_swpm_pas_instance_nr | default('') }}"
sap_ha_pacemaker_cluster_nwas_abap_aas_instance_nr: "{{ sap_swpm_aas_instance_nr | default('') }}"
# Prepare in case JAVA SCS/ERS will be included later.
-#sap_ha_pacemaker_cluster_nwas_java_scs_instance_nr: "{{ sap_swpm_java_scs_instance_nr | default('') }}"
-#sap_ha_pacemaker_cluster_nwas_java_ers_instance_nr: "{{ sap_swpm_java_ers_instance_nr | default('') }}"
+# sap_ha_pacemaker_cluster_nwas_java_scs_instance_nr: "{{ sap_swpm_java_scs_instance_nr | default('') }}"
+# sap_ha_pacemaker_cluster_nwas_java_ers_instance_nr: "{{ sap_swpm_java_ers_instance_nr | default('') }}"
# Definitions for filesystems resources. Currently limited to NFS filesystems.
sap_ha_pacemaker_cluster_storage_definition: "{{ sap_storage_setup_definition | default([]) }}"
@@ -179,13 +184,17 @@ sap_ha_pacemaker_cluster_resource_filesystem_force_unmount: safe
# Multiple VIP parameters can be defined and will be combined.
# See tasks/include_construct_vip_resources.yml
sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address: ''
-sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_name: "vip_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr }}_ascs"
+sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_name: >-
+ vip_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr }}_ascs
sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address: ''
-sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_name: "vip_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr }}_ers"
+sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_name: >-
+ vip_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr }}_ers
sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address: ''
-sap_ha_pacemaker_cluster_vip_nwas_abap_pas_resource_name: "vip_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_pas_instance_nr }}_pas"
+sap_ha_pacemaker_cluster_vip_nwas_abap_pas_resource_name: >-
+ vip_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_pas_instance_nr }}_pas
sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address: ''
-sap_ha_pacemaker_cluster_vip_nwas_abap_aas_resource_name: "vip_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_aas_instance_nr }}_aas"
+sap_ha_pacemaker_cluster_vip_nwas_abap_aas_resource_name: >-
+ vip_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_aas_instance_nr }}_aas
# SAP NetWeaver common - Resource IDs (names) as convenience parameters
@@ -193,15 +202,23 @@ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_resource_name: "vip_{{ sap_ha_pacemak
# - /sapmnt
# - /usr/sap/trans
# - /usr/sap/<>/SYS
-sap_ha_pacemaker_cluster_nwas_sapmnt_filesystem_resource_name: "Filesystem_NWAS_SAPMNT_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}"
-sap_ha_pacemaker_cluster_nwas_transports_filesystem_resource_name: "Filesystem_NWAS_TRANS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}"
-sap_ha_pacemaker_cluster_nwas_sys_filesystem_resource_name: "Filesystem_NWAS_SYS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}"
+sap_ha_pacemaker_cluster_nwas_sapmnt_filesystem_resource_name: >-
+ Filesystem_NWAS_SAPMNT_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}
+sap_ha_pacemaker_cluster_nwas_transports_filesystem_resource_name: >-
+ Filesystem_NWAS_TRANS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}
+sap_ha_pacemaker_cluster_nwas_sys_filesystem_resource_name: >-
+ Filesystem_NWAS_SYS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}
# The shared filesystems are not required to be configured in the cluster.
# By default it is assumed that they are mounted by the system and available on all cluster nodes.
# Set this parameter to "true" to configure the 3 shared filesystems as part of the cluster.
sap_ha_pacemaker_cluster_nwas_shared_filesystems_cluster_managed: false
+# SAP NetWeaver resource group names as convenience parameters
+sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_group_name: >-
+ {{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_ASCS{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr }}_group
+sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_group_name: >-
+ {{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_ERS{{ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr }}_group
################################################################################
# ASCS resource defaults
@@ -216,9 +233,11 @@ sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_start_profile_string: ''
# - /usr/sap/<>/ASCS<>
# - /usr/sap/<>/ERS<>
-sap_ha_pacemaker_cluster_nwas_abap_ascs_filesystem_resource_name: "Filesystem_NWAS_ABAP_ASCS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr }}"
-sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_resource_name: "SAPInstance_NWAS_ABAP_ASCS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr }}"
-#sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_resource_clone_name: "{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_resource_name }}-clone"
+sap_ha_pacemaker_cluster_nwas_abap_ascs_filesystem_resource_name: >-
+ Filesystem_NWAS_ABAP_ASCS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr }}
+sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_resource_name: >-
+ SAPInstance_NWAS_ABAP_ASCS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr }}
+# sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_resource_clone_name: "{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_resource_name }}-clone"
sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_automatic_recover_bool: false
sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_resource_stickiness: 5000
sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_ensa1_migration_threshold: 1
@@ -237,9 +256,11 @@ sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_instance_name: ''
# Full path with instance profile name - mandatory to be user-defined
sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_start_profile_string: ''
-sap_ha_pacemaker_cluster_nwas_abap_ers_filesystem_resource_name: "Filesystem_NWAS_ABAP_ERS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr }}"
-sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_resource_name: "SAPInstance_NWAS_ABAP_ERS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr }}"
-#sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_resource_clone_name: "{{ sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_resource_name }}-clone"
+sap_ha_pacemaker_cluster_nwas_abap_ers_filesystem_resource_name: >-
+ Filesystem_NWAS_ABAP_ERS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr }}
+sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_resource_name: >-
+ SAPInstance_NWAS_ABAP_ERS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr }}
+# sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_resource_clone_name: "{{ sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_resource_name }}-clone"
sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_automatic_recover_bool: false
@@ -248,10 +269,14 @@ sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_automatic_recover_bool: false
################################################################################
# SAP NetWeaver ABAP PAS/AAS - Resource IDs (names) as convenience parameters.
# - /usr/sap/<>/D<>
-#sap_ha_pacemaker_cluster_nwas_abap_pas_filesystem_resource_name: "Filesystem_NWAS_ABAP_PAS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_pas_instance_nr }}"
-#sap_ha_pacemaker_cluster_nwas_abap_pas_sapinstance_resource_name: "SAPInstance_NWAS_ABAP_PAS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_pas_instance_nr }}"
-#sap_ha_pacemaker_cluster_nwas_abap_aas_filesystem_resource_name: "Filesystem_NWAS_ABAP_AAS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_aas_instance_nr }}"
-#sap_ha_pacemaker_cluster_nwas_abap_aas_sapinstance_resource_name: "SAPInstance_NWAS_ABAP_AAS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_aas_instance_nr }}"
+# sap_ha_pacemaker_cluster_nwas_abap_pas_filesystem_resource_name: >
+# "Filesystem_NWAS_ABAP_PAS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_pas_instance_nr }}"
+# sap_ha_pacemaker_cluster_nwas_abap_pas_sapinstance_resource_name: >
+# "SAPInstance_NWAS_ABAP_PAS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_pas_instance_nr }}"
+# sap_ha_pacemaker_cluster_nwas_abap_aas_filesystem_resource_name: >
+# "Filesystem_NWAS_ABAP_AAS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_aas_instance_nr }}"
+# sap_ha_pacemaker_cluster_nwas_abap_aas_sapinstance_resource_name: >
+# "SAPInstance_NWAS_ABAP_AAS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_abap_aas_instance_nr }}"
################################################################################
# JAVA SCS/ERS resource defaults
@@ -259,12 +284,18 @@ sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_automatic_recover_bool: false
# SAP NetWeaver JAVA SCS/ERS - Resource IDs (names) as convenience parameters.
# - /usr/sap/<>/SCS<>
# - /usr/sap/<>/ERS<>
-#sap_ha_pacemaker_cluster_nwas_java_scs_filesystem_resource_name: "Filesytem_NWAS_JAVA_SCS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_java_scs_instance_nr }}"
-#sap_ha_pacemaker_cluster_nwas_java_scs_sapinstance_resource_name: "SAPInstance_NWAS_JAVA_SCS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_java_scs_instance_nr }}"
-#sap_ha_pacemaker_cluster_nwas_java_scs_sapinstance_resource_clone_name: "{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_resource_name }}-clone"
-#sap_ha_pacemaker_cluster_nwas_java_ers_filesystem_resource_name: "Filesytem_NWAS_JAVA_ERS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_java_ers_instance_nr }}"
-#sap_ha_pacemaker_cluster_nwas_java_ers_sapinstance_resource_name: "SAPInstance_NWAS_JAVA_ERS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_java_ers_instance_nr }}"
-#sap_ha_pacemaker_cluster_nwas_java_ers_sapinstance_resource_clone_name: "{{ sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_resource_name }}-clone"
+# sap_ha_pacemaker_cluster_nwas_java_scs_filesystem_resource_name: >
+# "Filesytem_NWAS_JAVA_SCS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_java_scs_instance_nr }}"
+# sap_ha_pacemaker_cluster_nwas_java_scs_sapinstance_resource_name: >
+# "SAPInstance_NWAS_JAVA_SCS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_java_scs_instance_nr }}"
+# sap_ha_pacemaker_cluster_nwas_java_scs_sapinstance_resource_clone_name: >
+# "{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_resource_name }}-clone"
+# sap_ha_pacemaker_cluster_nwas_java_ers_filesystem_resource_name: >
+# "Filesytem_NWAS_JAVA_ERS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_java_ers_instance_nr }}"
+# sap_ha_pacemaker_cluster_nwas_java_ers_sapinstance_resource_name: >
+# "SAPInstance_NWAS_JAVA_ERS_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_{{ sap_ha_pacemaker_cluster_nwas_java_ers_instance_nr }}"
+# sap_ha_pacemaker_cluster_nwas_java_ers_sapinstance_resource_clone_name: >
+# "{{ sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_resource_name }}-clone"
################################################################################
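For illustration, a hedged sketch of how the new group-name and healthcheck-ID defaults above resolve. The SID 'S01' and the instance numbers are made-up example values, not taken from this change:

# Hypothetical input values:
sap_ha_pacemaker_cluster_nwas_abap_sid: S01
sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr: "00"
sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr: "10"

# Rendered defaults, following the templates added above:
# sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_group_name -> "S01_ASCS00_group"
# sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_group_name  -> "S01_ERS10_group"
# sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_id          -> "S01ascs"
# sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_id           -> "S01ers"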
diff --git a/roles/sap_ha_pacemaker_cluster/handlers/main.yml b/roles/sap_ha_pacemaker_cluster/handlers/main.yml
new file mode 100644
index 000000000..7abbf7fc0
--- /dev/null
+++ b/roles/sap_ha_pacemaker_cluster/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: "Reload systemd daemon"
+ ansible.builtin.systemd_service:
+ daemon_reload: true
+ listen: "systemd daemon-reload"
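The new handler listens on the topic "systemd daemon-reload", so any task in the role can trigger a single deferred daemon reload via notify. A minimal sketch of the calling side; the task, unit name and path are illustrative and not part of this change:

- name: "Example - Install a drop-in unit file that requires a daemon reload"
  ansible.builtin.copy:
    dest: /etc/systemd/system/example.service.d/override.conf   # hypothetical path
    content: |
      [Service]
      Restart=on-failure
  notify: "systemd daemon-reload"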
diff --git a/roles/sap_ha_pacemaker_cluster/meta/argument_specs.yml b/roles/sap_ha_pacemaker_cluster/meta/argument_specs.yml
index 1bd4d0050..2de3b8d43 100644
--- a/roles/sap_ha_pacemaker_cluster/meta/argument_specs.yml
+++ b/roles/sap_ha_pacemaker_cluster/meta/argument_specs.yml
@@ -40,19 +40,28 @@ argument_specs:
sap_ha_pacemaker_cluster_create_config_dest:
default: review_resource_config.yml
description:
- - The pacemaker cluster resource configuration optionally created by this role will be saved in a Yaml file in the current working directory.
- - Requires `sap_ha_pacemaker_cluster_create_config_varfile` to be enabled for generating the output file.
+ - The pacemaker cluster resource configuration optionally created by this role will be
+ saved in a Yaml file in the current working directory.
+ - Requires `sap_ha_pacemaker_cluster_create_config_varfile` to be enabled for generating
+ the output file.
- Specify a path/filename to save the file in a custom location.
- - The file can be used as input vars file for an Ansible playbook running the 'ha_cluster' Linux System Role.
+ - The file can be used as input vars file for an Ansible playbook running the 'ha_cluster'
+ Linux System Role.
sap_ha_pacemaker_cluster_create_config_varfile:
type: bool
default: false
description:
- - When enabled, all cluster configuration parameters this role constructs for executing the 'ha_cluster' Linux System role will be written into a file in Yaml format.
- - This allows using the output file later as input file for additional custom steps using the 'ha_cluster' role and covering the resource configuration in a cluster that was set up using this 'sap_ha_pacemaker_cluster' role.
- - When enabled this parameters file is also created when the playbook is run in check_mode (`--check`) and can be used to review the configuration parameters without executing actual changes on the target nodes.
- - WARNING! This report may include sensitive details like secrets required for certain cluster resources!
+ - When enabled, all cluster configuration parameters this role constructs for executing
+ the 'ha_cluster' Linux System role will be written into a file in Yaml format.
+ - This allows using the output file later as input file for additional custom steps using
+ the 'ha_cluster' role and covering the resource configuration in a cluster that was set
+ up using this 'sap_ha_pacemaker_cluster' role.
+ - When enabled, this parameters file is also created when the playbook is run in check_mode
+ (`--check`) and can be used to review the configuration parameters without executing
+ actual changes on the target nodes.
+ - WARNING! This report may include sensitive details like secrets required for certain
+ cluster resources!
sap_ha_pacemaker_cluster_cluster_nodes:
type: list
@@ -134,25 +143,6 @@ argument_specs:
# - The type of SAP HANA site replication across multiple hosts.
# - _Not yet supported_
- sap_ha_pacemaker_cluster_fence_options:
- type: dict
- default:
- pcmk_reboot_retries: 4
- pcmk_reboot_timeout: 400
- power_timeout: 240
- description:
- - STONITH resource common parameters that apply to most fencing agents.
- - These options are applied to fencing resources this role uses automatically for pre-defined platforms (like AWS EC2 VS, IBM Cloud VS).
- - The listed options are set by default.
- - Additional options can be added by defining this parameter in dictionary format and adding the defaults plus any valid stonith resource key-value pair.
-
- example:
- sap_ha_pacemaker_cluster_fence_options:
- pcmk_reboot_retries: 4
- pcmk_reboot_timeout: 400
- power_timeout: 240
-
-
sap_ha_pacemaker_cluster_vip_client_interface:
description:
- OS device name of the network interface to use for the Virtual IP configuration.
@@ -176,7 +166,8 @@ argument_specs:
options:
description:
- The resource options listed in dictionary format, one option per line.
- - Requires the mandatory options for the particular stonith resource agent to be defined, otherwise the setup will fail.
+ - Requires the mandatory options for the particular stonith resource agent to be
+ defined, otherwise the setup will fail.
required: true
example:
@@ -197,7 +188,8 @@ argument_specs:
stonith-timeout: 900
concurrent-fencing: true
description:
- - Standard pacemaker cluster properties are configured with recommended settings for cluster node fencing.
+ - Standard pacemaker cluster properties are configured with recommended settings for
+ cluster node fencing.
- When no STONITH resource is defined, STONITH will be disabled and a warning displayed.
example:
@@ -217,7 +209,8 @@ argument_specs:
- The `ha_cluster` LSR native parameter `ha_cluster` can be used as a synonym.
- Optional _**host_vars**_ parameter - if defined it must be set for each node.
- Dictionary that can contain various node options for the pacemaker cluster configuration.
- - Supported options can be reviewed in the `ha_cluster` Linux System Role [https://github.com/linux-system-roles/ha_cluster/blob/master/README.md].
+ - Supported options can be reviewed in the `ha_cluster` Linux System Role
+ [https://github.com/linux-system-roles/ha_cluster/blob/master/README.md].
- If not defined, the `ha_cluster` Linux System Role default will be used.
example:
@@ -238,7 +231,8 @@ argument_specs:
type: list
description:
- Additional extra packages to be installed, for instance specific resource packages.
- - For SAP clusters configured by this role, the relevant standard packages for the target scenario are automatically included.
+ - For SAP clusters configured by this role, the relevant standard packages for the target
+ scenario are automatically included.
sap_ha_pacemaker_cluster_fence_agent_packages:
type: list
@@ -284,8 +278,10 @@ argument_specs:
description:
- Parameter for the 'SAPHana' cluster resource.
- Time difference needed between two primary time stamps, if a dual-primary situation occurs.
- - If the time difference is less than the time gap, then the cluster holds one or both instances in a "WAITING" status.
- - This is to give an admin a chance to react on a failover. A failed former primary will be registered after the time difference is passed.
+ - If the time difference is less than the time gap, then the cluster holds one or both
+ instances in a "WAITING" status.
+ - This is to give an admin a chance to react to a failover. A failed former primary will
+ be registered after the time difference has passed.
sap_ha_pacemaker_cluster_hana_prefer_site_takeover:
type: bool
@@ -336,7 +332,8 @@ argument_specs:
sap_ha_pacemaker_cluster_vip_secondary_resource_name:
default: "vip_"
description:
- - Customize the name of the resource managing the Virtual IP of read-only access to the secondary HANA instance.
+ - Customize the name of the resource managing the Virtual IP of read-only access to the
+ secondary HANA instance.
##########################################################################
@@ -354,7 +351,8 @@ argument_specs:
type: bool
default: true
description:
- - Enables/Disables the SAP HA Interface for SAP ABAP application server instances, also known as `sap_cluster_connector`.
+ - Enables/Disables the SAP HA Interface for SAP ABAP application server instances, also
+ known as `sap_cluster_connector`.
- Set this parameter to 'false' if the SAP HA interface should not be installed and configured.
sap_ha_pacemaker_cluster_nwas_abap_sid:
@@ -391,9 +389,12 @@ argument_specs:
- Options relevant, see example.
- Mandatory for SAP NetWeaver HA cluster configurations.
- Reuse `sap_storage_setup_definition` if defined.
- - Reuse `sap_storage_setup_definition` will extract values 'mountpoint', 'nfs_filesystem_type', 'nfs_mount_options', 'nfs_path', 'nfs_server'.
- - Reuse `sap_storage_setup_definition` all options are documented under Ansible Role `sap_storage_setup`.
- - Note! For this variable, the argument specification does not list options, to avoid errors during reuse of `sap_storage_setup_definition` if defined.
+ - Reusing `sap_storage_setup_definition` will extract the values 'mountpoint',
+ 'nfs_filesystem_type', 'nfs_mount_options', 'nfs_path', 'nfs_server'.
+ - When reusing `sap_storage_setup_definition`, all options are documented under the Ansible Role
+ `sap_storage_setup`.
+ - Note! For this variable, the argument specification does not list options, to avoid
+ errors during reuse of `sap_storage_setup_definition` if defined.
elements: dict
example:
@@ -472,29 +473,42 @@ argument_specs:
default: Filesystem_NWAS_SAPMNT_
description:
- Filesystem resource name for the shared filesystem /sapmnt.
- - Optional, this is typically managed by the OS, but can as well be added to the cluster configuration.
+ - Optional, this is typically managed by the OS, but can also be added to the cluster
+ configuration.
- Enable this resource setup using `sap_ha_pacemaker_cluster_nwas_shared_filesystems_cluster_managed`.
sap_ha_pacemaker_cluster_nwas_transports_filesystem_resource_name:
default: Filesystem_NWAS_TRANS_
description:
- Filesystem resource name for the transports filesystem /usr/sap/trans.
- - Optional, this is typically managed by the OS, but can as well be added to the cluster configuration.
+ - Optional, this is typically managed by the OS, but can also be added to the cluster
+ configuration.
- Enable this resource setup using `sap_ha_pacemaker_cluster_nwas_shared_filesystems_cluster_managed`.
sap_ha_pacemaker_cluster_nwas_sys_filesystem_resource_name:
default: Filesystem_NWAS_SYS_
description:
- Filesystem resource name for the SYS filesystem /usr/sap//SYS.
- - Optional, this is typically managed by the OS, but can as well be added to the cluster configuration.
+ - Optional, this is typically managed by the OS, but can also be added to the cluster
+ configuration.
- Enable this resource setup using `sap_ha_pacemaker_cluster_nwas_shared_filesystems_cluster_managed`.
sap_ha_pacemaker_cluster_nwas_shared_filesystems_cluster_managed:
type: bool
default: false
description:
- - Change this parameter to 'true' if the 3 shared filesystems `/usr/sap/trans`, `/usr/sap//SYS` and '/sapmnt' shall be configured as cloned cluster resources.
+ - Change this parameter to 'true' if the 3 shared filesystems `/usr/sap/trans`,
+ `/usr/sap//SYS` and '/sapmnt' shall be configured as cloned cluster resources.
+ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_group_name:
+ default: _ASCS_group
+ description:
+ - Name of the NetWeaver ASCS resource group.
+
+ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_group_name:
+ default: _ERS_group
+ description:
+ - Name of the NetWeaver ERS resource group.
##########################################################################
# NetWeaver ASCS specific parameters
@@ -535,18 +549,21 @@ argument_specs:
default: 1
description:
- NetWeaver ASCS instance migration-threshold setting attribute.
- - Only used for ENSA1 setups (see `sap_ha_pacemaker_cluster_nwas_abap_ascs_ers_ensa1`). Default setup is ENSA2.
+ - Only used for ENSA1 setups (see `sap_ha_pacemaker_cluster_nwas_abap_ascs_ers_ensa1`).
+ Default setup is ENSA2.
sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_ensa1_failure_timeout:
default: 60
description:
- NetWeaver ASCS instance failure-timeout attribute.
- - Only used for ENSA1 setups (see `sap_ha_pacemaker_cluster_nwas_abap_ascs_ers_ensa1`). Default setup is ENSA2.
+ - Only used for ENSA1 setups (see `sap_ha_pacemaker_cluster_nwas_abap_ascs_ers_ensa1`).
+ Default setup is ENSA2.
sap_ha_pacemaker_cluster_nwas_abap_ascs_group_stickiness:
default: 3000
description:
- - NetWeaver ASCS resource group stickiness to prefer the ASCS group to stay on the node it was started on.
+ - NetWeaver ASCS resource group stickiness to prefer the ASCS group to stay on the node
+ it was started on.
##########################################################################
@@ -640,13 +657,16 @@ argument_specs:
sap_ha_pacemaker_cluster_ibmcloud_api_key:
description:
- - The API key which is required to allow the control of instances (for example for fencing operations).
- - Mandatory for the cluster setup on IBM Cloud Virtual Server instances or IBM Power Virtual Server on IBM Cloud.
+ - The API key which is required to allow the control of instances (for example for fencing
+ operations).
+ - Mandatory for the cluster setup on IBM Cloud Virtual Server instances or IBM Power
+ Virtual Server on IBM Cloud.
sap_ha_pacemaker_cluster_ibmcloud_region:
description:
- The IBM Cloud VS region name in which the instances are running.
- - Mandatory for the cluster setup on IBM Cloud Virtual Server instances or IBM Power Virtual Server on IBM Cloud.
+ - Mandatory for the cluster setup on IBM Cloud Virtual Server instances or IBM Power
+ Virtual Server on IBM Cloud.
##########################################################################
@@ -655,19 +675,23 @@ argument_specs:
sap_ha_pacemaker_cluster_ibmcloud_powervs_workspace_crn:
description:
- - IBM Power Virtual Server Workspace service cloud resource name (CRN) identifier which contains the target instances
+ - IBM Power Virtual Server Workspace service cloud resource name (CRN) identifier which
+ contains the target instances.
- Mandatory for the cluster setup on IBM Power Virtual Server from IBM Cloud.
sap_ha_pacemaker_cluster_ibmcloud_powervs_api_type:
description:
- - IBM Power Virtual Server API Endpoint type (public or private) dependent on network interface attachments for the target instances.
- - - Mandatory for the cluster setup on IBM Power Virtual Server from IBM Cloud.
+ - IBM Power Virtual Server API Endpoint type (public or private) dependent on network
+ interface attachments for the target instances.
+ - Mandatory for the cluster setup on IBM Power Virtual Server from IBM Cloud.
sap_ha_pacemaker_cluster_ibmcloud_powervs_forward_proxy_url:
description:
- - IBM Power Virtual Server forward proxy url when IBM Power Virtual Server API Endpoint type is set to private.
+ - IBM Power Virtual Server forward proxy url when IBM Power Virtual Server API Endpoint
+ type is set to private.
- When public network interface, can be ignored.
- - When private network interface, mandatory for the cluster setup on IBM Power Virtual Server from IBM Cloud.
+ - When private network interface, mandatory for the cluster setup on IBM Power Virtual
+ Server from IBM Cloud.
##########################################################################
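As described for `sap_ha_pacemaker_cluster_create_config_varfile` above, the generated parameters file can be reviewed in check mode and later fed back into the 'ha_cluster' Linux System Role. A hedged sketch of that workflow; the playbook file name and the inventory group are assumptions:

# 1. Dry run that only renders the configuration into review_resource_config.yml:
#    ansible-playbook --check -e "sap_ha_pacemaker_cluster_create_config_varfile=true" sap_ha_cluster.yml
#
# 2. Reuse the generated parameters with the 'ha_cluster' role directly:
- name: Example - Re-apply the reviewed cluster configuration
  hosts: hana_cluster_nodes        # assumed inventory group
  become: true
  vars_files:
    - review_resource_config.yml   # default output file name of this role
  roles:
    - fedora.linux_system_roles.ha_cluster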
diff --git a/roles/sap_ha_pacemaker_cluster/meta/collection-requirements.yml b/roles/sap_ha_pacemaker_cluster/meta/collection-requirements.yml
new file mode 100644
index 000000000..0a4e50f46
--- /dev/null
+++ b/roles/sap_ha_pacemaker_cluster/meta/collection-requirements.yml
@@ -0,0 +1,4 @@
+---
+
+collections:
+ - name: fedora.linux_system_roles
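The new requirements file can be consumed directly by ansible-galaxy to pull in the collection that provides the 'ha_cluster' role, for example:

# Install the collection dependency declared above:
#   ansible-galaxy collection install -r roles/sap_ha_pacemaker_cluster/meta/collection-requirements.yml
# Verify it is available:
#   ansible-galaxy collection list fedora.linux_system_roles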
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/configure_nwas_ascs_ers_postinstallation.yml b/roles/sap_ha_pacemaker_cluster/tasks/configure_nwas_ascs_ers_postinstallation.yml
index 08681b6f0..a65fc28fb 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/configure_nwas_ascs_ers_postinstallation.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/configure_nwas_ascs_ers_postinstallation.yml
@@ -108,7 +108,7 @@
- sap_ha_pacemaker_cluster_enable_cluster_connector
block:
- - name: "SAP HA Pacemaker - (SAP HA Interface) Add {{sap_ha_pacemaker_cluster_nwas_abap_sid | lower }}adm user to 'haclient' group"
+ - name: "SAP HA Pacemaker - (SAP HA Interface) Add {{ sap_ha_pacemaker_cluster_nwas_abap_sid | lower }}adm user to 'haclient' group"
ansible.builtin.user:
name: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid | lower }}adm"
groups: haclient
@@ -129,6 +129,7 @@
label: "{{ nwas_profile_item.0 }} -> {{ nwas_profile_item.1 }}"
- name: "SAP HA Pacemaker - (SAP HA Interface) Wait for ASCS to be up and running"
+ become: true
become_user: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid | lower }}adm"
register: __sap_ha_pacemaker_cluster_register_where_ascs
ansible.builtin.shell: |
@@ -137,6 +138,7 @@
failed_when: false
- name: "SAP HA Pacemaker - (SAP HA Interface) Wait for ERS to be up and running"
+ become: true
become_user: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid | lower }}adm"
register: __sap_ha_pacemaker_cluster_register_where_ers
ansible.builtin.shell: |
@@ -147,6 +149,7 @@
- name: "SAP HA Pacemaker - (SAP HA Interface) Restart the ASCS service"
when:
- __sap_ha_pacemaker_cluster_register_where_ascs.rc == 0
+ become: true
become_user: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid | lower }}adm"
register: __sap_ha_pacemaker_cluster_register_restart_ascs
ansible.builtin.shell: |
@@ -156,6 +159,7 @@
- name: "SAP HA Pacemaker - (SAP HA Interface) Restart the ERS service"
when:
- __sap_ha_pacemaker_cluster_register_where_ers.rc == 0
+ become: true
become_user: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid | lower }}adm"
register: __sap_ha_pacemaker_cluster_register_restart_ers
ansible.builtin.shell: |
@@ -172,6 +176,7 @@
- name: "SAP HA Pacemaker - (SAP HA Interface) Run HA check for ASCS"
when:
- __sap_ha_pacemaker_cluster_register_where_ascs.rc == 0
+ become: true
become_user: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid | lower }}adm"
register: __sap_ha_pacemaker_cluster_register_ascs_ha
ansible.builtin.shell: |
@@ -181,6 +186,7 @@
- name: "SAP HA Pacemaker - (SAP HA Interface) Run HA check for ERS"
when:
- __sap_ha_pacemaker_cluster_register_where_ers.rc == 0
+ become: true
become_user: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid | lower }}adm"
register: __sap_ha_pacemaker_cluster_register_ers_ha
ansible.builtin.shell: |
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/construct_final_hacluster_vars.yml b/roles/sap_ha_pacemaker_cluster/tasks/construct_final_hacluster_vars.yml
index a919561a4..bfb24a925 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/construct_final_hacluster_vars.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/construct_final_hacluster_vars.yml
@@ -26,6 +26,7 @@
# __sap_ha_pacemaker_cluster_resource_clones ha_cluster_resource_clones
# __sap_ha_pacemaker_cluster_resource_groups ha_cluster_resource_groups
# __sap_ha_pacemaker_cluster_resource_primitives ha_cluster_resource_primitives
+# __sap_ha_pacemaker_cluster_corosync_totem ha_cluster_totem
- name: "SAP HA Prepare Pacemaker - (ha_cluster) Define parameter 'ha_cluster'"
when: __sap_ha_pacemaker_cluster_ha_cluster is defined
@@ -93,3 +94,8 @@
ansible.builtin.set_fact:
ha_cluster_resource_primitives: "{{ __sap_ha_pacemaker_cluster_resource_primitives }}"
no_log: true # be paranoid, there could be credentials in it
+
+- name: "SAP HA Prepare Pacemaker - (ha_cluster) Define parameter 'ha_cluster_totem'"
+ when: __sap_ha_pacemaker_cluster_corosync_totem is defined
+ ansible.builtin.set_fact:
+ ha_cluster_totem: "{{ __sap_ha_pacemaker_cluster_corosync_totem }}"
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_common.yml b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_common.yml
index bc6e17140..0361368ec 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_common.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_common.yml
@@ -28,11 +28,11 @@
- name: "SAP HA Prepare Pacemaker - Combine extra packages lists"
ansible.builtin.set_fact:
- __sap_ha_pacemaker_cluster_extra_packages: "{{ (
- sap_ha_pacemaker_cluster_extra_packages
+ __sap_ha_pacemaker_cluster_extra_packages: "{{
+ (sap_ha_pacemaker_cluster_extra_packages
+ __sap_ha_pacemaker_cluster_sap_extra_packages
- + __sap_ha_pacemaker_cluster_platform_extra_packages
- ) | unique | select() }}"
+ + __sap_ha_pacemaker_cluster_platform_extra_packages)
+ | unique | select() }}"
# remove duplicates and empty elements
# sap_ha_pacemaker_cluster_fence_agent_minimal_packages -> global default
@@ -41,8 +41,28 @@
- name: "SAP HA Prepare Pacemaker - Combine fence agent packages lists"
ansible.builtin.set_fact:
- __sap_ha_pacemaker_cluster_fence_agent_packages: "{{ (
- sap_ha_pacemaker_cluster_fence_agent_minimal_packages
+ __sap_ha_pacemaker_cluster_fence_agent_packages: "{{
+ (sap_ha_pacemaker_cluster_fence_agent_minimal_packages
+ sap_ha_pacemaker_cluster_fence_agent_packages
- + __sap_ha_pacemaker_cluster_fence_agent_packages
- ) | unique }}"
+ + __sap_ha_pacemaker_cluster_fence_agent_packages)
+ | unique }}"
+
+- name: "SAP HA Prepare Pacemaker - Add default corosync totem settings"
+ when:
+ - sap_ha_pacemaker_cluster_corosync_totem is defined
+ - sap_ha_pacemaker_cluster_corosync_totem.options is defined
+ - sap_ha_pacemaker_cluster_corosync_totem.options | length > 0
+ ansible.builtin.set_fact:
+ __sap_ha_pacemaker_cluster_corosync_totem:
+ options: "{{ __sap_ha_pacemaker_cluster_corosync_totem.options | default([]) + __totem_settings }}"
+ vars:
+ __totem_settings: |-
+ {% set new_opts = [] %}
+ {% for option in sap_ha_pacemaker_cluster_corosync_totem.options | dict2items -%}
+ {%- set add_opts = new_opts.extend([
+ {
+ 'name': option.key,
+ 'value': option.value
+ }]) -%}
+ {%- endfor %}
+ {{ new_opts }}
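The task above converts the user-facing dictionary format into the list-of-name/value pairs that is later handed over as 'ha_cluster_totem' in construct_final_hacluster_vars.yml (see above). A sketch of the transformation; the option values are made up:

# Hypothetical user input:
sap_ha_pacemaker_cluster_corosync_totem:
  options:
    token: 30000
    token_retransmits_before_loss_const: 10

# Resulting construction, as built by the dict2items loop above:
__sap_ha_pacemaker_cluster_corosync_totem:
  options:
    - name: token
      value: 30000
    - name: token_retransmits_before_loss_const
      value: 10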
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_haproxy_constraints_hana.yml b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_haproxy_constraints_hana.yml
new file mode 100644
index 000000000..a1fb86bf1
--- /dev/null
+++ b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_haproxy_constraints_hana.yml
@@ -0,0 +1,53 @@
+---
+# Reminder: This file is included in a loop over a dictionary.
+
+# Start haproxy only after the HANA resource has been promoted
+- name: "SAP HA Prepare Pacemaker - Add order constraint: 'haproxy' starts after DB is promoted"
+ ansible.builtin.set_fact:
+ __sap_ha_pacemaker_cluster_constraints_order: "{{ __sap_ha_pacemaker_cluster_constraints_order + [__constraint_order_haproxy] }}"
+ vars:
+ __constraint_order_haproxy:
+ resource_first:
+ id: "{{ sap_ha_pacemaker_cluster_hana_resource_clone_name }}"
+ action: promote
+ resource_then:
+ id: "{{ vip_list_item.key }}"
+ action: start
+ when:
+ - __constraint_order_haproxy.resource_then not in (__sap_ha_pacemaker_cluster_constraints_order | map(attribute='resource_then'))
+
+# The primary haproxy only runs where HANA is promoted
+- name: "SAP HA Prepare Pacemaker - Add colocation constraint: Primary 'haproxy' runs where HANA is promoted"
+ ansible.builtin.set_fact:
+ __sap_ha_pacemaker_cluster_constraints_colocation: "{{ __sap_ha_pacemaker_cluster_constraints_colocation + [__constraint_colo_haproxy] }}"
+ vars:
+ __constraint_colo_haproxy:
+ resource_leader:
+ id: "{{ sap_ha_pacemaker_cluster_hana_resource_clone_name }}"
+ role: promoted
+ resource_follower:
+ id: "{{ sap_ha_pacemaker_cluster_vip_hana_primary_resource_name }}"
+ options:
+ - name: score
+ value: "{{ sap_ha_pacemaker_cluster_constraint_colo_base_score }}"
+ when:
+ - __constraint_colo_haproxy.resource_follower not in (__sap_ha_pacemaker_cluster_constraints_colocation | map(attribute='resource_follower'))
+
+# The secondary haproxy only runs where HANA is UNpromoted
+- name: "SAP HA Prepare Pacemaker - Add colocation constraint: Secondary 'haproxy' runs where HANA is not promoted"
+ ansible.builtin.set_fact:
+ __sap_ha_pacemaker_cluster_constraints_colocation: "{{ __sap_ha_pacemaker_cluster_constraints_colocation + [__constraint_colo_haproxy] }}"
+ vars:
+ __constraint_colo_haproxy:
+ resource_leader:
+ id: "{{ sap_ha_pacemaker_cluster_hana_resource_clone_name }}"
+ role: unpromoted
+ resource_follower:
+ id: "{{ sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name }}"
+ options:
+ - name: score
+ value: "{{ sap_ha_pacemaker_cluster_constraint_colo_base_score }}"
+ when:
+ - __constraint_colo_haproxy.resource_follower not in (__sap_ha_pacemaker_cluster_constraints_colocation | map(attribute='resource_follower'))
+ - sap_ha_pacemaker_cluster_vip_hana_secondary_ip_address is defined
+ - sap_ha_pacemaker_cluster_vip_hana_secondary_ip_address != ''
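For reference, a sketch of the colocation entry the second task above appends, assuming the default HANA resource names with the illustrative SID 'S01', instance number '00' and the default base score of 2000:

__constraint_colo_haproxy:
  resource_leader:
    id: SAPHana_S01_00-clone       # sap_ha_pacemaker_cluster_hana_resource_clone_name
    role: promoted
  resource_follower:
    id: vip_S01_00_primary         # sap_ha_pacemaker_cluster_vip_hana_primary_resource_name
  options:
    - name: score
      value: "2000"                # sap_ha_pacemaker_cluster_constraint_colo_base_score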
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_nwas_abap_ascs_ers.yml b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_nwas_abap_ascs_ers.yml
index 5d6b32cc9..aa76295a0 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_nwas_abap_ascs_ers.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_nwas_abap_ascs_ers.yml
@@ -249,17 +249,29 @@
# - ASCS VIP
# The order of the resources in the group define the order in which they are
# started - resources are stopped in reverse order.
+#
+# Only resources that were defined as resources to be configured will be
+# added to the group.
- name: "SAP HA Prepare Pacemaker - Add resource group for ASCS resources"
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_resource_groups: "{{ __sap_ha_pacemaker_cluster_resource_groups + [__ascs_group] }}"
vars:
__ascs_group:
- id: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_ASCS{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr }}_group"
- resource_ids:
- - "{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_filesystem_resource_name }}"
- - "{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_resource_name }}"
- - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_name }}"
+ id: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_group_name }}"
+ resource_ids: |
+ {% set resource_ids_list = [] %}
+ {%- for resource in
+ sap_ha_pacemaker_cluster_nwas_abap_ascs_filesystem_resource_name,
+ sap_ha_pacemaker_cluster_nwas_abap_ascs_sapinstance_resource_name,
+ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_name,
+ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_resource_name %}
+ {%- if resource | length > 0
+ and resource in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id')) %}
+ {%- set ids = resource_ids_list.append(resource) %}
+ {%- endif %}
+ {%- endfor %}
+ {{ resource_ids_list }}
meta_attrs:
- attrs:
- name: resource-stickiness
@@ -267,26 +279,45 @@
when:
- __ascs_group.id is not in (__sap_ha_pacemaker_cluster_resource_groups | map(attribute='id'))
+
# ERS group consists of resources for
# - ERS filesystem
# - ERS instance
# - ERS VIP
# The order of the resources in the group define the order in which they are
# started - resources are stopped in reverse order.
+#
+# Only resources that were defined as resources to be configured will be
+# added to the group.
- name: "SAP HA Prepare Pacemaker - Add resource group for ERS resources"
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_resource_groups: "{{ __sap_ha_pacemaker_cluster_resource_groups + [__ers_group] }}"
vars:
__ers_group:
- id: "{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}_ERS{{ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr }}_group"
- resource_ids:
- - "{{ sap_ha_pacemaker_cluster_nwas_abap_ers_filesystem_resource_name }}"
- - "{{ sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_resource_name }}"
- - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_name }}"
+ id: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_group_name }}"
+ resource_ids: |
+ {% set resource_ids_list = [] %}
+ {%- for resource in
+ sap_ha_pacemaker_cluster_nwas_abap_ers_filesystem_resource_name,
+ sap_ha_pacemaker_cluster_nwas_abap_ers_sapinstance_resource_name,
+ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_name,
+ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_resource_name %}
+ {%- if resource | length > 0
+ and resource in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id')) %}
+ {%- set ids = resource_ids_list.append(resource) %}
+ {%- endif %}
+ {%- endfor %}
+ {{ resource_ids_list }}
when:
- __ers_group.id is not in (__sap_ha_pacemaker_cluster_resource_groups | map(attribute='id'))
+- name: "SAP HA Prepare Pacemaker - Display VIP resource group definition if any were built"
+ ansible.builtin.debug:
+ var: __sap_ha_pacemaker_cluster_resource_groups
+ when:
+ - __sap_ha_pacemaker_cluster_resource_groups is defined
+ - __sap_ha_pacemaker_cluster_resource_groups | length > 0
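The Jinja loop above only adds group members that are actually present in the constructed primitives, so optional resources such as the healthcheck primitive are silently skipped when they were not built. A hedged sketch of the effect for the ASCS group, using the default names with the illustrative SID 'S01' and ASCS instance '00' and assuming no ASCS healthcheck primitive was constructed:

__sap_ha_pacemaker_cluster_resource_groups:
  - id: S01_ASCS00_group            # sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_group_name
    resource_ids:
      - Filesystem_NWAS_ABAP_ASCS_S01_00
      - SAPInstance_NWAS_ABAP_ASCS_S01_00
      - vip_S01_00_ascs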
#################################################
# Constraints
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_stonith.yml b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_stonith.yml
index 43da8bf1b..8e5cf4002 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_stonith.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_stonith.yml
@@ -37,7 +37,7 @@
- name: "SAP HA Prepare Pacemaker - (STONITH) Set to disabled when no fencing resource is defined"
ansible.builtin.set_fact:
sap_ha_pacemaker_cluster_cluster_properties:
- "{{ sap_ha_pacemaker_cluster_cluster_properties | combine({ 'stonith-enabled': false }) }}"
+ "{{ sap_ha_pacemaker_cluster_cluster_properties | combine({'stonith-enabled': false}) }}"
- name: "SAP HA Prepare Pacemaker - Warn that there is no STONITH configured"
ansible.builtin.pause:
@@ -85,43 +85,64 @@
# - generic pacemaker fence resource options
# (see defaults: sap_ha_pacemaker_cluster_fence_options)
+# Note: the 'ha_cluster' LSR only calls the stonith creation for ONE host
+# -> the definition must contain the resources for all hosts, if multiple
+
- name: "SAP HA Prepare Pacemaker - (STONITH) Assemble the resource definition from platform default"
when:
- sap_ha_pacemaker_cluster_stonith_default is defined
+ - sap_ha_pacemaker_cluster_stonith_default | length > 0
- sap_ha_pacemaker_cluster_stonith_custom is not defined
+ - __stonith_resource_element.id not in (__sap_ha_pacemaker_cluster_stonith_resource | default([]) | map(attribute='id'))
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_stonith_resource: "{{ __sap_ha_pacemaker_cluster_stonith_resource | default([]) + [__stonith_resource_element] }}"
vars:
__stonith_resource_element:
- id: "{{ sap_ha_pacemaker_cluster_stonith_default.id }}"
+ id: "{{ sap_ha_pacemaker_cluster_stonith_default.id + __plug_suffix }}"
agent: "{{ sap_ha_pacemaker_cluster_stonith_default.agent }}"
instance_attrs:
- - attrs: |-
+ - attrs: >-
{% set attrs = [] -%}
- {% set map = attrs.extend([
- {
- 'name': 'pcmk_host_map',
- 'value': __sap_ha_pacemaker_cluster_pcmk_host_map
- }]) -%}
- {%- for agent_opt in (sap_ha_pacemaker_cluster_stonith_default.options | default({}) | dict2items) -%}
- {% set aopts = attrs.extend([
+ {% if __sap_ha_pacemaker_cluster_pcmk_host_map | length > 0 -%}
+ {% set map = attrs.extend([
{
- 'name': agent_opt.key,
- 'value': agent_opt.value
+ 'name': 'pcmk_host_map',
+ 'value': __sap_ha_pacemaker_cluster_pcmk_host_map
}]) -%}
- {%- endfor %}
- {%- for fence_opt in (sap_ha_pacemaker_cluster_fence_options | dict2items) -%}
- {% set fopts = attrs.extend([
+ {%- else -%}
+ {% set map = attrs.extend([
{
- 'name': fence_opt.key,
- 'value': fence_opt.value
+ 'name': 'plug',
+ 'value': stonith_host_item
}]) -%}
+ {%- endif %}
+ {%- if sap_ha_pacemaker_cluster_stonith_default.options is defined
+ and sap_ha_pacemaker_cluster_stonith_default.options | length > 0 -%}
+ {%- for agent_opt in (sap_ha_pacemaker_cluster_stonith_default.options | default({}) | dict2items) -%}
+ {% set aopts = attrs.extend([
+ {
+ 'name': agent_opt.key,
+ 'value': agent_opt.value
+ }]) -%}
{%- endfor %}
+ {%- endif -%}
{{ attrs }}
+ __plug_suffix: >-
+ {%- if __sap_ha_pacemaker_cluster_pcmk_host_map | length == 0 -%}
+ _{{ stonith_host_item }}
+ {%- else %}{% endif -%}
+
+ loop: "{{ ansible_play_hosts_all }}"
+ loop_control:
+ loop_var: stonith_host_item
+ label: "{{ stonith_host_item }}"
+
+
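When no pcmk_host_map is available, the task above now creates one fencing resource per play host, appends the host name as an ID suffix and sets the 'plug' attribute instead. A hedged sketch of the outcome for two assumed hosts 'node1' and 'node2'; the resource ID and agent name are illustrative, not an actual platform default:

__sap_ha_pacemaker_cluster_stonith_resource:
  - id: res_fence_example_node1        # default id + "_node1" suffix
    agent: stonith:fence_example       # illustrative agent name
    instance_attrs:
      - attrs:
          - name: plug
            value: node1
  - id: res_fence_example_node2
    agent: stonith:fence_example
    instance_attrs:
      - attrs:
          - name: plug
            value: node2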
- name: "SAP HA Prepare Pacemaker - (STONITH) Assemble the resources from custom definition"
when:
- sap_ha_pacemaker_cluster_stonith_custom is defined
+ - __stonith_resource_element.id not in (__sap_ha_pacemaker_cluster_stonith_resource | default([]) | map(attribute='id'))
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_stonith_resource: "{{ __sap_ha_pacemaker_cluster_stonith_resource | default([]) + [__stonith_resource_element] }}"
vars:
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_vip_constraints_hana.yml b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_vip_constraints_hana.yml
index 8049ef7ec..6896055ae 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_vip_constraints_hana.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_vip_constraints_hana.yml
@@ -1,7 +1,6 @@
---
-# Reminder: This file is included in a loop over a dictionary.
+# Included in: tasks/main.yml
-# Start the VIP(s) only after the HANA resource has been promoted
- name: "SAP HA Prepare Pacemaker - Add order constraint: Primary VIP starts after DB is promoted"
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_constraints_order: "{{ __sap_ha_pacemaker_cluster_constraints_order + [__constraint_order_vip] }}"
@@ -11,10 +10,69 @@
id: "{{ sap_ha_pacemaker_cluster_hana_resource_clone_name }}"
action: promote
resource_then:
- id: "{{ vip_list_item.key }}"
+ id: "{{ __res_or_grp }}"
action: start
+
+ # Check if there is
+ # - a group for the hana_primary VIP/HC resources
+ # - otherwise check if the VIP resource for hana_primary is defined
+ # - otherwise check for a HC resource for hana_primary
+ # and use the found resource in the constraint.
+ __res_or_grp: |-
+ {% if sap_ha_pacemaker_cluster_vip_group_prefix | length > 0 and
+ __sap_ha_pacemaker_cluster_resource_groups | map(attribute='id')
+ | select('match', sap_ha_pacemaker_cluster_vip_group_prefix + sap_ha_pacemaker_cluster_vip_hana_primary_resource_name) -%}
+ {{ sap_ha_pacemaker_cluster_vip_group_prefix }}{{ sap_ha_pacemaker_cluster_vip_hana_primary_resource_name }}
+ {%- elif __sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id')
+ | select('match', sap_ha_pacemaker_cluster_vip_hana_primary_resource_name) -%}
+ {{ sap_ha_pacemaker_cluster_vip_hana_primary_resource_name }}
+ {%- elif __sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id')
+ | select('match', sap_ha_pacemaker_cluster_healthcheck_hana_primary_resource_name) -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_resource_name }}
+ {%- else -%}
+ none_found
+ {%- endif -%}
+
+ when:
+ - __constraint_order_vip.resource_then not in (__sap_ha_pacemaker_cluster_constraints_order | map(attribute='resource_then'))
+ - __res_or_grp != 'none_found' # fallback skip if there was neither a group nor any VIP/HC resources found
+
+- name: "SAP HA Prepare Pacemaker - Add order constraint: Read-only VIP starts after DB on the secondary"
+ ansible.builtin.set_fact:
+ __sap_ha_pacemaker_cluster_constraints_order: "{{ __sap_ha_pacemaker_cluster_constraints_order + [__constraint_order_vip] }}"
+ vars:
+ __constraint_order_vip:
+ resource_first:
+ id: "{{ sap_ha_pacemaker_cluster_hana_resource_clone_name }}"
+ action: start
+ resource_then:
+ id: "{{ __res_or_grp }}"
+ action: start
+
+ # Check if there is
+ # - a group for the hana_secondary VIP/HC resources
+ # - otherwise check if the VIP resource for hana_secondary is defined
+ # - otherwise check for a HC resource for hana_secondary
+ # and use the found resource in the constraint.
+ __res_or_grp: |-
+ {% if sap_ha_pacemaker_cluster_vip_group_prefix | length > 0 and
+ __sap_ha_pacemaker_cluster_resource_groups | map(attribute='id')
+ | select('match', sap_ha_pacemaker_cluster_vip_group_prefix + sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name) -%}
+ {{ sap_ha_pacemaker_cluster_vip_group_prefix }}{{ sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name }}
+ {%- elif __sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id')
+ | select('match', sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name) -%}
+ {{ sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name }}
+ {%- elif __sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id')
+ | select('match', sap_ha_pacemaker_cluster_healthcheck_hana_secondary_resource_name) -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_hana_secondary_resource_name }}
+ {%- else -%}
+ none_found
+ {%- endif -%}
+
when:
- __constraint_order_vip.resource_then not in (__sap_ha_pacemaker_cluster_constraints_order | map(attribute='resource_then'))
+ - __res_or_grp != 'none_found' # fallback skip if there was neither a group nor any VIP/HC resources found
+
# The primary VIP only runs where HANA is promoted
- name: "SAP HA Prepare Pacemaker - Add colocation constraint: Primary VIP runs where HANA is promoted"
@@ -26,12 +84,49 @@
id: "{{ sap_ha_pacemaker_cluster_hana_resource_clone_name }}"
role: promoted
resource_follower:
- id: "{{ sap_ha_pacemaker_cluster_vip_hana_primary_resource_name }}"
+ id: "{{ __res_or_grp }}"
options:
- name: score
- value: 2000
+ value: "{{ __colo_score }}"
+
+ ## Group or single resource?
+ # Check if there is
+ # - a group for the hana_primary VIP/HC resources
+ # - otherwise check if the VIP resource for hana_primary is defined
+ # - otherwise check for a HC resource for hana_primary
+ # and use the found resource in the constraint.
+ __res_or_grp: |-
+ {% if sap_ha_pacemaker_cluster_vip_group_prefix | length > 0 and
+ __sap_ha_pacemaker_cluster_resource_groups | map(attribute='id')
+ | select('match', sap_ha_pacemaker_cluster_vip_group_prefix + sap_ha_pacemaker_cluster_vip_hana_primary_resource_name) -%}
+ {{ sap_ha_pacemaker_cluster_vip_group_prefix }}{{ sap_ha_pacemaker_cluster_vip_hana_primary_resource_name }}
+ {%- elif __sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id')
+ | select('match', sap_ha_pacemaker_cluster_vip_hana_primary_resource_name) -%}
+ {{ sap_ha_pacemaker_cluster_vip_hana_primary_resource_name }}
+ {%- elif __sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id')
+ | select('match', sap_ha_pacemaker_cluster_healthcheck_hana_primary_resource_name) -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_resource_name }}
+ {%- else -%}
+ none_found
+ {%- endif -%}
+
+ ## When in a group, increase the default base score by adding 1000 per resource in the group.
+ __colo_score: >-
+ {% if __sap_ha_pacemaker_cluster_resource_groups | length > 0 -%}
+ {% for group in __sap_ha_pacemaker_cluster_resource_groups -%}
+ {% if group.id == (sap_ha_pacemaker_cluster_vip_group_prefix
+ + sap_ha_pacemaker_cluster_vip_hana_primary_resource_name) -%}
+ {{ (group.resource_ids | length * 1000) + sap_ha_pacemaker_cluster_constraint_colo_base_score }}
+ {%- endif %}
+ {%- endfor %}
+ {%- else -%}
+ {{ sap_ha_pacemaker_cluster_constraint_colo_base_score }}
+ {%- endif %}
+
when:
- __constraint_colo_vip.resource_follower not in (__sap_ha_pacemaker_cluster_constraints_colocation | map(attribute='resource_follower'))
+ - __res_or_grp != 'none_found' # fallback skip if there was neither a group nor any VIP/HC resources found
+
# The secondary VIP only runs where HANA is UNpromoted
- name: "SAP HA Prepare Pacemaker - Add colocation constraint: Read-only VIP runs where HANA is not promoted"
@@ -43,11 +138,46 @@
id: "{{ sap_ha_pacemaker_cluster_hana_resource_clone_name }}"
role: unpromoted
resource_follower:
- id: "{{ sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name }}"
+ id: "{{ __res_or_grp }}"
options:
- name: score
- value: 2000
+ value: "{{ __colo_score }}"
+
+ # Check if there is
+ # - a group for the hana_secondary VIP/HC resources
+ # - otherwise check if the VIP resource for hana_secondary is defined
+ # - otherwise check for a HC resource for hana_secondary
+ # and use the found resource in the constraint.
+ __res_or_grp: |-
+ {% if sap_ha_pacemaker_cluster_vip_group_prefix | length > 0 and
+ __sap_ha_pacemaker_cluster_resource_groups | map(attribute='id')
+ | select('match', sap_ha_pacemaker_cluster_vip_group_prefix + sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name) -%}
+ {{ sap_ha_pacemaker_cluster_vip_group_prefix }}{{ sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name }}
+ {%- elif __sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id')
+ | select('match', sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name) -%}
+ {{ sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name }}
+ {%- elif __sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id')
+ | select('match', sap_ha_pacemaker_cluster_healthcheck_hana_secondary_resource_name) -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_hana_secondary_resource_name }}
+ {%- else -%}
+ none_found
+ {%- endif -%}
+
+ ## When in a group, increase the default base score by adding 1000 per resource in the group.
+ __colo_score: >-
+ {% if __sap_ha_pacemaker_cluster_resource_groups | length > 0 -%}
+ {% for group in __sap_ha_pacemaker_cluster_resource_groups -%}
+ {% if group.id == (sap_ha_pacemaker_cluster_vip_group_prefix
+ + sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name) -%}
+ {{ (group.resource_ids | length * 1000) + sap_ha_pacemaker_cluster_constraint_colo_base_score }}
+ {%- endif %}
+ {%- endfor %}
+ {%- else -%}
+ {{ sap_ha_pacemaker_cluster_constraint_colo_base_score }}
+ {%- endif %}
+
when:
- __constraint_colo_vip.resource_follower not in (__sap_ha_pacemaker_cluster_constraints_colocation | map(attribute='resource_follower'))
- sap_ha_pacemaker_cluster_vip_hana_secondary_ip_address is defined
- sap_ha_pacemaker_cluster_vip_hana_secondary_ip_address != ''
+ - __res_or_grp != 'none_found' # fallback skip if there was neither a group nor any VIP/HC resources found
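To make the score comment above concrete: with the default base score of 2000 and a VIP group containing two resources (VIP plus healthcheck), the colocation score becomes (2 * 1000) + 2000 = 4000; without a group, only the base score applies. A small sketch of the resulting option list in both cases:

# VIP/healthcheck group with 2 members:
options:
  - name: score
    value: "4000"    # (2 resources * 1000) + 2000 base score

# No group, single VIP resource:
#   options:
#     - name: score
#       value: "2000"   # sap_ha_pacemaker_cluster_constraint_colo_base_score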
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_vip_groups.yml b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_vip_groups.yml
new file mode 100644
index 000000000..18c634f19
--- /dev/null
+++ b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_vip_groups.yml
@@ -0,0 +1,54 @@
+---
+# Currently this is only used for HANA scenarios.
+- name: "SAP HA Prepare Pacemaker - Group the related VIP and healthcheck resources together"
+ ansible.builtin.set_fact:
+ __sap_ha_pacemaker_cluster_resource_groups: "{{ __sap_ha_pacemaker_cluster_resource_groups + [__vip_group] }}"
+ vars:
+ __vip_group:
+ id: "{{ sap_ha_pacemaker_cluster_vip_group_prefix }}{{ group_item.name }}"
+ resource_ids: "{{ group_item.vip_hc_resources }}"
+
+ __instance:
+ - name: "{{ sap_ha_pacemaker_cluster_vip_hana_primary_resource_name }}"
+ vip_hc_resources:
+ - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_resource_name }}"
+ - name: "{{ sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name }}"
+ vip_hc_resources:
+ - "{{ sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_healthcheck_hana_secondary_resource_name }}"
+ - name: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_name }}"
+ vip_hc_resources:
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_resource_name }}"
+ - name: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_name }}"
+ vip_hc_resources:
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_resource_name }}"
+ - name: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_pas_resource_name }}"
+ vip_hc_resources:
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_pas_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_resource_name }}"
+ - name: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_resource_name }}"
+ vip_hc_resources:
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_resource_name }}"
+ loop: "{{ __instance }}"
+ loop_control:
+ loop_var: group_item
+ label: "{{ sap_ha_pacemaker_cluster_vip_group_prefix }}{{ group_item.name }}"
+ when:
+ - group_item.vip_hc_resources | difference(__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id')) | length == 0
+ - sap_ha_pacemaker_cluster_vip_group_prefix | length > 0
+ - __vip_group.id not in __sap_ha_pacemaker_cluster_resource_groups | map(attribute='id')
+# Only create the group when
+# - all resources in the 'vip_hc_resources' sub-list are part of the cluster resource definition
+# - a group name prefix has been set (default = '')
+# - the group resource does not exist yet in the definition (by group name)
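+#
+# Illustrative example of a resulting entry (assumption: a group prefix of 'group_'
+# has been set, which is not the default; names follow the examples used in this role):
+#
+# __sap_ha_pacemaker_cluster_resource_groups:
+#   - id: group_vip_HAN_10_primary
+#     resource_ids:
+#       - vip_HAN_10_primary
+#       - hc_vip_HAN_10_primary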
+
+- name: "SAP HA Prepare Pacemaker - Display VIP resource group definition if any were built"
+ ansible.builtin.debug:
+ var: __sap_ha_pacemaker_cluster_resource_groups
+ when:
+ - __sap_ha_pacemaker_cluster_resource_groups is defined
+ - __sap_ha_pacemaker_cluster_resource_groups | length > 0
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_vip_resources_default.yml b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_vip_resources_default.yml
index fda53ca9b..b274fbda9 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_vip_resources_default.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/construct_vars_vip_resources_default.yml
@@ -1,14 +1,20 @@
---
# Reminder: This file is included in a loop over a dictionary.
+# Included in: tasks/include_construct_vip_resources.yml
+#
+# file loop var: vip_list_item
+#
+# Example:
+# {{ vip_list_item.key }} => vip_SID_00_primary
+# {{ vip_list_item.value }} => 192.168.1.10
-# VIP resource definition itself
-- name: "SAP HA Prepare Pacemaker - Add resource: VIP {{ vip_list_item.key }} ({{ sap_ha_pacemaker_cluster_vip_resource_agent }})"
+- name: "SAP HA Prepare Pacemaker - Add resource: VIP {{ vip_list_item.key }} (IPaddr2)"
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_resource_primitives: "{{ __sap_ha_pacemaker_cluster_resource_primitives + [__resource_vip] }}"
vars:
__resource_vip:
id: "{{ vip_list_item.key }}"
- agent: "{{ sap_ha_pacemaker_cluster_vip_resource_agent }}"
+ agent: "{{ __sap_ha_pacemaker_cluster_available_vip_agents['ipaddr'].agent }}"
instance_attrs:
- attrs:
- name: ip
@@ -16,5 +22,7 @@
- name: nic
value: "{{ sap_ha_pacemaker_cluster_vip_client_interface }}"
when:
- - __sap_ha_pacemaker_cluster_vip_resource_id not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- - '"IPaddr2" in sap_ha_pacemaker_cluster_vip_resource_agent'
+ - vip_list_item.key not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
+ - sap_ha_pacemaker_cluster_vip_method == 'ipaddr' or
+ (__sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].with is defined and
+ __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].with == 'ipaddr')
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/import_hacluster_vars_from_inventory.yml b/roles/sap_ha_pacemaker_cluster/tasks/import_hacluster_vars_from_inventory.yml
index a2ca8de87..b3c5ad290 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/import_hacluster_vars_from_inventory.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/import_hacluster_vars_from_inventory.yml
@@ -31,7 +31,7 @@
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_cluster_properties: "{{ ha_cluster_cluster_properties }}"
-#__sap_ha_pacemaker_cluster_resource_groups ha_cluster_constraints_colocation
+# __sap_ha_pacemaker_cluster_resource_groups ha_cluster_constraints_colocation
- name: "SAP HA Prepare Pacemaker - (ha_cluster) Register parameter 'ha_cluster_constraints_colocation'"
when: ha_cluster_constraints_colocation is defined
ansible.builtin.set_fact:
@@ -92,3 +92,9 @@
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_resource_primitives: "{{ ha_cluster_resource_primitives }}"
no_log: true # be paranoid, there could be credentials in it
+
+# ha_cluster_totem
+- name: "SAP HA Prepare Pacemaker - (ha_cluster) Register parameter 'ha_cluster_totem'"
+ when: ha_cluster_totem is defined
+ ansible.builtin.set_fact:
+ __sap_ha_pacemaker_cluster_corosync_totem: "{{ ha_cluster_totem }}"
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/include_construct_vip_resources.yml b/roles/sap_ha_pacemaker_cluster/tasks/include_construct_vip_resources.yml
index 2ef69df6e..6017e8bda 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/include_construct_vip_resources.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/include_construct_vip_resources.yml
@@ -1,37 +1,94 @@
---
-# The VIP resource construction files are included in a loop to allow
-# for multiple IPs to be configured in cluster resources
-
+# For the sake of readability and maintainability, suppress cosmetic ansible-lint warnings.
- name: "SAP HA Prepare Pacemaker - Make a list of potential VIP definitions"
ansible.builtin.set_fact:
- __sap_ha_pacemaker_cluster_all_vip_fact:
+ __sap_ha_pacemaker_cluster_all_vip_fact: # noqa: jinja[spacing]
hana_scaleup_perf: "{{
{
- sap_ha_pacemaker_cluster_vip_hana_primary_resource_name: sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | regex_replace('/.*', ''),
- sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name: sap_ha_pacemaker_cluster_vip_hana_secondary_ip_address | regex_replace('/.*', '')
+ sap_ha_pacemaker_cluster_vip_hana_primary_resource_name:
+ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | regex_replace('/.*', ''),
+ sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name:
+ sap_ha_pacemaker_cluster_vip_hana_secondary_ip_address | regex_replace('/.*', ''),
+ sap_ha_pacemaker_cluster_healthcheck_hana_primary_resource_name:
+ sap_ha_pacemaker_cluster_healthcheck_hana_primary_port,
+ sap_ha_pacemaker_cluster_healthcheck_hana_secondary_resource_name:
+ sap_ha_pacemaker_cluster_healthcheck_hana_secondary_port
} }}"
nwas_abap_ascs_ers: "{{
{
- sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_name: sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | regex_replace('/.*', ''),
- sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_name: sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | regex_replace('/.*', '')
+ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_name:
+ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | regex_replace('/.*', ''),
+ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_name:
+ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | regex_replace('/.*', ''),
+ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_resource_name:
+ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port,
+ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_resource_name:
+ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port
} }}"
nwas_abap_pas_aas: "{{
{
- sap_ha_pacemaker_cluster_vip_nwas_abap_pas_resource_name: sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | regex_replace('/.*', ''),
- sap_ha_pacemaker_cluster_vip_nwas_abap_aas_resource_name: sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | regex_replace('/.*', '')
+ sap_ha_pacemaker_cluster_vip_nwas_abap_pas_resource_name:
+ sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | regex_replace('/.*', ''),
+ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_resource_name:
+ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | regex_replace('/.*', ''),
+ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_resource_name:
+ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_port,
+ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_resource_name:
+ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_port
} }}"
+### Maintenance note
+#
+# The above task returns the following structure with a definition for HANA and ASCS/ERS,
+# example for platform "MS Azure":
+#
+# __sap_ha_pacemaker_cluster_all_vip_fact:
+# hana_scaleup_perf:
+# vip_HAN_10_primary: 192.168.1.10
+# vip_HAN_10_readonly:
+# hc_vip_HAN_10_primary: 62610
+# hc_vip_HAN_10_readonly: 0
+# nwas_abap_ascs_ers:
+# vip_NW1_20_ascs: 192.168.2.20
+# vip_NW2_30_ers: 192.168.2.30
+# hc_vip_NW1_20_ascs: 62620
+# hc_vip_NW2_30_ers: 62630
+# nwas_abap_pas_aas:
+# vip___pas:
+# vip___aas:
+# hc_vip___pas: 0
+# hc_vip___aas: 0
+
- name: "SAP HA Prepare Pacemaker - Combine VIP parameters"
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_vip_resource_definition:
"{{ __sap_ha_pacemaker_cluster_vip_resource_definition | default({})
- | combine(__sap_ha_pacemaker_cluster_all_vip_fact[vip_item]) }}"
+ | combine(__sap_ha_pacemaker_cluster_all_vip_fact[vip_item])
+ | dict2items | rejectattr('value', 'equalto', '') | list | items2dict }}"
loop: "{{ sap_ha_pacemaker_cluster_host_type }}"
loop_control:
loop_var: vip_item
+### Maintenance note
+#
+# The above task reduces the previous dictionary to the contents matching the target
+# host type definition and drops VIP/HC entries that have no value.
+# Example for NW ASCS/ERS:
+#
+# __sap_ha_pacemaker_cluster_vip_resource_definition:
+# nwas_abap_ascs_ers:
+# vip_NW1_20_ascs: 192.168.2.20
+# vip_NW2_30_ers: 192.168.2.30
+# hc_vip_NW1_20_ascs: 62620
+# hc_vip_NW2_30_ers: 62630
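+#
+# For reference, the filter chain used above,
+#   dict2items | rejectattr('value', 'equalto', '') | list | items2dict
+# simply drops keys with empty values, e.g. (illustrative values):
+#   {'vip_HAN_10_primary': '192.168.1.10', 'vip_HAN_10_readonly': ''}
+#   => {'vip_HAN_10_primary': '192.168.1.10'}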
+
+
+# The VIP resource construction files are included in a loop to allow
+# for multiple IPs to be configured in cluster resources
# Repeat the VIP resource definition in a loop over the above combined possible parameters.
+# Applies to all platforms except those with custom or no IPaddr2 handling (see the conditions below).
+# VIP resource creation only.
- name: "SAP HA Prepare Pacemaker - Include variable construction for standard VIP resources"
ansible.builtin.include_tasks: construct_vars_vip_resources_default.yml
loop: "{{ query('dict', __sap_ha_pacemaker_cluster_vip_resource_definition) }}"
@@ -40,11 +97,13 @@
loop_var: vip_list_item
label: "{{ vip_list_item.key }} - {{ vip_list_item.value }}"
when:
- - sap_ha_pacemaker_cluster_vip_method == 'ipaddr'
- - vip_list_item.value | length > 0
-
+ - __sap_ha_pacemaker_cluster_platform != 'cloud_gcp_ce_vm' # custom IPaddr2 task per platform
+ - __sap_ha_pacemaker_cluster_platform != 'cloud_ibmcloud_powervs' # custom IPaddr2 task per platform
+ - __sap_ha_pacemaker_cluster_platform != 'cloud_ibmcloud_vs' # no IPaddr2 resource creation, even when an IP is defined
+ - vip_list_item.key in __sap_ha_pacemaker_cluster_vip_resource_list
-- name: "SAP HA Prepare Pacemaker - Include variable construction for platform VIP resources"
+# Platform custom VIP and/or health check resources creation.
+- name: "SAP HA Prepare Pacemaker - Include variable construction for platform VIP methods"
ansible.builtin.include_tasks: "platform/construct_vars_vip_resources_{{ __sap_ha_pacemaker_cluster_platform }}.yml"
loop: "{{ query('dict', __sap_ha_pacemaker_cluster_vip_resource_definition) }}"
loop_control:
@@ -52,18 +111,12 @@
loop_var: vip_list_item
label: "{{ vip_list_item.key }} - {{ vip_list_item.value }}"
when:
- - sap_ha_pacemaker_cluster_vip_method != 'ipaddr'
- - vip_list_item.value | length > 0
+ - __sap_ha_pacemaker_cluster_platform | length > 0
-- name: "SAP HA Prepare Pacemaker - Include variable construction for SAP Hana VIP constraints"
+# Group VIP and healthcheck resources if applicable.
+- name: "SAP HA Prepare Pacemaker - Include VIP and healthcheck group creation"
ansible.builtin.include_tasks:
- file: construct_vars_vip_constraints_hana.yml
- loop: "{{ query('dict', __sap_ha_pacemaker_cluster_vip_resource_definition) }}"
- loop_control:
- index_var: loop_index
- loop_var: vip_list_item
- label: "{{ vip_list_item.key }} - {{ vip_list_item.value }}"
+ file: construct_vars_vip_groups.yml
when:
- - sap_ha_pacemaker_cluster_host_type | select('search', 'hana') | length > 0
- - vip_list_item.value != ''
+ - sap_ha_pacemaker_cluster_host_type | select('search', 'hana_scaleup') | length > 0
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/main.yml b/roles/sap_ha_pacemaker_cluster/tasks/main.yml
index a235bd0ec..e62864c79 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/main.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/main.yml
@@ -10,7 +10,7 @@
# 5. Include LSR "ha_cluster" with the constructed parameters as role arguments
# 6. Add solution specific config
# * srhook
-# 7. Save LSR "ha_cluster" input parameters in .yml file for easy re-use
+# 7. Save LSR "ha_cluster" input parameters in .yml file for easy reuse
# TODO: Build all resource/constraint configuration variables based on
# simpler user input (think: drop-down options in a UI)
@@ -94,6 +94,11 @@
when:
- "'nwas_java' in nwas_build_item"
+# Include constraints construction after the related resources were constructed.
+- name: "SAP HA Prepare Pacemaker - Include variable construction for SAP Hana VIP constraints"
+ ansible.builtin.include_tasks:
+ file: construct_vars_vip_constraints_hana.yml
+
# All of the SAP HA role constructed parameters must be translated to
# 'ha_cluster' Linux System Role parameters.
- name: "SAP HA Prepare Pacemaker - Translate all parameters to 'ha_cluster' input variables"
@@ -153,17 +158,14 @@
dest: /root/cib_backup.xml
group: root
owner: root
- mode: 0600
+ mode: "0600"
# Cluster installation and configuration through the dedicated
# linux system role 'ha_cluster'
- name: "SAP HA Install Pacemaker - Include System Role 'ha_cluster'"
- ansible.builtin.include_role:
+ ansible.builtin.import_role:
name: "{{ sap_ha_pacemaker_cluster_system_roles_collection }}.ha_cluster"
- apply:
- tags: ha_cluster
- no_log: true # some parameters contain secrets
- tags: ha_cluster
+ no_log: "{{ __sap_ha_pacemaker_cluster_no_log }}" # some parameters contain secrets
# Resource defaults settings were added to "ha_cluster" in Apr 2023 (GH version 1.9.0)
# https://github.com/linux-system-roles/ha_cluster#ha_cluster_resource_defaults
@@ -190,6 +192,31 @@
loop_control:
label: "{{ item.key }}={{ item.value }}"
run_once: true
+ changed_when: true
+
+ # Corosync post-install configuration
+ - name: "SAP HA Install Pacemaker - Make sure corosync systemd directory exists"
+ ansible.builtin.file:
+ path: /etc/systemd/system/corosync.service.d
+ state: directory
+ mode: '0755'
+ when:
+ - __sap_ha_pacemaker_cluster_platform == 'cloud_gcp_ce_vm'
+
+ - name: "SAP HA Install Pacemaker - Corosync systemd configuration"
+ ansible.builtin.copy:
+ backup: true
+ content: |-
+ [Service]
+ ExecStartPre=/bin/sleep 60
+
+ dest: /etc/systemd/system/corosync.service.d/override.conf
+ group: root
+ owner: root
+ mode: '0644'
+ when:
+ - __sap_ha_pacemaker_cluster_platform == 'cloud_gcp_ce_vm'
+ notify: "Reload systemd daemon"
- name: "SAP HA Install Pacemaker - Include srHook configuration"
ansible.builtin.include_tasks:
@@ -213,7 +240,7 @@
# Save all the constructed cluster parameters into a vars file.
#
-# This will help re-using ha_cluster afterwards without loosing the already
+# This will help re-using ha_cluster afterwards without losing the already
# configured resources and constraints.
# The ha_cluster role will otherwise remove configuration that is not part
# of the parameters provided during any subsequent run outside of the current
@@ -229,7 +256,7 @@
ansible.builtin.template:
backup: true
dest: "{{ sap_ha_pacemaker_cluster_create_config_dest }}"
- mode: 0600
+ mode: "0600"
src: cluster_create_config.j2
trim_blocks: true
lstrip_blocks: true
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/platform/ascertain_platform_type.yml b/roles/sap_ha_pacemaker_cluster/tasks/platform/ascertain_platform_type.yml
index a12ee9c30..95196df6f 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/platform/ascertain_platform_type.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/platform/ascertain_platform_type.yml
@@ -41,13 +41,16 @@
# TODO: detection based on multiple facts and providing one standard
# name for use as platform type in related include files
-# cloud_aliyun_ecs_vm, cloud_aws_ec2_vs, cloud_gcp_ce_vm, cloud_ibmcloud_powervs, cloud_ibmcloud_vs, cloud_msazure_vm, hyp_ibmpower_vm, hyp_redhat_ocp_virt_vm, hyp_redhat_rhel_kvm_vm, hyp_vmware_vsphere_vm
+# cloud_aliyun_ecs_vm, cloud_aws_ec2_vs, cloud_gcp_ce_vm, cloud_ibmcloud_powervs, cloud_ibmcloud_vs,
+# cloud_msazure_vm, hyp_ibmpower_vm, hyp_redhat_ocp_virt_vm, hyp_redhat_rhel_kvm_vm,
+# hyp_vmware_vsphere_vm
- name: "SAP HA Prepare Pacemaker - Check if platform is Amazon Web Services EC2 Virtual Server"
when:
- '"amazon" in ansible_system_vendor | lower
- or "amazon" in ansible_product_name | lower'
+ or "amazon" in ansible_product_name | lower
+ or "amazon" in ansible_product_version | lower'
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_platform: cloud_aws_ec2_vs
@@ -74,6 +77,7 @@
ansible.builtin.shell: |
set -o pipefail && rpm -qa | grep -E -e "rsct.basic"
register: __sap_ha_pacemaker_cluster_power_rsct_check
+ changed_when: false
when: ansible_architecture == "ppc64le"
- name: "SAP HA Prepare Pacemaker - Check if platform is IBM Power - RSCT binary check"
@@ -87,6 +91,7 @@
ansible.builtin.shell: |
/opt/rsct/bin/ctgethscid
register: __sap_ha_pacemaker_cluster_power_rsct_hscid
+ changed_when: false
when:
- ansible_architecture == "ppc64le"
- __sap_ha_pacemaker_cluster_power_rsct_check.stdout != ""
@@ -105,18 +110,11 @@
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_platform: hyp_ibmpower_vm
-#- name: "SAP HA Prepare Pacemaker - Check if platform is VMware vSphere"
-# when:
-# - ansible_virtualization_type == 'VMware'
-# ansible.builtin.set_fact:
-# __sap_ha_pacemaker_cluster_platform: hyp_vmware_vsphere_vm
-
-- name: "SAP HA Prepare Pacemaker - Include platform specific tasks - MS Azure VM - Ensure socat binary installed"
- ansible.builtin.package:
- name:
- - socat
- state: present
- when: __sap_ha_pacemaker_cluster_platform == "cloud_msazure_vm"
+# - name: "SAP HA Prepare Pacemaker - Check if platform is VMware vSphere"
+# when:
+# - ansible_virtualization_type == 'VMware'
+# ansible.builtin.set_fact:
+# __sap_ha_pacemaker_cluster_platform: hyp_vmware_vsphere_vm
# Call tasks to discover information that is needed as input for any further steps.
# Run this before including the platform vars in order to build vars based on the
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_aws_ec2_vs.yml b/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_aws_ec2_vs.yml
index 0a178b04e..0dbcdf44f 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_aws_ec2_vs.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_aws_ec2_vs.yml
@@ -1,26 +1,19 @@
---
# Reminder: This file is included in a loop over a dictionary.
+# Included in: tasks/include_construct_vip_resources.yml
+#
+# file loop var: vip_list_item
+#
+# Example:
+# {{ vip_list_item.key }} => vip_SID_00_primary
+# {{ vip_list_item.value }} => 192.168.1.10
-- name: "SAP HA Prepare Pacemaker - ipaddr resource agent - Add resource: OS network interface Virtual IP (dev/test only)"
- ansible.builtin.set_fact:
- __sap_ha_pacemaker_cluster_resource_primitives: "{{ __sap_ha_pacemaker_cluster_resource_primitives + [__resource_vip] }}"
- vars:
- __resource_vip:
- id: "{{ vip_list_item.key }}"
- agent: "{{ __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].agent }}"
- instance_attrs:
- - attrs:
- - name: ip
- value: "{{ vip_list_item.value }}"
- - name: nic
- value: "{{ sap_ha_pacemaker_cluster_vip_client_interface }}"
- when:
- - vip_list_item.key not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- - (sap_ha_pacemaker_cluster_vip_method == 'ipaddr') or
- (__sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].with is defined and
- 'ipaddr' in __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].with)
-
-- name: "SAP HA Prepare Pacemaker - awsvip resource agent - Add resource: AWS floating IP (dev/test only)"
+# 1. Using the IPaddr2 resource agent is neither recommended nor supported
+# 2. IPaddr2 resources would be created by construct_vars_vip_resources_default.yml
+#
+# Using 'awsvip' is also not officially recommended or supported for SAP setups.
+# It is for testing purposes only and works together with the standard IPaddr2 resource.
+- name: "SAP HA Prepare Pacemaker - AWS EC2 VS - Add resource: AWS floating IP: {{ vip_list_item.key }} (awsvip - dev/test only!)"
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_resource_primitives: "{{ __sap_ha_pacemaker_cluster_resource_primitives + [__resource_vip] }}"
vars:
@@ -34,21 +27,24 @@
when:
- ('pri_' ~ vip_list_item.key) not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- sap_ha_pacemaker_cluster_vip_method == 'awsvip'
+ - vip_list_item.key in __sap_ha_pacemaker_cluster_vip_resource_list
-- name: "SAP HA Prepare Pacemaker - awsvip resource agent - Add resource group for VIP resources (dev/test only)"
+- name: "SAP HA Prepare Pacemaker - AWS EC2 VS - Add resource group for VIP resources (dev/test only)"
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_resource_groups: "{{ __sap_ha_pacemaker_cluster_resource_groups + [__vip_group] }}"
vars:
__vip_group:
- id: "{{ sap_ha_pacemaker_cluster_vip_resource_group_name }}_{{ vip_list_item.key }}"
+ id: "{{ sap_ha_pacemaker_cluster_vip_group_prefix }}{{ vip_list_item.key }}"
resource_ids:
- "{{ vip_list_item.key }}"
- "pri_{{ vip_list_item.key }}"
when:
- __vip_group.id is not in (__sap_ha_pacemaker_cluster_resource_groups | map(attribute='id'))
- sap_ha_pacemaker_cluster_vip_method in ['awsvip']
+ - vip_list_item.key in __sap_ha_pacemaker_cluster_vip_resource_list
-- name: "SAP HA Prepare Pacemaker - aws_vpc_move_ip resource agent - Add resource: AWS VIP OverlayIP"
+# The following is the officially recommended and supported method for VIP resources on AWS EC2.
+- name: "SAP HA Prepare Pacemaker - AWS EC2 VS - Add resource: AWS VIP OverlayIP: {{ vip_list_item.key }} (aws_vpc_move_ip)"
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_resource_primitives: "{{ __sap_ha_pacemaker_cluster_resource_primitives + [__resource_vip] }}"
vars:
@@ -66,3 +62,4 @@
when:
- vip_list_item.key not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- sap_ha_pacemaker_cluster_vip_method == 'aws_vpc_move_ip'
+ - vip_list_item.key in __sap_ha_pacemaker_cluster_vip_resource_list
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_gcp_ce_vm.yml b/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_gcp_ce_vm.yml
index 39272c9a6..2f5d3d600 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_gcp_ce_vm.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_gcp_ce_vm.yml
@@ -1,7 +1,14 @@
---
# Reminder: This file is included in a loop over a dictionary.
+# Included in: tasks/include_construct_vip_resources.yml
+#
+# file loop var: vip_list_item
+#
+# Example:
+# {{ vip_list_item.key }} => vip_SID_00_primary
+# {{ vip_list_item.value }} => 192.168.1.10
-- name: "SAP HA Prepare Pacemaker - ipaddr resource agent - Add resource: OS network interface Virtual IP"
+- name: "SAP HA Prepare Pacemaker - GCP CE - Add resource: OS network interface Virtual IP: {{ vip_list_item.key }} (IPaddr2)"
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_resource_primitives: "{{ __sap_ha_pacemaker_cluster_resource_primitives + [__resource_vip] }}"
vars:
@@ -16,19 +23,26 @@
value: 32
- name: nic
value: "{{ sap_ha_pacemaker_cluster_vip_client_interface }}"
+ operations:
+ - action: monitor
+ attrs:
+ - name: interval
+ value: 3600
+
when:
- vip_list_item.key not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- - (sap_ha_pacemaker_cluster_vip_method == 'ipaddr') or
+ - sap_ha_pacemaker_cluster_vip_method == 'ipaddr' or
(__sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].with is defined and
- 'ipaddr' in __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].with)
+ __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].with == 'ipaddr')
+ - vip_list_item.key in __sap_ha_pacemaker_cluster_vip_resource_list
-- name: "SAP HA Prepare Pacemaker - haproxy resource agent - Add resource: Google Cloud Load Balancing Internal passthrough Network Load Balancer (NLB L-4) for VIP routing when SAP HANA scale-up HA"
+- name: "SAP HA Prepare Pacemaker - GCP CE - Add resource: Network Load Balancer (NLB L-4) for VIP routing: {{ vip_list_item.key }} (haproxy)"
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_resource_primitives: "{{ __sap_ha_pacemaker_cluster_resource_primitives + [__health_check] }}"
vars:
__health_check:
- id: "hc_{{ vip_list_item.key }}"
- agent: "{{ __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].agent }}"
+ id: "{{ vip_list_item.key }}"
+ agent: "{{ __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].agent + '@' + __haproxy_id }}"
operations:
- action: monitor
attrs:
@@ -36,8 +50,26 @@
value: 10
- name: timeout
value: 20
+
+ __haproxy_id: >-
+ {% if vip_list_item.key == sap_ha_pacemaker_cluster_healthcheck_hana_primary_resource_name -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_id }}
+ {%- elif vip_list_item.key == sap_ha_pacemaker_cluster_healthcheck_hana_secondary_resource_name
+ and sap_ha_pacemaker_cluster_healthcheck_hana_secondary_port | length > 4 -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_hana_secondary_id }}
+ {%- elif vip_list_item.key == sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_resource_name -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_id }}
+ {%- elif vip_list_item.key == sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_resource_name -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_id }}
+ {%- elif vip_list_item.key == sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_resource_name -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_id }}
+ {%- elif vip_list_item.key == sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_resource_name -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_id }}
+ {%- else -%}
+ {%- endif %}
+
when:
- - __health_check.id is not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- - ('hc_' + vip_list_item.key) not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
+ - vip_list_item.key not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- sap_ha_pacemaker_cluster_vip_method == 'gcp_nlb_reserved_ip_haproxy'
- - sap_ha_pacemaker_cluster_host_type | select('search', 'hana') | length > 0
+ - vip_list_item.key in __sap_ha_pacemaker_cluster_healthcheck_resource_list
+ - __haproxy_id | length > 0
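+
+# Illustrative example of the mapping above (assumptions: the 'gcp_nlb_reserved_ip_haproxy'
+# method maps to the 'service:haproxy' agent, as the IBM Cloud variant in this patch does,
+# and sap_ha_pacemaker_cluster_healthcheck_hana_primary_id resolves to 'HAN_00_primary',
+# which is not a documented default):
+#
+#   vip_list_item.key => hc_vip_HAN_10_primary
+#   __haproxy_id      => HAN_00_primary
+#   resulting agent   => service:haproxy@HAN_00_primary
+#
+# The '@' suffix makes pacemaker manage the templated systemd unit 'haproxy@HAN_00_primary',
+# which is prepared in platform/preconfigure_cloud_gcp_ce_vm.yml.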
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_ibmcloud_powervs.yml b/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_ibmcloud_powervs.yml
index 97b662d67..01d2cf3b0 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_ibmcloud_powervs.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_ibmcloud_powervs.yml
@@ -1,21 +1,24 @@
---
# Reminder: This file is included in a loop over a dictionary.
-- name: "SAP HA Prepare Pacemaker - ipaddr resource agent - Add resource: OS network interface Virtual IP"
+- name: "SAP HA Prepare Pacemaker - IBM Cloud PowerVS - Add resource: OS network interface Virtual IP: {{ vip_list_item.key }} (IPaddr2)"
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_resource_primitives: "{{ __sap_ha_pacemaker_cluster_resource_primitives + [__resource_vip] }}"
vars:
__resource_vip:
id: "{{ vip_list_item.key }}"
- agent: "{{ __sap_ha_pacemaker_cluster_available_vip_agents['ipaddr'].agent }}"
+ agent: "{{ __sap_ha_pacemaker_cluster_available_vip_agents['ipaddr_custom'].agent }}"
instance_attrs:
- attrs:
- name: ip
value: "{{ vip_list_item.value }}"
+ - name: cidr_netmask
+ value: "{{ __sap_ha_pacemaker_cluster_vip_client_interface_subnet_cidr.stdout | int }}"
- name: nic
value: "{{ sap_ha_pacemaker_cluster_vip_client_interface }}"
when:
- vip_list_item.key not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- - (sap_ha_pacemaker_cluster_vip_method == 'ipaddr') or
+ - sap_ha_pacemaker_cluster_vip_method == 'ipaddr_custom' or
(__sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].with is defined and
- 'ipaddr' in __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].with)
+ __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].with == 'ipaddr_custom')
+ - vip_list_item.key in __sap_ha_pacemaker_cluster_vip_resource_list
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_ibmcloud_vs.yml b/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_ibmcloud_vs.yml
new file mode 100644
index 000000000..57137edbc
--- /dev/null
+++ b/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_ibmcloud_vs.yml
@@ -0,0 +1,58 @@
+---
+# Reminder: This file is included in a loop over a dictionary.
+# Included in: tasks/include_construct_vip_resources.yml
+#
+# file loop var: vip_list_item
+#
+# Example for VIP definition:
+# {{ vip_list_item.key }} => vip_SID_00_primary
+# {{ vip_list_item.value }} => 192.168.1.10
+#
+# Example for HC definition:
+# {{ vip_list_item.key }} => hc_vip_SID_00_primary
+# {{ vip_list_item.value }} => 62600
+
+
+- name: "SAP HA Prepare Pacemaker - IBM Cloud VS - Add resource: Load Balancer Private ALB L-7 (or NLB L-4) for VIP routing: {{ vip_list_item.key }} (haproxy)"
+ ansible.builtin.set_fact:
+ __sap_ha_pacemaker_cluster_resource_primitives: "{{ __sap_ha_pacemaker_cluster_resource_primitives + [__health_check] }}"
+ vars:
+ __health_check:
+ id: "{{ vip_list_item.key }}"
+ agent: "{{ __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].agent + '@' + __haproxy_id }}"
+ operations:
+ - action: monitor
+ attrs:
+ - name: interval
+ value: 10
+ - name: timeout
+ value: 20
+
+ __haproxy_id: >-
+ {% if vip_list_item.key == sap_ha_pacemaker_cluster_healthcheck_hana_primary_resource_name -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_id }}
+ {%- elif vip_list_item.key == sap_ha_pacemaker_cluster_healthcheck_hana_secondary_resource_name
+ and sap_ha_pacemaker_cluster_healthcheck_hana_secondary_port | length > 4 -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_hana_secondary_id }}
+ {%- elif vip_list_item.key == sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_resource_name -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_id }}
+ {%- elif vip_list_item.key == sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_resource_name -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_id }}
+ {%- elif vip_list_item.key == sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_resource_name -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_id }}
+ {%- elif vip_list_item.key == sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_resource_name -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_id }}
+ {%- else -%}
+ {%- endif %}
+
+ when:
+ - __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].agent == 'service:haproxy'
+ - vip_list_item.key not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
+ - vip_list_item.key in __sap_ha_pacemaker_cluster_healthcheck_resource_list
+ - __haproxy_id | length > 0
+
+# The IBM Cloud Load Balancer owns and controls the Virtual IP (VIP) routing and failover.
+# When Linux Pacemaker fails over to the secondary, the primary stops responding to the Load Balancer's
+# health check, the secondary starts responding to it,
+# and the IBM Cloud Load Balancer then automatically routes traffic to the secondary.
+# Therefore Linux Pacemaker does not require a resource (e.g. the IPaddr2 resource agent) for the VIP itself.
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_msazure_vm.yml b/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_msazure_vm.yml
index 117a8439f..255a2b420 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_msazure_vm.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/platform/construct_vars_vip_resources_cloud_msazure_vm.yml
@@ -1,77 +1,48 @@
---
# Reminder: This file is included in a loop over a dictionary.
+# Included in: tasks/include_construct_vip_resources.yml
+#
+# file loop var: vip_list_item
+#
+# Example:
+# {{ vip_list_item.key }} => vip_SID_00_primary
+# {{ vip_list_item.value }} => 192.168.1.10
+#
+# Example for HC definition:
+# {{ vip_list_item.key }} => hc_vip_SID_00_primary
+# {{ vip_list_item.value }} => 62600
-- name: "SAP HA Prepare Pacemaker - ipaddr resource agent - Add resource: OS network interface Virtual IP"
- ansible.builtin.set_fact:
- __sap_ha_pacemaker_cluster_resource_primitives: "{{ __sap_ha_pacemaker_cluster_resource_primitives + [__resource_vip] }}"
- vars:
- __resource_vip:
- id: "{{ vip_list_item.key }}"
- agent: "{{ __sap_ha_pacemaker_cluster_available_vip_agents['ipaddr'].agent }}"
- instance_attrs:
- - attrs:
- - name: ip
- value: "{{ vip_list_item.value }}"
- - name: nic
- value: "{{ sap_ha_pacemaker_cluster_vip_client_interface }}"
- when:
- - vip_list_item.key not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- - (sap_ha_pacemaker_cluster_vip_method == 'ipaddr') or
- (__sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].with is defined and
- 'ipaddr' in __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].with)
-
-- name: "SAP HA Prepare Pacemaker - azure_lb resource agent - Verify socat binary path"
- ansible.builtin.shell: |
- which socat
- register: __sap_ha_pacemaker_cluster_register_socat_path
- changed_when: false
+# Azure requires 2 resources for the floating IP setup.
+#
+# 1) resource type "IPaddr2"
+# => is already created through construct_vars_vip_resources_default.yml
-- name: "SAP HA Prepare Pacemaker - azure_lb resource agent - Define load balancer port for health check when SAP HANA scale-up HA"
- ansible.builtin.set_fact:
- __sap_ha_pacemaker_cluster_resource_lb_port: "626{{ sap_ha_pacemaker_cluster_hana_instance_nr }}"
- when: "'hana_scaleup' in sap_ha_pacemaker_cluster_host_type[0]" # REPLACE with substring in any of the strings contained in the list
-
-- name: "SAP HA Prepare Pacemaker - azure_lb resource agent - Add resource: Azure Load Balancer (NLB L-4) for VIP routing when SAP HANA scale-up HA"
- ansible.builtin.set_fact:
- __sap_ha_pacemaker_cluster_resource_primitives: "{{ __sap_ha_pacemaker_cluster_resource_primitives + [__health_check] }}"
- vars:
- __health_check:
- id: "hc_{{ vip_list_item.key }}"
- agent: "{{ __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].agent }}"
- instance_attrs:
- - attrs:
- - name: port
- value: "{{ __sap_ha_pacemaker_cluster_resource_lb_port | default(0) }}" # Add default to ensure skip without errors
- - name: nc
- value: "{{ __sap_ha_pacemaker_cluster_register_socat_path.stdout }}"
- when:
- - __health_check.id is not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- - ('hc_' ~ vip_list_item.key) not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- - sap_ha_pacemaker_cluster_vip_method == 'azure_lb'
- - "'hana_scaleup' in sap_ha_pacemaker_cluster_host_type[0]" # REPLACE with substring in any of the strings contained in the list
+# 2) resource type "azure_lb" (requires 'socat')
+#- name: "SAP HA Prepare Pacemaker - MS Azure - Verify socat binary path"
+# ansible.builtin.shell: |
+# which socat
+# register: __sap_ha_pacemaker_cluster_register_socat_path
+# changed_when: false
-- name: "SAP HA Prepare Pacemaker - azure_lb resource agent - Add resource: Azure Load Balancer (NLB L-4) for VIP routing when SAP NetWeaver HA - ASCS and ERS"
+# Hint: the 'vip_list_item' entries are limited to those matching the cluster host type.
+- name: "SAP HA Prepare Pacemaker - MS Azure - Add resource: Azure Load Balancer (NLB L-4) for VIP routing: {{ vip_list_item.key }} (azure_lb)"
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_resource_primitives: "{{ __sap_ha_pacemaker_cluster_resource_primitives + [__health_check] }}"
vars:
__health_check:
- id: "hc_{{ vip_list_item.key }}"
+ id: "{{ vip_list_item.key }}"
agent: "{{ __sap_ha_pacemaker_cluster_available_vip_agents[sap_ha_pacemaker_cluster_vip_method].agent }}"
instance_attrs:
- attrs:
- name: port
- value: "{{ lb_port_for_hc | default(0) }}" # Add default to ensure skip without errors
- - name: nc
- value: "{{ __sap_ha_pacemaker_cluster_register_socat_path.stdout }}"
- loop: ["626{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr }}", "626{{ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr }}"]
- loop_control:
- loop_var: lb_port_for_hc
- index_var: lb_port_for_hc_index
- label: "{{ lb_port_for_hc }}"
+ value: "{{ vip_list_item.value }}"
+# - name: nc
+# value: "{{ __sap_ha_pacemaker_cluster_register_socat_path.stdout }}"
when:
- - __health_check.id is not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- - ('hc_' ~ vip_list_item.key) not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
- sap_ha_pacemaker_cluster_vip_method == 'azure_lb'
- - "'nwas_abap_ascs_ers' in sap_ha_pacemaker_cluster_host_type[0]" # REPLACE with substring in any of the strings contained in the list
+ - __health_check.id in __sap_ha_pacemaker_cluster_healthcheck_resource_list
+ - __health_check.id not in (__sap_ha_pacemaker_cluster_resource_primitives | map(attribute='id'))
+# - __sap_ha_pacemaker_cluster_register_socat_path.stdout is defined
+# - __sap_ha_pacemaker_cluster_register_socat_path.stdout | length > 0
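+
+# Illustrative end result for one VIP/HC pair (a sketch, not literal output; names and the
+# port follow the examples used elsewhere in this role):
+#
+# __sap_ha_pacemaker_cluster_resource_primitives:
+#   - id: vip_HAN_10_primary       # IPaddr2, added by construct_vars_vip_resources_default.yml
+#   - id: hc_vip_HAN_10_primary    # azure_lb, added by the task above, with port 62610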
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/platform/include_vars_platform.yml b/roles/sap_ha_pacemaker_cluster/tasks/platform/include_vars_platform.yml
index 2779d9d65..0a7199bc3 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/platform/include_vars_platform.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/platform/include_vars_platform.yml
@@ -16,14 +16,17 @@
__sap_ha_pacemaker_cluster_pcmk_host_map: |-
{% for node in ansible_play_hosts_all -%}
{{ hostvars[node].ansible_hostname }}:{{ hostvars[node].ansible_board_asset_tag }}
- {%- if not loop.last %};{% endif %}
+ {%- if not loop.last %};{% endif %}
{% endfor %}
when: __sap_ha_pacemaker_cluster_platform == "cloud_aws_ec2_vs"
- name: "SAP HA Prepare Pacemaker - IBM Power VS from IBM Cloud - Set variable for fencing agent"
ansible.builtin.set_fact:
- sap_ha_pacemaker_cluster_ibmcloud_powervs_host_guid: "{{ __sap_ha_pacemaker_cluster_register_ibmcloud_powervs_host }}"
- sap_ha_pacemaker_cluster_ibmcloud_powervs_workspace_guid: '{{ __sap_ha_pacemaker_cluster_register_ibmcloud_powervs_workspace_crn | replace("::","") | regex_replace(".*\:") }}'
+ sap_ha_pacemaker_cluster_ibmcloud_powervs_host_guid: >-
+ {{ __sap_ha_pacemaker_cluster_register_ibmcloud_powervs_host }}
+ sap_ha_pacemaker_cluster_ibmcloud_powervs_workspace_guid: >-
+ {{ sap_ha_pacemaker_cluster_ibmcloud_powervs_workspace_crn
+ | replace("::", "") | regex_replace(".*\:") }}
when: __sap_ha_pacemaker_cluster_platform == "cloud_ibmcloud_powervs"
# pcmk_host_map format: :;:...
@@ -31,16 +34,18 @@
ansible.builtin.set_fact:
__sap_ha_pacemaker_cluster_pcmk_host_map: |-
{% for node in ansible_play_hosts_all -%}
- {{ hostvars[node].ansible_hostname }}:{{ hostvars[node].sap_ha_pacemaker_cluster_ibmcloud_powervs_instance_id }}
- {%- if not loop.last %};{% endif %}
+ {{ hostvars[node].ansible_hostname }}:{{ hostvars[node].__sap_ha_pacemaker_cluster_register_ibmcloud_powervs_host.stdout }}
+ {%- if not loop.last %};{% endif %}
{% endfor %}
when: __sap_ha_pacemaker_cluster_platform == "cloud_ibmcloud_powervs"
- name: "SAP HA Prepare Pacemaker - IBM PowerVM - Set variable for fencing agent"
ansible.builtin.set_fact:
- sap_ha_pacemaker_cluster_ibmpower_vm_hmc_system_partition_name: "{{ __sap_ha_pacemaker_cluster_register_ibmpower_vm_hmc_system_partition_name.stdout }}"
- sap_ha_pacemaker_cluster_ibmpower_vm_hmc_system_host_mtms: "{{ __sap_ha_pacemaker_cluster_register_ibmpower_vm_hmc_system_host_mtms_gui_string.stdout }}"
+ sap_ha_pacemaker_cluster_ibmpower_vm_hmc_system_partition_name: >-
+ {{ __sap_ha_pacemaker_cluster_register_ibmpower_vm_hmc_system_partition_name.stdout }}
+ sap_ha_pacemaker_cluster_ibmpower_vm_hmc_system_host_mtms: >-
+ {{ __sap_ha_pacemaker_cluster_register_ibmpower_vm_hmc_system_host_mtms_gui_string.stdout }}
when: __sap_ha_pacemaker_cluster_platform == "hyp_ibmpower_vm"
@@ -50,7 +55,7 @@
__sap_ha_pacemaker_cluster_pcmk_host_map: |-
{% for node in ansible_play_hosts_all -%}
{{ hostvars[node].ansible_hostname }}:{{ hostvars[node].sap_ha_pacemaker_cluster_ibmpower_vm_hmc_system_partition_name }}
- {%- if not loop.last %};{% endif %}
+ {%- if not loop.last %};{% endif %}
{% endfor %}
when: __sap_ha_pacemaker_cluster_platform == "hyp_ibmpower_vm"
@@ -61,7 +66,7 @@
__sap_ha_pacemaker_cluster_pcmk_host_map: |-
{% for node in ansible_play_hosts_all -%}
{{ hostvars[node].ansible_hostname }}:{{ hostvars[node].__sap_ha_pacemaker_cluster_register_ibmcloud_vs_host.stdout }}
- {%- if not loop.last %};{% endif %}
+ {%- if not loop.last %};{% endif %}
{% endfor %}
when: __sap_ha_pacemaker_cluster_platform == "cloud_ibmcloud_vs"
@@ -72,6 +77,6 @@
__sap_ha_pacemaker_cluster_pcmk_host_map: |-
{% for node in ansible_play_hosts_all -%}
{{ hostvars[node].ansible_hostname }}:{{ hostvars[node].ansible_hostname }}
- {%- if not loop.last %};{% endif %}
+ {%- if not loop.last %};{% endif %}
{% endfor %}
when: __sap_ha_pacemaker_cluster_platform == "cloud_msazure_vm"
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_aws_ec2_vs.yml b/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_aws_ec2_vs.yml
index 98a2efa23..87c181d5d 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_aws_ec2_vs.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_aws_ec2_vs.yml
@@ -3,7 +3,7 @@
- name: "SAP HA Prepare Pacemaker - AWS EC2 VS - Create awscli config directory"
ansible.builtin.file:
- mode: 0755
+ mode: "0755"
owner: root
path: /root/.aws
state: directory
@@ -15,7 +15,7 @@
[default]
region = {{ sap_ha_pacemaker_cluster_aws_region }}
create: true
- mode: 0600
+ mode: "0600"
owner: root
path: /root/.aws/config
@@ -27,7 +27,7 @@
aws_access_key_id = {{ sap_ha_pacemaker_cluster_aws_access_key_id }}
aws_secret_access_key = {{ sap_ha_pacemaker_cluster_aws_secret_access_key }}
create: true
- mode: 0600
+ mode: "0600"
owner: root
path: /root/.aws/credentials
no_log: true
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_gcp_ce_vm.yml b/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_gcp_ce_vm.yml
index 47b401867..76e4dfc12 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_gcp_ce_vm.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_gcp_ce_vm.yml
@@ -1,39 +1,154 @@
---
-# Requirement to enable the fencing resource to function.
-- name: "SAP HA Prepare Pacemaker - GCP Compute VM - haproxy package install"
+- name: "SAP HA Install Pacemaker - GCP CE VM - Install haproxy"
ansible.builtin.package:
- name: "haproxy"
+ name: haproxy
state: present
-- name: "SAP HA Prepare Pacemaker - GCP Compute VM - haproxy listener configuration"
- ansible.builtin.blockinfile:
+- name: "SAP HA Install Pacemaker - GCP CE VM - Check if haproxy service template exists"
+ ansible.builtin.stat:
+ path: /etc/systemd/system/haproxy@.service
+ register: __sap_ha_pacemaker_cluster_register_haproxy_template
+
+- name: "SAP HA Install Pacemaker - GCP CE VM - Create haproxy service template"
+ ansible.builtin.copy:
+ dest: /etc/systemd/system/haproxy@.service
+ remote_src: true
+ src: /usr/lib/systemd/system/haproxy.service
+ mode: '0644'
+ when:
+ - not __sap_ha_pacemaker_cluster_register_haproxy_template.stat.exists
+
+- name: "SAP HA Install Pacemaker - GCP CE VM - Update haproxy service template description"
+ ansible.builtin.lineinfile:
+ backup: true
+ path: /etc/systemd/system/haproxy@.service
+ regexp: '^Description='
+ line: 'Description=HAProxy Load Balancer %i'
+ state: present
+ insertafter: '^\[Unit\]$'
+ notify: "systemd daemon-reload"
+
+- name: "SAP HA Install Pacemaker - GCP CE VM - Update haproxy service template environment"
+ ansible.builtin.lineinfile:
+ backup: true
+ path: /etc/systemd/system/haproxy@.service
+ regexp: '^Environment='
+ line: 'Environment="CONFIG=/etc/haproxy/haproxy-%i.cfg" "PIDFILE=/run/haproxy-%i.pid"'
state: present
- insertafter: EOF
- dest: /etc/haproxy/haproxy.cfg
- marker_begin: "---- haproxy health check listener for SAP HANA ----"
- marker_end: "----"
- content: |
- listen healthcheck_vip_hana
- bind *:60000
+ insertafter: '^\[Service\]$'
+ notify: "systemd daemon-reload"
+
+- name: "SAP HA Install Pacemaker - GCP CE VM - Define healthcheck details for HANA"
+ ansible.builtin.set_fact:
+ __sap_ha_pacemaker_cluster_healthcheck_list_hana:
+ - name: "{{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_id }}"
+ port: "{{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_port }}"
+ # If no custom port is defined, calculate the port for the secondary
+ # by adding 10, to avoid a conflict with the primary health check port.
+ - name: "{{ sap_ha_pacemaker_cluster_healthcheck_hana_secondary_id }}"
+ port: >-
+ {% if sap_ha_pacemaker_cluster_vip_hana_secondary_ip_address | length > 0 -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_hana_secondary_port }}
+ {%- else %}0{%- endif %}
when:
- - sap_ha_pacemaker_cluster_host_type | select('search', 'hana') | length > 0
+ - sap_ha_pacemaker_cluster_host_type | select('search', 'hana_scaleup') | length > 0
-- name: "SAP HA Prepare Pacemaker - GCP Compute VM - haproxy service start and enable"
- ansible.builtin.service:
- name: "haproxy"
- state: started
+- name: "SAP HA Install Pacemaker - GCP CE VM - Define healthcheck details for NW ASCS/ERS"
+ ansible.builtin.set_fact:
+ __sap_ha_pacemaker_cluster_healthcheck_list_ascs:
+ - name: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_id }}"
+ port: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port }}"
+ - name: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_id }}"
+ port: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port }}"
+ when:
+ - sap_ha_pacemaker_cluster_host_type | select('search', 'nwas_abap_ascs') | length > 0
+
+
+- name: "SAP HA Install Pacemaker - GCP CE VM - Create haproxy config for HANA instances"
+ ansible.builtin.blockinfile:
+ backup: false
+ create: true
+ path: "/etc/haproxy/haproxy-{{ haproxy_item.name }}.cfg"
+ mode: "0644"
+ owner: root
+ group: root
+ marker: "# {mark} Created by Ansible role sap_ha_pacemaker_cluster"
+ block: |
+ global
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy-%i.pid
+ user haproxy
+ group haproxy
+ daemon
+
+ defaults
+ mode tcp
+ log global
+ option dontlognull
+ option redispatch
+ retries 3
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 1m
+ timeout server 1m
+ timeout check 10s
+ maxconn 3000
+
+ # Listener for SAP healthcheck
+ listen healthcheck
+ bind *:{{ haproxy_item.port }}
+ loop: "{{ __sap_ha_pacemaker_cluster_healthcheck_list_hana }}"
+ loop_control:
+ loop_var: haproxy_item
+ label: "{{ haproxy_item.name }}: {{ haproxy_item.port }}"
+ when:
+ - sap_ha_pacemaker_cluster_host_type | select('search', 'hana_scaleup') | length > 0
+ - haproxy_item.port | length > 4
+
+
+- name: "SAP HA Install Pacemaker - GCP CE VM - Create haproxy config for NWAS ASCS/ERS instances"
+ ansible.builtin.blockinfile:
+ create: true
+ path: "/etc/haproxy/haproxy-{{ haproxy_item.name }}.cfg"
+ mode: "0644"
+ owner: root
+ group: root
+ marker: "# {mark} Created by Ansible role sap_ha_pacemaker_cluster"
+ block: |
+ global
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy-%i.pid
+ user haproxy
+ group haproxy
+ daemon
+
+ defaults
+ mode tcp
+ log global
+ option dontlognull
+ option redispatch
+ retries 3
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 1m
+ timeout server 1m
+ timeout check 10s
+ maxconn 3000
-- name: "SAP HA Prepare Pacemaker - haproxy resource agent - Verify haproxy binary path"
- ansible.builtin.shell: |
- which haproxy
- register: __sap_ha_pacemaker_cluster_register_haproxy_path
- changed_when: false
-
-- name: "SAP HA Prepare Pacemaker - haproxy resource agent - Verify haproxy config file listener (/etc/haproxy/haproxy.cfg) for SAP HANA"
- ansible.builtin.shell: |
- grep 'listen healthcheck_vip_hana' /etc/haproxy/haproxy.cfg
- register: __sap_ha_pacemaker_cluster_register_haproxy_config
- changed_when: false
+ # Listener for SAP healthcheck
+ listen healthcheck
+ bind *:{{ haproxy_item.port }}
+ loop: "{{ __sap_ha_pacemaker_cluster_healthcheck_list_ascs }}"
+ loop_control:
+ loop_var: haproxy_item
+ label: "{{ haproxy_item.name }}: {{ haproxy_item.port }}"
when:
- - sap_ha_pacemaker_cluster_host_type | select('search', 'hana') | length > 0
+ - sap_ha_pacemaker_cluster_host_type | select('search', 'nwas_abap_ascs') | length > 0
+
+
+- name: "SAP HA Install Pacemaker - GCP CE VM - Ensure that haproxy service is running"
+ ansible.builtin.service:
+ name: haproxy
+ enabled: false
+ state: started
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_ibmcloud_vs.yml b/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_ibmcloud_vs.yml
index 33c4f2d5a..2547a28e9 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_ibmcloud_vs.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_ibmcloud_vs.yml
@@ -1,2 +1,198 @@
---
# Requirement to enable the fencing resource to function.
+
+- name: "SAP HA Prepare Pacemaker - IBM Cloud VS - Install haproxy"
+ ansible.builtin.package:
+ name: haproxy
+ state: present
+
+# - name: "SAP HA Prepare Pacemaker - IBM Cloud VS - Create haproxy log directory for rsyslog"
+# ansible.builtin.file:
+# path: /var/log/haproxy
+# state: directory
+# mode: '0755'
+#
+# - name: "SAP HA Prepare Pacemaker - IBM Cloud VS - Create haproxy config for rsyslog"
+# ansible.builtin.copy:
+# dest: /etc/rsyslog.d/haproxy.conf
+# mode: '0644'
+# content: |
+# # Additional socket in haproxy's chroot
+# # to allow logging via /dev/log to chroot'ed HAProxy processes
+# $AddUnixListenSocket /var/lib/haproxy/dev/log
+#
+# # Send HAProxy messages to a dedicated logfile
+# :programname,startswith,"haproxy" /var/log/haproxy/haproxy.log
+#
+# - name: "SAP HA Prepare Pacemaker - IBM Cloud VS - rsyslog service restart"
+# ansible.builtin.service:
+# name: rsyslog
+# state: restarted
+# enabled: true
+
+- name: "SAP HA Install Pacemaker - IBM Cloud VS - Check if haproxy service template exists"
+ ansible.builtin.stat:
+ path: /etc/systemd/system/haproxy@.service
+ register: __sap_ha_pacemaker_cluster_register_haproxy_template
+
+- name: "SAP HA Install Pacemaker - IBM Cloud VS - Create haproxy service template"
+ ansible.builtin.copy:
+ dest: /etc/systemd/system/haproxy@.service
+ remote_src: true
+ src: /usr/lib/systemd/system/haproxy.service
+ mode: '0644'
+ when:
+ - not __sap_ha_pacemaker_cluster_register_haproxy_template.stat.exists
+
+- name: "SAP HA Install Pacemaker - IBM Cloud VS - Update haproxy service template description"
+ ansible.builtin.lineinfile:
+ backup: true
+ path: /etc/systemd/system/haproxy@.service
+ regexp: '^Description='
+ line: 'Description=HAProxy Load Balancer %i'
+ state: present
+ insertafter: '^\[Unit\]$'
+ notify: "systemd daemon-reload"
+
+- name: "SAP HA Install Pacemaker - IBM Cloud VS - Update haproxy service template environment"
+ ansible.builtin.lineinfile:
+ backup: true
+ path: /etc/systemd/system/haproxy@.service
+ regexp: '^Environment='
+ line: 'Environment="CONFIG=/etc/haproxy/haproxy-%i.cfg" "PIDFILE=/run/haproxy-%i.pid"'
+ state: present
+ insertafter: '^\[Service\]$'
+ notify: "systemd daemon-reload"
+
+- name: "SAP HA Install Pacemaker - IBM Cloud VS - Update haproxy service template environment"
+ ansible.builtin.lineinfile:
+ backup: true
+ path: /etc/systemd/system/haproxy@.service
+ regexp: '^Environment='
+ line: 'Environment="CONFIG=/etc/haproxy/haproxy-%i.cfg" "PIDFILE=/run/haproxy-%i.pid"'
+ state: present
+ insertafter: '^[Service]$'
+ notify: "systemd daemon-reload"
+
+- name: "SAP HA Install Pacemaker - IBM Cloud VS - Define healthcheck details for HANA"
+ ansible.builtin.set_fact:
+ __sap_ha_pacemaker_cluster_healthcheck_list_hana:
+ - name: "{{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_id }}"
+ port: "{{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_port }}"
+ # If no custom port is defined, calculate the port for the secondary
+ # by adding 10, to avoid a conflict with the primary health check port.
+ - name: "{{ sap_ha_pacemaker_cluster_healthcheck_hana_secondary_id }}"
+ port: >-
+ {% if sap_ha_pacemaker_cluster_vip_hana_secondary_ip_address | length > 0 -%}
+ {{ sap_ha_pacemaker_cluster_healthcheck_hana_secondary_port }}
+ {%- else %}0{%- endif %}
+ when:
+ - sap_ha_pacemaker_cluster_host_type | select('search', 'hana_scaleup') | length > 0
+
+- name: "SAP HA Install Pacemaker - IBM Cloud VS - Define healthcheck details for NW ASCS/ERS"
+ ansible.builtin.set_fact:
+ __sap_ha_pacemaker_cluster_healthcheck_list_ascs:
+ - name: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_id }}"
+ port: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port }}"
+ - name: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_id }}"
+ port: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port }}"
+ when:
+ - sap_ha_pacemaker_cluster_host_type | select('search', 'nwas_abap_ascs') | length > 0
+
+- name: "SAP HA Install Pacemaker - GCP CE VM - Create haproxy config for HANA instances"
+ ansible.builtin.blockinfile:
+ backup: false
+ create: true
+ path: "/etc/haproxy/haproxy-{{ haproxy_item.name }}.cfg"
+ mode: "0644"
+ owner: root
+ group: root
+ marker: "# {mark} Created by Ansible role sap_ha_pacemaker_cluster"
+ block: |
+ global
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy-%i.pid
+ user haproxy
+ group haproxy
+ daemon
+
+ defaults
+ mode tcp
+ log global
+ option dontlognull
+ option redispatch
+ retries 3
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 1m
+ timeout server 1m
+ timeout check 10s
+ maxconn 3000
+
+ # Listener for SAP healthcheck
+ listen healthcheck
+ bind *:{{ haproxy_item.port }}
+ loop: "{{ __sap_ha_pacemaker_cluster_healthcheck_list_hana }}"
+ loop_control:
+ loop_var: haproxy_item
+ label: "{{ haproxy_item.name }}: {{ haproxy_item.port }}"
+ when:
+ - sap_ha_pacemaker_cluster_host_type | select('search', 'hana_scaleup') | length > 0
+ - haproxy_item.port | length > 4
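+  # Note: the 'length > 4' condition skips entries whose port was left empty or set to the
+  # placeholder '0' (used when no secondary HANA VIP is defined), so no config file is
+  # rendered for such an entry.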
+
+- name: "SAP HA Install Pacemaker - GCP CE VM - Create haproxy config for NWAS ASCS/ERS instances"
+ ansible.builtin.blockinfile:
+ create: true
+ path: "/etc/haproxy/haproxy-{{ haproxy_item.name }}.cfg"
+ mode: "0644"
+ owner: root
+ group: root
+ marker: "# {mark} Created by Ansible role sap_ha_pacemaker_cluster"
+ block: |
+ global
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy-%i.pid
+ user haproxy
+ group haproxy
+ daemon
+
+ defaults
+ mode tcp
+ log global
+ option dontlognull
+ option redispatch
+ retries 3
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 1m
+ timeout server 1m
+ timeout check 10s
+ maxconn 3000
+
+ # Listener for SAP healthcheck
+ listen healthcheck
+ bind *:{{ haproxy_item.port }}
+ loop: "{{ __sap_ha_pacemaker_cluster_healthcheck_list_ascs }}"
+ loop_control:
+ loop_var: haproxy_item
+ label: "{{ haproxy_item.name }}: {{ haproxy_item.port }}"
+ when:
+ - sap_ha_pacemaker_cluster_host_type | select('search', 'nwas_abap_ascs') | length > 0
+
+
+# - name: "SAP HA Prepare Pacemaker - IBM Cloud VS - haproxy listener configuration"
+# ansible.builtin.blockinfile:
+# state: present
+# insertafter: EOF
+# dest: /etc/haproxy/haproxy.cfg
+# marker_begin: "---- haproxy health check listener ----"
+# marker_end: "----"
+# content: |
+ # when:
+ # - sap_ha_pacemaker_cluster_host_type | select('search', 'hana') | length > 0
+
+- name: "SAP HA Prepare Pacemaker - IBM Cloud VS - haproxy service start (without enable on boot)"
+ ansible.builtin.service:
+ name: haproxy
+ state: started
+ enabled: false # Do not start on boot
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_msazure_vm.yml b/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_msazure_vm.yml
index 33c4f2d5a..94d09abb3 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_msazure_vm.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/platform/preconfigure_cloud_msazure_vm.yml
@@ -1,2 +1,4 @@
---
# Requirement to enable the fencing resource to function.
+#
+# TODO: firewall config for the LB health probe port
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/platform/register_sysinfo_cloud_ibmcloud_powervs.yml b/roles/sap_ha_pacemaker_cluster/tasks/platform/register_sysinfo_cloud_ibmcloud_powervs.yml
index 63be853f6..2445e4c2d 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/platform/register_sysinfo_cloud_ibmcloud_powervs.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/platform/register_sysinfo_cloud_ibmcloud_powervs.yml
@@ -1,11 +1,30 @@
---
# Ansible facts rely on SMBIOS/DMI, which does not exist on ppc64le CPU Architecture.
+# Discovered input is used for 'plug' (via pcmk_host_map).
+# The Instance ID in the IBM Power Virtual Server Workspace is the identical string to the UUID reported on the host.
# alt command using IBM Power RSCT binary: /opt/rsct/bin/ctgethscid | grep PartitionUUID | cut -d \" -f2
# alt command using cloud-init data: cat /run/cloud-init/instance-data.json | grep uuid | cut -d \" -f4
-- name: SAP HA Prepare Pacemaker - IBM Power VS from IBM Cloud - IBM Power Virtual Server instance ID
+# alt command using cloud-init data: cat /run/cloud-init/instance-data.json | grep instance_id | cut -d \" -f4
+- name: "SAP HA Prepare Pacemaker - IBM Cloud Power VS - IBM Power Virtual Server UUID"
ansible.builtin.shell: |
- set -o pipefail && echo $(tr -d '\0' < /proc/device-tree/ibm,partition-name)
- register: __sap_ha_pacemaker_cluster_register_ibmcloud_powervs_workspace_guid
+ set -o pipefail && echo $(tr -d '\0' < /proc/device-tree/ibm,partition-uuid)
+ register: __sap_ha_pacemaker_cluster_register_ibmcloud_powervs_host
+ changed_when: false
+ check_mode: false
+
+- name: "SAP HA Prepare Pacemaker - Ensure ipcalc binary installed"
+ ansible.builtin.package:
+ name:
+ - ipcalc
+ state: present
+
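+# Illustrative example of the ipcalc call below (addresses are hypothetical):
+# 'ipcalc --prefix 10.50.1.0/255.255.255.0' prints 'PREFIX=24', and the sed filter
+# reduces the registered result to '24'.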
+- name: "SAP HA Prepare Pacemaker - IBM Cloud PowerVS - Calculate network interface subnet CIDR"
+ ansible.builtin.shell: |
+ set -o pipefail && ipcalc --prefix \
+ {{ ansible_facts[sap_ha_pacemaker_cluster_vip_client_interface].ipv4.network
+ + '/' + ansible_facts[sap_ha_pacemaker_cluster_vip_client_interface].ipv4.netmask }} \
+ | sed 's|PREFIX=||'
+ register: __sap_ha_pacemaker_cluster_vip_client_interface_subnet_cidr
changed_when: false
check_mode: false
diff --git a/roles/sap_ha_pacemaker_cluster/tasks/validate_input_parameters.yml b/roles/sap_ha_pacemaker_cluster/tasks/validate_input_parameters.yml
index ad8831114..43702c91d 100644
--- a/roles/sap_ha_pacemaker_cluster/tasks/validate_input_parameters.yml
+++ b/roles/sap_ha_pacemaker_cluster/tasks/validate_input_parameters.yml
@@ -4,7 +4,7 @@
ansible.builtin.assert:
that:
- sap_ha_pacemaker_cluster_hana_sid | length == 3
- - sap_ha_pacemaker_cluster_hana_sid not in __sap_sid_prohibited
+ - sap_ha_pacemaker_cluster_hana_sid not in __sap_ha_pacemaker_cluster_sid_prohibited
fail_msg: |
Host type = {{ sap_ha_pacemaker_cluster_host_type }}
Requires 'sap_ha_pacemaker_cluster_hana_sid' to be defined!
@@ -37,7 +37,7 @@
ansible.builtin.assert:
that:
- sap_ha_pacemaker_cluster_nwas_abap_sid | length == 3
- - sap_ha_pacemaker_cluster_nwas_abap_sid not in __sap_sid_prohibited
+ - sap_ha_pacemaker_cluster_nwas_abap_sid not in __sap_ha_pacemaker_cluster_sid_prohibited
fail_msg: |
Host type = {{ sap_ha_pacemaker_cluster_host_type }}
Requires 'sap_ha_pacemaker_cluster_nwas_abap_sid' to be defined!
@@ -117,14 +117,14 @@
when:
- sap_ha_pacemaker_cluster_host_type | select('search', 'nwas_abap_ascs') | length > 0
-- name: "SAP HA Prepare Pacemaker - (NetWeaver ERS) Verify that the VIP is defined"
- ansible.builtin.assert:
- that:
- - sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address is defined
- - sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | length > 0
- fail_msg: "Host type = '{{ sap_ha_pacemaker_cluster_host_type }}', but 'sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address' is not defined."
- when:
- - sap_ha_pacemaker_cluster_host_type | select('search', 'nwas_abap_ascs_ers') | length > 0
+# - name: "SAP HA Prepare Pacemaker - (NetWeaver ERS) Verify that the VIP is defined"
+# ansible.builtin.assert:
+# that:
+# - sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address is defined
+# - sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | length > 0
+# fail_msg: "Host type = '{{ sap_ha_pacemaker_cluster_host_type }}', but 'sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address' is not defined."
+# when:
+# - sap_ha_pacemaker_cluster_host_type | select('search', 'nwas_abap_ascs_ers') | length > 0
- name: "SAP HA Prepare Pacemaker - (NetWeaver PAS) Verify that the VIP is defined"
ansible.builtin.assert:
diff --git a/roles/sap_ha_pacemaker_cluster/templates/cluster_create_config.j2 b/roles/sap_ha_pacemaker_cluster/templates/cluster_create_config.j2
index 87a96a067..622cf8e48 100644
--- a/roles/sap_ha_pacemaker_cluster/templates/cluster_create_config.j2
+++ b/roles/sap_ha_pacemaker_cluster/templates/cluster_create_config.j2
@@ -1,4 +1,5 @@
---
+
# This is an input variables file automatically generated for use with the
# 'ha_cluster' linux system role.
#
@@ -47,4 +48,8 @@ ha_cluster_resource_groups:
# Definition of all cluster resources
ha_cluster_resource_primitives:
-{{ ha_cluster_resource_primitives | default()| to_nice_yaml(indent=2) }}
+{{ ha_cluster_resource_primitives | default() | to_nice_yaml(indent=2) }}
+
+# Definition of corosync totem settings
+ha_cluster_totem:
+{{ ha_cluster_totem | default() | to_nice_yaml(indent=2) }}
diff --git a/roles/sap_ha_pacemaker_cluster/vars/main.yml b/roles/sap_ha_pacemaker_cluster/vars/main.yml
index e0dbd3eb6..f8ecb25fa 100644
--- a/roles/sap_ha_pacemaker_cluster/vars/main.yml
+++ b/roles/sap_ha_pacemaker_cluster/vars/main.yml
@@ -1,15 +1,15 @@
---
# SAP System IDs that are reserved and must not be used
# Reference: SAP Note 1979280
-__sap_sid_prohibited: ['ADD', 'ADM', 'ALL', 'AMD', 'AND', 'ANY', 'ARE', 'ASC',
- 'AUX', 'AVG', 'BIN', 'BIT', 'CDC', 'COM', 'CON', 'DAA',
- 'DBA', 'DBM', 'DBO', 'DTD', 'ECO', 'END', 'EPS', 'EXE',
- 'FOR', 'GET', 'GID', 'IBM', 'INT', 'KEY', 'LIB', 'LOG',
- 'LPT', 'MAP', 'MAX', 'MEM', 'MIG', 'MIN', 'MON', 'NET',
- 'NIX', 'NOT', 'NUL', 'OFF', 'OLD', 'OMS', 'OUT', 'PAD',
- 'PRN', 'RAW', 'REF', 'ROW', 'SAP', 'SET', 'SGA', 'SHG',
- 'SID', 'SQL', 'SUM', 'SYS', 'TMP', 'TOP', 'TRC', 'UID',
- 'USE', 'USR', 'VAR']
+__sap_ha_pacemaker_cluster_sid_prohibited: ['ADD', 'ADM', 'ALL', 'AMD', 'AND', 'ANY', 'ARE', 'ASC',
+ 'AUX', 'AVG', 'BIN', 'BIT', 'CDC', 'COM', 'CON', 'DAA',
+ 'DBA', 'DBM', 'DBO', 'DTD', 'ECO', 'END', 'EPS', 'EXE',
+ 'FOR', 'GET', 'GID', 'IBM', 'INT', 'KEY', 'LIB', 'LOG',
+ 'LPT', 'MAP', 'MAX', 'MEM', 'MIG', 'MIN', 'MON', 'NET',
+ 'NIX', 'NOT', 'NUL', 'OFF', 'OLD', 'OMS', 'OUT', 'PAD',
+ 'PRN', 'RAW', 'REF', 'ROW', 'SAP', 'SET', 'SGA', 'SHG',
+ 'SID', 'SQL', 'SUM', 'SYS', 'TMP', 'TOP', 'TRC', 'UID',
+ 'USE', 'USR', 'VAR']
# ansible_facts required by the role
__sap_ha_pacemaker_cluster_required_facts:
@@ -19,7 +19,7 @@ __sap_ha_pacemaker_cluster_required_facts:
- distribution # subset: min, hardware
- distribution_major_version # subset: min, hardware
- distribution_version # subset: min, hardware
- - hostname #ubset: min, hardware
+ - hostname # subset: min, hardware
- os_family # subset: min, hardware
# - selinux # subset: min, hardware
# - service_mgr # subset: min, hardware
@@ -31,13 +31,55 @@ __sap_ha_pacemaker_cluster_required_facts:
# This is automatically adjusted during preparation tasks.
__sap_ha_pacemaker_cluster_nic_multi_bool: false
-# By default use the construction of IPaddr2 VIP resources.
-# Platforms define different methods as applicable.
+# By default, use the construction of IPaddr2 VIP resources.
+# Platforms define different methods from the optional agents, as applicable.
sap_ha_pacemaker_cluster_vip_method: ipaddr
+sap_ha_pacemaker_cluster_vip_group_prefix: ''
+
+__sap_ha_pacemaker_cluster_available_vip_agents:
+ ipaddr:
+ agent: "ocf:heartbeat:IPaddr2"
+
+# Health check helper variables for platforms that require them
+sap_ha_pacemaker_cluster_healthcheck_hana_primary_resource_name: "hc_{{ sap_ha_pacemaker_cluster_vip_hana_primary_resource_name }}"
+sap_ha_pacemaker_cluster_healthcheck_hana_secondary_resource_name: "hc_{{ sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_resource_name: "hc_{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_name }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_resource_name: "hc_{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_name }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_resource_name: "hc_{{ sap_ha_pacemaker_cluster_vip_nwas_abap_pas_resource_name }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_resource_name: "hc_{{ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_resource_name }}"
+
+# For convenience to distinguish between VIP and HC resources:
+__sap_ha_pacemaker_cluster_vip_resource_list:
+ - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_vip_hana_secondary_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_pas_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_resource_name }}"
+
+__sap_ha_pacemaker_cluster_healthcheck_resource_list:
+ - "{{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_healthcheck_hana_secondary_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_resource_name }}"
+ - "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_resource_name }}"
+
+# Health check default ports as strings.
+# Note: a difference between the HANA primary and read-only ports is required.
+# Ports must be pre-defined empty to skip entering the construct_vars_vip_resources_*
+# includes when not overridden.
+sap_ha_pacemaker_cluster_healthcheck_hana_primary_port: ''
+sap_ha_pacemaker_cluster_healthcheck_hana_secondary_port: ''
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port: ''
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port: ''
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_port: ''
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_port: ''
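+# Platform vars files override these with real listener ports where a load balancer
+# health check is used, e.g. "620<instance_nr>" on GCP, IBM Cloud VS and MS Azure
+# (see the platform_cloud_*.yml vars files).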
# (cloud) platform helper variable - leave empty for default = not cloud
__sap_ha_pacemaker_cluster_platform: ''
+__sap_ha_pacemaker_cluster_no_log: true
# ATTENTION:
# Any variables for 'ha_cluster' which this SAP role supports/inherits should also
@@ -52,9 +94,20 @@ __sap_ha_pacemaker_cluster_platform: ''
# Remove this section when validated.
#
# Never set defaults for these:
-#__sap_ha_pacemaker_cluster_cluster_name:
-#__sap_ha_pacemaker_cluster_hacluster_user_password:
+# __sap_ha_pacemaker_cluster_cluster_name:
+# __sap_ha_pacemaker_cluster_hacluster_user_password:
+# Predefine host_map for variable construction
+__sap_ha_pacemaker_cluster_pcmk_host_map: ''
+
+###############################################################################
+# 'ha_cluster' Linux System Role transparent integration
+#
+# For each additional 'ha_cluster' role variable used in this SAP HA role,
+# make sure to update the following:
+# - below list of role internal variables
+# - tasks/import_hacluster_vars_from_inventory.yml
+# - tasks/construct_final_hacluster_vars.yml
# Pre-define internal optional parameters to avoid defaults in the code:
__sap_ha_pacemaker_cluster_sap_extra_packages: []
@@ -67,9 +120,10 @@ __sap_ha_pacemaker_cluster_constraints_order: []
__sap_ha_pacemaker_cluster_extra_packages: []
__sap_ha_pacemaker_cluster_fence_agent_packages: []
__sap_ha_pacemaker_cluster_repos: []
-__sap_ha_pacemaker_cluster_resource_primitives: []
-__sap_ha_pacemaker_cluster_resource_groups: []
__sap_ha_pacemaker_cluster_resource_clones: []
+__sap_ha_pacemaker_cluster_resource_groups: []
+__sap_ha_pacemaker_cluster_resource_primitives: []
-# Predefine host_map for variable construction
-__sap_ha_pacemaker_cluster_pcmk_host_map: ''
+# Pre-define this parameter in its dictionary format:
+__sap_ha_pacemaker_cluster_corosync_totem:
+ options: []
diff --git a/roles/sap_ha_pacemaker_cluster/vars/nwas_abap_ascs_ers.yml b/roles/sap_ha_pacemaker_cluster/vars/nwas_abap_ascs_ers.yml
index 2daf0d2b5..c9f42abb6 100644
--- a/roles/sap_ha_pacemaker_cluster/vars/nwas_abap_ascs_ers.yml
+++ b/roles/sap_ha_pacemaker_cluster/vars/nwas_abap_ascs_ers.yml
@@ -1,6 +1,4 @@
---
-#sap_ha_pacemaker_cluster_vip_resource_name: "vip_{{ sap_ha_pacemaker_cluster_nwas_abap_sid }}"
-
# The following directories are appended to the 'nfs_path' of the '/usr/sap' storage
# definition.
# Therefore, the /usr/sap prefix must be left out of the listed path items.
diff --git a/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_aws_ec2_vs.yml b/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_aws_ec2_vs.yml
index 238960267..b6f2248d5 100644
--- a/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_aws_ec2_vs.yml
+++ b/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_aws_ec2_vs.yml
@@ -27,9 +27,14 @@ sap_ha_pacemaker_cluster_stonith_default:
# secret_key: "{{ sap_ha_pacemaker_cluster_aws_secret_access_key }}"
# region: "{{ sap_ha_pacemaker_cluster_aws_region }}"
+# Platform corosync totem configuration
+sap_ha_pacemaker_cluster_corosync_totem:
+ options:
+ token: 29000
+
# Platform specific VIP handling
sap_ha_pacemaker_cluster_vip_method: aws_vpc_move_ip
-sap_ha_pacemaker_cluster_vip_resource_group_name: vipgroup
+sap_ha_pacemaker_cluster_vip_group_prefix: '' # the default supported VIP agent is a single resource only
__sap_ha_pacemaker_cluster_available_vip_agents:
# Testing only! Not officially supported for SAP on AWS.
diff --git a/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_gcp_ce_vm.yml b/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_gcp_ce_vm.yml
index 34e9ade20..2e766b33c 100644
--- a/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_gcp_ce_vm.yml
+++ b/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_gcp_ce_vm.yml
@@ -17,29 +17,63 @@ __sap_ha_pacemaker_cluster_repos:
__sap_ha_pacemaker_cluster_gcp_hosts: []
sap_ha_pacemaker_cluster_stonith_default:
- id: "res_fence_gce_{{ ansible_hostname }}"
+ id: "res_fence_gce"
agent: "stonith:fence_gce"
options:
project: "{{ sap_ha_pacemaker_cluster_gcp_project }}"
zone: "{{ sap_ha_pacemaker_cluster_gcp_region_zone }}"
- port: "{{ ansible_hostname }}"
+ pcmk_reboot_timeout: 300
+ pcmk_monitor_retries: 4
+ pcmk_delay_max: 30
# Platform specific VIP handling
sap_ha_pacemaker_cluster_vip_method: gcp_nlb_reserved_ip_haproxy # gcp_vpc_move_route
-sap_ha_pacemaker_cluster_vip_resource_group_name: vipgroup
+sap_ha_pacemaker_cluster_vip_group_prefix: group_
+
+# GCP needs haproxy and ports defined
+sap_ha_pacemaker_cluster_healthcheck_hana_primary_port: "620{{ sap_ha_pacemaker_cluster_hana_instance_nr }}"
+sap_ha_pacemaker_cluster_healthcheck_hana_secondary_port: >-
+ {% if sap_ha_pacemaker_cluster_vip_hana_secondary_ip_address | length > 0 -%}
+ 620{{ sap_ha_pacemaker_cluster_hana_instance_nr | int + 1 }}
+ {%- else %}{% endif %}
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port: "620{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port: "620{{ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_port: "620{{ sap_ha_pacemaker_cluster_nwas_abap_pas_instance_nr }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_port: "620{{ sap_ha_pacemaker_cluster_nwas_abap_aas_instance_nr }}"
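+# The HANA secondary (read-only) health check port above is derived from the instance number
+# plus one, so it does not collide with the primary health check port on the same host.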
+
+# Platform corosync totem configuration
+sap_ha_pacemaker_cluster_corosync_totem:
+ options:
+ token: 20000
+ token_retransmits_before_loss_const: 10
+ join: 60
+ max_messages: 20
__sap_ha_pacemaker_cluster_available_vip_agents:
ipaddr:
agent: "ocf:heartbeat:IPaddr2"
- # Recommended method is to use an internal passthrough Network Load Balancer (NLB for TCP/UDP) and Reserved Static Internal IP Address, with host health check response using socat or HAProxy
- # Alternative method is to use static route Virtual IP (outside VPC Subnet ranges) using VPC Routing Table
- # Refer to Google Cloud Compute Engine Reserved Static Internal IP Address, https://cloud.google.com/compute/docs/ip-addresses/reserve-static-internal-ip-address
- # Refer to Google Cloud Load Balancing - Internal passthrough Network Load Balancer overview, https://cloud.google.com/load-balancing/docs/internal
- # Refer to SAP HANA guidance 1, https://cloud.google.com/solutions/sap/docs/sap-hana-ha-planning-guide#virtual_ip_address
- # Refer to SAP HANA guidance 2, https://cloud.google.com/solutions/sap/docs/sap-hana-ha-planning-guide#vip_implementation
- # Refer to SAP NetWeaver guidance, https://cloud.google.com/solutions/sap/docs/sap-hana-ha-planning-guide#virtual_ip_address
+ # Recommended method is to use an internal passthrough Network Load Balancer (NLB for TCP/UDP)
+ # and Reserved Static Internal IP Address, with host health check response using socat or HAProxy.
+ #
+ # Alternative method is to use static route Virtual IP (outside VPC Subnet ranges) using
+ # VPC Routing Table.
+ #
+ # Refer to Google Cloud Compute Engine Reserved Static Internal IP Address:
+ # https://cloud.google.com/compute/docs/ip-addresses/reserve-static-internal-ip-address
+ #
+ # Refer to Google Cloud Load Balancing - Internal passthrough Network Load Balancer overview:
+ # https://cloud.google.com/load-balancing/docs/internal
+ #
+ # Refer to SAP HANA guidance 1:
+ # https://cloud.google.com/solutions/sap/docs/sap-hana-ha-planning-guide#virtual_ip_address
+ #
+ # Refer to SAP HANA guidance 2:
+ # https://cloud.google.com/solutions/sap/docs/sap-hana-ha-planning-guide#vip_implementation
+ #
+ # Refer to SAP NetWeaver guidance:
+ # https://cloud.google.com/solutions/sap/docs/sap-hana-ha-planning-guide#virtual_ip_address
# Recommended method
# Use Linux Pacemaker resource class/standard (pcs resource standards) as
@@ -49,12 +83,14 @@ __sap_ha_pacemaker_cluster_available_vip_agents:
agent: "service:haproxy"
with: ipaddr
- # Alternative method
+ # Alternative method:
# Move Virtual/Floating IP, must be outside of the VPC Subnet Range, by replacing
# (temporary route, delete, create, delete temp.)
# the VPC Routing Table record with next-hop as the VM Instance's ID.
+ #
# Execution via Python urllib to GCP Compute Engine API and GCP Metadata Server API
- # heartbeat:gcp-vpc-move-route is the replacement of heartbeat:gcp-vpc-move-ip (which uses execution via GCloud CLI, and is itself a fork of external/gcp:route)
+ # heartbeat:gcp-vpc-move-route is the replacement of heartbeat:gcp-vpc-move-ip
+ # (which uses execution via GCloud CLI, and is itself a fork of external/gcp:route)
gcp_vpc_move_route:
agent: "ocf:heartbeat:gcp-vpc-move-route"
with: ipaddr
diff --git a/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_ibmcloud_powervs.yml b/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_ibmcloud_powervs.yml
index 82e71e1c9..e5938f11d 100644
--- a/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_ibmcloud_powervs.yml
+++ b/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_ibmcloud_powervs.yml
@@ -6,8 +6,8 @@
sap_ha_pacemaker_cluster_fence_agent_packages:
- fence-agents-ibm-powervs
-#__sap_ha_pacemaker_cluster_platform_extra_packages:
-# -
+# __sap_ha_pacemaker_cluster_platform_extra_packages:
+# -
__sap_ha_pacemaker_cluster_repos:
- id: "rhel-{{ ansible_distribution_major_version }}-for-{{ ansible_architecture }}-highavailability-e4s-rpms"
@@ -28,26 +28,35 @@ sap_ha_pacemaker_cluster_stonith_default:
token: "{{ sap_ha_pacemaker_cluster_ibmcloud_api_key }}"
region: "{{ sap_ha_pacemaker_cluster_ibmcloud_region }}"
crn: "{{ sap_ha_pacemaker_cluster_ibmcloud_powervs_workspace_crn }}"
- instance: "{{ sap_ha_pacemaker_cluster_ibmcloud_powervs_workspace_guid }}" # Identified during execution initial tasks, populated when variables are imported
- plug: "{{ sap_ha_pacemaker_cluster_ibmcloud_powervs_instance_id }}" # Identified during execution initial tasks, populated when variables are imported
- api-type: "{{ sap_ha_pacemaker_cluster_ibmcloud_powervs_api_type | default('public') }}" # Dependent on network interface attachments, if no public network interface then 'private' value must be provided.
- proxy: "{{ sap_ha_pacemaker_cluster_ibmcloud_powervs_forward_proxy_url | default('') }}" # Dependent on network interface attachments, if no public network interface then a valid HTTP Proxy URL value must be provided.
-sap_ha_pacemaker_cluster_fence_options:
- pcmk_reboot_retries: 4
- pcmk_reboot_timeout: 600
- pcmk_monitor_timeout: 600
- pcmk_status_timeout: 60
- power_timeout: 240
+    # Identified during execution of the initial tasks, populated when variables are imported
+ instance: "{{ sap_ha_pacemaker_cluster_ibmcloud_powervs_workspace_guid }}"
+
+    # Unnecessary when using pcmk_host_map. Identified during execution of the initial tasks,
+    # populated when variables are imported.
+ # plug: "{{ sap_ha_pacemaker_cluster_ibmcloud_powervs_instance_id }}"
+
+ # Dependent on network interface attachments, if no public network interface
+ # then 'private' value must be provided.
+ api-type: "{{ sap_ha_pacemaker_cluster_ibmcloud_powervs_api_type | default('public') }}"
+
+ # Dependent on network interface attachments, if no public network interface
+ # then a valid HTTP Proxy URL value must be provided.
+ proxy: "{{ sap_ha_pacemaker_cluster_ibmcloud_powervs_forward_proxy_url | default('') }}"
+
+ pcmk_reboot_timeout: 600
+ pcmk_monitor_timeout: 600
+ pcmk_status_timeout: 60
# Platform specific VIP handling
-sap_ha_pacemaker_cluster_vip_method: ipaddr
-sap_ha_pacemaker_cluster_vip_resource_group_name: vipgroup
+sap_ha_pacemaker_cluster_vip_method: ipaddr_custom
__sap_ha_pacemaker_cluster_available_vip_agents:
- # IPaddr2 requires a Virtual IP within a common Network (i.e. 1 VLAN / 1 VPC / 1 VNet), one or more Subnets may be used
- # With this design restriction, IPaddr2 is only to be used for High Availability within a single location (i.e. 1 Availability Zone / Datacenter / Location within 1 Region)
- ipaddr:
+ # IPaddr2 requires a Virtual IP within a common Network (i.e. 1 VLAN / 1 VPC / 1 VNet),
+ # one or more Subnets may be used.
+ # With this design restriction, IPaddr2 is only to be used for High Availability within
+ # a single location (i.e. 1 Availability Zone / Datacenter / Location within 1 Region).
+ ipaddr_custom:
agent: "ocf:heartbeat:IPaddr2"
diff --git a/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_ibmcloud_vs.yml b/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_ibmcloud_vs.yml
index 076495c61..e70b1380d 100644
--- a/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_ibmcloud_vs.yml
+++ b/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_ibmcloud_vs.yml
@@ -6,8 +6,8 @@
sap_ha_pacemaker_cluster_fence_agent_packages:
- fence-agents-ibm-vpc
-#__sap_ha_pacemaker_cluster_platform_extra_packages:
-# -
+# __sap_ha_pacemaker_cluster_platform_extra_packages:
+# -
__sap_ha_pacemaker_cluster_repos:
- id: "rhel-{{ ansible_distribution_major_version }}-for-{{ ansible_architecture }}-highavailability-e4s-rpms"
@@ -26,20 +26,38 @@ sap_ha_pacemaker_cluster_stonith_default:
options:
apikey: "{{ sap_ha_pacemaker_cluster_ibmcloud_api_key }}"
region: "{{ sap_ha_pacemaker_cluster_ibmcloud_region }}"
+ pcmk_monitor_timeout: 600
# Platform specific VIP handling
-sap_ha_pacemaker_cluster_vip_method: haproxy_with_ipaddr
-sap_ha_pacemaker_cluster_vip_resource_group_name: vipgroup
+sap_ha_pacemaker_cluster_vip_method: ibmcloud_alb_haproxy
+
+# For HAProxy, a non-empty port default is required to enter the resource creation flow.
+# TODO: task logic that configures actual haproxy listening ports,
+# otherwise pairs like ASCS/ERS will conflict
+sap_ha_pacemaker_cluster_healthcheck_hana_primary_port: "620{{ sap_ha_pacemaker_cluster_hana_instance_nr }}"
+sap_ha_pacemaker_cluster_healthcheck_hana_secondary_port: >-
+ {% if sap_ha_pacemaker_cluster_vip_hana_secondary_ip_address | length > 0 -%}
+ 620{{ sap_ha_pacemaker_cluster_hana_instance_nr | int + 1 }}
+ {%- else %}{% endif %}
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port: "620{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port: "620{{ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_port: "620{{ sap_ha_pacemaker_cluster_nwas_abap_pas_instance_nr }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_port: "620{{ sap_ha_pacemaker_cluster_nwas_abap_aas_instance_nr }}"
-__sap_ha_pacemaker_cluster_available_vip_agents:
- # IPaddr2 requires a Virtual IP within a common Network (i.e. 1 VLAN / 1 VPC / 1 VNet), one or more Subnets may be used
- # With this design restriction, IPaddr2 is only to be used for High Availability within a single location (i.e. 1 Availability Zone / Datacenter / Location within 1 Region)
- ipaddr:
- agent: "ocf:heartbeat:IPaddr2"
+__sap_ha_pacemaker_cluster_available_vip_agents:
- # Use haproxy daemon to listen for and respond to health check probe monitoring private network requests from IBM Cloud Application Load Balancer (ALB),
- # if failure to respond then the Load Balancer will perform failover activities
- haproxy_with_ipaddr:
- agent: "ocf:heartbeat:haproxy"
- with: ipaddr
+ # Refer to IBM Cloud Load Balancer - Private Network Load Balancer (NLB Layer 4)
+ # for HA within 1 AZ of 1 Region: https://cloud.ibm.com/docs/vpc?topic=vpc-network-load-balancers
+ #
+ # Refer to IBM Cloud Load Balancer - Private Application Load Balancer (ALB Layer 7)
+ # for HA across 2 AZ of 1 Region: https://cloud.ibm.com/docs/vpc?topic=vpc-load-balancers-about
+ #
+ # The IBM Cloud Load Balancer's Back-end Pool Health Check will poll for connection response
+ # from a host listening port (using a designated OS process such as netcat, socat or HAProxy).
+
+  # Use the haproxy daemon to listen for and respond to health check probes sent over the
+  # private network by the IBM Cloud Application Load Balancer (ALB).
+  # If the host fails to respond, the Load Balancer performs its failover activities.
+ ibmcloud_alb_haproxy:
+ agent: "service:haproxy"
diff --git a/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_msazure_vm.yml b/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_msazure_vm.yml
index cf23a19a2..d8607e3ba 100644
--- a/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_msazure_vm.yml
+++ b/roles/sap_ha_pacemaker_cluster/vars/platform_cloud_msazure_vm.yml
@@ -3,6 +3,9 @@
#
# TODO: make sure to first respect 'ha_cluster' native variables
+# The packages of the following lists will be installed by the 'ha_cluster' Linux System Role.
+# Any packages that are prerequisites for variable construction must be installed beforehand,
+# e.g. in the preconfigure-* tasks.
sap_ha_pacemaker_cluster_fence_agent_packages:
- fence-agents-azure-arm
@@ -29,17 +32,31 @@ sap_ha_pacemaker_cluster_stonith_default:
# Platform specific VIP handling
sap_ha_pacemaker_cluster_vip_method: azure_lb
-sap_ha_pacemaker_cluster_vip_resource_group_name: vipgroup
+
+# The VIP layer consists of 2 components - the VIP and the health check resource
+sap_ha_pacemaker_cluster_vip_group_prefix: group_
+
+sap_ha_pacemaker_cluster_healthcheck_hana_primary_port: "620{{ sap_ha_pacemaker_cluster_hana_instance_nr }}"
+sap_ha_pacemaker_cluster_healthcheck_hana_secondary_port: >-
+ {% if sap_ha_pacemaker_cluster_vip_hana_secondary_ip_address | length > 0 -%}
+ 620{{ sap_ha_pacemaker_cluster_hana_instance_nr | int + 1 }}
+ {%- else %}{% endif %}
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port: "620{{ sap_ha_pacemaker_cluster_nwas_abap_ascs_instance_nr }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port: "620{{ sap_ha_pacemaker_cluster_nwas_abap_ers_instance_nr }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_pas_port: "620{{ sap_ha_pacemaker_cluster_nwas_abap_pas_instance_nr }}"
+sap_ha_pacemaker_cluster_healthcheck_nwas_abap_aas_port: "620{{ sap_ha_pacemaker_cluster_nwas_abap_aas_instance_nr }}"
__sap_ha_pacemaker_cluster_available_vip_agents:
- # IPaddr2 requires a Virtual IP within a common Network (i.e. 1 VLAN / 1 VPC / 1 VNet), one or more Subnets may be used
- # With this design restriction, IPaddr2 is only to be used for High Availability within a single location (i.e. 1 Availability Zone / Datacenter / Location within 1 Region)
+ # IPaddr2 requires a Virtual IP within a common Network (i.e. 1 VLAN / 1 VPC / 1 VNet),
+ # one or more Subnets may be used.
+ # With this design restriction, IPaddr2 is only to be used for High Availability within
+ # a single location (i.e. 1 Availability Zone / Datacenter / Location within 1 Region)
ipaddr:
agent: "ocf:heartbeat:IPaddr2"
- # Use nc/socat to listen for and respond to health check probe monitoring requests from Azure Load Balancer,
- # if failure to respond then the Load Balancer will perform failover activities
+  # Use nc/socat to listen for and respond to health check probe requests from the
+  # Azure Load Balancer. If the host fails to respond, the Load Balancer performs its failover activities.
azure_lb:
agent: "ocf:heartbeat:azure-lb"
with: ipaddr
diff --git a/roles/sap_ha_pacemaker_cluster/vars/platform_hyp_ibmpower_vm.yml b/roles/sap_ha_pacemaker_cluster/vars/platform_hyp_ibmpower_vm.yml
index 083622f47..dc4300e65 100644
--- a/roles/sap_ha_pacemaker_cluster/vars/platform_hyp_ibmpower_vm.yml
+++ b/roles/sap_ha_pacemaker_cluster/vars/platform_hyp_ibmpower_vm.yml
@@ -6,8 +6,8 @@
sap_ha_pacemaker_cluster_fence_agent_packages:
- fence-agents-lpar
-#__sap_ha_pacemaker_cluster_platform_extra_packages:
-# -
+# __sap_ha_pacemaker_cluster_platform_extra_packages:
+# -
__sap_ha_pacemaker_cluster_repos:
- id: "rhel-{{ ansible_distribution_major_version }}-for-{{ ansible_architecture }}-highavailability-e4s-rpms"
@@ -30,24 +30,27 @@ sap_ha_pacemaker_cluster_stonith_default:
username: "{{ sap_ha_pacemaker_cluster_ibmpower_vm_hmc_host_login }}"
password: "{{ sap_ha_pacemaker_cluster_ibmpower_vm_hmc_host_login_password }}"
hmc_version: "{{ sap_ha_pacemaker_cluster_ibmpower_vm_hmc_host_version | default('4') }}"
- managed: "{{ sap_ha_pacemaker_cluster_ibmpower_vm_hmc_system_host_mtms }}" # Identified during execution initial tasks, populated when variables are imported
- #plug: "{{ sap_ha_pacemaker_cluster_ibmpower_vm_hmc_system_partition_name }}" # Unnecessary when using pcmk_host_map. Identified during execution initial tasks, populated when variables are imported
+ managed: "{{ sap_ha_pacemaker_cluster_ibmpower_vm_hmc_system_host_mtms }}"
+    # Identified during execution of the initial tasks, populated when variables are imported
-sap_ha_pacemaker_cluster_fence_options:
- pcmk_reboot_retries: 4
- pcmk_reboot_timeout: 600
- pcmk_monitor_timeout: 600
- pcmk_status_timeout: 60
- power_timeout: 240
+ # plug: "{{ sap_ha_pacemaker_cluster_ibmpower_vm_hmc_system_partition_name }}"
+    # Unnecessary when using pcmk_host_map. Identified during execution of the initial tasks, populated when variables are imported
+
+ pcmk_reboot_retries: 4
+ pcmk_reboot_timeout: 600
+ pcmk_monitor_timeout: 600
+ pcmk_status_timeout: 60
+ power_timeout: 240
# Platform specific VIP handling
sap_ha_pacemaker_cluster_vip_method: ipaddr
-sap_ha_pacemaker_cluster_vip_resource_group_name: vipgroup
__sap_ha_pacemaker_cluster_available_vip_agents:
- # IPaddr2 requires a Virtual IP within a common Network (i.e. 1 VLAN / 1 VPC / 1 VNet), one or more Subnets may be used
- # With this design restriction, IPaddr2 is only to be used for High Availability within a single location (i.e. 1 Availability Zone / Datacenter / Location within 1 Region)
+ # IPaddr2 requires a Virtual IP within a common Network (i.e. 1 VLAN / 1 VPC / 1 VNet),
+ # one or more Subnets may be used
+ # With this design restriction, IPaddr2 is only to be used for High Availability within
+ # a single location (i.e. 1 Availability Zone / Datacenter / Location within 1 Region)
ipaddr:
agent: "ocf:heartbeat:IPaddr2"
diff --git a/roles/sap_hana_install/.ansible-lint b/roles/sap_hana_install/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_hana_install/.ansible-lint
+++ b/roles/sap_hana_install/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_hana_install/README.md b/roles/sap_hana_install/README.md
index 42ad2ca06..c81786080 100644
--- a/roles/sap_hana_install/README.md
+++ b/roles/sap_hana_install/README.md
@@ -267,7 +267,7 @@ These checks are only performed if `sap_hana_install_force` is set to `true`. It
- If file `hdblcm` is found, skip the next step and proceed with the `hdblcm` existence check.
- - If file `hdblcm` ist not found, proceed with the next step.
+ - If file `hdblcm` is not found, proceed with the next step.
- Prepare SAR files for `hdblcm`:
diff --git a/roles/sap_hana_install/defaults/main.yml b/roles/sap_hana_install/defaults/main.yml
index 771c566ba..c388228b8 100644
--- a/roles/sap_hana_install/defaults/main.yml
+++ b/roles/sap_hana_install/defaults/main.yml
@@ -20,7 +20,7 @@ sap_hana_install_software_extract_directory: "{{ sap_hana_install_software_direc
# set the value to true. By default, this directory will not be removed
sap_hana_install_cleanup_extract_directory: false
-# Set this variabe to `yes` if you want to copy the SAR files from `sap_hana_install_software_directory`
+# Set this variable to `yes` if you want to copy the SAR files from `sap_hana_install_software_directory`
# to `sap_hana_install_software_extract_directory/sarfiles` before extracting.
# This might be useful if the SAR files are on a slow fileshare.
sap_hana_install_copy_sarfiles: no
@@ -59,7 +59,7 @@ sap_hana_install_verify_signature: no
sap_hana_install_configfile_directory: "{{ sap_hana_install_software_extract_directory }}/configfiles"
# If a custom path for sap_hana_install_configfile_directory was defined and if there is a requirement to cleanup this directory,
-# then set "sap_hana_install_cleanup_configfile_directory" as true. Incase if a custom path was not defined and
+# then set "sap_hana_install_cleanup_configfile_directory" as true. If a custom path was not defined and
# "sap_hana_install_cleanup_extract_directory" was set as true, then the configfiles will be removed.
sap_hana_install_cleanup_configfile_directory: false
diff --git a/roles/sap_hana_install/meta/runtime.yml b/roles/sap_hana_install/meta/runtime.yml
deleted file mode 100644
index 2ee3c9fa9..000000000
--- a/roles/sap_hana_install/meta/runtime.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-requires_ansible: '>=2.9.10'
diff --git a/roles/sap_hana_install/tasks/assert-addhosts-loop-block.yml b/roles/sap_hana_install/tasks/assert-addhosts-loop-block.yml
index 0210a5cf1..72439716a 100644
--- a/roles/sap_hana_install/tasks/assert-addhosts-loop-block.yml
+++ b/roles/sap_hana_install/tasks/assert-addhosts-loop-block.yml
@@ -10,7 +10,7 @@
msg: "Instance profile: '/hana/shared/{{ sap_hana_install_sid }}/profile/\
{{ sap_hana_install_sid }}_HDB{{ sap_hana_install_number }}_{{ line_item }}'"
-- name: SAP HANA Add Hosts - Assert that there is no instance profile for the addional hosts
+- name: SAP HANA Add Hosts - Assert that there is no instance profile for the additional hosts
ansible.builtin.assert:
that: not __sap_hana_install_register_instance_profile_addhost.stat.exists
fail_msg:
@@ -28,7 +28,7 @@
ansible.builtin.debug:
msg: "Instance directory in /usr/sap: '/usr/sap/{{ sap_hana_install_sid }}/HDB{{ sap_hana_install_number }}/{{ line_item }}'"
-- name: SAP HANA Add Hosts - Assert that there is no SAP HANA instance directory in '/usr/sap' for the addional hosts
+- name: SAP HANA Add Hosts - Assert that there is no SAP HANA instance directory in '/usr/sap' for the additional hosts
ansible.builtin.assert:
that: not __sap_hana_install_register_usr_sap_instance_directory.stat.exists
fail_msg:
diff --git a/roles/sap_hana_install/tasks/hana_addhosts.yml b/roles/sap_hana_install/tasks/hana_addhosts.yml
index 94e21d847..776be4f79 100644
--- a/roles/sap_hana_install/tasks/hana_addhosts.yml
+++ b/roles/sap_hana_install/tasks/hana_addhosts.yml
@@ -23,7 +23,8 @@
when: not ansible_check_mode
block:
- - name: SAP HANA Add Hosts - Run 'hdblcm --list_systems'
+# Reason for noqa: We can safely fail at the last command in the pipeline.
+ - name: SAP HANA Add Hosts - Run 'hdblcm --list_systems' # noqa risky-shell-pipe
ansible.builtin.shell: |
./hdblcm --list_systems | awk '/\/hana\/shared\/{{ sap_hana_install_sid }}/{a=1}
/hosts:/{if (a==1){
@@ -32,7 +33,7 @@
args:
chdir: "{{ sap_hana_install_install_path }}/{{ sap_hana_install_sid }}/hdblcm"
register: __sap_hana_install_register_hdblcm_list_systems
- changed_when: no
+ changed_when: false
- name: SAP HANA Add Hosts - Show the output of hdblcm --list_systems
ansible.builtin.debug:
@@ -88,7 +89,7 @@
args:
chdir: "{{ sap_hana_install_install_path }}/{{ sap_hana_install_sid }}/hdblcm"
register: __sap_hana_install_register_addhosts_result
- changed_when: no
+ changed_when: false
when: not ansible_check_mode
- name: SAP HANA Add Hosts - Show the HANA version and hosts
diff --git a/roles/sap_hana_install/tasks/hana_exists.yml b/roles/sap_hana_install/tasks/hana_exists.yml
index 0aa2be9bc..9dbeb50a3 100644
--- a/roles/sap_hana_install/tasks/hana_exists.yml
+++ b/roles/sap_hana_install/tasks/hana_exists.yml
@@ -4,16 +4,16 @@
- name: SAP HANA Checks - Check if saphostctrl is installed
ansible.builtin.stat:
path: /usr/sap/hostctrl/exe/saphostctrl
- check_mode: no
+ check_mode: false
register: __sap_hana_install_register_stat_saphostctrl
- failed_when: no
+ failed_when: false
- name: SAP HANA Checks - Check if SAP instances are installed with saphostctrl
when: __sap_hana_install_register_stat_saphostctrl.stat.exists
block:
- name: SAP HANA Checks - Get list of installed SAP instances
- ansible.builtin.shell: /usr/sap/hostctrl/exe/saphostctrl -function ListInstances | cut -d":" -f2-
+ ansible.builtin.shell: set -o pipefail && /usr/sap/hostctrl/exe/saphostctrl -function ListInstances | cut -d":" -f2-
register: __sap_hana_install_register_instancelist
changed_when: false
@@ -66,9 +66,9 @@
- name: SAP HANA Checks - Get status of '/hana/shared/{{ sap_hana_install_sid }}'
ansible.builtin.stat:
path: "/hana/shared/{{ sap_hana_install_sid }}"
- check_mode: no
+ check_mode: false
register: __sap_hana_install_register_stat_hana_shared_sid_assert
- failed_when: no
+ failed_when: false
- name: SAP HANA Checks - Get contents of '/hana/shared/{{ sap_hana_install_sid }}'
ansible.builtin.find:
@@ -87,9 +87,9 @@
- name: SAP HANA Checks - Get status of '/usr/sap/{{ sap_hana_install_sid }}'
ansible.builtin.stat:
path: "/usr/sap/{{ sap_hana_install_sid }}"
- check_mode: no
+ check_mode: false
register: __sap_hana_install_register_stat_usr_sap_sid_assert
- failed_when: no
+ failed_when: false
- name: SAP HANA Checks - Get contents of '/usr/sap/{{ sap_hana_install_sid }}'
ansible.builtin.find:
@@ -113,10 +113,10 @@
- name: SAP HANA Checks - Get info about '{{ sap_hana_install_sid | lower }}adm' user
ansible.builtin.command: getent passwd {{ sap_hana_install_sid | lower }}adm
- check_mode: no
+ check_mode: false
register: __sap_hana_install_register_getent_passwd_sidadm
- changed_when: no
- failed_when: no
+ changed_when: false
+ failed_when: false
- name: SAP HANA Checks - Fail if the user '{{ sap_hana_install_sid | lower }}adm' exists
ansible.builtin.fail:
@@ -135,10 +135,10 @@
- name: SAP HANA Checks - Get info about the ID of the 'sapsys' group
ansible.builtin.command: getent group sapsys
- check_mode: no
+ check_mode: false
register: __sap_hana_install_register_getent_group_sapsys
- changed_when: no
- failed_when: no
+ changed_when: false
+ failed_when: false
- name: SAP HANA Checks - Define new variable for the assertion
ansible.builtin.set_fact:
diff --git a/roles/sap_hana_install/tasks/post_install.yml b/roles/sap_hana_install/tasks/post_install.yml
index 927442f29..697615cca 100644
--- a/roles/sap_hana_install/tasks/post_install.yml
+++ b/roles/sap_hana_install/tasks/post_install.yml
@@ -120,6 +120,7 @@
args:
chdir: "{{ sap_hana_install_install_path }}/{{ sap_hana_install_sid }}/global/hdb/install/bin"
register: __sap_hana_install_register_installation_check
+ changed_when: false
when: sap_hana_install_use_hdbcheck | d(true)
- name: SAP HANA hdblcm installation check with hdbcheck - Display the result
@@ -133,6 +134,7 @@
args:
chdir: "{{ sap_hana_install_install_path }}/{{ sap_hana_install_sid }}/hdblcm"
register: __sap_hana_install_register_installation_check
+ changed_when: false
when: not sap_hana_install_use_hdbcheck | d(true)
- name: SAP HANA hdblcm installation check with hdblcm - Display the result
diff --git a/roles/sap_hana_install/tasks/pre_install/extract_sarfile.yml b/roles/sap_hana_install/tasks/pre_install/extract_sarfile.yml
index 98570de34..7cfed7751 100644
--- a/roles/sap_hana_install/tasks/pre_install/extract_sarfile.yml
+++ b/roles/sap_hana_install/tasks/pre_install/extract_sarfile.yml
@@ -36,12 +36,14 @@
mv ${extracted_dir} ..
args:
chdir: "{{ __sap_hana_install_tmp_software_extract_directory }}"
+ changed_when: true
when: "'SAPHOST' not in __sap_hana_install_passed_sarfile"
- name: SAP HANA hdblcm prepare - Move files into the correct place, SAP Host Agent
ansible.builtin.command: mv ./tmp/SAP_HOST_AGENT .
args:
chdir: "{{ sap_hana_install_software_extract_directory }}"
+ changed_when: true
when: "'SAPHOST' in __sap_hana_install_passed_sarfile"
- name: SAP HANA hdblcm prepare - Remove temporary extraction directory '{{ sap_hana_install_software_extract_directory }}/tmp'
diff --git a/roles/sap_hana_preconfigure/.ansible-lint b/roles/sap_hana_preconfigure/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_hana_preconfigure/.ansible-lint
+++ b/roles/sap_hana_preconfigure/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_hana_preconfigure/README.md b/roles/sap_hana_preconfigure/README.md
index 863f13e19..6ee18a682 100644
--- a/roles/sap_hana_preconfigure/README.md
+++ b/roles/sap_hana_preconfigure/README.md
@@ -26,15 +26,25 @@ for RHEL 8.x:
- rhel-8-for-[x86_64|ppc64le]-appstream-e4s-rpms
- rhel-8-for-[x86_64|ppc64le]-sap-solutions-e4s-rpms
+for RHEL 9.x:
+- rhel-9-for-[x86_64|ppc64le]-baseos-e4s-rpms
+- rhel-9-for-[x86_64|ppc64le]-appstream-e4s-rpms
+- rhel-9-for-[x86_64|ppc64le]-sap-solutions-e4s-rpms
+
for SLES 15.x:
-- SLE-Module-SAP-Applications15-[SP number]-Pool
+- SLE-Module-SAP-Applications15-[SP number]-Pool
- SLE-Module-SAP-Applications15-[SP number]-Updates
- SLE-Product-SLES_SAP15-[SP number]-Pool
- SLE-Product-SLES_SAP15-[SP number]-Updates
For details on configuring Red Hat, see the knowledge base article: [How to subscribe SAP HANA systems to the Update Services for SAP Solutions](https://access.redhat.com/solutions/3075991)). If you set role parameter sap_hana_preconfigure_enable_sap_hana_repos to `yes`, the role can enable these repos.
-To install HANA on Red Hat Enterprise Linux 6, 7, or 8, you need some additional packages which are contained in the rhel-sap-hana-for-rhel-7-[server|for-power-le]-e4s-rpms or rhel-8-for-[x86_64|ppc64le]-sap-solutions-e4s-rpms repo.
+To install HANA on Red Hat Enterprise Linux 7, 8, or 9, you need some additional packages which are contained in the
+- rhel-sap-hana-for-rhel-7-[server|for-power-le]-e4s-rpms,
+- rhel-8-for-[x86_64|ppc64le]-sap-solutions-e4s-rpms, or
+- rhel-9-for-[x86_64|ppc64le]-sap-solutions-e4s-rpms
+
+repository.
To get this repository you need to have one of the following products:
@@ -75,7 +85,8 @@ Do not run this role against an SAP HANA or other production system. The role wi
Changes
-------
-1) Previous versions of this role used variable sap_hana_preconfigure_use_tuned_where_possible to switch between either tuned settings or kernel command line settings (where applicable).
+1) Previous versions of this role used the variable sap_hana_preconfigure_use_tuned_where_possible to switch between either tuned settings
+or kernel command line settings (where applicable).
The current version modifies this behavior:
- The variable sap_hana_preconfigure_use_tuned_where_possible has been renamed to sap_hana_preconfigure_use_tuned
- The variable sap_hana_preconfigure_switch_to_tuned_profile_sap_hana has been removed.
@@ -83,10 +94,10 @@ The current version modifies this behavior:
If sap_hana_preconfigure_use_tuned is set to `no`, the role will perform a static configuration, including the modification of the linux command line in grub.
- The role can use tuned, or configure the kernel command line, or both.
-2) Previous versions of this role used variable sap_hana_preconfigure_selinux_state to set the SELinux state to disabled, which is
-mentioned in SAP notes 2292690 (RHEL 7) and 2777782 (RHEL 8). As role sap_general_preconfigure already allows to specify the desired
-SELinux state, and as sap_general_preconfigure is run before sap_hana_preconfigure, there is no need any more to let
-sap_hana_preconfigure configure the SELinux state. Same applies to the assertion of the SELinux state.
+2) Previous versions of this role used the variable sap_hana_preconfigure_selinux_state to set the SELinux state to disabled.
+As the role sap_general_preconfigure already allows specifying the desired SELinux state, and as sap_general_preconfigure
+is always run before sap_hana_preconfigure, there is no longer a need to let sap_hana_preconfigure configure the SELinux state.
+The same applies to the assertion of the SELinux state.
3) SLES systems are now configured using saptune rather than the ansible implementation of the notes.
@@ -159,10 +170,11 @@ For the RHEL System Roles for SAP, or for Red Hat Automation Hub, use 'redhat.rh
### sap_hana_preconfigure_min_rhel_release_check
- _Type:_ `bool`
-- _Default:_ `true`
+- _Default:_ `false`
-Check the RHEL release against a predefined list of known SAP HANA supported RHEL minor releases.
-If this parameter is set to `false`, the role will *not* perform this check.
+Check the RHEL release against parameter `sap_hana_preconfigure_supported_rhel_minor_releases`, which is a list of
+known SAP HANA supported RHEL minor releases. By default, the role will display a message and continue running if
+the RHEL release is not part of that list. If set to `true`, the role will fail in such a case.
### sap_hana_preconfigure_supported_rhel_minor_releases
- _Type:_ `list` with elements of type `str`
diff --git a/roles/sap_hana_preconfigure/defaults/main.yml b/roles/sap_hana_preconfigure/defaults/main.yml
index da4b64ddb..b1dfe49c5 100644
--- a/roles/sap_hana_preconfigure/defaults/main.yml
+++ b/roles/sap_hana_preconfigure/defaults/main.yml
@@ -34,9 +34,10 @@ sap_hana_preconfigure_system_roles_collection: 'fedora.linux_system_roles'
# - fedora.linux_system_roles
# - redhat.rhel_system_roles
-sap_hana_preconfigure_min_rhel_release_check: true
-# Check the RHEL release against a predefined list of known SAP HANA supported RHEL minor releases.
-# If this parameter is set to `false`, the role will *not* perform this check.
+sap_hana_preconfigure_min_rhel_release_check: false
+# Check the RHEL release against parameter `sap_hana_preconfigure_supported_rhel_minor_releases`, which is a list of
+# known SAP HANA supported RHEL minor releases. By default, the role will display a message and continue running if
+# the RHEL release is not part of that list. If set to `true`, the role will fail in such a case.
sap_hana_preconfigure_supported_rhel_minor_releases: "{{ __sap_hana_preconfigure_supported_rhel_minor_releases }}"
# Use this parameter to set your own list of SAP HANA supported RHEL minor releases.
@@ -183,5 +184,3 @@ sap_hana_preconfigure_saptune_solution: 'HANA'
sap_hana_preconfigure_saptune_azure: false
# On Azure, TCP timestamps, reuse and recycle should be disabled (SLES for SAP Applications).
# Set this parameter to `true` on Azure.
-
-# END: Default Variables for sap_hana_preconfigure
diff --git a/roles/sap_hana_preconfigure/handlers/main.yml b/roles/sap_hana_preconfigure/handlers/main.yml
index 49e5191c7..ee7eb5c9d 100644
--- a/roles/sap_hana_preconfigure/handlers/main.yml
+++ b/roles/sap_hana_preconfigure/handlers/main.yml
@@ -3,7 +3,7 @@
- name: "Check if server is booted in BIOS or UEFI mode"
ansible.builtin.stat:
path: /sys/firmware/efi
- get_checksum: no
+ get_checksum: false
register: __sap_hana_preconfigure_register_stat_sys_firmware_efi
listen: __sap_hana_preconfigure_regenerate_grub2_conf_handler
when:
@@ -19,6 +19,7 @@
- name: "Run grub-mkconfig (BIOS mode)"
ansible.builtin.command: grub2-mkconfig -o /boot/grub2/grub.cfg
register: __sap_hana_preconfigure_register_grub2_mkconfig_bios_mode
+ changed_when: true
listen: __sap_hana_preconfigure_regenerate_grub2_conf_handler
notify: __sap_hana_preconfigure_reboot_handler
when:
@@ -51,6 +52,7 @@
- name: "Run grub-mkconfig (UEFI mode)"
ansible.builtin.command: "grub2-mkconfig -o {{ __sap_hana_preconfigure_uefi_boot_dir }}"
register: __sap_hana_preconfigure_register_grub2_mkconfig_uefi_mode
+ changed_when: true
listen: __sap_hana_preconfigure_regenerate_grub2_conf_handler
notify: __sap_hana_preconfigure_reboot_handler
when:
@@ -69,6 +71,7 @@
- name: "Run grubby for enabling TSX"
ansible.builtin.command: grubby --args="tsx=on" --update-kernel=ALL
register: __sap_hana_preconfigure_register_grubby_update
+ changed_when: true
listen: __sap_hana_preconfigure_grubby_update_handler
notify: __sap_hana_preconfigure_reboot_handler
diff --git a/roles/sap_hana_preconfigure/meta/argument_specs.yml b/roles/sap_hana_preconfigure/meta/argument_specs.yml
index bcf189568..c1b59e3bb 100644
--- a/roles/sap_hana_preconfigure/meta/argument_specs.yml
+++ b/roles/sap_hana_preconfigure/meta/argument_specs.yml
@@ -83,10 +83,11 @@ argument_specs:
type: str
sap_hana_preconfigure_min_rhel_release_check:
- default: true
+ default: false
description:
- - Check the RHEL release against a predefined list of known SAP HANA supported RHEL minor releases.
- - If this parameter is set to `false`, the role will *not* perform this check.
+ - Check the RHEL release against parameter `sap_hana_preconfigure_supported_rhel_minor_releases`, which is a list of
+ - known SAP HANA supported RHEL minor releases. By default, the role will display a message and continue running if
+ - the RHEL release is not part of that list. If set to `true`, the role will fail in such a case.
required: false
type: bool
diff --git a/roles/sap_hana_preconfigure/meta/runtime.yml b/roles/sap_hana_preconfigure/meta/runtime.yml
deleted file mode 100644
index 2ee3c9fa9..000000000
--- a/roles/sap_hana_preconfigure/meta/runtime.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-requires_ansible: '>=2.9.10'
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/assert-installation.yml b/roles/sap_hana_preconfigure/tasks/RedHat/assert-installation.yml
index b7a451869..1a829bacc 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/assert-installation.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/assert-installation.yml
@@ -1,17 +1,21 @@
---
-- name: Assert that the RHEL release is supported for SAP HANA
+- name: Assert that the system is running a RHEL release which is supported for SAP HANA
ansible.builtin.assert:
- that: ansible_distribution_version in "{{ sap_hana_preconfigure_supported_rhel_minor_releases }}"
- fail_msg: "FAIL: The RHEL release {{ ansible_distribution_version }} is not supported for SAP HANA!"
+ that: ansible_distribution_version in sap_hana_preconfigure_supported_rhel_minor_releases
+ fail_msg:
+ - "FAIL: The RHEL release '{{ ansible_distribution_version }}' may not (yet) be supported for SAP HANA. Please check SAP note 2235581!"
+ - "NOTE: If necessary, adapt role parameter `sap_hana_preconfigure_supported_rhel_minor_releases` accordingly."
success_msg: "PASS: The RHEL release {{ ansible_distribution_version }} is supported for SAP HANA."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
when: sap_hana_preconfigure_min_rhel_release_check
- name: Report if the RHEL release is supported for SAP HANA
ansible.builtin.assert:
- that: ansible_distribution_version in "{{ sap_hana_preconfigure_supported_rhel_minor_releases }}"
- fail_msg: "WARN: The RHEL release {{ ansible_distribution_version }} is not supported for SAP HANA!"
+ that: ansible_distribution_version in sap_hana_preconfigure_supported_rhel_minor_releases
+ fail_msg:
+ - "WARN: The RHEL release '{{ ansible_distribution_version }}' may not (yet) be supported for SAP HANA. Please check SAP note 2235581!"
+ - "NOTE: If necessary, adapt role parameter `sap_hana_preconfigure_supported_rhel_minor_releases` accordingly."
success_msg: "INFO: The RHEL release {{ ansible_distribution_version }} is supported for SAP HANA."
ignore_errors: yes
when: not sap_hana_preconfigure_min_rhel_release_check
@@ -29,7 +33,7 @@
- name: Assert that all required repos are enabled
ansible.builtin.assert:
- that: "'{{ line_item }}' in __sap_hana_preconfigure_register_enabled_repos_assert.stdout_lines"
+ that: line_item in __sap_hana_preconfigure_register_enabled_repos_assert.stdout_lines
fail_msg: "FAIL: Repository '{{ line_item }}' is not enabled!"
success_msg: "PASS: Repository '{{ line_item }}' is enabled."
with_items:
@@ -50,7 +54,7 @@
- name: Assert that the RHEL release is locked correctly
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_subscription_manager_release_assert.stdout == '{{ ansible_distribution_version }}'"
+ that: __sap_hana_preconfigure_register_subscription_manager_release_assert.stdout == ansible_distribution_version
fail_msg: "FAIL: The RHEL release lock status is '{{ __sap_hana_preconfigure_register_subscription_manager_release_assert.stdout }}'
but the expected value is '{{ ansible_distribution_version }}'!"
success_msg: "PASS: The RHEL release is correctly locked to '{{ ansible_distribution_version }}'."
@@ -65,7 +69,7 @@
- name: Assert that all required packages are installed
ansible.builtin.assert:
- that: "'{{ line_item }}' in ansible_facts.packages"
+ that: line_item in ansible_facts.packages
fail_msg: "FAIL: Package '{{ line_item }}' is not installed!"
success_msg: "PASS: Package '{{ line_item }}' is installed."
with_items:
@@ -81,9 +85,13 @@
### # /opt/ibm/lop/configure
### # yum -y install ibm-power-managed-rhel7
###
-- name: Get install status of required IBM packages
+# Reasons for noqa:
+# command-instead-of-module: I need to examine the output of the yum info command.
+# risky-shell-pipe: If a package is not installed, the shell command will fail. But I want the command to
+# succeed so that the awk command can examine the result.
+- name: Get install status of required IBM packages # noqa command-instead-of-module risky-shell-pipe
ansible.builtin.shell: |
- set -o pipefail && yum info installed {{ __sap_hana_preconfigure_required_ppc64le | map('quote') | join(' ') }} |
+ yum info installed {{ __sap_hana_preconfigure_required_ppc64le | map('quote') | join(' ') }} |
awk '/Name/{n=$NF}/Version/{v=$NF}/Release/{r=$NF}/Description/{printf ("%s\n", n)}'
register: __sap_hana_preconfigure_register_required_ppc64le_packages_assert
changed_when: no
@@ -91,7 +99,7 @@
- name: Assert that all required IBM packages are installed
ansible.builtin.assert:
- that: "'{{ line_item }}' in __sap_hana_preconfigure_register_required_ppc64le_packages_assert.stdout_lines"
+ that: line_item in __sap_hana_preconfigure_register_required_ppc64le_packages_assert.stdout_lines
fail_msg: "FAIL: Package '{{ line_item }}' is not installed!"
success_msg: "PASS: Package '{{ line_item }}' is installed."
with_items:
@@ -108,7 +116,8 @@
- sap_hana_preconfigure_assert_ignore_errors | d(false)
block:
- - name: Assert - Create a list of minimum required package versions to be installed
+# Reason for noqa: We can safely fail at the last command in the pipeline.
+ - name: Assert - Create a list of minimum required package versions to be installed # noqa risky-shell-pipe
# How does it work?
# 1 - Print the required package name and version with a prefix "1" followed by a space.
# 2 - In the same output sequence, list all installed versions of this package with a prefix "2" followed by a space.
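The recurring change in these assert files is dropping nested Jinja2 templating from that: expressions, which are already evaluated as Jinja2; a minimal before/after sketch (the package check is illustrative only):

    # before: templating inside an expression, flagged by ansible-lint
    - ansible.builtin.assert:
        that: "'{{ line_item }}' in ansible_facts.packages"

    # after: a plain expression; line_item is resolved by the expression engine
    - ansible.builtin.assert:
        that: line_item in ansible_facts.packages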
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-2055470-loop-block.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-2055470-loop-block.yml
index b0729f9fb..99033b7fe 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-2055470-loop-block.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-2055470-loop-block.yml
@@ -9,9 +9,14 @@
ignore_errors: yes
when: __sap_hana_preconfigure_register_stat_sysctl_ibm_largesend_conf_assert.stat.exists
+# needed because line_item.value is not evaluated correctly in the that: statement
+- name: 2055470 - Set fact for line_item.value
+ ansible.builtin.set_fact:
+ __sap_hana_preconfigure_sysctl_value_2055470: "{{ line_item.value }}"
+
- name: 2055470 - Assert that {{ line_item.name }} is set correctly in /etc/sysctl.d/ibm_largesend.conf
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_sysctl_ibm_largesend_conf_parameter_assert.stdout == '{{ line_item.value }}'"
+ that: __sap_hana_preconfigure_register_sysctl_ibm_largesend_conf_parameter_assert.stdout == __sap_hana_preconfigure_sysctl_value_2055470
fail_msg: "FAIL: The value of '{{ line_item.name }}' in /etc/sysctl.d/ibm_largesend.conf is
'{{ __sap_hana_preconfigure_register_sysctl_ibm_largesend_conf_parameter_assert.stdout }}' but the expected value is 'line_item.value'!"
success_msg: "PASS: The value of '{{ line_item.name }}' in /etc/sysctl.d/ibm_largesend.conf is
@@ -27,7 +32,7 @@
- name: 2055470 - Assert that {{ line_item.name }} is set correctly as per sysctl
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_sysctl_ibm_parameter_assert.stdout == '{{ line_item.value }}'"
+ that: __sap_hana_preconfigure_register_sysctl_ibm_parameter_assert.stdout == __sap_hana_preconfigure_sysctl_value_2055470
fail_msg: "FAIL: The current value of '{{ line_item.name }}' as per sysctl is
'{{ __sap_hana_preconfigure_register_sysctl_ibm_parameter_assert.stdout }}' but the expected value is '{{ line_item.value }}'!"
success_msg: "PASS: The current value of '{{ line_item.name }}' as per sysctl is
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-2382421-loop-block.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-2382421-loop-block.yml
index 1e86d0e9f..acc436cac 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-2382421-loop-block.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-2382421-loop-block.yml
@@ -9,9 +9,14 @@
ignore_errors: yes
when: __sap_hana_preconfigure_register_stat_sysctl_saphana_conf_assert.stat.exists
+# needed because line_item.value is not evaluated correctly in the that: statement
+- name: 2382421 - Set fact for line_item.value
+ ansible.builtin.set_fact:
+ __sap_hana_preconfigure_sysctl_value_2382421: "{{ line_item.value }}"
+
- name: 2382421 - Assert that {{ line_item.name }} is set correctly in {{ __sap_hana_preconfigure_etc_sysctl_saphana_conf }}
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_sysctl_saphana_conf_network_parameter_assert.stdout == '{{ line_item.value }}'"
+ that: __sap_hana_preconfigure_register_sysctl_saphana_conf_network_parameter_assert.stdout == __sap_hana_preconfigure_sysctl_value_2382421
fail_msg: "FAIL: The value of '{{ line_item.name }}' in {{ __sap_hana_preconfigure_etc_sysctl_saphana_conf }} is
'{{ __sap_hana_preconfigure_register_sysctl_saphana_conf_network_parameter_assert.stdout }}' but the expected value is 'line_item.value'!"
success_msg: "PASS: The value of '{{ line_item.name }}' in {{ __sap_hana_preconfigure_etc_sysctl_saphana_conf }} is
@@ -27,7 +32,7 @@
- name: 2382421 - Assert that {{ line_item.name }} is set correctly as per sysctl
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_sysctl_network_parameter_assert.stdout == '{{ line_item.value }}'"
+ that: __sap_hana_preconfigure_register_sysctl_network_parameter_assert.stdout == __sap_hana_preconfigure_sysctl_value_2382421
fail_msg: "FAIL: The current value of '{{ line_item.name }}' as per sysctl is
'{{ __sap_hana_preconfigure_register_sysctl_network_parameter_assert.stdout }}' but the expected value is '{{ line_item.value }}'!"
success_msg: "PASS: The current value of '{{ line_item.name }}' as per sysctl is
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-2777782-01-loop-block.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-2777782-01-loop-block.yml
index 5a7f625ef..6dc476aaf 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-2777782-01-loop-block.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-2777782-01-loop-block.yml
@@ -21,7 +21,7 @@
- name: Assert that there is an entry for '{{ line_item }}' in the SELinux configuration database
ansible.builtin.assert:
- that: __sap_hana_preconfigure_register_semanage_fcontext_hana.stdout | int != 0
+ that: (__sap_hana_preconfigure_register_semanage_fcontext_hana.stdout | int) != 0
fail_msg: "FAIL: There is no entry for '{{ line_item }}' in the SELinux configuration database!"
success_msg: "PASS: There is an entry for '{{ line_item }}' in the SELinux configuration database."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
@@ -35,7 +35,7 @@
- name: Assert that all files in '{{ line_item }}' and below have the 'usr_t' file context
ansible.builtin.assert:
- that: __sap_hana_preconfigure_register_ls_z_hana.stdout | int == 0
+ that: (__sap_hana_preconfigure_register_ls_z_hana.stdout | int) == 0
fail_msg: "FAIL: There is at least one file in '{{ line_item }}' or below without the 'usr_t' file context!"
success_msg: "PASS: All files in '{{ line_item }}' and below have the 'usr_t' file context."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-3024346-loop-block.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-3024346-loop-block.yml
index f343a882d..bb1d1dd76 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-3024346-loop-block.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-3024346-loop-block.yml
@@ -9,9 +9,14 @@
ignore_errors: yes
when: __sap_hana_preconfigure_register_etc_sysctl_netapp_hana_conf_assert.stat.exists
+# needed because line_item.value is not evaluated correctly in the that: statement
+- name: 3024346 - Set fact for line_item.value
+ ansible.builtin.set_fact:
+ __sap_hana_preconfigure_sysctl_value_3024346: "{{ line_item.value }}"
+
- name: 3024346 - Assert that {{ line_item.name }} is set correctly in {{ __sap_hana_preconfigure_etc_sysctl_netapp_hana_conf }}
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_sysctl_netapp_hana_conf_network_parameter_assert.stdout == '{{ line_item.value }}'"
+ that: __sap_hana_preconfigure_register_sysctl_netapp_hana_conf_network_parameter_assert.stdout == __sap_hana_preconfigure_sysctl_value_3024346
fail_msg: "FAIL: The value of '{{ line_item.name }}' in '{{ __sap_hana_preconfigure_etc_sysctl_saphana_conf }}' is
'{{ __sap_hana_preconfigure_register_sysctl_netapp_hana_conf_network_parameter_assert.stdout }}' but the expected value is 'line_item.value'!"
success_msg: "PASS: The value of '{{ line_item.name }}' in '{{ __sap_hana_preconfigure_etc_sysctl_saphana_conf }}' is
@@ -27,7 +32,7 @@
- name: 3024346 - Assert that {{ line_item.name }} is set correctly as per sysctl
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_sysctl_netapp_hana_network_parameter_assert.stdout == '{{ line_item.value }}'"
+ that: __sap_hana_preconfigure_register_sysctl_netapp_hana_network_parameter_assert.stdout == __sap_hana_preconfigure_sysctl_value_3024346
fail_msg: "FAIL: The current value of '{{ line_item.name }}' as per sysctl is
'{{ __sap_hana_preconfigure_register_sysctl_netapp_hana_network_parameter_assert.stdout }}' but the expected value is '{{ line_item.value }}'!"
success_msg: "PASS: The current value of '{{ line_item.name }}' as per sysctl is
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-abrt-ccpp.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-abrt-ccpp.yml
index 4205365a2..fc96e8365 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-abrt-ccpp.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-abrt-ccpp.yml
@@ -3,4 +3,11 @@
- name: Assert that service abrt-ccpp is disabled, and inactive or stopped
ansible.builtin.include_tasks: assert-services.yml
vars:
- line_item: abrt-ccpp
+ __sap_hana_preconfigure_service_status: "{{ ansible_facts.services['abrt-ccpp.service'].status }}"
+ __sap_hana_preconfigure_service_state: "{{ ansible_facts.services['abrt-ccpp.service'].state }}"
+ __sap_hana_preconfigure_packages_and_services_pkg: "{{ __sap_hana_preconfigure_packages_and_services['abrt-ccpp']['pkg'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc: "{{ __sap_hana_preconfigure_packages_and_services['abrt-ccpp']['svc'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc_status: "{{ __sap_hana_preconfigure_packages_and_services['abrt-ccpp']['svc_status'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc_state: "{{ __sap_hana_preconfigure_packages_and_services['abrt-ccpp']['svc_state'] }}"
+ __sap_hana_preconfigure_packages_and_services_systemd_enabled: "{{ __sap_hana_preconfigure_packages_and_services['abrt-ccpp']['systemd_enabled'] }}"
+ __sap_hana_preconfigure_packages_and_services_systemd_state: "{{ __sap_hana_preconfigure_packages_and_services['abrt-ccpp']['systemd_state'] }}"
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-abrtd.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-abrtd.yml
index 54a1cf887..36a63798a 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-abrtd.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-abrtd.yml
@@ -3,4 +3,11 @@
- name: Assert that service abrtd is disabled, and inactive or stopped
ansible.builtin.include_tasks: assert-services.yml
vars:
- line_item: abrtd
+ __sap_hana_preconfigure_service_status: "{{ ansible_facts.services['abrtd.service'].status }}"
+ __sap_hana_preconfigure_service_state: "{{ ansible_facts.services['abrtd.service'].state }}"
+ __sap_hana_preconfigure_packages_and_services_pkg: "{{ __sap_hana_preconfigure_packages_and_services['abrtd']['pkg'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc: "{{ __sap_hana_preconfigure_packages_and_services['abrtd']['svc'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc_status: "{{ __sap_hana_preconfigure_packages_and_services['abrtd']['svc_status'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc_state: "{{ __sap_hana_preconfigure_packages_and_services['abrtd']['svc_state'] }}"
+ __sap_hana_preconfigure_packages_and_services_systemd_enabled: "{{ __sap_hana_preconfigure_packages_and_services['abrtd']['systemd_enabled'] }}"
+ __sap_hana_preconfigure_packages_and_services_systemd_state: "{{ __sap_hana_preconfigure_packages_and_services['abrtd']['systemd_state'] }}"
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-auto-numa-balancing.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-auto-numa-balancing.yml
index f75cbea44..94628b8ae 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-auto-numa-balancing.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-auto-numa-balancing.yml
@@ -4,7 +4,14 @@
- name: Assert that service numad is disabled, and inactive or stopped, if tuned is not to be used
ansible.builtin.include_tasks: assert-services.yml
vars:
- line_item: numad
+ __sap_hana_preconfigure_service_status: "{{ ansible_facts.services['numad.service'].status }}"
+ __sap_hana_preconfigure_service_state: "{{ ansible_facts.services['numad.service'].state }}"
+ __sap_hana_preconfigure_packages_and_services_pkg: "{{ __sap_hana_preconfigure_packages_and_services['numad']['pkg'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc: "{{ __sap_hana_preconfigure_packages_and_services['numad']['svc'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc_status: "{{ __sap_hana_preconfigure_packages_and_services['numad']['svc_status'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc_state: "{{ __sap_hana_preconfigure_packages_and_services['numad']['svc_state'] }}"
+ __sap_hana_preconfigure_packages_and_services_systemd_enabled: "{{ __sap_hana_preconfigure_packages_and_services['numad']['systemd_enabled'] }}"
+ __sap_hana_preconfigure_packages_and_services_systemd_state: "{{ __sap_hana_preconfigure_packages_and_services['numad']['systemd_state'] }}"
when: not sap_hana_preconfigure_use_tuned or
sap_hana_preconfigure_assert_all_config|d(false)
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-coredumps.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-coredumps.yml
index 99b25667f..572ad8d54 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-coredumps.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-coredumps.yml
@@ -29,7 +29,7 @@
- name: Assert that the hard limit of core file creation is 0 for all users
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_limits_sap_conf_core_hard_assert.stdout == '0'"
+ that: __sap_hana_preconfigure_register_limits_sap_conf_core_hard_assert.stdout == '0'
fail_msg: "FAIL: The hard limit of core file creation for all users in /etc/security/limits.d/99-sap.conf is
'{{ __sap_hana_preconfigure_register_limits_sap_conf_core_hard_assert.stdout }}' but the expected value is 0!"
success_msg: "PASS: The hard limit of core file creation for all users in /etc/security/limits.d/99-sap.conf is
@@ -46,7 +46,7 @@
- name: Assert that the soft limit of core file creation is 0 for all users
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_limits_sap_conf_core_soft_assert.stdout == '0'"
+ that: __sap_hana_preconfigure_register_limits_sap_conf_core_soft_assert.stdout == '0'
fail_msg: "FAIL: The soft limit of core file creation for all users in /etc/security/limits.d/99-sap.conf is
'{{ __sap_hana_preconfigure_register_limits_sap_conf_core_soft_assert.stdout }}' but the expected value is 0!"
success_msg: "PASS: The soft limit of core file creation for all users in /etc/security/limits.d/99-sap.conf is
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-firewalld.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-firewalld.yml
index 576b70e56..5cb157679 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-firewalld.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-firewalld.yml
@@ -3,4 +3,11 @@
- name: Assert that service firewalld is disabled, and inactive or stopped
ansible.builtin.include_tasks: assert-services.yml
vars:
- line_item: firewalld
+ __sap_hana_preconfigure_service_status: "{{ ansible_facts.services['firewalld.service'].status }}"
+ __sap_hana_preconfigure_service_state: "{{ ansible_facts.services['firewalld.service'].state }}"
+ __sap_hana_preconfigure_packages_and_services_pkg: "{{ __sap_hana_preconfigure_packages_and_services['firewalld']['pkg'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc: "{{ __sap_hana_preconfigure_packages_and_services['firewalld']['svc'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc_status: "{{ __sap_hana_preconfigure_packages_and_services['firewalld']['svc_status'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc_state: "{{ __sap_hana_preconfigure_packages_and_services['firewalld']['svc_state'] }}"
+ __sap_hana_preconfigure_packages_and_services_systemd_enabled: "{{ __sap_hana_preconfigure_packages_and_services['firewalld']['systemd_enabled'] }}"
+ __sap_hana_preconfigure_packages_and_services_systemd_state: "{{ __sap_hana_preconfigure_packages_and_services['firewalld']['systemd_state'] }}"
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-kdump.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-kdump.yml
index 66fe0b1de..de5c370fc 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-kdump.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-kdump.yml
@@ -3,4 +3,11 @@
- name: Assert that service kdump is disabled, and inactive or stopped
ansible.builtin.include_tasks: assert-services.yml
vars:
- line_item: kdump
+ __sap_hana_preconfigure_service_status: "{{ ansible_facts.services['kdump.service'].status }}"
+ __sap_hana_preconfigure_service_state: "{{ ansible_facts.services['kdump.service'].state }}"
+ __sap_hana_preconfigure_packages_and_services_pkg: "{{ __sap_hana_preconfigure_packages_and_services['kdump']['pkg'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc: "{{ __sap_hana_preconfigure_packages_and_services['kdump']['svc'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc_status: "{{ __sap_hana_preconfigure_packages_and_services['kdump']['svc_status'] }}"
+ __sap_hana_preconfigure_packages_and_services_svc_state: "{{ __sap_hana_preconfigure_packages_and_services['kdump']['svc_state'] }}"
+ __sap_hana_preconfigure_packages_and_services_systemd_enabled: "{{ __sap_hana_preconfigure_packages_and_services['kdump']['systemd_enabled'] }}"
+ __sap_hana_preconfigure_packages_and_services_systemd_state: "{{ __sap_hana_preconfigure_packages_and_services['kdump']['systemd_state'] }}"
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-ksm.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-ksm.yml
index 383f58c3b..ef35913cc 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-ksm.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-ksm.yml
@@ -22,7 +22,7 @@
- name: Assert that the mode of file /etc/init.d/boot.local is 0755
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_stat_boot_local_ksm_assert.stat.mode == '0755'"
+ that: __sap_hana_preconfigure_register_stat_boot_local_ksm_assert.stat.mode == '0755'
fail_msg: "FAIL: File /etc/init.d/boot.local has mode '{{ __sap_hana_preconfigure_register_stat_boot_local_ksm_assert.stat.mode }}'
but the expected mode is '0755'!"
success_msg: "PASS: File /etc/init.d/boot.local has mode 0755."
@@ -52,7 +52,7 @@
- name: Assert that ksm is currently disabled
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_ksm_active_assert.stdout == '0'"
+ that: __sap_hana_preconfigure_register_ksm_active_assert.stdout == '0'
fail_msg: "FAIL: KSM is currently enabled but it needs to be disabled!"
success_msg: "PASS: KSM is disabled currently."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-services.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-services.yml
index 55de317c8..cd01228d3 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-services.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-services.yml
@@ -1,41 +1,38 @@
---
-- name: Assert that service {{ __sap_hana_preconfigure_packages_and_services[line_item].svc }} is disabled
- if package {{ __sap_hana_preconfigure_packages_and_services[line_item].pkg }} is installed
- when: "__sap_hana_preconfigure_packages_and_services[line_item].pkg in ansible_facts.packages"
+- name: Assert that service {{ __sap_hana_preconfigure_packages_and_services_svc }} is disabled
+ if package {{ __sap_hana_preconfigure_packages_and_services_pkg }} is installed
+ when: "__sap_hana_preconfigure_packages_and_services_pkg in ansible_facts.packages"
block:
- - name: "Report that package {{ __sap_hana_preconfigure_packages_and_services[line_item].pkg }} is installed"
+ - name: "Report that package {{ __sap_hana_preconfigure_packages_and_services_pkg }} is installed"
ansible.builtin.debug:
- msg: "INFO: Package '{{ __sap_hana_preconfigure_packages_and_services[line_item].pkg }}' is installed."
+ msg: "INFO: Package '{{ __sap_hana_preconfigure_packages_and_services_pkg }}' is installed."
- - name: Assert that service {{ __sap_hana_preconfigure_packages_and_services[line_item].svc }} is
- {{ __sap_hana_preconfigure_packages_and_services[line_item].svc_status }}
+ - name: Assert that service {{ __sap_hana_preconfigure_packages_and_services_svc }} is
+ {{ __sap_hana_preconfigure_packages_and_services_svc_status }}
ansible.builtin.assert:
- that: "ansible_facts.services['{{ __sap_hana_preconfigure_packages_and_services[line_item].svc }}.service'].status ==
- '{{ __sap_hana_preconfigure_packages_and_services[line_item].svc_status }}'"
- fail_msg: "FAIL: Service '{{ __sap_hana_preconfigure_packages_and_services[line_item].svc }}' is not
- {{ __sap_hana_preconfigure_packages_and_services[line_item].svc_status }}!"
- success_msg: "PASS: Service '{{ __sap_hana_preconfigure_packages_and_services[line_item].svc }}' is
- {{ __sap_hana_preconfigure_packages_and_services[line_item].svc_status }}."
+ that: __sap_hana_preconfigure_service_status == __sap_hana_preconfigure_packages_and_services_svc_status
+ fail_msg: "FAIL: Service '{{ __sap_hana_preconfigure_packages_and_services_svc }}' is not
+ {{ __sap_hana_preconfigure_packages_and_services_svc_status }}!"
+ success_msg: "PASS: Service '{{ __sap_hana_preconfigure_packages_and_services_svc }}' is
+ {{ __sap_hana_preconfigure_packages_and_services_svc_status }}."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
- - name: Assert that service {{ __sap_hana_preconfigure_packages_and_services[line_item].svc }} is
- {{ __sap_hana_preconfigure_packages_and_services[line_item].svc_state }} or
- {{ __sap_hana_preconfigure_packages_and_services[line_item].systemd_state }}
+ - name: Assert that service {{ __sap_hana_preconfigure_packages_and_services_svc }} is
+ {{ __sap_hana_preconfigure_packages_and_services_svc_state }} or
+ {{ __sap_hana_preconfigure_packages_and_services_systemd_state }}
ansible.builtin.assert:
- that: "(ansible_facts.services['{{ __sap_hana_preconfigure_packages_and_services[line_item].svc }}.service'].state ==
- '{{ __sap_hana_preconfigure_packages_and_services[line_item].svc_state }}' or
- ansible_facts.services['{{ __sap_hana_preconfigure_packages_and_services[line_item].svc }}.service'].state ==
- '{{ __sap_hana_preconfigure_packages_and_services[line_item].systemd_state }}')"
- fail_msg: "FAIL: Service '{{ __sap_hana_preconfigure_packages_and_services[line_item].svc }}' is not
- {{ __sap_hana_preconfigure_packages_and_services[line_item].svc_state }}!"
- success_msg: "PASS: Service '{{ __sap_hana_preconfigure_packages_and_services[line_item].svc }}' is
- {{ __sap_hana_preconfigure_packages_and_services[line_item].svc_state }} or
- {{ __sap_hana_preconfigure_packages_and_services[line_item].systemd_state }}."
+ that: (__sap_hana_preconfigure_service_state == __sap_hana_preconfigure_packages_and_services_svc_state) or
+ (__sap_hana_preconfigure_service_state == __sap_hana_preconfigure_packages_and_services_systemd_state)
+ fail_msg: "FAIL: Service '{{ __sap_hana_preconfigure_packages_and_services_svc }}' is not
+ {{ __sap_hana_preconfigure_packages_and_services_svc_state }}!"
+ success_msg: "PASS: Service '{{ __sap_hana_preconfigure_packages_and_services_svc }}' is
+ {{ __sap_hana_preconfigure_packages_and_services_svc_state }} or
+ {{ __sap_hana_preconfigure_packages_and_services_systemd_state }}."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
-- name: "Report that package {{ __sap_hana_preconfigure_packages_and_services[line_item].pkg }} is not installed"
+- name: "Report that package {{ __sap_hana_preconfigure_packages_and_services_pkg }} is not installed"
ansible.builtin.debug:
- msg: "PASS: Package '{{ __sap_hana_preconfigure_packages_and_services[line_item].pkg }}' is not installed."
- when: "__sap_hana_preconfigure_packages_and_services[line_item].pkg not in ansible_facts.packages"
+ msg: "PASS: Package '{{ __sap_hana_preconfigure_packages_and_services_pkg }}' is not installed."
+ when: "__sap_hana_preconfigure_packages_and_services_pkg not in ansible_facts.packages"
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-tsx.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-tsx.yml
index 591f1f93f..3a64d2d3c 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-tsx.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-tsx.yml
@@ -10,7 +10,7 @@
# There are CPUs which are not capable of enabling the rtm flag, so we just report the status:
- name: Get all CPU flags
- ansible.builtin.shell: lscpu | grep "^Flags:"
+ ansible.builtin.shell: set -o pipefail && lscpu | grep "^Flags:"
register: __sap_hana_preconfigure_register_lscpu_flags
changed_when: no
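set -o pipefail makes a failure anywhere in the pipeline, not just in the last command, fail the task, which is what ansible-lint's risky-shell-pipe rule asks for. pipefail is a bash option, so on hosts where /bin/sh is not bash an explicit executable can be set (a sketch, not role code):

    - name: Get all CPU flags (sketch)
      ansible.builtin.shell: set -o pipefail && lscpu | grep "^Flags:"
      args:
        executable: /bin/bash   # pipefail is not guaranteed in a POSIX /bin/sh
      register: __example_register_lscpu_flags
      changed_when: false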
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-tuned.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-tuned.yml
index 304c029e8..91ca78fc4 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-tuned.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/assert-tuned.yml
@@ -17,7 +17,7 @@
- name: "Assert that tuned is enabled - use_tuned: yes"
ansible.builtin.assert:
- that: "ansible_facts.services['tuned.service']['status'] == 'enabled'"
+ that: ansible_facts.services['tuned.service']['status'] == 'enabled'
fail_msg: "FAIL: Service 'tuned' is not enabled!"
success_msg: "PASS: Service 'tuned' is enabled."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
@@ -27,7 +27,7 @@
- name: "Assert that tuned is active - use_tuned: yes"
ansible.builtin.assert:
- that: "ansible_facts.services['tuned.service']['state'] == 'running'"
+ that: ansible_facts.services['tuned.service']['state'] == 'running'
fail_msg: "FAIL: Service 'tuned' is not active!"
success_msg: "PASS: Service 'tuned' is active."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
@@ -44,7 +44,7 @@
- name: "Assert that tuned is disabled - use_tuned: no"
ansible.builtin.assert:
- that: "ansible_facts.services['tuned.service']['status'] == 'disabled'"
+ that: ansible_facts.services['tuned.service']['status'] == 'disabled'
fail_msg: "FAIL: Service 'tuned' is not disabled!"
success_msg: "PASS: Service 'tuned' is disabled."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
@@ -54,7 +54,7 @@
- name: "Assert that tuned is inactive - use_tuned: no"
ansible.builtin.assert:
- that: "ansible_facts.services['tuned.service']['state'] == 'inactive'"
+ that: ansible_facts.services['tuned.service']['state'] == 'inactive'
fail_msg: "FAIL: Service 'tuned' is not inactive!"
success_msg: "PASS: Service 'tuned' is inactive."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
@@ -70,7 +70,7 @@
sap_hana_preconfigure_assert_all_config)"
- name: Get active tuned profile
- ansible.builtin.shell: /usr/sbin/tuned-adm active | grep ":" | cut -d ":" -f 2 | awk '{$1=$1;print}'
+ ansible.builtin.shell: set -o pipefail && /usr/sbin/tuned-adm active | awk '/:/{print $NF}'
check_mode: no
register: __sap_hana_preconfigure_register_current_tuned_profile_assert
ignore_errors: yes
@@ -88,7 +88,7 @@
- name: Assert that tuned profile '{{ sap_hana_preconfigure_tuned_profile }}' is currently active
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_current_tuned_profile_assert.stdout == '{{ sap_hana_preconfigure_tuned_profile }}'"
+ that: __sap_hana_preconfigure_register_current_tuned_profile_assert.stdout == sap_hana_preconfigure_tuned_profile
fail_msg: "FAIL: The tuned profile '{{ sap_hana_preconfigure_tuned_profile }}' is currently not active!
Currently active profile: '{{ __sap_hana_preconfigure_register_current_tuned_profile_assert.stdout }}'."
success_msg: "PASS: The tuned profile '{{ sap_hana_preconfigure_tuned_profile }}' is currently active."
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/configure-tuned.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/configure-tuned.yml
index a5de66bfa..152d6d0ee 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/configure-tuned.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/configure-tuned.yml
@@ -22,7 +22,7 @@
owner: root
group: root
mode: '0644'
- backup: yes
+ backup: true
- name: Perform steps for setting tuned profile
when: sap_hana_preconfigure_use_tuned
@@ -32,11 +32,11 @@
ansible.builtin.service:
name: tuned
state: started
- enabled: yes
+ enabled: true
- name: Get currently active tuned profile
- ansible.builtin.shell: /usr/sbin/tuned-adm active | grep ":" | cut -d ":" -f 2 | awk '{$1=$1;print}'
- check_mode: no
+ ansible.builtin.shell: set -o pipefail && /usr/sbin/tuned-adm active | awk '/:/{print $NF}'
+ check_mode: false
register: __sap_hana_preconfigure_register_current_tuned_profile
changed_when: false
@@ -53,8 +53,8 @@
changed_when: true
- name: Show new active tuned profile
- ansible.builtin.shell: /usr/sbin/tuned-adm active | grep ":" | cut -d ":" -f 2 | awk '{$1=$1;print}'
- check_mode: no
+ ansible.builtin.shell: set -o pipefail && /usr/sbin/tuned-adm active | awk '/:/{print $NF}'
+ check_mode: false
register: __sap_hana_preconfigure_register_new_tuned_profile
changed_when: false
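The single awk program replaces the former grep | cut | awk chain; assuming tuned-adm active prints a line such as "Current active profile: sap-hana", the program prints the last field of any line containing a colon, i.e. the bare profile name:

    - name: Get currently active tuned profile (sketch)
      ansible.builtin.shell: set -o pipefail && /usr/sbin/tuned-adm active | awk '/:/{print $NF}'
      register: __example_register_tuned_profile   # .stdout would then be e.g. 'sap-hana'
      changed_when: false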
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/disable-coredumps.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/disable-coredumps.yml
index 524f35cb9..a82bb1ada 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/disable-coredumps.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/disable-coredumps.yml
@@ -2,7 +2,7 @@
# Reasons for noqa: 1. Tabs can increase readability;
# 2. The example in man limits.conf is tab formatted;
-# 3. It is difficult to replace tabs by spaces for entries for which their lenghts are not known
+# 3. It is difficult to replace tabs by spaces for entries for which their lengths are not known
- name: Disable core file creation for all users # noqa no-tabs
ansible.builtin.lineinfile:
path: /etc/security/limits.d/99-sap.conf
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/disable-ksm.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/disable-ksm.yml
index 6a644d3a2..2e9f64c2f 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/disable-ksm.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/disable-ksm.yml
@@ -19,7 +19,7 @@
- name: Configure - Get initial status of KSM
ansible.builtin.command: cat /sys/kernel/mm/ksm/run
- check_mode: no
+ check_mode: false
register: __sap_hana_preconfigure_register_ksm_status_before
ignore_errors: true
changed_when: false
@@ -27,6 +27,7 @@
- name: Disable KSM on the running system
ansible.builtin.shell: echo 0 > /sys/kernel/mm/ksm/run
register: __sap_hana_preconfigure_register_disable_ksm
+ changed_when: true
when: __sap_hana_preconfigure_register_ksm_status_before.stdout != '0'
- name: Configure - Get status of KSM
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/generic/enable-tsx.yml b/roles/sap_hana_preconfigure/tasks/RedHat/generic/enable-tsx.yml
index 9176357d6..4566cd726 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/generic/enable-tsx.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/generic/enable-tsx.yml
@@ -8,6 +8,7 @@
- name: Enable TSX at boot time
ansible.builtin.command: /bin/true
notify: __sap_hana_preconfigure_grubby_update_handler
+ changed_when: true
when:
- ansible_architecture == 'x86_64'
- ansible_distribution == 'RedHat'
diff --git a/roles/sap_hana_preconfigure/tasks/RedHat/installation.yml b/roles/sap_hana_preconfigure/tasks/RedHat/installation.yml
index e4d3cb19b..c93046e30 100644
--- a/roles/sap_hana_preconfigure/tasks/RedHat/installation.yml
+++ b/roles/sap_hana_preconfigure/tasks/RedHat/installation.yml
@@ -4,12 +4,23 @@
ansible.builtin.setup:
gather_subset: distribution_version
-- name: Ensure that the system is running a RHEL release which is supported for SAP HANA
+- name: Report if the system is running a RHEL release which is supported for SAP HANA
+ ansible.builtin.debug:
+ msg:
+ - "WARN: The RHEL release '{{ ansible_distribution_version }}' may not (yet) be supported for SAP HANA. Please check SAP note 2235581!"
+ - " If necessary, adapt role parameter `sap_hana_preconfigure_supported_rhel_minor_releases` accordingly."
+ when:
+ - not sap_hana_preconfigure_min_rhel_release_check
+ - ansible_distribution_version not in sap_hana_preconfigure_supported_rhel_minor_releases
+
+- name: Fail if the system is running a RHEL release which may not be supported for SAP HANA
ansible.builtin.assert:
that: ansible_distribution_version in sap_hana_preconfigure_supported_rhel_minor_releases
- fail_msg: "The RHEL release {{ ansible_distribution_version }} is not supported for SAP HANA!"
- success_msg: "The RHEL release {{ ansible_distribution_version }} is supported for SAP HANA."
- ignore_errors: "{{ not sap_hana_preconfigure_min_rhel_release_check }}"
+ fail_msg:
+ - "The RHEL release '{{ ansible_distribution_version }}' may not (yet) be supported for SAP HANA. Please check SAP note 2235581!"
+ - "If necessary, adapt role parameter `sap_hana_preconfigure_supported_rhel_minor_releases` accordingly."
+ success_msg: "The RHEL release '{{ ansible_distribution_version }}' is supported for SAP HANA."
+ when: sap_hana_preconfigure_min_rhel_release_check
- name: Perform steps for enabling repos for SAP HANA
when: sap_hana_preconfigure_enable_sap_hana_repos
@@ -44,6 +55,7 @@
- name: Set the minor RHEL release
ansible.builtin.command: subscription-manager release --set="{{ ansible_distribution_version }}"
+ changed_when: true
when:
- sap_hana_preconfigure_set_minor_release
- __sap_hana_preconfigure_register_subscription_manager_release.stdout != ansible_distribution_version
@@ -83,6 +95,7 @@
- name: Accept the license for the IBM Service and Productivity Tools
ansible.builtin.shell: LESS=+q /opt/ibm/lop/configure <<<'y'
+ changed_when: true
when:
- ansible_architecture == "ppc64le"
- sap_hana_preconfigure_install_ibm_power_tools | d(true)
@@ -120,7 +133,8 @@
- __sap_hana_preconfigure_min_pkgs | d([])
block:
- - name: Create a list of minimum required package versions to be installed
+# Reason for noqa: We can safely fail at the last command in the pipeline.
+ - name: Create a list of minimum required package versions to be installed # noqa risky-shell-pipe
# How does it work?
# 1 - Print the required package name and version with a prefix "1" followed by a space.
# 2 - In the same output sequence, list all installed versions of this package with a prefix "2" followed by a space.
@@ -226,9 +240,11 @@
- name: Call reboot handler if necessary as per role sap_general_preconfigure
ansible.builtin.command: /bin/true
notify: __sap_hana_preconfigure_reboot_handler
+ changed_when: true
when: sap_general_preconfigure_fact_reboot_required | d(false)
- name: Call reboot handler if necessary as per this role
ansible.builtin.command: /bin/true
notify: __sap_hana_preconfigure_reboot_handler
+ changed_when: true
when: __sap_hana_preconfigure_register_needs_restarting is failed
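The /bin/true tasks exist only to notify the reboot handler; since handlers fire only for tasks that report a change, the explicit changed_when: true is what makes the notification take effect (a sketch of the idiom, names are placeholders):

    - name: Trigger the reboot handler when a restart is pending (sketch)
      ansible.builtin.command: /bin/true
      changed_when: true   # a task must report 'changed' for its notify to fire
      notify: __example_reboot_handler
      when: __example_needs_restarting | d(false)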
diff --git a/roles/sap_hana_preconfigure/tasks/SLES/assert-configuration.yml b/roles/sap_hana_preconfigure/tasks/SLES/assert-configuration.yml
index 34fd6f217..6aee0145c 100644
--- a/roles/sap_hana_preconfigure/tasks/SLES/assert-configuration.yml
+++ b/roles/sap_hana_preconfigure/tasks/SLES/assert-configuration.yml
@@ -23,14 +23,14 @@
- name: Assert that saptune_check executed correctly
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_saptune_check.rc == 0"
+ that: __sap_hana_preconfigure_register_saptune_check.rc == 0
fail_msg: "FAIL: the command saptune_check fails"
success_msg: "PASS: the command saptune_check executes as expected"
- name: Discover active solution
ansible.builtin.command: saptune solution enabled
register: __sap_hana_preconfigure_register_saptune_status
- changed_when: no
+ changed_when: false
- name: Set solution fact
ansible.builtin.set_fact:
diff --git a/roles/sap_hana_preconfigure/tasks/SLES/assert-installation.yml b/roles/sap_hana_preconfigure/tasks/SLES/assert-installation.yml
index da532ecee..c54bb6106 100644
--- a/roles/sap_hana_preconfigure/tasks/SLES/assert-installation.yml
+++ b/roles/sap_hana_preconfigure/tasks/SLES/assert-installation.yml
@@ -3,6 +3,7 @@
# debug:
# verbosity: "{{ debuglevel }}"
#
+
#Capture all patterns along with their install status
- name: Get zypper pattern information
ansible.builtin.command: zypper patterns
diff --git a/roles/sap_hana_preconfigure/tasks/SLES/configuration.yml b/roles/sap_hana_preconfigure/tasks/SLES/configuration.yml
index 0e084615e..b588e9d88 100644
--- a/roles/sap_hana_preconfigure/tasks/SLES/configuration.yml
+++ b/roles/sap_hana_preconfigure/tasks/SLES/configuration.yml
@@ -1,31 +1,50 @@
---
+- name: Take over with saptune and enable it
+ when: __sap_hana_preconfigure_run_saptune
+ block:
+ - name: Make sure that sapconf and tuned are stopped and disabled
+ ansible.builtin.command: "saptune service takeover"
+ register: __sap_saptune_takeover
+ changed_when: __sap_saptune_takeover.rc == 0
-#- name: Enable Debugging
-# debug:
-# verbosity: "{{ debuglevel }}"
-#
-- name: Ensure saptune is running and enabled
- ansible.builtin.systemd:
- name: saptune
- state: started
- enabled: yes
+ - name: Ensure saptune is running and enabled
+ ansible.builtin.systemd:
+ name: saptune
+ state: started
+ enabled: true
-- name: Ensure saptune_check executes correctly
- ansible.builtin.command: saptune_check
- changed_when: no
+ - name: Ensure saptune_check executes correctly
+ ansible.builtin.command: saptune_check
+ changed_when: false
-- name: Discover active solution
- ansible.builtin.command: saptune solution enabled
- register: __sap_hana_preconfigure_register_saptune_status
- changed_when: no
+ - name: Discover active solution
+ ansible.builtin.command: saptune solution enabled
+ register: __sap_hana_preconfigure_register_saptune_status
+ changed_when: false
-- name: Set fact for active solution
- ansible.builtin.set_fact:
- __sap_hana_preconfigure_fact_solution_configured: "{{ (__sap_hana_preconfigure_register_saptune_status.stdout | regex_search('(\\S+)', '\\1'))[0] | default('NONE') }}" # Capture the first block on none whitespace
+ - name: Set fact for active solution
+ ansible.builtin.set_fact:
+      # Capture the first block of non-whitespace characters
+ __sap_hana_preconfigure_fact_solution_configured:
+ "{{ (__sap_hana_preconfigure_register_saptune_status.stdout | regex_search('(\\S+)', '\\1'))[0] | default('NONE') }}"
-- name: Show configured solution
- ansible.builtin.debug:
- var: __sap_hana_preconfigure_fact_solution_configured
+ - name: Show configured solution
+ ansible.builtin.debug:
+ var: __sap_hana_preconfigure_fact_solution_configured
+
+- name: Enable sapconf
+ when: not __sap_hana_preconfigure_run_saptune
+ block:
+ - name: Enable sapconf service
+ ansible.builtin.systemd:
+ name: sapconf
+ state: started
+ enabled: true
+
+ - name: Restart sapconf service
+ ansible.builtin.systemd:
+ name: sapconf
+ state: restarted
# If this is a cluster node on Azure, we need to override to disable tcp timestamps, reuse and recycle.
# This can be done by copying the sapnote file 2382421 from /usr/share/saptune/notes to /etc/saptune/override
@@ -34,41 +53,42 @@
- name: Disable TCP timestamps, recycle & reuse
ansible.builtin.blockinfile:
path: /etc/saptune/override/2382421
- create: yes
- backup: yes
+ create: true
+ backup: true
owner: root
group: root
mode: '0640'
marker: ""
block: |
- [sysctl]
- net.ipv4.tcp_timestamps = 0
- net.ipv4.tcp_tw_reuse = 0
- net.ipv4.tcp_tw_recycle = 0
+ [sysctl]
+ net.ipv4.tcp_timestamps = 0
+ net.ipv4.tcp_tw_reuse = 0
+ net.ipv4.tcp_tw_recycle = 0
when:
- sap_hana_preconfigure_saptune_azure
-- name: Check if saptune solution needs to be applied
- ansible.builtin.command: "saptune solution verify {{ sap_hana_preconfigure_saptune_solution }}"
- register: __sap_hana_preconfigure_register_saptune_verify
- changed_when: no # We're only checking, not changing!
- failed_when: no # We expect this to fail if it has not previously been applied
-
-- name: Ensure no solution is currently applied
- ansible.builtin.command: "saptune solution revert {{ __sap_hana_preconfigure_fact_solution_configured }}"
- when:
- - __sap_hana_preconfigure_fact_solution_configured != 'NONE'
- - __sap_hana_preconfigure_register_saptune_verify.rc != 0
+- name: Apply saptune solution
+ when: __sap_hana_preconfigure_run_saptune
+ block:
+ - name: Check if saptune solution needs to be applied
+ ansible.builtin.command: "saptune solution verify {{ sap_hana_preconfigure_saptune_solution }}"
+ register: __sap_hana_preconfigure_register_saptune_verify
+ changed_when: false # We're only checking, not changing!
+ failed_when: false # We expect this to fail if it has not previously been applied
-- name: Ensure saptune solution is applied
- ansible.builtin.command: "saptune solution apply {{ sap_hana_preconfigure_saptune_solution }}"
- when: __sap_hana_preconfigure_register_saptune_verify.rc != 0
+ - name: Ensure no solution is currently applied
+ ansible.builtin.command: "saptune solution revert {{ __sap_hana_preconfigure_fact_solution_configured }}"
+ changed_when: true
+ when:
+ - __sap_hana_preconfigure_fact_solution_configured != 'NONE'
+ - __sap_hana_preconfigure_register_saptune_verify.rc != 0
-- name: Ensure solution was successful
- ansible.builtin.command: "saptune solution verify {{ sap_hana_preconfigure_saptune_solution }}"
- changed_when: no # We're only checking, not changing!
+ - name: Ensure saptune solution is applied
+ ansible.builtin.command: "saptune solution apply {{ sap_hana_preconfigure_saptune_solution }}"
+ changed_when: true
+ when:
+ - __sap_hana_preconfigure_register_saptune_verify.rc != 0
-- name: Make sure that sapconf and tuned are stopped and disabled
- ansible.builtin.command: "saptune service takeover"
- register: __sap_saptune_takeover
- changed_when: __sap_saptune_takeover.rc == 0
+ - name: Ensure solution was successful
+ ansible.builtin.command: "saptune solution verify {{ sap_hana_preconfigure_saptune_solution }}"
+ changed_when: false # We're only checking, not changing!
diff --git a/roles/sap_hana_preconfigure/tasks/SLES/installation.yml b/roles/sap_hana_preconfigure/tasks/SLES/installation.yml
index 01d2d2736..3788ec552 100644
--- a/roles/sap_hana_preconfigure/tasks/SLES/installation.yml
+++ b/roles/sap_hana_preconfigure/tasks/SLES/installation.yml
@@ -10,16 +10,53 @@
name: "*"
when: sap_hana_preconfigure_update | bool
-- name: Ensure saphana pattern is installed
- community.general.zypper:
- type: pattern
- name: sap-hana
- state: present
- force: yes
+# -----------
+- name: Get contents of /etc/products.d/baseproduct
+ ansible.builtin.stat:
+ path: /etc/products.d/baseproduct
+ register: sles_baseproduct
+ when: ansible_os_family == 'Suse'
+
+- name: Set fact if baseproduct contains SLES without SLES_SAP
+ ansible.builtin.set_fact:
+ __sap_hana_preconfigure_run_saptune: false
+ when:
+ - '"SLES_SAP" not in sles_baseproduct.stat.lnk_target'
+ - '"SLES" in sles_baseproduct.stat.lnk_target'
+ - ansible_os_family == 'Suse'
+
+- name: Display the baseproduct link and the resulting saptune decision
+ ansible.builtin.debug:
+ msg:
+ - "OS Family: {{ ansible_os_family }}"
+ - "saptune: {{ __sap_hana_preconfigure_run_saptune }}"
+ - "link: {{ sles_baseproduct.stat.lnk_target }}"
+# -----------
+
+- name: Prepare saptune
+ when:
+ - __sap_hana_preconfigure_run_saptune
+ block:
+
+ - name: Ensure saphana pattern is installed
+ community.general.zypper:
+ type: pattern
+ name: sap-hana
+ state: present
+ force: true
+
+ - name: Ensure saptune is installed
+ community.general.zypper:
+ type: package
+ name: "saptune={{ sap_hana_preconfigure_saptune_version }}"
+ state: present
+ force: true
-- name: Ensure saptune is installed
+- name: Ensure sapconf is installed
community.general.zypper:
type: package
- name: "saptune={{ sap_hana_preconfigure_saptune_version }}"
+ name: "sapconf"
state: present
- force: yes
+ force: true
+ when:
+ - not __sap_hana_preconfigure_run_saptune
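The saptune-versus-sapconf decision reads the /etc/products.d/baseproduct symlink: on SLES for SAP Applications it usually points at an SLES_SAP product file, on plain SLES at an SLES product file (the exact file names are an assumption here), so lnk_target is enough to tell the two apart. A condensed sketch of the check with placeholder names:

    - name: Get contents of /etc/products.d/baseproduct
      ansible.builtin.stat:
        path: /etc/products.d/baseproduct
      register: __example_baseproduct

    - name: Fall back to sapconf on plain SLES
      ansible.builtin.set_fact:
        __example_run_saptune: false
      when:
        - '"SLES_SAP" not in __example_baseproduct.stat.lnk_target'
        - '"SLES" in __example_baseproduct.stat.lnk_target'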
diff --git a/roles/sap_hana_preconfigure/tasks/sapnote/2009879.yml b/roles/sap_hana_preconfigure/tasks/sapnote/2009879.yml
index 130ba5111..a3c35f2fa 100644
--- a/roles/sap_hana_preconfigure/tasks/sapnote/2009879.yml
+++ b/roles/sap_hana_preconfigure/tasks/sapnote/2009879.yml
@@ -7,7 +7,7 @@
# We include the release specific stuff here
#
-# Todo: move repo checking to RedHat/installion.yml
+# Todo: move repo checking to RedHat/installation.yml
# Disable for now, as there is no support for ppc64le
#- name: Check for EUS or E4S repository
# shell: |
@@ -31,8 +31,7 @@
- name: Configure - Display SAP note number 2009879 and its version
ansible.builtin.debug:
msg: "SAP note {{ (__sap_hana_preconfigure_sapnotes_versions | selectattr('number', 'match', '^2009879$') | first).number }}
- (version {{ (__sap_hana_preconfigure_sapnotes_versions | selectattr('number', 'match', '^2009879$') | first).version }}): SAP HANA Guidelines for R
-HEL 7 (pdf)"
+ (version {{ (__sap_hana_preconfigure_sapnotes_versions | selectattr('number', 'match', '^2009879$') | first).version }}): SAP HANA Guidelines for RHEL 7 (pdf)"
- name: Include 2009879_X.yml
ansible.builtin.include_tasks: 2009879_{{ ansible_distribution_major_version }}.yml
diff --git a/roles/sap_hana_preconfigure/tasks/sapnote/2055470.yml b/roles/sap_hana_preconfigure/tasks/sapnote/2055470.yml
index aeee28a8b..21a0a7512 100644
--- a/roles/sap_hana_preconfigure/tasks/sapnote/2055470.yml
+++ b/roles/sap_hana_preconfigure/tasks/sapnote/2055470.yml
@@ -19,7 +19,8 @@
msg: "SAP note {{ (__sap_hana_preconfigure_sapnotes_versions | selectattr('number', 'match', '^2055470$') | first).number }}
(version {{ (__sap_hana_preconfigure_sapnotes_versions | selectattr('number', 'match', '^2055470$') | first).version }}): SAP HANA on POWER settings"
- - name: Ensure MTU size is 9000 on all interfaces
+# Reason for noqa: Failing early can cause unpredictable outputs.
+ - name: Ensure MTU size is 9000 on all interfaces # noqa risky-shell-pipe
ansible.builtin.shell: |
mtu=$(nmcli conn show {{ line_item }} | grep 802-3-ethernet.mtu | awk -F: '{printf("%d", $2)}')
if [ "$mtu" != "9000" ]; then
diff --git a/roles/sap_hana_preconfigure/tasks/sapnote/assert-2009879_7.yml b/roles/sap_hana_preconfigure/tasks/sapnote/assert-2009879_7.yml
index cef71a27e..f6176b580 100644
--- a/roles/sap_hana_preconfigure/tasks/sapnote/assert-2009879_7.yml
+++ b/roles/sap_hana_preconfigure/tasks/sapnote/assert-2009879_7.yml
@@ -49,7 +49,7 @@
- name: RHEL 7.2 - Assert that file /usr/lib64/libssl.so.1.0.1 is a link to /usr/lib64/libssl.so.1.0.1e
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_stat_libssl_rhel_72_assert.stat.lnk_target == '/usr/lib64/libssl.so.1.0.1e'"
+ that: __sap_hana_preconfigure_register_stat_libssl_rhel_72_assert.stat.lnk_target == '/usr/lib64/libssl.so.1.0.1e'
fail_msg: "FAIL: File /usr/lib64/libssl.so.1.0.1 is not a link to /usr/lib64/libssl.so.1.0.1e!"
success_msg: "PASS: File /usr/lib64/libssl.so.1.0.1 is a link to /usr/lib64/libssl.so.1.0.1e."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
@@ -77,7 +77,7 @@
- name: RHEL 7.2 - Assert that file /usr/lib64/libcrypto.so.1.0.1 is a link to /usr/lib64/libcrypto.so.1.0.1e
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_stat_libcrypto_rhel_72_assert.stat.lnk_target == '/usr/lib64/libcrypto.so.1.0.1e'"
+ that: __sap_hana_preconfigure_register_stat_libcrypto_rhel_72_assert.stat.lnk_target == '/usr/lib64/libcrypto.so.1.0.1e'
fail_msg: "FAIL: File /usr/lib64/libcrypto.so.1.0.1 is not a link to /usr/lib64/libcrypto.so.1.0.1e!"
success_msg: "PASS: File /usr/lib64/libcrypto.so.1.0.1 is a link to /usr/lib64/libcrypto.so.1.0.1e."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
@@ -109,7 +109,7 @@
- name: Assert that file /usr/lib64/libssl.so.1.0.1 is a link to /usr/lib64/libssl.so.10
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_stat_libssl_rhel_7_assert.stat.lnk_target == '/usr/lib64/libssl.so.10'"
+ that: __sap_hana_preconfigure_register_stat_libssl_rhel_7_assert.stat.lnk_target == '/usr/lib64/libssl.so.10'
fail_msg: "FAIL: File /usr/lib64/libssl.so.1.0.1 is not a link to /usr/lib64/libssl.so.10!"
success_msg: "PASS: File /usr/lib64/libssl.so.1.0.1 is a link to /usr/lib64/libssl.so.10."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
@@ -137,7 +137,7 @@
- name: Assert that file /usr/lib64/libcrypto.so.1.0.1 is a link to /usr/lib64/libcrypto.so.10
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_stat_libcrypto_rhel_7_assert.stat.lnk_target == '/usr/lib64/libcrypto.so.10'"
+ that: __sap_hana_preconfigure_register_stat_libcrypto_rhel_7_assert.stat.lnk_target == '/usr/lib64/libcrypto.so.10'
fail_msg: "FAIL: File /usr/lib64/libcrypto.so.1.0.1 is not a link to /usr/lib64/libcrypto.so.10!"
success_msg: "PASS: File /usr/lib64/libcrypto.so.1.0.1 is a link to /usr/lib64/libcrypto.so.10."
ignore_errors: "{{ sap_hana_preconfigure_assert_ignore_errors | d(false) }}"
diff --git a/roles/sap_hana_preconfigure/tasks/sapnote/assert-2382421.yml b/roles/sap_hana_preconfigure/tasks/sapnote/assert-2382421.yml
index d8e7a622f..e847d374e 100644
--- a/roles/sap_hana_preconfigure/tasks/sapnote/assert-2382421.yml
+++ b/roles/sap_hana_preconfigure/tasks/sapnote/assert-2382421.yml
@@ -65,8 +65,8 @@
- name: 2382421 - Assert that net.core.wmem_max is set correctly in {{ __sap_hana_preconfigure_etc_sysctl_saphana_conf }}
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_sysctl_saphana_conf_wmem_max_assert.stdout ==
- '{{ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_wmem_assert.stdout.split()[-1] }}'"
+ that: __sap_hana_preconfigure_register_sysctl_saphana_conf_wmem_max_assert.stdout ==
+ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_wmem_assert.stdout.split()[-1]
fail_msg: "FAIL: The value of 'net.core.wmem_max' in {{ __sap_hana_preconfigure_etc_sysctl_saphana_conf }} is
'{{ __sap_hana_preconfigure_register_sysctl_saphana_conf_wmem_max_assert.stdout }}' but the expected value is
'{{ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_wmem_assert.stdout.split()[-1] }}'!"
@@ -83,8 +83,8 @@
- name: 2382421 - Assert that net.core.wmem_max is set correctly as per sysctl
ansible.builtin.assert:
- that: "'{{ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_wmem_max_assert.stdout.split()[-1] }}' ==
- '{{ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_wmem_assert.stdout.split()[-1] }}'"
+ that: __sap_hana_preconfigure_register_sysctl_ipv4_tcp_wmem_max_assert.stdout.split()[-1] ==
+ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_wmem_assert.stdout.split()[-1]
fail_msg: "FAIL: The current value of 'net.core.wmem_max as per sysctl is
'{{ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_wmem_max_assert.stdout.split()[-1] }}' but the expected value is
'{{ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_wmem_assert.stdout.split()[-1] }}'!"
@@ -111,8 +111,8 @@
- name: 2382421 - Assert that net.core.rmem_max is set correctly in {{ __sap_hana_preconfigure_etc_sysctl_saphana_conf }}
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_sysctl_saphana_conf_rmem_max_assert.stdout ==
- '{{ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_rmem_assert.stdout.split()[-1] }}'"
+ that: __sap_hana_preconfigure_register_sysctl_saphana_conf_rmem_max_assert.stdout ==
+ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_rmem_assert.stdout.split()[-1]
fail_msg: "FAIL: The value of 'net.core.rmem_max' in {{ __sap_hana_preconfigure_etc_sysctl_saphana_conf }} is
'{{ __sap_hana_preconfigure_register_sysctl_saphana_conf_rmem_max_assert.stdout }}' but the expected value is
'{{ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_rmem_assert.stdout.split()[-1] }}'!"
@@ -129,8 +129,8 @@
- name: 2382421 - Assert that net.core.rmem_max is set correctly as per sysctl
ansible.builtin.assert:
- that: "'{{ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_rmem_max_assert.stdout.split()[-1] }}' ==
- '{{ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_rmem_assert.stdout.split()[-1] }}'"
+ that: __sap_hana_preconfigure_register_sysctl_ipv4_tcp_rmem_max_assert.stdout.split()[-1] ==
+ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_rmem_assert.stdout.split()[-1]
fail_msg: "FAIL: The current value of 'net.core.rmem_max as per sysctl is
'{{ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_rmem_max_assert.stdout.split()[-1] }}' but the expected value is
'{{ __sap_hana_preconfigure_register_sysctl_ipv4_tcp_rmem_assert.stdout.split()[-1] }}'!"
diff --git a/roles/sap_hana_preconfigure/tasks/sapnote/assert-3024346.yml b/roles/sap_hana_preconfigure/tasks/sapnote/assert-3024346.yml
index d44cfd7c4..8910a1dd0 100644
--- a/roles/sap_hana_preconfigure/tasks/sapnote/assert-3024346.yml
+++ b/roles/sap_hana_preconfigure/tasks/sapnote/assert-3024346.yml
@@ -80,7 +80,7 @@
- name: Assert that 'options sunrpc tcp_max_slot_table_entries' is set correctly in /etc/modprobe.d/sunrpc.conf
ansible.builtin.assert:
- that: "__sap_hana_preconfigure_register_tcp_max_slot_table_entries_assert.stdout == '128'"
+ that: __sap_hana_preconfigure_register_tcp_max_slot_table_entries_assert.stdout == '128'
fail_msg: "FAIL: The value of 'options sunrpc tcp_max_slot_table_entries' in '/etc/modprobe.d/sunrpc.conf' is
'{{ __sap_hana_preconfigure_register_tcp_max_slot_table_entries_assert.stdout }}' but the expected value is '128'!"
success_msg: "PASS: The value of 'options sunrpc tcp_max_slot_table_entries' in '/etc/modprobe.d/sunrpc.conf' is
diff --git a/roles/sap_hana_preconfigure/vars/RedHat_8.yml b/roles/sap_hana_preconfigure/vars/RedHat_8.yml
index 6982ceae3..5b77385af 100644
--- a/roles/sap_hana_preconfigure/vars/RedHat_8.yml
+++ b/roles/sap_hana_preconfigure/vars/RedHat_8.yml
@@ -7,6 +7,7 @@ __sap_hana_preconfigure_supported_rhel_minor_releases:
- "8.2"
- "8.4"
- "8.6"
+ - "8.8"
# required repos for RHEL 8:
__sap_hana_preconfigure_req_repos_redhat_8_0_x86_64:
@@ -181,15 +182,17 @@ __sap_hana_preconfigure_min_packages_8_7_x86_64:
__sap_hana_preconfigure_min_packages_8_7_ppc64le:
__sap_hana_preconfigure_min_packages_8_8_x86_64:
+ - [ 'kernel', '4.18.0-477.13.1.el8_8' ]
__sap_hana_preconfigure_min_packages_8_8_ppc64le:
+ - [ 'kernel', '4.18.0-477.13.1.el8_8' ]
__sap_hana_preconfigure_min_pkgs: "{{ lookup('vars', '__sap_hana_preconfigure_min_packages_' + ansible_distribution_version | string | replace(\".\", \"_\") + '_' + ansible_architecture) }}"
__sap_hana_preconfigure_packages:
# SAP NOTE 2772999:
- expect
-# package graphwiz: graph visualization toos, for supportability)
+# package graphviz: graph visualization tools, for supportability)
- graphviz
# package iptraf-ng: TCP/IP network monitor, for supportability)
- iptraf-ng
@@ -228,7 +231,7 @@ __sap_hana_preconfigure_packages:
__sap_hana_preconfigure_packages_min_install:
# SAP NOTE 2772999:
- expect
-# package graphwiz: graph visualization toos, for supportability)
+# package graphviz: graph visualization tools, for supportability)
# - graphviz
# package iptraf-ng: TCP/IP network monitor, for supportability)
# - iptraf-ng
diff --git a/roles/sap_hana_preconfigure/vars/RedHat_9.yml b/roles/sap_hana_preconfigure/vars/RedHat_9.yml
index 11c862ef9..2eaa8ed3d 100644
--- a/roles/sap_hana_preconfigure/vars/RedHat_9.yml
+++ b/roles/sap_hana_preconfigure/vars/RedHat_9.yml
@@ -3,6 +3,7 @@
# supported RHEL 9 minor releases for SAP HANA:
__sap_hana_preconfigure_supported_rhel_minor_releases:
- "9.0"
+ - "9.2"
# required repos for RHEL 9:
__sap_hana_preconfigure_req_repos_redhat_9_0_x86_64:
@@ -117,14 +118,14 @@ __sap_hana_preconfigure_req_repos_redhat_9_10_ppc64le:
# required SAP notes for RHEL 9:
__sap_hana_preconfigure_sapnotes_versions_x86_64:
- - { number: '3108302', version: '3' }
- - { number: '2382421', version: '40' }
+ - { number: '3108302', version: '8' }
+ - { number: '2382421', version: '45' }
- { number: '3024346', version: '3' }
__sap_hana_preconfigure_sapnotes_versions_ppc64le:
- - { number: '2055470', version: '87' }
- - { number: '3108302', version: '3' }
- - { number: '2382421', version: '40' }
+ - { number: '2055470', version: '90' }
+ - { number: '3108302', version: '8' }
+ - { number: '2382421', version: '45' }
- { number: '3024346', version: '3' }
__sap_hana_preconfigure_sapnotes_versions: "{{ lookup('vars', '__sap_hana_preconfigure_sapnotes_versions_' + ansible_architecture) }}"
@@ -147,8 +148,10 @@ __sap_hana_preconfigure_min_packages_9_1_x86_64:
__sap_hana_preconfigure_min_packages_9_1_ppc64le:
__sap_hana_preconfigure_min_packages_9_2_x86_64:
+ - [ 'kernel', '5.14.0-284.25.1.el9_2' ]
__sap_hana_preconfigure_min_packages_9_2_ppc64le:
+ - [ 'kernel', '5.14.0-284.25.1.el9_2' ]
__sap_hana_preconfigure_min_packages_9_3_x86_64:
@@ -195,7 +198,7 @@ __sap_hana_preconfigure_packages:
# package libxcrypt-compat: needed SAP HANA and also by sapstartsrv on RHEL 9:
# - libxcrypt-compat # now installed by role sap_general_preconfigure, see also SAP note 3108316, version 4.
# For support purposes:
-# package graphwiz: graph visualization toos, for supportability)
+# package graphviz: graph visualization tools, for supportability)
- graphviz
# package iptraf-ng: TCP/IP network monitor, for supportability)
- iptraf-ng
@@ -223,7 +226,7 @@ __sap_hana_preconfigure_packages_min_install:
# required for SAP HANA on RHEL 9:
- libxcrypt-compat
# For support purposes:
-# package graphwiz: graph visualization toos, for supportability)
+# package graphviz: graph visualization tools, for supportability)
# - graphviz
# package iptraf-ng: TCP/IP network monitor, for supportability)
# - iptraf-ng
diff --git a/roles/sap_hana_preconfigure/vars/SLES_15.yml b/roles/sap_hana_preconfigure/vars/SLES_15.yml
index a69c294bd..32dac1dea 100644
--- a/roles/sap_hana_preconfigure/vars/SLES_15.yml
+++ b/roles/sap_hana_preconfigure/vars/SLES_15.yml
@@ -24,3 +24,7 @@ __sap_hana_preconfigure_packages:
#
__sap_hana_preconfigure_grub_file: /tmp/grub
+
+# SLES_SAP uses saptune, whereas SLES uses sapconf.
+# The default value true runs saptune; installation.yml auto-detects the base product and adjusts accordingly.
+__sap_hana_preconfigure_run_saptune: true
diff --git a/roles/sap_hostagent/.ansible-lint b/roles/sap_hostagent/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_hostagent/.ansible-lint
+++ b/roles/sap_hostagent/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_hostagent/CONTRIBUTING.md b/roles/sap_hostagent/CONTRIBUTING.md
index 5350483e8..8f5bcb523 100644
--- a/roles/sap_hostagent/CONTRIBUTING.md
+++ b/roles/sap_hostagent/CONTRIBUTING.md
@@ -26,10 +26,10 @@ According to SAP Note the command is: saphostexec -upgrade
- get the installable Version
- install/upgrade if required
-### Important Steps from the documention:
+### Important Steps from the documentation:
- requires root
- - Optional Paramter: `-pf ` defaults to /usr/sap/exe
+ - Optional Parameter: `-pf ` defaults to /usr/sap/exe
1. Install Host agent (from unpacked directory)
```
diff --git a/roles/sap_hostagent/README.md b/roles/sap_hostagent/README.md
index 50132e720..13aa84fa4 100644
--- a/roles/sap_hostagent/README.md
+++ b/roles/sap_hostagent/README.md
@@ -48,7 +48,7 @@ It is also important that your disks are setup according to the [SAP storage req
|sap_hostagent_rpm_remote_path|Local directory path where RPM file is located|yes, unless `sap_hostagent_rpm_local_path` is used|
|sap_hostagent_rpm_file_name|Local RPM file name|yes|
|sap_hostagent_agent_tmp_directory|Temporary directory path that will be created on the target host|no (defaulted in the role)|
-|sap_hostagent_clean_tmp_directory|Boolean variable to indicate if the temporary directory will be removed or not afer the installation| no (defaulted in the role)|
+|sap_hostagent_clean_tmp_directory|Boolean variable to indicate if the temporary directory will be removed or not after the installation| no (defaulted in the role)|
### SAR based installations (content on ansible control node)
@@ -60,7 +60,7 @@ It is also important that your disks are setup according to the [SAP storage req
|sap_hostagent_sapcar_local_path|Local directory path where SAPCAR tool file is located|yes|
|sap_hostagent_sapcar_file_name|Local SAPCAR tool file name|yes|
|sap_hostagent_agent_tmp_directory|Temporary directory path that will be created on the target host|no (defaulted in the role)|
-|sap_hostagent_clean_tmp_directory|Boolean variable to indicate if the temporary directory will be removed or not afer the installation| no (defaulted in the role)|
+|sap_hostagent_clean_tmp_directory|Boolean variable to indicate if the temporary directory will be removed or not after the installation| no (defaulted in the role)|
### SAR based installations (with content existing on target node)
@@ -72,7 +72,7 @@ It is also important that your disks are setup according to the [SAP storage req
|sap_hostagent_sapcar_remote_path|Remote directory path of SAR archive|yes|
|sap_hostagent_sapcar_file_name|Remote file name of SAR archive|yes|
|sap_hostagent_agent_tmp_directory|Temporary directory path that will be created on the target host|no (defaulted in the role)|
-|sap_hostagent_clean_tmp_directory|Boolean variable to indicate if the temporary directory will be removed or not afer the installation| no (defaulted in the role)|
+|sap_hostagent_clean_tmp_directory|Boolean variable to indicate if the temporary directory will be removed or not after the installation| no (defaulted in the role)|
### SAP Bundle based installations
@@ -82,7 +82,7 @@ It is also important that your disks are setup according to the [SAP storage req
|sap_hostagent_installation_type|Source type of the installation for SAPHOSTAGENT|yes with `bundle` value|
|sap_hostagent_bundle_path|Target host directory path where SAP Installation Bundle has been unarchived|
|sap_hostagent_agent_tmp_directory|Temporary directory path that will be created on the target host|no (defaulted in the role)|
-|sap_hostagent_clean_tmp_directory|Boolean variable to indicate if the temporary directory will be removed or not afer the installation| no (defaulted in the role)|
+|sap_hostagent_clean_tmp_directory|Boolean variable to indicate if the temporary directory will be removed or not after the installation| no (defaulted in the role)|
### SSL Configuration
@@ -99,7 +99,7 @@ Right now the role will configure the PSE and create a CSR. Adding signed certif
Before using this role ensure your system has been configured properly to run SAP applications.
-You can use the supported role `sap_general_preconfigure` comming with RHEL 7 and 8 with RHEL for SAP Solutions Subscription
+You can use the supported role `sap_general_preconfigure`, which comes with RHEL 7 and 8 as part of the RHEL for SAP Solutions subscription.
The upstream version of this role can be found [here](https://github.com/linux-system-roles/sap_general_preconfigure)
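For an RPM-based installation, a hedged example of how the variables documented above could be combined in a playbook; the RPM path and file name below are placeholders.
```
---
- hosts: all
  become: true
  vars:
    sap_hostagent_installation_type: rpm
    sap_hostagent_rpm_local_path: /software/SAPHOSTAGENT         # placeholder path
    sap_hostagent_rpm_file_name: saphostagentrpm_61-20009394.rpm # placeholder file name
    sap_hostagent_clean_tmp_directory: true
  roles:
    - community.sap_install.sap_hostagent
```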
diff --git a/roles/sap_hostagent/meta/runtime.yml b/roles/sap_hostagent/meta/runtime.yml
deleted file mode 100644
index 2ee3c9fa9..000000000
--- a/roles/sap_hostagent/meta/runtime.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-requires_ansible: '>=2.9.10'
diff --git a/roles/sap_hostagent/tasks/common_post.yml b/roles/sap_hostagent/tasks/common_post.yml
index 009f93fb0..2d877f8f6 100644
--- a/roles/sap_hostagent/tasks/common_post.yml
+++ b/roles/sap_hostagent/tasks/common_post.yml
@@ -1,7 +1,8 @@
---
+
# Ensure {{ sap_hostagent_agent_tmp_directory }} directory is removed from the target host
- name: Ensure {{ sap_hostagent_agent_tmp_directory }} directory does not exist
- file:
+ ansible.builtin.file:
path: "{{ sap_hostagent_agent_tmp_directory }}"
state: absent
mode: '0755'
diff --git a/roles/sap_hostagent/tasks/common_pre.yml b/roles/sap_hostagent/tasks/common_pre.yml
index 7f752cf0f..4c2e5acc2 100644
--- a/roles/sap_hostagent/tasks/common_pre.yml
+++ b/roles/sap_hostagent/tasks/common_pre.yml
@@ -2,7 +2,7 @@
# Ensure {{ sap_hostagent_agent_tmp_directory }} directory exists on the target host
- name: Ensure {{ sap_hostagent_agent_tmp_directory }} directory exists
- file:
+ ansible.builtin.file:
path: "{{ sap_hostagent_agent_tmp_directory }}"
state: directory
- mode: '0755'
\ No newline at end of file
+ mode: '0755'
diff --git a/roles/sap_hostagent/tasks/config_ssl.yml b/roles/sap_hostagent/tasks/config_ssl.yml
index e0a32b7c8..3cdb5510c 100644
--- a/roles/sap_hostagent/tasks/config_ssl.yml
+++ b/roles/sap_hostagent/tasks/config_ssl.yml
@@ -2,7 +2,7 @@
# Ensure 'sapadm' user home directory exists
- name: Ensure 'sapadm' user home directory exists
- file:
+ ansible.builtin.file:
path: "/home/sapadm"
state: directory
mode: '0755'
@@ -12,7 +12,7 @@
# Ensure SSL is configured for agent communication
# https://help.sap.com/viewer/6e1636d91ccc458c987094ee1fb864ae/HAG_CURRENT_VERSION/en-US/6aac42c2e742413da050eaecd57f785d.html
- name: Prepare the Personal Security Environment (PSE) for the server
- file:
+ ansible.builtin.file:
path: "/usr/sap/hostctrl/exe/sec"
state: directory
mode: '0755'
@@ -20,12 +20,12 @@
group: sapsys
- name: Ensure any previous PSE generated by this role does not exists
- file:
+ ansible.builtin.file:
path: /usr/sap/hostctrl/exe/sec/SAPSSLS.pse
state: absent
- name: Create the server PSE, the server certificate therein, and the Certificate Signing Request (CSR)
- command: >
+ ansible.builtin.command: >
/usr/sap/hostctrl/exe/sapgenpse gen_pse
-p SAPSSLS.pse
-x "{{ sap_hostagent_ssl_passwd }}"
@@ -42,7 +42,7 @@
changed_when: "'Certificate Request:' in ssl_config_output.stdout"
- name: Grant SAP Host Agent access to the server PSE
- command: >
+ ansible.builtin.command: >
/usr/sap/hostctrl/exe/sapgenpse seclogin
-p SAPSSLS.pse
-x "{{ sap_hostagent_ssl_passwd }}"
@@ -57,11 +57,11 @@
register: ssl_config_output
changed_when: "'Added SSO-credentials for PSE' in ssl_config_output.stdout"
-# Right now there is no option to import teh signed certificate. The CSR must be signed
+# Right now there is no option to import the signed certificate. The CSR must be signed
# by a valid CA in order to import it
-#
+#
# - name: Import the signed certificate into the server PSE
-# command: >
+# ansible.builtin.command: >
# /usr/sap/hostctrl/exe/sapgenpse import_own_cert
# -p SAPSSLS.pse
# -x "{{ sap_hostagent_ssl_passwd }}"
@@ -74,7 +74,7 @@
# SECUDIR: /usr/sap/hostctrl/exe/sec
- name: Verify the server certificate chain
- command: >
+ ansible.builtin.command: >
/usr/sap/hostctrl/exe/sapgenpse get_my_name
-x "{{ sap_hostagent_ssl_passwd }}"
-v
@@ -89,6 +89,6 @@
changed_when: "'Opening PSE' in ssl_config_output.stdout"
- name: Restart SAPHOSTAGENT
- command: /usr/sap/hostctrl/exe/saphostexec -restart
+ ansible.builtin.command: /usr/sap/hostctrl/exe/saphostexec -restart
register: ssl_config_output
changed_when: "'start hostcontrol using profile' in ssl_config_output.stdout"
diff --git a/roles/sap_hostagent/tasks/deploy_bundle.yml b/roles/sap_hostagent/tasks/deploy_bundle.yml
index 01621c356..0e859ae4f 100644
--- a/roles/sap_hostagent/tasks/deploy_bundle.yml
+++ b/roles/sap_hostagent/tasks/deploy_bundle.yml
@@ -1,15 +1,15 @@
---
- name: Extract the SAPHOSTAGENT TGZ file from the Bundle
- unarchive:
+ ansible.builtin.unarchive:
src: "{{ sap_hostagent_bundle_path }}/HOSTAGENT.TGZ"
dest: "{{ sap_hostagent_agent_tmp_directory }}"
remote_src: yes
mode: '0755'
- name: Install SAPHOSTAGENT
- command: "{{ sap_hostagent_agent_tmp_directory }}/global/hdb/saphostagent_setup/saphostexec -install"
+ ansible.builtin.command: "{{ sap_hostagent_agent_tmp_directory }}/global/hdb/saphostagent_setup/saphostexec -install"
register: installagent
args:
chdir: "{{ sap_hostagent_agent_tmp_directory }}/global/hdb/saphostagent_setup/"
- changed_when: "'Install service' in installagent.stdout"
\ No newline at end of file
+ changed_when: "'Install service' in installagent.stdout"
diff --git a/roles/sap_hostagent/tasks/deploy_rpm.yml b/roles/sap_hostagent/tasks/deploy_rpm.yml
index b311f7c38..8b53a8496 100644
--- a/roles/sap_hostagent/tasks/deploy_rpm.yml
+++ b/roles/sap_hostagent/tasks/deploy_rpm.yml
@@ -1,22 +1,22 @@
---
- name: Copy RPM based SAPHOSTAGENT to the target host
- copy:
+ ansible.builtin.copy:
src: "{{ sap_hostagent_rpm_local_path }}/{{ sap_hostagent_rpm_file_name }}"
dest: "{{ sap_hostagent_agent_tmp_directory }}/{{ sap_hostagent_rpm_file_name }}"
mode: '0755'
when: sap_hostagent_rpm_local_path is defined
- name: Ensure RPM file is installed
- yum:
+ ansible.builtin.yum:
name: "{{ sap_hostagent_agent_tmp_directory }}/{{ sap_hostagent_rpm_file_name }}"
state: present
disable_gpg_check: yes
when: sap_hostagent_rpm_local_path is defined
- name: Ensure RPM file is installed
- yum:
+ ansible.builtin.yum:
name: "{{ sap_hostagent_rpm_remote_path }}/{{ sap_hostagent_rpm_file_name }}"
state: present
disable_gpg_check: yes
- when: sap_hostagent_rpm_remote_path is defined
\ No newline at end of file
+ when: sap_hostagent_rpm_remote_path is defined
diff --git a/roles/sap_hostagent/tasks/deploy_sar.yml b/roles/sap_hostagent/tasks/deploy_sar.yml
index bb388071b..121da7254 100644
--- a/roles/sap_hostagent/tasks/deploy_sar.yml
+++ b/roles/sap_hostagent/tasks/deploy_sar.yml
@@ -1,19 +1,19 @@
---
- name: Copy SAR based SAPHOSTAGENT to the target host
- copy:
+ ansible.builtin.copy:
src: "{{ sap_hostagent_sar_local_path }}/{{ sap_hostagent_sar_file_name }}"
dest: "{{ sap_hostagent_agent_tmp_directory }}/{{ sap_hostagent_sar_file_name }}"
mode: '0755'
- name: Copy SAPCAR tool to the target host
- copy:
+ ansible.builtin.copy:
src: "{{ sap_hostagent_sapcar_local_path }}/{{ sap_hostagent_sapcar_file_name }}"
dest: "{{ sap_hostagent_agent_tmp_directory }}/{{ sap_hostagent_sapcar_file_name }}"
mode: '0755'
- name: Extract the SAPHOSTAGENT archive using SAPCAR
- command: >-
+ ansible.builtin.command: >-
{{ sap_hostagent_agent_tmp_directory }}/{{ sap_hostagent_sapcar_file_name }} \
-xvf {{ sap_hostagent_agent_tmp_directory }}/{{ sap_hostagent_sar_file_name }} -manifest SIGNATURE.SMF
register: extractagent
@@ -22,8 +22,8 @@
changed_when: "'SAPCAR: processing archive' in extractagent.stdout"
- name: Install SAPHOSTAGENT
- command: "{{ sap_hostagent_agent_tmp_directory }}/saphostexec -install"
+ ansible.builtin.command: "{{ sap_hostagent_agent_tmp_directory }}/saphostexec -install"
register: installagent
args:
chdir: "{{ sap_hostagent_agent_tmp_directory }}"
- changed_when: "'Install service' in installagent.stdout"
\ No newline at end of file
+ changed_when: "'Install service' in installagent.stdout"
diff --git a/roles/sap_hostagent/tasks/deploy_sar_remote.yml b/roles/sap_hostagent/tasks/deploy_sar_remote.yml
index b4936a193..f99763ed8 100644
--- a/roles/sap_hostagent/tasks/deploy_sar_remote.yml
+++ b/roles/sap_hostagent/tasks/deploy_sar_remote.yml
@@ -1,7 +1,7 @@
---
- name: Extract the SAPHOSTAGENT archive using SAPCAR
- command: >-
+ ansible.builtin.command: >-
{{ sap_hostagent_sapcar_remote_path }}/{{ sap_hostagent_sapcar_file_name }} \
-xvf {{ sap_hostagent_sar_remote_path }}/{{ sap_hostagent_sar_file_name }} -manifest SIGNATURE.SMF
register: extractagent
@@ -10,7 +10,7 @@
changed_when: "'SAPCAR: processing archive' in extractagent.stdout"
- name: Install SAPHOSTAGENT
- command: "{{ sap_hostagent_agent_tmp_directory }}/saphostexec -install"
+ ansible.builtin.command: "{{ sap_hostagent_agent_tmp_directory }}/saphostexec -install"
register: installagent
args:
chdir: "{{ sap_hostagent_agent_tmp_directory }}"
diff --git a/roles/sap_hostagent/tasks/main.yml b/roles/sap_hostagent/tasks/main.yml
index c6cc9ad1a..17d29203a 100644
--- a/roles/sap_hostagent/tasks/main.yml
+++ b/roles/sap_hostagent/tasks/main.yml
@@ -1,27 +1,27 @@
---
-# Execute common pre installation tasks
-- import_tasks: common_pre.yml
+- name: Execute common pre installation tasks
+ ansible.builtin.import_tasks: common_pre.yml
-# Deploy SAPHOSTAGENT using RPM file
-- import_tasks: deploy_rpm.yml
+- name: Deploy SAPHOSTAGENT using RPM file
+ ansible.builtin.import_tasks: deploy_rpm.yml
when: sap_hostagent_installation_type == "rpm"
-# Deploy SAPHOSTAGENT using SAR file
-- import_tasks: deploy_sar.yml
+- name: Deploy SAPHOSTAGENT using SAR file
+ ansible.builtin.import_tasks: deploy_sar.yml
when: sap_hostagent_installation_type == "sar"
- # Deploy SAPHOSTAGENT using SAR file existing on system
-- import_tasks: deploy_sar_remote.yml
+- name: Deploy SAPHOSTAGENT using SAR file existing on system
+ ansible.builtin.import_tasks: deploy_sar_remote.yml
when: sap_hostagent_installation_type == "sar-remote"
-# Deploy SAPHOSTAGENT using SAP Installation Bundle
-- import_tasks: deploy_bundle.yml
+- name: Deploy SAPHOSTAGENT using SAP Installation Bundle
+ ansible.builtin.import_tasks: deploy_bundle.yml
when: sap_hostagent_installation_type == "bundle"
-# Configuring SSL for SAP Host Agent
-- import_tasks: config_ssl.yml
+- name: Configure SSL for SAP Host Agent
+ ansible.builtin.import_tasks: config_ssl.yml
when: sap_hostagent_config_ssl
-# Execute common post installation tasks
-- import_tasks: common_post.yml
+- name: Execute common post installation tasks
+ ansible.builtin.import_tasks: common_post.yml
diff --git a/roles/sap_hypervisor_node_preconfigure/.ansible-lint b/roles/sap_hypervisor_node_preconfigure/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_hypervisor_node_preconfigure/.ansible-lint
+++ b/roles/sap_hypervisor_node_preconfigure/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_hypervisor_node_preconfigure/README.md b/roles/sap_hypervisor_node_preconfigure/README.md
index cc26e0917..2dfe015d0 100644
--- a/roles/sap_hypervisor_node_preconfigure/README.md
+++ b/roles/sap_hypervisor_node_preconfigure/README.md
@@ -1,17 +1,177 @@
`EXPERIMENTAL`
-sap_hypervisor_node_preconfigure
-=======================
+# sap_hypervisor_node_preconfigure
+
+This role will configure the following hypervisors in order to run SAP workloads:
+* Red Hat OpenShift Virtualization (OCPV)
+* Red Hat Enterprise Virtualization (RHV)
+
+## Platform: Red Hat OpenShift Virtualization
+
+Will configure a plain vanilla OpenShift cluster so it can be used for SAP workloads.
+
+### Requirements
+* An OpenShift cluster, ideally without any previous customization.
+* The worker nodes should have > 96GB of memory.
+* Worker nodes need Intel CPUs that provide the TSX feature.
+* Storage is required, e.g. via NFS, OpenShift Data Foundation or local storage. This role can set up access to a Netapp filer via the Trident storage connector.
+Local storage will be configured using the host path provisioner.
+* Point the `KUBECONFIG` environment variable to your `kubeconfig`.
+* Make sure the dependencies mentioned below are installed.
+* If you didn't install the role already, make it available in an Ansible roles directory so that your local checkout is found by Ansible, e.g.:
+```
+mkdir -p ~/.ansible/roles/
+ln -sf ~/community.sap_install/roles/sap_hypervisor_node_preconfigure ~/.ansible/roles/
+```
+### Dependencies
-This role will set and check the required settings and parameters for a hypervisor running VMs for SAP HANA.
+Requires the Ansible kubernetes module and the Python 3 kubernetes bindings. On a RHEL-based system these are named
+* python3-kubernetes
+* ansible-collection-kubernetes-core
-Requirements
-------------
-A RHV hypervisor.
+The `oc` binary needs to be available in the path.
-Role Variables
---------------
+### Role Variables
+General variables are defined in sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_ocp_virt.yml
+```
+# Install the trident NFS storage provider. If yes, expects configuration details under
+# sap_hypervisor_node_preconfigure_cluster_config.trident, see example config.
+sap_hypervisor_node_preconfigure_install_trident: True|False
+# URL of the trident installer package to use
+sap_hypervisor_node_preconfigure_install_trident_url: https://github.com/NetApp/trident/releases/download/v23.01.0/trident-installer-23.01.0.tar.gz
+
+# should SRIOV be enabled for unsupported NICs
+sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics: True|False
+
+# Amount of memory [GB] to be reserved for the hypervisor on hosts >= 512GB
+sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_ge_512: 64 #GB
+# Amount of memory [GB] to be reserved for the hypervisor on hosts < 512GB
+sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_lt_512: 32 #GB
+
+# Should the check for the minimal amount of memory be ignored? Minimal amount is 96 GB
+# If ignored, the amount of $hostmemory - $reserved is allocated with a lower bound of 0 in case $reserved > $hostmemory
+sap_hypervisor_node_preconfigure_ignore_minimal_memory_check: True|False
+
+# Define if the host path provisioner should be installed in order to use a local disk as storage device.
+# The storage device to be used is set with the following variable, e.g.:
+# sap_hypervisor_node_preconfigure_cluster_config.worker_localstorage_device: /dev/sdb
+sap_hypervisor_node_preconfigure_install_hpp: True|False
+```
+The following variables describe the nodes and networks to be used. It can make sense to keep them in a separate file; see `playbooks/vars/sample-variables-sap-hypervisor-node-preconfigure-rh_ocp_virt.yml` for an example.
+```
+sap_hypervisor_node_preconfigure_cluster_config:
+ # URL under which the OCP cluster is reachable
+ cluster_url: ocpcluster.domain.org
+
+ # namespace under which the VMs are created, note this has to be
+ # openshift-sriov-network-operator in case of using SRIOV network
+ # devices
+ vm_namespace: sap
+
+ # Optional, configuration for trident driver for Netapp NFS filer
+ trident:
+ management: management.domain.org
+ data: datalif.netapp.domain.org
+ svm: sap_svm
+ backend: nas_backend
+ aggregate: aggregate_Name
+ username: admin
+ password: xxxxx
+ storage_driver: ontap-nas
+ storage_prefix: ocpv_sap_
+
+ # CPU cores which will be reserved for kubernetes
+ worker_kubernetes_reserved_cpus: "0,1"
+
+ # Storage device used for host path provisioner as local storage.
+ worker_localstorage_device: /dev/vdb
+
+ # detailed configuration for every worker that should be configured
+ workers:
+ - name: worker-0 # name must match the node name
+ networks: # Example network config
+ - name: sapbridge # using a bridge
+ description: SAP bridge
+ state: up
+ type: linux-bridge
+ ipv4:
+ enabled: false
+ auto-gateway: false
+ auto-dns: false
+ bridge:
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens1f0 # network IF name
+ - name: storage # an SRIOV device
+ interface: ens2f0 # network IF name
+ type: sriov
+
+ - bridge: # another bridge
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens2f0 # network IF name
+ description: storage
+ mtu: 9000
+ ipv4:
+ address:
+ - ip: 192.168.1.51 # IP config
+ prefix-length: 24
+ auto-dns: false
+ auto-gateway: false
+ enabled: true
+ name: storagebridge
+ state: up
+ type: linux-bridge
+ - name: multi # another SRIOV device
+ interface: ens2f1 # network IF name
+ type: sriov
+
+ - name: worker-1 # second worker configuration
+ networks: # Example network config
+ - name: sapbridge # using a bridge
+ description: SAP bridge
+ state: up
+ type: linux-bridge
+ ipv4:
+ enabled: false
+ auto-gateway: false
+ auto-dns: false
+ bridge:
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens1f0 # network IF name
+ - name: storage # an SRIOV device
+ interface: ens2f0 # network IF name
+ type: sriov
+```
+### Example Playbook
+See `playbooks/sample-sap-hypervisor-redhat_ocp_virt-preconfigure.yml` for an example.
+### Example Usage
+Make sure to set the `KUBECONFIG` environment variable, e.g.
+```
+export KUBECONFIG=~/.kubeconfig
+```
+To invoke the example playbook with the example configuration, using your localhost as the Ansible host, use the following command line:
+```
+ansible-playbook --connection=local -i localhost, playbooks/sample-sap-hypervisor-redhat_ocp_virt-preconfigure.yml -e @s/sample-sap-hypervisor-redhat_ocp_virt-preconfigure.yml
+```
+
+## Platform: RHEL KVM
+This Ansible Role preconfigures Red Hat Virtualization (RHV), which was called Red Hat Enterprise Virtualization (RHEV) prior to the 4.4 release. Red Hat Virtualization (RHV) consists of the 'Red Hat Virtualization Manager (RHV-M)' and the 'Red Hat Virtualization Host (RHV-H)' hypervisor nodes that this Ansible Role preconfigures. Please note that Red Hat Virtualization is discontinued and maintenance support will end mid-2024. Extended life support for RHV ends mid-2026.
+This Ansible Role does not preconfigure RHEL KVM (RHEL-KVM) hypervisor nodes. Please note that RHEL KVM is standalone and does not have management tooling (previously provided by RHV-M).
+
+### Requirements
+* A RHV hypervisor.
+
+### Role Variables
`sap_hypervisor_node_preconfigure_reserved_ram (default: 100)` Reserve memory [GB] for the hypervisor host. Depending on the use case, it should be at least 50-100 GB.
`sap_hypervisor_node_preconfigure_reserve_hugepages (default: static)` Hugepage allocation method: {static|runtime}.
@@ -43,30 +203,37 @@ runtime: done with hugeadm which is faster, but can in some cases not ensure all
`sap_hypervisor_node_preconfigure_run_grub2_mkconfig (default: yes)` Update the grub2 config.
-Example Playbook
-----------------
-
+### Example Playbook
Simple example that just sets the parameters.
```
+---
- hosts: all
- roles:
- - sap_hypervisor_node_preconfigure
+ gather_facts: true
+ serial: 1
+ vars:
+ sap_hypervisor_node_platform: redhat_rhel_kvm
+ tasks:
+ - name: Include Role
+ ansible.builtin.include_role:
+ name: sap_hypervisor_node_preconfigure
```
Run in assert mode to verify that parameters have been set.
```
+---
- hosts: all
- roles:
- - sap_hypervisor_node_preconfigure
+ gather_facts: true
+ serial: 1
vars:
- - sap_hypervisor_node_preconfigure_assert: yes
+ sap_hypervisor_node_platform: redhat_rhel_kvm
+ sap_hypervisor_node_preconfigure_assert: yes
+ tasks:
+ - name: Include Role
+ ansible.builtin.include_role:
+ name: sap_hypervisor_node_preconfigure
```
-License
--------
-
+### License
Apache 2.0
-Author Information
-------------------
-
+### Author Information
Nils Koenig (nkoenig@redhat.com)
diff --git a/roles/sap_hypervisor_node_preconfigure/defaults/main.yml b/roles/sap_hypervisor_node_preconfigure/defaults/main.yml
index d083000e2..741dfca6f 100644
--- a/roles/sap_hypervisor_node_preconfigure/defaults/main.yml
+++ b/roles/sap_hypervisor_node_preconfigure/defaults/main.yml
@@ -2,3 +2,114 @@
# ibmpower_phyp, redhat_ocp_virt, redhat_rhel_kvm, vmware_vsphere
sap_hypervisor_node_platform:
+
+# Example configuration
+sap_hypervisor_node_preconfigure_cluster_config:
+
+ # URL under which the OCP cluster is reachable
+ cluster_url: ocpcluster.domain.org
+
+ # namespace under which the VMs are created, note this has to be
+ # openshift-sriov-network-operator in case of using SRIOV network
+ # devices
+ vm_namespace: sap
+
+ # Optional, configuration for trident driver for Netapp NFS filer
+ trident:
+ management: management.domain.org
+ data: datalif.netapp.domain.org
+ svm: sap_svm
+ backend: nas_backend
+ aggregate: aggregate_Name
+ username: admin
+ password: xxxxx
+ storage_driver: ontap-nas
+ storage_prefix: ocpv_sap_
+
+ # CPU cores reserved for kubernetes on worker node
+ worker_kubernetes_reserved_cpus: "0,1"
+
+ # Storage device which should be used if host path provisioner is used
+ worker_localstorage_device: /dev/vdb
+
+ # detailed configuration for every worker that should be configured
+ workers:
+ - name: worker-0 # name must match the node name
+ networks: # Example network config
+ - name: sapbridge # using a bridge
+ description: SAP bridge
+ state: up
+ type: linux-bridge
+ ipv4:
+ enabled: false
+ auto-gateway: false
+ auto-dns: false
+ bridge:
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens1f0 # network IF name
+ - name: storage # an SRIOV device
+ interface: ens2f0 # network IF name
+ type: sriov
+
+ - bridge: # another bridge
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens2f0 # network IF name
+ description: storage
+ mtu: 9000
+ ipv4:
+ address:
+ - ip: 192.168.1.40 # IP config
+ prefix-length: 24
+ auto-dns: false
+ auto-gateway: false
+ enabled: true
+ name: storagebridge
+ state: up
+ type: linux-bridge
+ - name: multi # another SRIOV device
+ interface: ens2f1 # network IF name
+ type: sriov
+
+ - name: worker-1 # second worker configuration
+ networks: # Example network config
+ - name: sapbridge # using a bridge
+ description: SAP bridge
+ state: up
+ type: linux-bridge
+ ipv4:
+ enabled: false
+ auto-gateway: false
+ auto-dns: false
+ bridge:
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens1f0 # network IF name
+ - name: storagebridge
+ description: storage
+ state: up
+ type: linux-bridge
+ bridge: # another bridge
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens2f0 # network IF name
+ mtu: 9000
+ ipv4:
+ address:
+ - ip: 192.168.1.41 # IP config
+ prefix-length: 24
+ auto-dns: false
+ auto-gateway: false
+ enabled: true
+ - name: storage # an SRIOV device
+ interface: ens2f0 # network IF name
+ type: sriov
diff --git a/roles/sap_hypervisor_node_preconfigure/handlers/main.yml b/roles/sap_hypervisor_node_preconfigure/handlers/main.yml
index f920c7196..d94364078 100644
--- a/roles/sap_hypervisor_node_preconfigure/handlers/main.yml
+++ b/roles/sap_hypervisor_node_preconfigure/handlers/main.yml
@@ -1,4 +1,3 @@
---
-
-- name: SAP certified hypervisor node preconfigure - Include Handler Tasks for {{ sap_hypervisor_node_platform }}
- ansible.builtin.include_tasks: "{{ role_path }}/handlers/platform/{{ sap_hypervisor_node_platform }}/main.yml"
+- name: Hypervisor node preconfigure - Include Handler Tasks for {{ sap_hypervisor_node_platform }}
+ ansible.builtin.import_tasks: "platform/{{ sap_hypervisor_node_platform }}/main.yml"
diff --git a/roles/sap_hypervisor_node_preconfigure/handlers/platform/redhat_rhel_kvm/main.yml b/roles/sap_hypervisor_node_preconfigure/handlers/platform/redhat_rhel_kvm/main.yml
index 21c4a5b4e..f773bdd89 100644
--- a/roles/sap_hypervisor_node_preconfigure/handlers/platform/redhat_rhel_kvm/main.yml
+++ b/roles/sap_hypervisor_node_preconfigure/handlers/platform/redhat_rhel_kvm/main.yml
@@ -1,5 +1,4 @@
---
-
- name: "Check if server is booted in BIOS or UEFI mode"
ansible.builtin.stat:
path: /sys/firmware/efi
@@ -27,7 +26,6 @@
become: true
become_user: root
-
- name: "Debug grub-mkconfig BIOS mode"
ansible.builtin.debug:
var: __sap_hypervisor_node_preconfigure_register_grub2_mkconfig_bios_mode.stdout_lines,
@@ -40,14 +38,16 @@
- name: "Set the grub.cfg location RHEL"
ansible.builtin.set_fact:
__sap_hypervisor_node_preconfigure_uefi_boot_dir: /boot/efi/EFI/redhat/grub.cfg
- when:
+ listen: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ when:
- ansible_distribution == 'RedHat'
- name: "Set the grub.cfg location SLES"
ansible.builtin.set_fact:
__sap_hypervisor_node_preconfigure_uefi_boot_dir: /boot/efi/EFI/BOOT/grub.cfg
- when:
- - ansible_distribution == 'SLES' or ansible_distribution == 'SLES_SAP'
+ listen: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ when:
+ - ansible_distribution == 'SLES' or ansible_distribution == 'SLES_SAP'
- name: "Run grub-mkconfig (UEFI mode)"
ansible.builtin.command: "grub2-mkconfig -o {{ __sap_hypervisor_node_preconfigure_uefi_boot_dir }}"
@@ -60,7 +60,6 @@
become: true
become_user: root
-
- name: "Debug grub-mkconfig UEFI"
ansible.builtin.debug:
var: __sap_hypervisor_node_preconfigure_register_grub2_mkconfig_uefi_mode.stdout_lines,
@@ -70,12 +69,6 @@
- __sap_hypervisor_node_preconfigure_register_stat_sys_firmware_efi.stat.exists
- sap_hypervisor_node_preconfigure_run_grub2_mkconfig|d(true)
-- name: "Run grubby for enabling TSX"
- ansible.builtin.command: grubby --args="tsx=on" --update-kernel=ALL
- register: __sap_hypervisor_node_preconfigure_register_grubby_update
- listen: __sap_hypervisor_node_preconfigure_grubby_update_handler
- notify: __sap_hypervisor_node_preconfigure_reboot_handler
-
- name: Reboot the managed node
ansible.builtin.reboot:
test_command: /bin/true
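For orientation, a hypothetical task showing how these handlers are triggered: any task that changes the kernel command line can notify the shared listen topic, which then runs the grub2 configuration handlers defined in this file.
```
# Hypothetical example task; the actual tasks of this role may differ.
- name: Example - Ensure tsx=on is present on the kernel command line
  ansible.builtin.lineinfile:
    path: /etc/default/grub
    regexp: '^GRUB_CMDLINE_LINUX='
    line: 'GRUB_CMDLINE_LINUX="rhgb quiet tsx=on"'   # placeholder value
  notify: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
```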
diff --git a/roles/sap_hypervisor_node_preconfigure/meta/main.yml b/roles/sap_hypervisor_node_preconfigure/meta/main.yml
index 0b4c2c801..1b04b2f41 100644
--- a/roles/sap_hypervisor_node_preconfigure/meta/main.yml
+++ b/roles/sap_hypervisor_node_preconfigure/meta/main.yml
@@ -3,8 +3,12 @@ galaxy_info:
namespace: community
role_name: sap_hypervisor_node_preconfigure
author: Nils Koenig
- description: Provide the configuration of SAP-certified hypervisors
+ description: Provide the configuration of hypervisors for SAP workloads
license: Apache-2.0
- min_ansible_version: 2.9
- galaxy_tags: [ 'sap', 'hana', 'rhel', 'redhat', 'sles', 'suse' ]
+ min_ansible_version: "2.9"
+ galaxy_tags: [ 'sap', 'hana', 'rhel', 'redhat', 'openshift' ]
+ platforms:
+ - name: RHEL
+ versions:
+ 8
dependencies: []
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/main.yml b/roles/sap_hypervisor_node_preconfigure/tasks/main.yml
index ba71f1f87..ad1d9fe14 100644
--- a/roles/sap_hypervisor_node_preconfigure/tasks/main.yml
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/main.yml
@@ -1,7 +1,6 @@
---
-
- name: SAP certified hypervisor node preconfigure - Include Vars for {{ sap_hypervisor_node_platform }}
- ansible.builtin.include_vars: "{{ role_path }}/vars/platform_defaults_{{ sap_hypervisor_node_platform }}.yml"
+ ansible.builtin.include_vars: "vars/platform_defaults_{{ sap_hypervisor_node_platform }}.yml"
- name: SAP certified hypervisor node preconfigure - Include Tasks for {{ sap_hypervisor_node_platform }}
- ansible.builtin.include_tasks: "{{ role_path }}/tasks/platform/{{ sap_hypervisor_node_platform }}/main.yml"
+ ansible.builtin.include_tasks: "tasks/platform/{{ sap_hypervisor_node_platform }}/main.yml"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/99-kargs-worker.yml.j2 b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/99-kargs-worker.yml.j2
new file mode 100644
index 000000000..32064a8d7
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/99-kargs-worker.yml.j2
@@ -0,0 +1,17 @@
+apiVersion: machineconfiguration.openshift.io/v1
+kind: MachineConfig
+metadata:
+ labels:
+ machineconfiguration.openshift.io/role: worker
+ name: 99-kargs-worker
+spec:
+ config:
+ ignition:
+ version: 3.2.0
+ kernelArguments:
+ - intel_iommu=on
+ - iommu=pt
+ - default_hugepagesz=1GB
+ - hugepagesz=1GB
+ - hugepages={{ __sap_hypervisor_node_preconfigure_register_worker_reserved_hugepages }}
+ - tsx=on
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/configure-worker-node.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/configure-worker-node.yml
new file mode 100644
index 000000000..3af1dcf5b
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/configure-worker-node.yml
@@ -0,0 +1,19 @@
+---
+- name: Label nodes
+ ansible.builtin.command: "oc label node {{ __sap_hypervisor_node_preconfigure_register_worker.name }} cpumanager=true --overwrite=true"
+ register: __sap_hypervisor_node_preconfigure_label_node_result
+ changed_when: __sap_hypervisor_node_preconfigure_label_node_result.rc != 0
+
+- name: Include node network
+ ansible.builtin.include_tasks: node-network.yml
+ with_items: "{{ __sap_hypervisor_node_preconfigure_register_worker.networks }}"
+ loop_control:
+ loop_var: __sap_hypervisor_node_preconfigure_register_worker_network
+ index_var: __sap_hypervisor_node_preconfigure_register_worker_network_nr
+ when: __sap_hypervisor_node_preconfigure_register_worker.networks is defined
+
+# How to wait for the node to be schedulable? (NodeSchedulable)
+- name: Wait for all k8s nodes to be ready
+ ansible.builtin.command: oc wait --for=condition=Ready nodes --all --timeout=3600s
+ register: __sap_hypervisor_node_preconfigure_register_nodes_ready
+ changed_when: __sap_hypervisor_node_preconfigure_register_nodes_ready.rc != 0
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/create-sap-bridge.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/create-sap-bridge.yml
new file mode 100644
index 000000000..bbdbdfffd
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/create-sap-bridge.yml
@@ -0,0 +1,49 @@
+---
+- name: Create SAP bridge NodeNetworkConfigurationPolicy
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: nmstate.io/v1
+ kind: NodeNetworkConfigurationPolicy
+ metadata:
+ name: "sap-bridge-policy-{{ worker.name }}"
+ spec:
+ nodeSelector:
+ kubernetes.io/hostname: "{{ worker.name }}"
+ desiredState:
+ interfaces:
+ - name: sapbridge
+ description: "Linux bridge with {{ worker.sap_bridge_interface }} as physical port to access SAP network"
+ type: linux-bridge
+ state: up
+ ipv4:
+ enabled: false
+ bridge:
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: "{{ worker.sap_bridge_interface }}"
+
+
+- name: Create SAP bridge NetworkAttachmentDefinition
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: "k8s.cni.cncf.io/v1"
+ kind: NetworkAttachmentDefinition
+ metadata:
+ kubernetes.io/hostname: "{{ worker.name }}"
+ machineconfiguration.openshift.io/role: "{{ worker.name }}"
+ namespace: "{{ vm_namespace }}"
+ name: sap-bridge-network-definition
+ annotations:
+ k8s.v1.cni.cncf.io/resourceName: bridge.network.kubevirt.io/sapbridge
+ spec:
+ config: '{
+ "cniVersion": "0.3.1",
+ "name": "sap-bridge-network-definition",
+ "type": "cnv-bridge",
+ "bridge": "sapbridge",
+ "macspoofchk": true
+ }'
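Not part of this role, but for orientation: a sketch of the network-related fragment of a KubeVirt VirtualMachine spec that would attach to the `sap-bridge-network-definition` created above.
```
# Illustrative fragment only; the surrounding VirtualMachine definition is omitted.
spec:
  template:
    spec:
      domain:
        devices:
          interfaces:
            - name: sapbridge
              bridge: {}
      networks:
        - name: sapbridge
          multus:
            networkName: sap-bridge-network-definition
```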
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-cnv-operator.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-cnv-operator.yml
new file mode 100644
index 000000000..1a08c9306
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-cnv-operator.yml
@@ -0,0 +1,74 @@
+---
+- name: Create the CNV Operator namespace
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: openshift-cnv
+
+- name: Create CNV OperatorGroup kubevirt-hyperconverged-group
+ kubernetes.core.k8s:
+ state: present
+
+ definition:
+ apiVersion: operators.coreos.com/v1
+ kind: OperatorGroup
+ metadata:
+ name: kubevirt-hyperconverged-group
+ namespace: openshift-cnv
+ spec:
+ targetNamespaces:
+ - openshift-cnv
+
+- name: Create CNV Subscription
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: operators.coreos.com/v1alpha1
+ kind: Subscription
+ metadata:
+ name: hco-operatorhub
+ namespace: openshift-cnv
+ spec:
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+ name: kubevirt-hyperconverged
+
+- name: Wait
+ ansible.builtin.pause:
+ seconds: 60
+
+- name: Get Install Plan Name
+ retries: 10
+ delay: 10
+ ansible.builtin.command: oc get subscriptions/hco-operatorhub --namespace openshift-cnv --output=jsonpath='{$.status.installplan.name}'
+ register: __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name
+ until: __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name.stdout != ""
+ changed_when: __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name.stdout != ""
+
+- name: Wait for Install Plan to finish
+ ansible.builtin.command: "oc wait installplan \
+ {{ __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name.stdout }} --namespace openshift-cnv --for=condition='Installed' --timeout='5m'"
+ register: __sap_hypervisor_node_preconfigure_register_wait_for_installplan
+ changed_when: __sap_hypervisor_node_preconfigure_register_wait_for_installplan.rc != 0
+
+- name: Wait
+ ansible.builtin.pause:
+ seconds: 300
+
+- name: Create CNV HyperConverged
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: hco.kubevirt.io/v1beta1
+ kind: HyperConverged
+ metadata:
+ name: kubevirt-hyperconverged
+ namespace: openshift-cnv
+ spec:
+
+- name: Wait
+ ansible.builtin.pause:
+ seconds: 300
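The fixed pauses above give the operator and its instance time to come up; a hypothetical alternative would be to poll the HyperConverged resource until it reports `Available`, e.g.:
```
# Hypothetical readiness check instead of a fixed pause.
- name: Wait for the HyperConverged instance to become Available
  ansible.builtin.command: >-
    oc wait hyperconverged/kubevirt-hyperconverged
    --namespace openshift-cnv
    --for=condition=Available
    --timeout=15m
  register: __example_register_hco_available
  changed_when: false
```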
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-hpp.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-hpp.yml
new file mode 100644
index 000000000..daa713a4c
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-hpp.yml
@@ -0,0 +1,89 @@
+---
+- name: Create systemd files for local storage handling
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: machineconfiguration.openshift.io/v1
+ kind: MachineConfig
+ metadata:
+ annotations:
+ labels:
+ machineconfiguration.openshift.io/role: worker
+ name: 50-hpp-local
+ spec:
+ config:
+ ignition:
+ version: 2.2.0
+ systemd:
+ units:
+ - contents: |
+ [Unit]
+ Description=Create mountpoint /var/localstorage and initialize filesystem
+ Before=var-localstorage.mount
+ [Service]
+ Type=oneshot
+ ExecStart=/bin/bash -c "if [[ $(lsblk -o FSTYPE {{ sap_hypervisor_node_preconfigure_cluster_config.worker_localstorage_device }} --noheadings) != 'xfs' ]]; then mkfs.xfs -f {{ sap_hypervisor_node_preconfigure_cluster_config.worker_localstorage_device }}; fi"
+ ExecStart=/bin/mkdir -p /var/localstorage
+ enabled: true
+ name: create-mountpoint-var-localstorage.service
+ - contents: |
+ [Unit]
+ After=create-mountpoint-var-localstorage.service
+ Requires=create-mountpoint-var-localstorage.service
+ [Mount]
+ What={{ sap_hypervisor_node_preconfigure_cluster_config.worker_localstorage_device }}
+ Where=/var/localstorage
+ Type=xfs
+ [Install]
+ WantedBy=local-fs.target
+ enabled: true
+ name: var-localstorage.mount
+ - contents: |
+ [Unit]
+ Description=Set SELinux chcon for hostpath provisioner
+ Before=kubelet.service
+ After=var-localstorage.mount
+ [Service]
+ ExecStart=/usr/bin/chcon -Rt container_file_t /var/localstorage
+ [Install]
+ WantedBy=multi-user.target
+ enabled: true
+ name: hostpath-provisioner.service
+
+- name: Wait for mountpoint to be ready
+ ansible.builtin.pause:
+ minutes: 3
+
+- name: Create hostpath provisioner (HPP)
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: hostpathprovisioner.kubevirt.io/v1beta1
+ kind: HostPathProvisioner
+ metadata:
+ name: hostpath-provisioner
+ spec:
+ imagePullPolicy: IfNotPresent
+ storagePools:
+ - name: localstorage
+ path: /var/localstorage
+ workload:
+ nodeSelector:
+ kubernetes.io/os: linux
+ machineconfiguration.openshift.io/role: worker
+
+- name: Create storage class for HPP
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: storage.k8s.io/v1
+ kind: StorageClass
+ metadata:
+ name: local
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+ provisioner: kubevirt.io.hostpath-provisioner
+ reclaimPolicy: Delete
+ volumeBindingMode: WaitForFirstConsumer
+ parameters:
+ storagePool: localstorage
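A hedged example of a PersistentVolumeClaim consuming the `local` storage class created above; name, namespace and size are placeholders. With `volumeBindingMode: WaitForFirstConsumer`, binding is deferred until a consuming pod or VM is scheduled onto a worker node.
```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data-pvc   # placeholder name
  namespace: sap           # placeholder namespace
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local
  resources:
    requests:
      storage: 100Gi       # placeholder size
```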
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-nmstate-operator.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-nmstate-operator.yml
new file mode 100644
index 000000000..5e1e4f46a
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-nmstate-operator.yml
@@ -0,0 +1,70 @@
+---
+- name: Create the nmstate operator namespace
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ labels:
+ kubernetes.io/metadata.name: openshift-nmstate
+ name: openshift-nmstate
+ name: openshift-nmstate
+ spec:
+ finalizers:
+ - kubernetes
+
+- name: Create the OperatorGroup
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: operators.coreos.com/v1
+ kind: OperatorGroup
+ metadata:
+ annotations:
+ olm.providedAPIs: NMState.v1.nmstate.io
+ generateName: openshift-nmstate-
+ name: openshift-nmstate-tn6k8
+ namespace: openshift-nmstate
+ spec:
+ targetNamespaces:
+ - openshift-nmstate
+
+- name: Pause to give operator a chance to install
+ ansible.builtin.pause:
+ minutes: 2
+
+- name: Subscribe to the nmstate Operator
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: operators.coreos.com/v1alpha1
+ kind: Subscription
+ metadata:
+ labels:
+ operators.coreos.com/kubernetes-nmstate-operator.openshift-nmstate: ""
+ name: kubernetes-nmstate-operator
+ namespace: openshift-nmstate
+ spec:
+ channel: stable
+ installPlanApproval: Automatic
+ name: kubernetes-nmstate-operator
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+
+- name: Pause to give operator a chance to install
+ ansible.builtin.pause:
+ minutes: 5
+
+- name: Create instance of the nmstate operator
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: nmstate.io/v1
+ kind: NMState
+ metadata:
+ name: nmstate
+
+- name: Pause to give instance a chance to come up
+ ansible.builtin.pause:
+ minutes: 5
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-sriov-operator.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-sriov-operator.yml
new file mode 100644
index 000000000..5fcb437d2
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-sriov-operator.yml
@@ -0,0 +1,54 @@
+---
+- name: Create the SRIOV Operator namespace
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: openshift-sriov-network-operator
+
+- name: Create the SRIOV OperatorGroup
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: operators.coreos.com/v1
+ kind: OperatorGroup
+ metadata:
+ name: sriov-network-operators
+ namespace: openshift-sriov-network-operator
+ spec:
+ targetNamespaces:
+ - openshift-sriov-network-operator
+
+- name: Create the SRIOV Operator Subscription
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: operators.coreos.com/v1alpha1
+ kind: Subscription
+ metadata:
+ name: sriov-network-operator-subscription
+ namespace: openshift-sriov-network-operator
+ spec:
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+ name: sriov-network-operator
+ channel: "stable"
+
+- name: Pause to give operator a chance to install
+ ansible.builtin.pause:
+ minutes: 3
+
+- name: Copy patch to enable unsupported NICs
+ ansible.builtin.copy:
+ src: sriov-enabled-unsupported-nics.sh
+ dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/sriov-enabled-unsupported-nics.sh"
+ mode: "0755"
+ when: sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics
+
+- name: Enable unsupported NICs
+ ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/sriov-enabled-unsupported-nics.sh"
+ when: sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics
+ register: __sap_hypervisor_node_preconfigure_register_enable_unsupported_nics
+ changed_when: __sap_hypervisor_node_preconfigure_register_enable_unsupported_nics.rc != 0
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-trident.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-trident.yml
new file mode 100644
index 000000000..c1788dbd1
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-trident.yml
@@ -0,0 +1,48 @@
+---
+- name: Download trident
+ ansible.builtin.unarchive:
+ remote_src: true
+ src: "{{ sap_hypervisor_node_preconfigure_install_trident_url }}"
+ dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/"
+
+- name: Uninstall trident
+ ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/trident-installer/tridentctl uninstall -n trident"
+ ignore_errors: true
+ register: __sap_hypervisor_node_preconfigure_register_uninstall_trident
+ changed_when: __sap_hypervisor_node_preconfigure_register_uninstall_trident.rc != 0
+
+- name: Install trident
+ ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/trident-installer/tridentctl install -n trident"
+ register: __sap_hypervisor_node_preconfigure_register_install_trident
+ changed_when: __sap_hypervisor_node_preconfigure_register_install_trident.rc != 0
+
+- name: Copy backend file
+ ansible.builtin.template:
+ src: "trident-backend.json.j2"
+ dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/trident-backend.json"
+ mode: "0644"
+
+- name: Create trident backend
+ ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}\
+ /trident-installer/tridentctl -n trident create backend -f\
+ {{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}\
+ /trident-backend.json"
+ register: __sap_hypervisor_node_preconfigure_register_create_trident_backend
+ changed_when: __sap_hypervisor_node_preconfigure_register_create_trident_backend.rc != 0
+
+- name: Create storage class
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: storage.k8s.io/v1
+ kind: StorageClass
+ metadata:
+ name: nas
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+ provisioner: csi.trident.netapp.io
+ parameters:
+ backendType: "{{ sap_hypervisor_node_preconfigure_cluster_config.trident.storage_driver }}"
+ snapshots: "true"
+ provisioningType: "thin"
+ encryption: "false"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-virtctl.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-virtctl.yml
new file mode 100644
index 000000000..bd5dd818d
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-virtctl.yml
@@ -0,0 +1,15 @@
+---
+- name: Create ~/bin
+ ansible.builtin.file:
+ path: ~/bin
+ state: directory
+ mode: "0700"
+
+- name: Get and extract virtctl
+# become: yes
+ ansible.builtin.unarchive:
+ validate_certs: false
+ remote_src: true
+ src: "https://hyperconverged-cluster-cli-download-openshift-cnv.apps.\
+ {{ sap_hypervisor_node_preconfigure_cluster_config.cluster_url }}/amd64/linux/virtctl.tar.gz"
+ dest: ~/bin
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/kargs.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/kargs.yml
new file mode 100644
index 000000000..bd28ea55b
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/kargs.yml
@@ -0,0 +1,11 @@
+---
+- name: Personalize template
+ ansible.builtin.template:
+ src: 99-kargs-worker.yml.j2
+ dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/99-kargs-{{ __sap_hypervisor_node_preconfigure_register_worker_name }}.yml.j2"
+ mode: "0644"
+
+- name: Enable hugepages
+ kubernetes.core.k8s:
+ state: present
+ src: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/99-kargs-{{ __sap_hypervisor_node_preconfigure_register_worker_name }}.yml.j2"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/label-worker-invtsc.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/label-worker-invtsc.yml
new file mode 100644
index 000000000..57a52da24
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/label-worker-invtsc.yml
@@ -0,0 +1,11 @@
+---
+- name: Label worker with invtsc flag
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: default
+ labels:
+ 'feature.node.kubernetes.io/cpu-feature-invtsc': enabled
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/main.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/main.yml
new file mode 100644
index 000000000..5e4cedd53
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/main.yml
@@ -0,0 +1,98 @@
+---
+- name: Get a list of all nodes from any namespace
+ kubernetes.core.k8s_info:
+ kind: Node
+ register: __sap_hypervisor_node_preconfigure_register_node_list
+
+- name: Generate list with worker node names
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_register_worker_node_name_list:
+ "{{ __sap_hypervisor_node_preconfigure_register_worker_node_name_list | \
+ d([]) + [__sap_hypervisor_node_preconfigure_register_worker_node.name] }}"
+ with_items: "{{ sap_hypervisor_node_preconfigure_cluster_config.workers }}"
+ loop_control:
+ loop_var: __sap_hypervisor_node_preconfigure_register_worker_node
+
+- name: Filter hosts
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_register_nodes:
+ "{{ __sap_hypervisor_node_preconfigure_register_nodes | \
+ d([]) + [__sap_hypervisor_node_preconfigure_register_host] }}"
+ with_items: "{{ __sap_hypervisor_node_preconfigure_register_node_list['resources'] }}"
+ loop_control:
+ loop_var: __sap_hypervisor_node_preconfigure_register_host
+ when: __sap_hypervisor_node_preconfigure_register_host.metadata.name in __sap_hypervisor_node_preconfigure_register_worker_node_name_list
+
+- name: Assert that configured nodes are found
+ ansible.builtin.assert:
+ that: __sap_hypervisor_node_preconfigure_register_nodes is defined
+ fail_msg: No nodes found that match configuration provided in sap_hypervisor_node_preconfigure_cluster_config
+ success_msg: Configured nodes found
+
+# Determine available memory on first worker node.
+# This amount will be used for all nodes, so make sure all have an identical amount.
+- name: Get worker name
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_register_worker_name:
+ "{{ __sap_hypervisor_node_preconfigure_register_nodes[0]['metadata']['labels']['kubernetes.io/hostname'] }}"
+
+- name: Get memory of first worker node (will be used for all worker nodes later on)
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_register_worker_memory_gib:
+ "{{ (__sap_hypervisor_node_preconfigure_register_nodes[0]['status']['capacity']['memory'] | replace('Ki', '') | int / 1048576) }}"
+
+- name: Check if host has the minimal amount of memory (96 GiB)
+ ansible.builtin.assert:
+ that: __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int >= 96
+ fail_msg: "Not enough memory on node {{ __sap_hypervisor_node_preconfigure_register_worker_name }}"
+ success_msg: "Enough memory on node {{ __sap_hypervisor_node_preconfigure_register_worker_name }}"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_minimal_memory_check }}"
+
+# Calculate the amount of memory to be allocated as hugepages:
+# everything except the hypervisor reservation (by default 32 GiB on hosts with less than 512 GiB of memory, 64 GiB otherwise).
+- name: Calculate amount of hugepages to reserve (host memory < 512 GiB)
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_register_worker_reserved_hugepages: "{{ __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int - sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_lt_512 }}"
+ when: __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int < 512
+
+- name: Calculate amount of hugepages to reserve (host memory >= 512 GiB)
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_register_worker_reserved_hugepages: "{{ __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int - sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_ge_512 }}"
+ when: __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int >= 512
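+# Illustrative arithmetic (hypothetical values, not part of the role): a first worker reporting a capacity of
+# 264241152Ki corresponds to 264241152 / 1048576 = 252 GiB. Since 252 < 512, the default reservation of 32 GiB
+# (sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_lt_512) applies, so
+# __sap_hypervisor_node_preconfigure_register_worker_reserved_hugepages would be set to 252 - 32 = 220.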
+
+- name: Include prepare
+ ansible.builtin.include_tasks: prepare.yml
+- name: Include tuned virtual host
+ ansible.builtin.include_tasks: tuned-virtual-host.yml
+- name: Include install CNV operator
+ ansible.builtin.include_tasks: install-cnv-operator.yml
+ when: sap_hypervisor_node_preconfigure_install_operators
+- name: Include install sriov operator
+ ansible.builtin.include_tasks: install-sriov-operator.yml
+ when: sap_hypervisor_node_preconfigure_install_operators
+- name: Include install nmstate operator
+ ansible.builtin.include_tasks: install-nmstate-operator.yml
+ when: sap_hypervisor_node_preconfigure_install_operators
+- name: Include install virtctl
+ ansible.builtin.include_tasks: install-virtctl.yml
+- name: Include setup worker nodes
+ ansible.builtin.include_tasks: setup-worker-nodes.yml
+ when: sap_hypervisor_node_preconfigure_setup_workers
+
+# How to wait for a node to be schedulable? (NodeSchedulable)
+- name: Wait for all k8s nodes to be ready
+ ansible.builtin.command: oc wait --for=condition=Ready nodes --all --timeout=3600s
+ register: __sap_hypervisor_node_preconfigure_register_nodes_ready
+ changed_when: __sap_hypervisor_node_preconfigure_register_nodes_ready.rc != 0
+
+- name: Print nodes
+ ansible.builtin.debug:
+ var: __sap_hypervisor_node_preconfigure_register_nodes_ready.stdout_lines
+
+- name: Include Trident installation
+ ansible.builtin.include_tasks: install-trident.yml
+ when: sap_hypervisor_node_preconfigure_install_trident
+
+- name: Include local storage creation (HPP)
+ ansible.builtin.include_tasks: install-hpp.yml
+ when: sap_hypervisor_node_preconfigure_install_hpp
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/node-network.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/node-network.yml
new file mode 100644
index 000000000..421d24c82
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/node-network.yml
@@ -0,0 +1,103 @@
+---
+- name: Print network
+ ansible.builtin.debug:
+ var: __sap_hypervisor_node_preconfigure_register_worker_network
+
+- name: "Create NodeNetworkConfigurationPolicy\
+ {{ __sap_hypervisor_node_preconfigure_register_worker_network.name }} on \
+ {{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: nmstate.io/v1
+ kind: NodeNetworkConfigurationPolicy
+ metadata:
+ name: "{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}-{{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ spec:
+ nodeSelector:
+ kubernetes.io/hostname: "{{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ desiredState:
+ interfaces:
+ - "{{ __sap_hypervisor_node_preconfigure_register_worker_network }}"
+ when: __sap_hypervisor_node_preconfigure_register_worker_network.type == 'linux-bridge'
+
+- name: "Create NetworkAttachmentDefinition {{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}"
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: "k8s.cni.cncf.io/v1"
+ kind: NetworkAttachmentDefinition
+ metadata:
+ namespace: "{{ sap_hypervisor_node_preconfigure_cluster_config.vm_namespace }}"
+ name: "{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}-network-definition"
+ annotations:
+ k8s.v1.cni.cncf.io/resourceName: "bridge.network.kubevirt.io/{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}"
+ spec:
+ config: '{
+ "cniVersion": "0.3.1",
+ "name": "sapbridge-network-definition",
+ "type": "cnv-bridge",
+ "bridge": "sapbridge",
+ "macspoofchk": true
+ }'
+ when: __sap_hypervisor_node_preconfigure_register_worker_network.type == 'linux-bridge'
+
+- name: Label the node with feature.node.kubernetes.io/network-sriov.capable=true
+ kubernetes.core.k8s:
+ definition:
+ apiVersion: v1
+ kind: Node
+ metadata:
+ name: "{{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ labels:
+ feature.node.kubernetes.io/network-sriov.capable: "true"
+ state: present
+ when: __sap_hypervisor_node_preconfigure_register_worker_network.type == 'sriov'
+
+- name: "Create SRIOV NodeNetworkConfigurationPolicy \
+ {{ __sap_hypervisor_node_preconfigure_register_worker_network.name }} on \
+ {{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: sriovnetwork.openshift.io/v1
+ kind: SriovNetworkNodePolicy
+ metadata:
+ name: "iface-{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}-sriov-{{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ namespace: openshift-sriov-network-operator
+ spec:
+ resourceName: "iface{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}sriov"
+ nodeSelector:
+ feature.node.kubernetes.io/network-sriov.capable: "true"
+ kubernetes.io/hostname: "{{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ priority: 5
+ mtu: 9000
+ numVfs: 8
+ nicSelector:
+ pfNames: ['{{ __sap_hypervisor_node_preconfigure_register_worker_network.interface }}#0-7']
+ deviceType: vfio-pci
+ isRdma: false
+ when: __sap_hypervisor_node_preconfigure_register_worker_network.type == "sriov"
+
+- name: "Create SriovNetwork Attachment Definition {{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}"
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: sriovnetwork.openshift.io/v1
+ kind: SriovNetwork
+ metadata:
+ name: "iface-{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}-sriov"
+ namespace: openshift-sriov-network-operator
+ spec:
+ ipam: |
+ {
+ "type": "host-local",
+ "subnet": "192.168.1.0/24",
+ "rangeStart": "192.168.1.200",
+ "rangeEnd": "192.168.1.210"
+ }
+ networkNamespace: openshift-sriov-network-operator
+ resourceName: "iface{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}sriov"
+ spoofChk: "off"
+ trust: "on"
+ when: __sap_hypervisor_node_preconfigure_register_worker_network.type == "sriov"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/prepare.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/prepare.yml
new file mode 100644
index 000000000..0dfbfa1da
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/prepare.yml
@@ -0,0 +1,16 @@
+---
+- name: Gather Facts
+ ansible.builtin.gather_facts:
+
+- name: Create Tempdir
+ ansible.builtin.tempfile:
+ state: directory
+ suffix: "_sap_hypervisor_node_preconfigure"
+ register: __sap_hypervisor_node_preconfigure_register_tmpdir
+
+- name: "Create VM namespace {{ sap_hypervisor_node_preconfigure_cluster_config.vm_namespace }}"
+ kubernetes.core.k8s:
+ name: "{{ sap_hypervisor_node_preconfigure_cluster_config.vm_namespace }}"
+ api_version: v1
+ kind: Namespace
+ state: present
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/setup-worker-nodes.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/setup-worker-nodes.yml
new file mode 100644
index 000000000..5290093d4
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/setup-worker-nodes.yml
@@ -0,0 +1,81 @@
+---
+- name: Include configure worker
+ ansible.builtin.include_tasks: configure-worker-node.yml
+ with_items: "{{ sap_hypervisor_node_preconfigure_cluster_config.workers }}"
+ loop_control:
+ loop_var: __sap_hypervisor_node_preconfigure_register_worker
+ index_var: __sap_hypervisor_node_preconfigure_register_worker_nr
+
+- name: Enable CPU Manager by patching MCP worker
+ kubernetes.core.k8s:
+ state: patched
+ definition:
+ apiVersion: machineconfiguration.openshift.io/v1
+ kind: MachineConfigPool
+ metadata:
+ name: worker
+ labels:
+ custom-kubelet: cpumanager-enabled
+
+- name: Delete KubeletConfig for cpumanager
+ kubernetes.core.k8s:
+ state: absent
+ definition:
+ apiVersion: machineconfiguration.openshift.io/v1
+ kind: KubeletConfig
+ metadata:
+ name: cpumanager-enabled
+ spec:
+ machineConfigPoolSelector:
+ matchLabels:
+ custom-kubelet: cpumanager-enabled
+ kubeletConfig:
+ cpuManagerPolicy: static
+ cpuManagerReconcilePeriod: 5s
+
+- name: Create KubeletConfig for cpumanager worker with CPUs reserved for Kubernetes
+ when: sap_hypervisor_node_preconfigure_cluster_config.worker_kubernetes_reserved_cpus is defined
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: machineconfiguration.openshift.io/v1
+ kind: KubeletConfig
+ metadata:
+ name: cpumanager-enabled
+ spec:
+ machineConfigPoolSelector:
+ matchLabels:
+ custom-kubelet: cpumanager-enabled
+ kubeletConfig:
+ cpuManagerPolicy: static
+ cpuManagerReconcilePeriod: 5s
+ reservedSystemCPUs: "{{ sap_hypervisor_node_preconfigure_cluster_config.worker_kubernetes_reserved_cpus }}"
+
+- name: Create KubeletConfig for cpumanager worker
+ when: sap_hypervisor_node_preconfigure_cluster_config.worker_kubernetes_reserved_cpus is not defined
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: machineconfiguration.openshift.io/v1
+ kind: KubeletConfig
+ metadata:
+ name: cpumanager-enabled
+ machineconfiguration.openshift.io/role: worker
+ spec:
+ machineConfigPoolSelector:
+ matchLabels:
+ custom-kubelet: cpumanager-enabled
+ kubeletConfig:
+ cpuManagerPolicy: static
+ cpuManagerReconcilePeriod: 5s
+
+- name: Render template
+ ansible.builtin.template:
+ src: 99-kargs-worker.yml.j2
+ dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/99-kargs-worker.yml"
+ mode: "0644"
+
+- name: Enable hugepages
+ kubernetes.core.k8s:
+ state: present
+ src: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/99-kargs-worker.yml"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/sriov-enabled-unsupported-nics.sh b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/sriov-enabled-unsupported-nics.sh
new file mode 100644
index 000000000..6cec1a678
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/sriov-enabled-unsupported-nics.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+# Disable the SR-IOV operator webhook in order to allow unsupported SR-IOV NICs such as Mellanox
+oc patch sriovoperatorconfig default --type=merge -n openshift-sriov-network-operator --patch '{ "spec": { "enableOperatorWebhook": false } }'
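+# Illustrative check (hypothetical, not part of this script): the effect of the patch could be verified with
+#   oc get sriovoperatorconfig default -n openshift-sriov-network-operator -o jsonpath='{.spec.enableOperatorWebhook}'
+# which should print "false" afterwards.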
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/trident-backend.json.j2 b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/trident-backend.json.j2
new file mode 100644
index 000000000..e422aab11
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/trident-backend.json.j2
@@ -0,0 +1,18 @@
+{
+ "nfsMountOptions": "nfsvers=3",
+ "defaults": {
+ "exportPolicy": "default"
+ },
+ "debug":false,
+ "managementLIF":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.management }}",
+ "dataLIF":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.data }}",
+ "svm":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.svm }}",
+ "backendName": "{{ sap_hypervisor_node_preconfigure_cluster_config.trident.backend }}",
+ "aggregate":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.aggregate }}",
+ "username":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.username }}",
+ "password":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.password }}",
+ "storageDriverName":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.storage_driver }}",
+ "storagePrefix":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.storage_prefix }}",
+ "version":1
+}
+
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/tuned-virtual-host.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/tuned-virtual-host.yml
new file mode 100644
index 000000000..e2dd4f483
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/tuned-virtual-host.yml
@@ -0,0 +1,21 @@
+---
+- name: Set virtual-host for worker nodes
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: tuned.openshift.io/v1
+ kind: Tuned
+ metadata:
+ name: virtual-host
+ namespace: openshift-cluster-node-tuning-operator
+ spec:
+ profile:
+ - data: |
+ [main]
+ include=virtual-host
+ name: virtual-host
+ recommend:
+ - match:
+ - label: "node-role.kubernetes.io/worker"
+ priority: 10
+ profile: virtual-host
diff --git a/roles/sap_hypervisor_node_preconfigure/files/platform/redhat_rhel_kvm/50_hana b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/50_hana
similarity index 100%
rename from roles/sap_hypervisor_node_preconfigure/files/platform/redhat_rhel_kvm/50_hana
rename to roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/50_hana
diff --git a/roles/sap_hypervisor_node_preconfigure/files/platform/redhat_rhel_kvm/50_iothread_pinning b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/50_iothread_pinning
similarity index 100%
rename from roles/sap_hypervisor_node_preconfigure/files/platform/redhat_rhel_kvm/50_iothread_pinning
rename to roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/50_iothread_pinning
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-configuration.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-configuration.yml
index 52cd899ce..f4ddb2543 100644
--- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-configuration.yml
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-configuration.yml
@@ -4,11 +4,13 @@
- name: Get kernel command line
ansible.builtin.command: cat /proc/cmdline
register: __sap_hypervisor_node_preconfigure_kernelcmdline_assert
+ changed_when: __sap_hypervisor_node_preconfigure_kernelcmdline_assert.rc != 0
- name: "Assert - Kernel same page merging (KSM): Get status"
- ansible.builtin.shell: systemctl status ksm
+ ansible.builtin.command: systemctl status ksm
register: __sap_hypervisor_node_preconfigure_ksmstatus_assert
ignore_errors: yes
+ changed_when: __sap_hypervisor_node_preconfigure_ksmstatus_assert.rc != 0
- name: "Assert - Kernel same page merging (KSM): Check if stopped"
ansible.builtin.assert:
@@ -18,9 +20,10 @@
ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
- name: "Assert - Kernel same page merging (KSM) Tuning Daemon: Get status"
- ansible.builtin.shell: systemctl status ksmtuned
+ ansible.builtin.command: systemctl status ksmtuned
register: __sap_hypervisor_node_preconfigure_ksmtunedstatus_assert
ignore_errors: yes
+ changed_when: __sap_hypervisor_node_preconfigure_ksmtunedstatus_assert.rc != 0
- name: "Assert - Kernel same page merging (KSM) Tuning Daemon: Check if stopped"
ansible.builtin.assert:
@@ -30,10 +33,12 @@
ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
- name: Check CPU Stepping
- ansible.builtin.shell: lscpu | awk '/Stepping/{print $2}'
+ ansible.builtin.shell: set -o pipefail && lscpu | awk '/Stepping/{print $2}'
register: __sap_hypervisor_node_preconfigure_cpu_stepping_output_assert
+ changed_when: __sap_hypervisor_node_preconfigure_cpu_stepping_output_assert.rc != 0
-- set_fact:
+- name: Register stepping as fact
+ ansible.builtin.set_fact:
__sap_hypervisor_node_preconfigure_cpu_stepping_assert: "{{ __sap_hypervisor_node_preconfigure_cpu_stepping_output_assert.stdout }}"
- name: Print CPU Stepping
@@ -42,11 +47,13 @@
# skylake:
- name: Assert - Check Intel Skylake CPU Platform
+ when: __sap_hypervisor_node_preconfigure_cpu_stepping_assert == "4"
block:
- name: Get ple_gap
ansible.builtin.command: grep -E '^options\s+kvm_intel.*?ple_gap\s*=\s*0.*$' /etc/modprobe.d/kvm.conf
register: __sap_hypervisor_node_preconfigure_skylake_plegap_assert
ignore_errors: yes
+ changed_when: __sap_hypervisor_node_preconfigure_skylake_plegap_assert.rc != 0
- name: Assert - Check if ple_gap=0
ansible.builtin.assert:
@@ -61,28 +68,31 @@
fail_msg: "FAIL: spectre_v2=retpoline is not on Kernel command line"
success_msg: "PASS: spectre_v2=retpoline is on Kernel command line"
ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
- when: __sap_hypervisor_node_preconfigure_cpu_stepping_assert == "4"
- name: Assert - check sap_hypervisor_node_preconfigure_nx_huge_pages
+ when: sap_hypervisor_node_preconfigure_kvm_nx_huge_pages is defined
block:
- - name: "Assert - Check kvm.nx_huge_pages is {{ sap_hypervisor_node_preconfigure_nx_huge_pages }}"
+ - name: Set fact for sap_hypervisor_node_preconfigure_register_assert_nx_huge_pages
+ ansible.builtin.set_fact:
+ sap_hypervisor_node_preconfigure_register_assert_nx_huge_pages: "{{ __sap_hypervisor_node_preconfigure_kernelcmdline_assert.stdout | regex_search('kvm.nx_huge_pages=(.+)', '\\1') | first }}"
+ - name: "Assert - Check kvm.nx_huge_pages is {{ sap_hypervisor_node_preconfigure_kvm_nx_huge_pages }}"
ansible.builtin.assert:
- that: "'kvm.nx_huge_pages={{ sap_hypervisor_node_preconfigure_nx_huge_pages }}' in __sap_hypervisor_node_preconfigure_kernelcmdline_assert.stdout"
- fail_msg: "FAIL: kvm.nx_huge_pages is not {{ sap_hypervisor_node_preconfigure_nx_huge_pages }}"
- success_msg: "PASS: kvm.nx_huge_pages is {{ sap_hypervisor_node_preconfigure_nx_huge_pages }}"
+ that: sap_hypervisor_node_preconfigure_register_assert_nx_huge_pages == sap_hypervisor_node_preconfigure_kvm_nx_huge_pages
+ fail_msg: "FAIL: kvm.nx_huge_pages is not {{ sap_hypervisor_node_preconfigure_kvm_nx_huge_pages }}"
+ success_msg: "PASS: kvm.nx_huge_pages is {{ sap_hypervisor_node_preconfigure_kvm_nx_huge_pages }}"
ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
- when: sap_hypervisor_node_preconfigure_nx_huge_pages is defined
-
-- name: Assert - check seccomp_sanbox=0
+- name: Assert - check seccomp_sandbox=0
block:
- - command: grep -E '^seccomp_sandbox\s+=\s+0.*$' /etc/libvirt/qemu.conf
+ - name: Get seccomp setting
+ ansible.builtin.command: grep -E '^seccomp_sandbox\s+=\s+0.*$' /etc/libvirt/qemu.conf
register: __sap_hypervisor_node_preconfigure_seccomp_assert
ignore_errors: yes
+ changed_when: __sap_hypervisor_node_preconfigure_seccomp_assert.rc != 0
- - name: "Assert - Check seccomp_sanbox=0 is in /etc/libvirt/qemu.conf"
+ - name: "Assert - Check seccomp_sandbox=0 is in /etc/libvirt/qemu.conf"
ansible.builtin.assert:
- that: "{{ __sap_hypervisor_node_preconfigure_seccomp_assert.rc }} == 0"
+ that: __sap_hypervisor_node_preconfigure_seccomp_assert is success
fail_msg: "FAIL: seccomp_sandbox != 0"
success_msg: "PASS: seccomp_sanbox == 0"
ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
@@ -90,38 +100,36 @@
- name: Assert - check amount of 1G hugepages
block:
- name: Get amount of 1G hugepages
- ansible.builtin.shell: hugeadm --pool-list | grep 1073741824 | awk '{print $3}'
- register: __sap_hypervisor_node_preconfigure_1Ghugepages_assert
+ ansible.builtin.shell: set -o pipefail && hugeadm --pool-list | grep 1073741824 | awk '{print $3}'
+ register: __sap_hypervisor_node_preconfigure_1g_hugepages_assert
+ changed_when: __sap_hypervisor_node_preconfigure_1g_hugepages_assert.rc != 0
- name: "Check that at least {{ sap_hypervisor_node_preconfigure_reserved_ram }} GB are available for the hypervisor and the rest are 1G hugepages"
ansible.builtin.assert:
- that: "{{ ( ansible_memtotal_mb / 1024 )|int - sap_hypervisor_node_preconfigure_reserved_ram }} >= {{ __sap_hypervisor_node_preconfigure_1Ghugepages_assert.stdout }}"
+ that: ((ansible_memtotal_mb / 1024) | int - sap_hypervisor_node_preconfigure_reserved_ram | int) >= (__sap_hypervisor_node_preconfigure_1g_hugepages_assert.stdout | int)
fail_msg: "FAIL: Not enough memory reserved for hypervisor"
success_msg: "PASS: Enough memory reserved for hypervisor"
ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
- name: Assert - check Kernel command line
block:
- - assert:
+ - name: Ensure iommu is enabled
+ ansible.builtin.assert:
that: "'intel_iommu=on' in __sap_hypervisor_node_preconfigure_kernelcmdline_assert.stdout"
fail_msg: "FAIL: intel_iommu=on not on Kernel command line"
success_msg: "PASS: intel_iommu=on on Kernel command line"
ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
- - assert:
+ - name: Ensure iommu passthrough is enabled
+ ansible.builtin.assert:
that: "'iommu=pt' in __sap_hypervisor_node_preconfigure_kernelcmdline_assert.stdout"
fail_msg: "FAIL: iommu=pt not on Kernel command line"
success_msg: "PASS: iommu=pt on Kernel command line"
ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
- - assert:
- that: "'tsx=off' in __sap_hypervisor_node_preconfigure_kernelcmdline_assert.stdout"
- fail_msg: "FAIL: tsx=off not on Kernel command line"
- success_msg: "PASS: tsx=off on Kernel command line"
+ - name: Ensure tsx is on
+ ansible.builtin.assert:
+ that: "'tsx=on' in __sap_hypervisor_node_preconfigure_kernelcmdline_assert.stdout"
+ fail_msg: "FAIL: tsx=on not in Kernel command line"
+ success_msg: "PASS: tsx=on in Kernel command line"
ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
-
-
-#- name: Trigger tuned profile sap-hana-kvm activation
-# include_tasks: set-tuned-profile.yml
-#
-##### install hooks: HP, cpufreq
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-installation.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-installation.yml
index 227d7e998..34aa3014e 100644
--- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-installation.yml
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-installation.yml
@@ -4,7 +4,7 @@
- name: Assert that all required packages are installed
ansible.builtin.assert:
- that: "'{{ line_item }}' in ansible_facts.packages"
+ that: line_item in ansible_facts.packages
fail_msg: "FAIL: Package '{{ line_item }}' is not installed!"
success_msg: "PASS: Package '{{ line_item }}' is installed."
with_items:
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-rhv-hooks.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-rhv-hooks.yml
index 2abf6750e..4838f18fd 100644
--- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-rhv-hooks.yml
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-rhv-hooks.yml
@@ -2,6 +2,7 @@
- name: Check file permissions
ansible.builtin.command: "stat -c%a /usr/libexec/vdsm/hooks/before_vm_start/{{ item }}"
register: __sap_hypervisor_node_preconfigure_register_file_permissions_assert
+ changed_when: __sap_hypervisor_node_preconfigure_register_file_permissions_assert.rc != 0
- name: Assert hook file permissions
ansible.builtin.assert:
@@ -14,16 +15,19 @@
ansible.builtin.file:
path: /tmp/sap_hypervisor_node_preconfigure
state: directory
+ mode: "0755"
- name: Copy hook for checking
ansible.builtin.copy:
dest: "/tmp/sap_hypervisor_node_preconfigure/{{ item }}"
src: "{{ item }}"
+ mode: "0755"
- name: Diff hook
ansible.builtin.command: "diff -uw /tmp/sap_hypervisor_node_preconfigure/{{ item }} /usr/libexec/vdsm/hooks/before_vm_start/{{ item }}"
register: __sap_hypervisor_node_preconfigure_register_hook_diff_assert
ignore_errors: yes
+ changed_when: __sap_hypervisor_node_preconfigure_register_hook_diff_assert.rc != 0
- name: Assert hook content
ansible.builtin.assert:
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-set-tuned-profile.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-set-tuned-profile.yml
index cb6508c2f..ab0d0c9b3 100644
--- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-set-tuned-profile.yml
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-set-tuned-profile.yml
@@ -4,6 +4,7 @@
- name: Get tuned profile
ansible.builtin.command: tuned-adm active
register: __sap_hypervisor_node_preconfigure_tuned_profile_assert
+ changed_when: __sap_hypervisor_node_preconfigure_tuned_profile_assert.rc != 0
- name: Verify tuned profile
ansible.builtin.assert:
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/configuration.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/configuration.yml
index e7ae07c18..b49399e4f 100644
--- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/configuration.yml
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/configuration.yml
@@ -1,60 +1,29 @@
---
# tasks file for sap_hypervisor_node_preconfigure
-- name: Test if kernel same page merging (KSM) exists
- ansible.builtin.shell: systemctl cat ksm
- register: ksm
- ignore_errors: true
- become: true
- become_user: root
-
-- name: Test if kernel same page merging (KSM) tuning daemon exists
- ansible.builtin.shell: systemctl cat ksmtuned
- register: ksmtuned
- ignore_errors: true
- become: true
- become_user: root
-
-- name: Stop kernel same page merging (KSM)
- ansible.builtin.shell: systemctl stop ksm
- when: ksm.rc == 0
- become: true
- become_user: root
-
-- name: Disable kernel same page merging (KSM)
- ansible.builtin.shell: systemctl disable ksm
- when: ksm.rc == 0
- become: true
- become_user: root
-
-- name: Stop Kernel Samepage Merging (KSM) Tuning Daemon
- ansible.builtin.shell: systemctl stop ksmtuned
- when: ksmtuned.rc == 0
- become: true
- become_user: root
-
-- name: Disable Kernel Samepage Merging (KSM) Tuning Daemon
- ansible.builtin.shell: systemctl disable ksmtuned
- when: ksmtuned.rc == 0
- become: true
- become_user: root
+- name: Stop and disable kernel same page merging (KSM)
+ ansible.builtin.systemd:
+ name: ksm
+ state: stopped
+ enabled: false
+
+- name: Stop and disable kernel same page merging (KSM) tuning daemon
+ ansible.builtin.systemd:
+ name: ksmtuned
+ state: stopped
+ enabled: false
- name: Check CPU Stepping
- ansible.builtin.shell: lscpu | awk '/Stepping/{print $2}'
+ ansible.builtin.shell: set -o pipefail && lscpu | awk '/Stepping/{print $2}'
register: cpu_stepping_output
- become: true
- become_user: root
+ changed_when: cpu_stepping_output.rc != 0
-- set_fact:
+- name: Register CPU stepping as fact
+ ansible.builtin.set_fact:
cpu_stepping: "{{ cpu_stepping_output.stdout }}"
become: true
become_user: root
-- name: Print CPU Stepping
- ansible.builtin.shell: echo "{{ cpu_stepping }}"
- become: true
- become_user: root
-
# skylake:
- name: Set ple_gap=0 on Intel Skylake CPU Platform
ansible.builtin.lineinfile:
@@ -108,7 +77,7 @@
become: true
become_user: root
-- name: Trigger tuned profile sap-hana-kvm activation
+- name: Include allocation of hugepages at runtime
ansible.builtin.include_tasks: allocate-hugepages-at-runtime.yml
when: sap_hypervisor_node_preconfigure_reserve_hugepages == "runtime"
@@ -123,7 +92,7 @@
with_items:
- default_hugepagesz=1GB
- hugepagesz=1GB
- - hugepages={{ ( ansible_memtotal_mb / 1024 )|int - sap_hypervisor_node_preconfigure_reserved_ram }}
+ - hugepages={{ (ansible_memtotal_mb / 1024) | int - sap_hypervisor_node_preconfigure_reserved_ram }}
notify: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
tags: grubconfig
when: sap_hypervisor_node_preconfigure_reserve_hugepages == "static"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/main.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/main.yml
index 213a45bca..dd405a44b 100644
--- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/main.yml
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/main.yml
@@ -1,25 +1,28 @@
---
-- name: Display the role path
+- name: Display sap_hypervisor_node_preconfigure_assert
ansible.builtin.debug:
- var: role_path
+ var: sap_hypervisor_node_preconfigure_assert
- name: Set filename prefix to empty string if role is run in normal mode
ansible.builtin.set_fact:
assert_prefix: ""
- when: not sap_hypervisor_node_preconfigure_assert|d(false)
- name: Prepend filename with assert string if role is run in assert mode
ansible.builtin.set_fact:
assert_prefix: "assert-"
- when: sap_hypervisor_node_preconfigure_assert|d(false)
+ when: sap_hypervisor_node_preconfigure_assert
-- include_tasks: '{{ assert_prefix }}installation.yml'
+- name: Include "{{ assert_prefix }}installation.yml"
+ ansible.builtin.include_tasks: '{{ assert_prefix }}installation.yml'
-- include_tasks: '{{ assert_prefix }}configuration.yml'
+- name: Include "{{ assert_prefix }}configuration.yml"
+ ansible.builtin.include_tasks: '{{ assert_prefix }}configuration.yml'
-- include_tasks: '{{ assert_prefix }}set-tuned-profile.yml'
+- name: Include "{{ assert_prefix }}set-tuned-profile.yml"
+ ansible.builtin.include_tasks: '{{ assert_prefix }}set-tuned-profile.yml'
-- include_tasks: "{{ assert_prefix }}rhv-hooks.yml"
+- name: Include "{{ assert_prefix }}rhv-hooks.yml"
+ ansible.builtin.include_tasks: "{{ assert_prefix }}rhv-hooks.yml"
loop:
- - "{{ role_path }}/tasks/platform/{{ sap_hypervisor_node_platform }}/50_hana"
- - "{{ role_path }}/tasks/platform/{{ sap_hypervisor_node_platform }}/50_iothread_pinning"
+ - 50_hana
+ - 50_iothread_pinning
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/rhv-hooks.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/rhv-hooks.yml
index 045b55069..ee0d63a8d 100644
--- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/rhv-hooks.yml
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/rhv-hooks.yml
@@ -3,6 +3,7 @@
ansible.builtin.file:
path: /usr/libexec/vdsm/hooks/before_vm_start
state: directory
+ mode: "0755"
become: true
become_user: root
@@ -10,6 +11,6 @@
ansible.builtin.copy:
dest: "/usr/libexec/vdsm/hooks/before_vm_start/{{ item }}"
src: "{{ item }}"
- mode: '0755'
+ mode: "0755"
become: true
become_user: root
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/set-tuned-profile.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/set-tuned-profile.yml
index 415c4a194..91c3d7757 100644
--- a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/set-tuned-profile.yml
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/set-tuned-profile.yml
@@ -3,12 +3,14 @@
ansible.builtin.file:
path: /usr/lib/tuned/sap-hana-kvm-host
state: directory
+ mode: "0755"
become: true
become_user: root
- name: Create sap-hana-kvm-host tuned profile
ansible.builtin.copy:
dest: "/usr/lib/tuned/sap-hana-kvm-host/tuned.conf"
+ mode: "0644"
content: |
#
# tuned configuration
@@ -36,5 +38,7 @@
- name: Activate tuned profile
ansible.builtin.command: tuned-adm profile sap-hana-kvm-host
+ register: __sap_hypervisor_node_preconfigure_register_tuned_activation_output
become: true
become_user: root
+ changed_when: __sap_hypervisor_node_preconfigure_register_tuned_activation_output.rc != 0
diff --git a/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_ocp_virt.yml b/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_ocp_virt.yml
new file mode 100644
index 000000000..8cc402c9a
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_ocp_virt.yml
@@ -0,0 +1,28 @@
+---
+# vars file for sap_hypervisor_node_preconfigure
+#
+
+# Install and configure the host path provisioner (hpp) for a local storage disk
+sap_hypervisor_node_preconfigure_install_hpp: False
+
+# Install the trident NFS storage provider
+sap_hypervisor_node_preconfigure_install_trident: False
+# URL of the trident installer package to use
+sap_hypervisor_node_preconfigure_install_trident_url: https://github.com/NetApp/trident/releases/download/v23.01.0/trident-installer-23.01.0.tar.gz
+
+# Should SRIOV be enabled for unsupported NICs?
+sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics: True
+
+# Amount of memory [GiB] to be reserved for the hypervisor on hosts >= 512GiB
+sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_ge_512: 64 #GiB
+# Amount of memory [GiB] to be reserved for the hypervisor on hosts < 512GiB
+sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_lt_512: 32 #GiB
+
+# Should the check for the minimal amount of memory be ignored? The minimal amount is 96 GiB.
+sap_hypervisor_node_preconfigure_ignore_minimal_memory_check: False
+
+# Should the operators be installed
+sap_hypervisor_node_preconfigure_install_operators: True
+
+# Configure the workers?
+sap_hypervisor_node_preconfigure_setup_workers: True
diff --git a/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_rhel_kvm.yml b/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_rhel_kvm.yml
index db5407d19..fb02f0bbc 100644
--- a/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_rhel_kvm.yml
+++ b/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_rhel_kvm.yml
@@ -32,10 +32,7 @@ sap_hypervisor_node_preconfigure_kvm_nx_huge_pages: "auto"
# Intel Transactional Synchronization Extensions (TSX): {"on"|"off"}
# Note the importance of the quotes, otherwise off will be mapped to false
-sap_hypervisor_node_preconfigure_tsx: "off"
-
-# run role in assert mode?
-sap_hypervisor_node_preconfigure_assert: false
+sap_hypervisor_node_preconfigure_tsx: "on"
# fail if assertion is invalid
sap_hypervisor_node_preconfigure_ignore_failed_assertion: no
diff --git a/roles/sap_install_media_detect/.ansible-lint b/roles/sap_install_media_detect/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_install_media_detect/.ansible-lint
+++ b/roles/sap_install_media_detect/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_install_media_detect/README.md b/roles/sap_install_media_detect/README.md
index d936b39bd..23dfaa60c 100644
--- a/roles/sap_install_media_detect/README.md
+++ b/roles/sap_install_media_detect/README.md
@@ -3,7 +3,7 @@
Ansible Role for detection and extraction of SAP Software installation media
This role is used to prepare for installation of SAP Software, by searching a given directory for SAP installation media (e.g. SAR files),
-moving files to subdirectories (i.e. `/sap_hana` and `/sap_swpm`) with the directory/file ownership permissons, then extracting the detected files.
+moving files to subdirectories (i.e. `/sap_hana` and `/sap_swpm`) with the directory/file ownership permissions, then extracting the detected files.
Detection of installation media is available for SAP HANA and the various key installation files when using SAP SWPM to install
SAP Business Applications based upon SAP NetWeaver (e.g. SAP S/4HANA, SAP BW/4HANA, SAP ECC, SAP BW, SAP WebDispatcher etc).
diff --git a/roles/sap_install_media_detect/files/tmp/sapfile b/roles/sap_install_media_detect/files/tmp/sapfile
index 6a37c1f7d..037480246 100755
--- a/roles/sap_install_media_detect/files/tmp/sapfile
+++ b/roles/sap_install_media_detect/files/tmp/sapfile
@@ -18,7 +18,7 @@
set -o nounset
set -o pipefail
-usage () {
+usage() {
echo "sapfile: Determine and display SAP file type. If unknown, call the file command."
echo "Usage: sapfile [OPTION...] [FILE...]"
echo "Determine and display type of SAP FILEs, optionally followed by additional information."
@@ -26,6 +26,7 @@ usage () {
echo " -H|--header display a column header"
echo " -l|--long long listing: also display the file type, taken from the output of the file command"
echo " -e|--extra-long extra long listing: display all information required by role sap_install_media_detect"
+ echo " -s|--show-file-types show all supported file types"
echo " --lsar_file= the name of the program to list the content of a RAR file, if different from 'lsar',"
echo " either as the name of a file in one of the PATH directories or as an FQPN."
echo " When specifying the 'unrar' program, it will be called with option 'lb'."
@@ -38,6 +39,11 @@ usage () {
echo "- sapcar (SAP program to handle sapcar files; typical filename: SAPCAR_1115-70006178.EXE)"
}
+show_file_types() {
+ awk '!/BEGIN/&&!/END/&&/_sap_file_type=/{gsub (" ", ""); gsub ("\\{_sap_file_type=\"", ": "); gsub ("\"\\}", ""); print $NF}' $0 | \
+ sort | uniq
+}
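+# Illustrative usage (assuming the script is invoked as ./sapfile): './sapfile -s' or './sapfile --show-file-types'
+# prints the sorted list of SAP file types this script can classify, one per line.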
+
# defaults:
_DELIMITER=";"
@@ -53,7 +59,7 @@ if [[ ${#} == 0 ]]; then
exit 1
fi
-options=":leHh-:"
+options=":leHsh-:"
while getopts "$options" opt; do
case ${opt} in
-)
@@ -64,6 +70,10 @@ while getopts "$options" opt; do
extra-long)
_DISPLAY_ALL_INFO="y"
;;
+ show-file-types)
+ show_file_types
+ exit 0
+ ;;
header)
_DISPLAY_HEADER="y"
;;
@@ -109,6 +119,10 @@ while getopts "$options" opt; do
e)
_DISPLAY_ALL_INFO="y"
;;
+ s)
+ show_file_types
+ exit 0
+ ;;
H)
_DISPLAY_HEADER="y"
;;
@@ -186,11 +200,11 @@ if [[ ${_DISPLAY_HEADER}. == "y." ]]; then
fi
for _FILE in "$@"; do
- _FILE_OUTPUT=$(file "${_FILE}" | sed 's,'"${_FILE}"': ,,')
- if [[ ${_FILE_OUTPUT}. == "data." ]] && [[ (${_FILE##*.} == "SAR" || ${_FILE##*.} == "sar") ]]; then
+ if [[ (${_FILE##*.} == "SAR" || ${_FILE##*.} == "sar") ]]; then
_GENERIC_FILE_TYPE="sapcar"
_list_content="${_SAPCAR_FILE} -tvf"
else
+ _FILE_OUTPUT=$(file "${_FILE}" | sed 's,'"${_FILE}"': ,,')
_GENERIC_FILE_TYPE=$(echo "${_FILE_OUTPUT}" | awk '
BEGIN{_file_type="other"}
/RAR self-extracting archive/{_file_type="rarexe"}
diff --git a/roles/sap_install_media_detect/meta/runtime.yml b/roles/sap_install_media_detect/meta/runtime.yml
deleted file mode 100644
index 2ee3c9fa9..000000000
--- a/roles/sap_install_media_detect/meta/runtime.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-requires_ansible: '>=2.9.10'
diff --git a/roles/sap_install_media_detect/tasks/cleanup/disable-epel-repo.yml b/roles/sap_install_media_detect/tasks/cleanup/disable-epel-repo.yml
index 606ef0609..c6668e1bd 100644
--- a/roles/sap_install_media_detect/tasks/cleanup/disable-epel-repo.yml
+++ b/roles/sap_install_media_detect/tasks/cleanup/disable-epel-repo.yml
@@ -21,6 +21,7 @@
- name: SAP Install Media Detect - Cleanup - Remove the EPEL GPG key for this OS version, using the rpm -e command # noqa command-instead-of-module
ansible.builtin.command: rpm -e "{{ __sap_install_media_detect_register_rpm_q_gpg_pubkeys.stdout.strip().split()[0] }}"
register: __sap_install_media_detect_register_rpm_e_epel_gpg_pubkey
+ changed_when: true
when:
- not sap_install_media_detect_use_rpm_key_module_for_removing_the_key
- __sap_install_media_detect_register_rpm_q_gpg_pubkeys.stdout | length != 0
@@ -29,6 +30,7 @@
- name: SAP Install Media Detect - Cleanup - Get the GPG keys for this OS version after removal, rpm -e
ansible.builtin.shell: set -o pipefail && rpm -q gpg-pubkey --qf '%{NAME}-%{VERSION}-%{RELEASE}\t%{SUMMARY}\n' | grep 'EPEL ({{ ansible_distribution_major_version }})'
register: __sap_install_media_detect_register_rpm_q_gpg_pubkeys_after_removal
+ changed_when: false
failed_when: false
when:
- not sap_install_media_detect_use_rpm_key_module_for_removing_the_key
diff --git a/roles/sap_install_media_detect/tasks/organize_files.yml b/roles/sap_install_media_detect/tasks/organize_files.yml
index dea5bfb1c..afa6c496f 100644
--- a/roles/sap_install_media_detect/tasks/organize_files.yml
+++ b/roles/sap_install_media_detect/tasks/organize_files.yml
@@ -11,133 +11,116 @@
loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
loop_control:
loop_var: line_item
+ label: "{{ line_item.file }}"
when: sap_install_media_detect_source == 'remote_dir'
-- name: SAP Install Media Detect - Organize all files - Remove existing archive extraction directories
+- name: SAP Install Media Detect - Organize all files - Ensure archive extraction directories are absent
ansible.builtin.file:
- path: "{{ __sap_install_media_detect_software_main_directory }}/{{ line_item.extraction_dir }}"
+ path: "{{ line_item }}"
state: absent
- loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
+ loop: "{{ [__sap_install_media_detect_software_main_directory + '/'] | product(__sap_install_media_detect_fact_extraction_directories) | map('join') | list }}"
loop_control:
loop_var: line_item
when:
- sap_install_media_detect_extract_archives
- - line_item.extract_archive == 'y'
- - (line_item.archive_type == 'zip' or
- line_item.archive_type == 'rarexe' or
- line_item.archive_type == 'rar' or
- line_item.archive_type == 'sapcar')
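+# Illustrative note (hypothetical values, not from the role): with
+# __sap_install_media_detect_software_main_directory set to '/software' and
+# __sap_install_media_detect_fact_extraction_directories containing ['sap_swpm_extracted', 'sap_hana_extracted'],
+# the product/join expression above yields ['/software/sap_swpm_extracted', '/software/sap_hana_extracted'].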
- name: SAP Install Media Detect - Organize all files - Create archive extraction directories
ansible.builtin.file:
- path: "{{ __sap_install_media_detect_software_main_directory }}/{{ line_item.extraction_dir }}"
+ path: "{{ line_item }}"
state: directory
owner: root
group: root
mode: '0755'
- loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
+ loop: "{{ [__sap_install_media_detect_software_main_directory + '/'] | product(__sap_install_media_detect_fact_extraction_directories) | map('join') | list }}"
loop_control:
loop_var: line_item
when:
- sap_install_media_detect_extract_archives
- - line_item.extract_archive == 'y'
- - (line_item.archive_type == 'zip' or
- line_item.archive_type == 'rarexe' or
- line_item.archive_type == 'rar' or
- line_item.archive_type == 'sapcar')
-- name: SAP Install Media Detect - Organize all files - Create target directories for archive files
+- name: SAP Install Media Detect - Organize all files - Ensure target directories exist
ansible.builtin.file:
- path: "{{ __sap_install_media_detect_software_main_directory }}/{{ line_item.target_dir }}"
+ path: "{{ line_item }}"
state: directory
owner: root
group: root
mode: '0755'
- loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
+ loop: "{{ [__sap_install_media_detect_software_main_directory + '/'] | product(__sap_install_media_detect_fact_target_directories) | map('join') | list }}"
loop_control:
loop_var: line_item
when:
- sap_install_media_detect_move_or_copy_archives
- - line_item.target_dir != 'auto'
- - (line_item.archive_type == 'zip' or
- line_item.archive_type == 'rarexe' or
- line_item.archive_type == 'rar' or
- line_item.archive_type == 'sapcar' or
- line_item.archive_type == 'xml')
-- name: SAP Install Media Detect - Organize all files - Create target directory 'sap_hana'
+- name: SAP Install Media Detect - Organize all files - Ensure SWPM target directories exist
ansible.builtin.file:
- path: "{{ __sap_install_media_detect_software_main_directory }}/sap_hana"
+ path: "{{ line_item }}"
state: directory
owner: root
group: root
mode: '0755'
- loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
+ loop:
+ - "{{ __sap_install_media_detect_software_main_directory }}/sap_swpm"
+ - "{{ __sap_install_media_detect_software_main_directory }}/sap_swpm_download_basket"
loop_control:
loop_var: line_item
when:
- sap_install_media_detect_move_or_copy_archives
- - sap_install_media_detect_db | d('') == 'saphana'
- - line_item.target_dir == 'auto'
+ - sap_install_media_detect_swpm
-- name: SAP Install Media Detect - Organize all files - Create target directory 'sap_swpm_download_basket'
+- name: SAP Install Media Detect - Organize all files - Create target directory 'sap_hana'
ansible.builtin.file:
- path: "{{ __sap_install_media_detect_software_main_directory }}/sap_swpm_download_basket"
+ path: "{{ __sap_install_media_detect_software_main_directory }}/sap_hana"
state: directory
owner: root
group: root
mode: '0755'
- loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
- loop_control:
- loop_var: line_item
when:
- sap_install_media_detect_move_or_copy_archives
- - sap_install_media_detect_swpm
- - line_item.target_dir == 'auto'
+ - sap_install_media_detect_db | d('') == 'saphana'
- name: SAP Install Media Detect - Organize all files - Extract zip export archive files to separate subdirectories
ansible.builtin.shell: "set -o pipefail && unzip {{ line_item.file }} -d {{ __sap_install_media_detect_software_main_directory }}/{{ line_item.extraction_dir }}/{{ (line_item.file | splitext)[0] }}"
args:
chdir: "{{ __sap_install_media_detect_software_main_directory }}"
- loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
+ loop: "{{ __sap_install_media_detect_fact_files_sapfile_results_extract_zip }}"
loop_control:
loop_var: line_item
+ label: "{{ line_item.file }}"
+ changed_when: true
when:
- sap_install_media_detect_extract_archives
- - line_item.archive_type == 'zip'
- - line_item.extract_archive == 'y'
- line_item.sap_file_type is search("export")
- name: SAP Install Media Detect - Organize all files - Extract zip non-export archive files
ansible.builtin.shell: "set -o pipefail && unzip {{ line_item.file }} -d {{ __sap_install_media_detect_software_main_directory }}/{{ line_item.extraction_dir }}"
args:
chdir: "{{ __sap_install_media_detect_software_main_directory }}"
- loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
+ loop: "{{ __sap_install_media_detect_fact_files_sapfile_results_extract_zip }}"
loop_control:
loop_var: line_item
+ label: "{{ line_item.file }}"
+ changed_when: true
when:
- sap_install_media_detect_extract_archives
- - line_item.archive_type == 'zip'
- - line_item.extract_archive == 'y'
- not line_item.sap_file_type is search("export")
-- name: SAP Install Media Detect - Organize all files - Extract rar archive files
+- name: SAP Install Media Detect - Organize all files - Extract rar self-extracting archive files
ansible.builtin.shell: "set -o pipefail && {{ __sap_install_media_detect_rar_extract }} {{ line_item.file }}{{ __sap_install_media_detect_rar_extract_directory_argument }} {{ __sap_install_media_detect_software_main_directory }}/{{ line_item.extraction_dir }}"
args:
chdir: "{{ __sap_install_media_detect_software_main_directory }}"
- loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
+ loop: "{{ __sap_install_media_detect_fact_files_sapfile_results_extract_rarexe }}"
loop_control:
loop_var: line_item
+ label: "{{ line_item.file }}"
+ changed_when: true
when:
- sap_install_media_detect_extract_archives
- line_item.archive_type == 'rarexe'
- - line_item.extract_archive == 'y'
-# SAP HANA sapcar archive files have a directory structure with a single directory (e.g. SAP_HANA_DATBASE) which contains all files.
+# SAP HANA sapcar archive files have a directory structure with a single directory (e.g. SAP_HANA_DATABASE) which contains all files.
# We want the extracted files to be placed in extraction_dir under this directory name, allowing multiple directories in extraction_dir.
# So we create a temporary directory, move the file SIGNATURE.SMF (which the sapcar command extracts to the level above) to this directory,
# and then move the single directory to the extraction_dir.
-- name: SAP Install Media Detect - Organize all files - Create temp dir for sapcar archive files - {{ __sap_install_media_detect_software_main_directory }}/tmp_extract
+- name: SAP Install Media Detect - Organize all files - Create temp dir for HANA archive files - {{ __sap_install_media_detect_software_main_directory }}/tmp_extract
ansible.builtin.file:
path: "{{ __sap_install_media_detect_software_main_directory }}/tmp_extract"
state: directory
@@ -157,14 +140,13 @@
&& mv $extracted_dir {{ __sap_install_media_detect_software_main_directory }}/{{ line_item.extraction_dir }}/
args:
chdir: "{{ __sap_install_media_detect_software_main_directory }}/tmp_extract"
- loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
+ loop: "{{ __sap_install_media_detect_fact_files_sapfile_results_extract_sapcar_hana }}"
loop_control:
loop_var: line_item
+ label: "{{ line_item.file }}"
+ changed_when: true
when:
- sap_install_media_detect_extract_archives
- - line_item.extract_archive == 'y'
- - line_item.archive_type == 'sapcar'
- - line_item.sap_file_type is search('saphana')
- name: SAP Install Media Detect - Organize all files - Remove temp dir - {{ __sap_install_media_detect_software_main_directory }}/tmp_extract
ansible.builtin.file:
@@ -182,14 +164,13 @@
-manifest SIGNATURE.SMF
args:
chdir: "{{ __sap_install_media_detect_software_main_directory }}/{{ line_item.extraction_dir }}"
- loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
+ loop: "{{ __sap_install_media_detect_fact_files_sapfile_results_extract_sapcar_nonhana }}"
loop_control:
loop_var: line_item
+ label: "{{ line_item.file }}"
+ changed_when: true
when:
- sap_install_media_detect_extract_archives
- - line_item.extract_archive == 'y'
- - line_item.archive_type == 'sapcar'
- - not line_item.sap_file_type is search('saphana')
- name: SAP Install Media Detect - Organize all files - Copy certain files to 'sap_hana' directory
ansible.builtin.copy:
@@ -202,6 +183,7 @@
loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
loop_control:
loop_var: line_item
+ label: "{{ line_item.file }}"
when:
- sap_install_media_detect_move_or_copy_archives
- sap_install_media_detect_db | d('') == 'saphana'
@@ -220,6 +202,7 @@
loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
loop_control:
loop_var: line_item
+ label: "{{ line_item.file }}"
when:
- sap_install_media_detect_move_or_copy_archives
- sap_install_media_detect_swpm
@@ -232,6 +215,8 @@
loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
loop_control:
loop_var: line_item
+ label: "{{ line_item.file }}"
+ changed_when: true
when:
- sap_install_media_detect_move_or_copy_archives
- line_item.copy_archive == 'y'
@@ -239,11 +224,13 @@
- line_item.sap_file_type != 'saphana_client'
- line_item.sap_file_type != 'sap_hostagent'
-- name: SAP Install Media Detect - Organize all files - Move archive files into subdirectories
+- name: SAP Install Media Detect - Organize all files - Move archive files into subdirectories if not already present
ansible.builtin.shell: "set -o pipefail && mv {{ __sap_install_media_detect_software_main_directory }}/{{ line_item.file }} {{ __sap_install_media_detect_software_main_directory }}/{{ line_item.target_dir }}/{{ line_item.file }}"
loop: "{{ __sap_install_media_detect_fact_files_sapfile_results }}"
loop_control:
loop_var: line_item
+ label: "{{ line_item.file }}"
+ changed_when: true
when:
- sap_install_media_detect_move_or_copy_archives
- line_item.copy_archive == 'n'
diff --git a/roles/sap_install_media_detect/tasks/prepare/create_file_list_phase_1.yml b/roles/sap_install_media_detect/tasks/prepare/create_file_list_phase_1.yml
index 3240c91ad..56f0835f7 100644
--- a/roles/sap_install_media_detect/tasks/prepare/create_file_list_phase_1.yml
+++ b/roles/sap_install_media_detect/tasks/prepare/create_file_list_phase_1.yml
@@ -35,6 +35,11 @@
loop_var: line_item
when: line_item is search("SAPCAR")
+- name: SAP Install Media Detect - Prepare - Assert the presence of SAPCAR
+ ansible.builtin.assert:
+ that: __sap_install_media_detect_fact_sapcar_path | d('') | length > 0
+ fail_msg: "There is no file with file name pattern '*SAPCAR*' in '{{ sap_install_media_detect_source_directory }}'."
+
- name: SAP Install Media Detect - Prepare - Ensure sapcar is executable
ansible.builtin.file:
path: "{{ __sap_install_media_detect_fact_sapcar_path }}"
diff --git a/roles/sap_install_media_detect/tasks/prepare/create_file_list_phase_2.yml b/roles/sap_install_media_detect/tasks/prepare/create_file_list_phase_2.yml
index 91268cb29..264e17ece 100644
--- a/roles/sap_install_media_detect/tasks/prepare/create_file_list_phase_2.yml
+++ b/roles/sap_install_media_detect/tasks/prepare/create_file_list_phase_2.yml
@@ -89,6 +89,15 @@
(sap_install_media_detect_export == 'sapecc' and item.stdout.split(';')[1] == 'sap_export_ecc') or
(sap_install_media_detect_export == 'sapecc_ides' and item.stdout.split(';')[1] == 'sap_export_ecc_ides')
+- name: SAP Install Media Detect - Prepare - Set fact for subsets of the sapfile results
+ ansible.builtin.set_fact:
+ __sap_install_media_detect_fact_target_directories: "{{ __sap_install_media_detect_fact_files_sapfile_results | map(attribute='target_dir') | unique | reject('equalto', 'auto') }}"
+ __sap_install_media_detect_fact_extraction_directories: "{{ __sap_install_media_detect_fact_files_sapfile_results | map(attribute='extraction_dir') | unique | reject('equalto', 'none') }}"
+ __sap_install_media_detect_fact_files_sapfile_results_extract_zip: "{{ __sap_install_media_detect_fact_files_sapfile_results | selectattr('archive_type', 'search', 'zip') | selectattr('extract_archive', 'search', 'y') }}"
+ __sap_install_media_detect_fact_files_sapfile_results_extract_rarexe: "{{ __sap_install_media_detect_fact_files_sapfile_results | selectattr('archive_type', 'search', 'rarexe') | selectattr('extract_archive', 'search', 'y') }}"
+ __sap_install_media_detect_fact_files_sapfile_results_extract_sapcar_hana: "{{ __sap_install_media_detect_fact_files_sapfile_results | selectattr('archive_type', 'search', 'sapcar') | selectattr('extract_archive', 'search', 'y') | selectattr('sap_file_type', 'search', 'saphana') }}"
+ __sap_install_media_detect_fact_files_sapfile_results_extract_sapcar_nonhana: "{{ __sap_install_media_detect_fact_files_sapfile_results | selectattr('archive_type', 'search', 'sapcar') | selectattr('extract_archive', 'search', 'y') | rejectattr('sap_file_type', 'search', 'saphana') }}"
+
- name: SAP Install Media Detect - Prepare - Asserts
when:
- sap_install_media_detect_assert_after_sapfile | d(true)
diff --git a/roles/sap_install_media_detect/tasks/prepare/enable_rar_handling.yml b/roles/sap_install_media_detect/tasks/prepare/enable_rar_handling.yml
index 774154ac7..a829654ab 100644
--- a/roles/sap_install_media_detect/tasks/prepare/enable_rar_handling.yml
+++ b/roles/sap_install_media_detect/tasks/prepare/enable_rar_handling.yml
@@ -74,7 +74,7 @@
- name: SAP Install Media Detect - Prepare - Install a rar extractor package (no EPEL)
- when: sap_install_media_detect_rar_package != 'EPEL' # When default is overriden
+ when: sap_install_media_detect_rar_package != 'EPEL' # When default is overridden
block:
- name: SAP Install Media Detect - Prepare - rar extractor - Set facts
diff --git a/roles/sap_install_media_detect/tasks/prepare/move_files_to_main_directory.yml b/roles/sap_install_media_detect/tasks/prepare/move_files_to_main_directory.yml
index 8b20bca14..d1b62656e 100644
--- a/roles/sap_install_media_detect/tasks/prepare/move_files_to_main_directory.yml
+++ b/roles/sap_install_media_detect/tasks/prepare/move_files_to_main_directory.yml
@@ -6,12 +6,31 @@
# If any files have been moved to non-extract subdirectories already, move them back to the top level, making the role idempotent
# Reason for noqa: When using pipefail and there is no result from the grep -v, this tail will fail but it should never fail
-- name: SAP Install Media Detect - Prepare - Find existing non-extract subdirectories # noqa risky-shell-pipe
- ansible.builtin.shell: ls -d {{ __sap_install_media_detect_software_main_directory }}/*/ | grep -v '_extracted'
+- name: SAP Install Media Detect - Prepare - Find the relevant non-extract subdirectories # noqa risky-shell-pipe
+ ansible.builtin.shell:
+ cmd: >
+ ls -d sap_hana sap_swpm_download_basket $({{ __sap_install_media_detect_sapfile_path }} -s) 2>/dev/null |
+ awk '{print ("'{{ __sap_install_media_detect_software_main_directory }}'/"$0"/")}'
+ chdir: "{{ __sap_install_media_detect_software_main_directory }}"
register: __sap_install_media_detect_register_subdirectories_phase_1b
changed_when: false
failed_when: false
+# Reason for noqa: When using pipefail and there is no result from the grep, the pipeline will fail, but the task should never fail
+- name: SAP Install Media Detect - Prepare - Find existing extract subdirectories # noqa risky-shell-pipe
+ ansible.builtin.shell: ls -d {{ __sap_install_media_detect_software_main_directory }}/*/ | grep '_extracted/$'
+ register: __sap_install_media_detect_register_subdirectories_phase_1b_extracted
+ changed_when: false
+ failed_when: false
+
+- name: SAP Install Media Detect - Prepare - Display the relevant non-extract subdirectories
+ ansible.builtin.debug:
+ var: __sap_install_media_detect_register_subdirectories_phase_1b.stdout_lines
+
+- name: SAP Install Media Detect - Prepare - Display existing extract subdirectories
+ ansible.builtin.debug:
+ var: __sap_install_media_detect_register_subdirectories_phase_1b_extracted.stdout_lines
+
- name: SAP Install Media Detect - Prepare - Create list of all files one level below '{{ __sap_install_media_detect_software_main_directory }}'
ansible.builtin.find:
paths: "{{ line_item }}"
@@ -31,17 +50,25 @@
- "{{ __sap_install_media_detect_register_find_result_phase_1b.results }}"
- files
-# Reason for noqa: Too much additional code required for determing if anything has changed or not
+# Reason for noqa: Too much additional code required for determining if anything has changed or not
- name: SAP Install Media Detect - Prepare - Move files back to '{{ __sap_install_media_detect_software_main_directory }}' # noqa no-changed-when
ansible.builtin.command: "mv {{ line_item }} {{ __sap_install_media_detect_software_main_directory }}/"
loop: "{{ __sap_install_media_detect_fact_find_result_phase_1b }}"
loop_control:
loop_var: line_item
-- name: SAP Install Media Detect - Prepare - Remove the subdirectories
+- name: SAP Install Media Detect - Prepare - Remove the relevant non-extract subdirectories
ansible.builtin.file:
path: "{{ line_item }}"
state: absent
loop: "{{ __sap_install_media_detect_register_subdirectories_phase_1b.stdout_lines }}"
loop_control:
loop_var: line_item
+
+- name: SAP Install Media Detect - Prepare - Remove the extract subdirectories
+ ansible.builtin.file:
+ path: "{{ line_item }}"
+ state: absent
+ loop: "{{ __sap_install_media_detect_register_subdirectories_phase_1b_extracted.stdout_lines }}"
+ loop_control:
+ loop_var: line_item
diff --git a/roles/sap_install_media_detect/tasks/prepare/provide_sapfile_utility.yml b/roles/sap_install_media_detect/tasks/prepare/provide_sapfile_utility.yml
index 4572d739b..971233b18 100644
--- a/roles/sap_install_media_detect/tasks/prepare/provide_sapfile_utility.yml
+++ b/roles/sap_install_media_detect/tasks/prepare/provide_sapfile_utility.yml
@@ -6,7 +6,7 @@
suffix: media_detect
register: __sap_install_media_detect_tmpdir
-- name: SAP Install Media Detect - Prepare - Copy file sapfile utility to '{{ __sap_install_media_detect_tmpdir.path }}'
+- name: SAP Install Media Detect - Prepare - Copy the sapfile utility to '{{ __sap_install_media_detect_tmpdir.path }}'
ansible.builtin.copy:
src: tmp/sapfile
dest: "{{ __sap_install_media_detect_tmpdir.path }}/sapfile"
@@ -14,6 +14,6 @@
group: root
mode: '0755'
-- name: SAP Install Media Detect - Prepare - Set fact for sapfile utility
+- name: SAP Install Media Detect - Prepare - Set fact for the sapfile utility
ansible.builtin.set_fact:
__sap_install_media_detect_sapfile_path: "{{ __sap_install_media_detect_tmpdir.path }}/sapfile"
diff --git a/roles/sap_maintain_etc_hosts/.ansible-lint b/roles/sap_maintain_etc_hosts/.ansible-lint
new file mode 100644
index 000000000..8a5df4d43
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/.ansible-lint
@@ -0,0 +1,16 @@
+---
+exclude_paths:
+ - tests/
+enable_list:
+ - yaml
+skip_list:
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_maintain_etc_hosts/.yamllint.yml b/roles/sap_maintain_etc_hosts/.yamllint.yml
new file mode 100644
index 000000000..57ef427c1
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/.yamllint.yml
@@ -0,0 +1,21 @@
+---
+# Based on ansible-lint config
+extends: default
+
+rules:
+ braces: {max-spaces-inside: 1, level: error}
+ brackets: {max-spaces-inside: 1, level: error}
+# colons: {max-spaces-after: -1, level: error}
+# commas: {max-spaces-after: -1, level: error}
+ comments: disable
+ comments-indentation: disable
+# document-start: disable
+# empty-lines: {max: 3, level: error}
+# hyphens: {level: error}
+# indentation: disable
+# key-duplicates: enable
+ line-length: disable
+# new-line-at-end-of-file: disable
+# new-lines: {type: unix}
+# trailing-spaces: disable
+ truthy: disable
diff --git a/roles/sap_maintain_etc_hosts/README.md b/roles/sap_maintain_etc_hosts/README.md
new file mode 100644
index 000000000..98fab9adf
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/README.md
@@ -0,0 +1,142 @@
+# Role Name: sap_maintain_etc_hosts
+
+This role can be used to reliably update the /etc/hosts file.
+
+
+
+## Role Input Parameters
+
+This role requires the list `sap_maintain_etc_hosts_list`, which contains the parameters for the hosts file entries. The default value is the definition of the cluster nodes as used in the role `sap_ha_pacemaker_cluster`. If neither `sap_hana_cluster_nodes` nor `sap_ha_pacemaker_cluster_cluster_nodes` is defined, the role creates a default value from `ansible_facts`.
+
+Caution: If you want to use this role to remove entries from /etc/hosts, it is good practice to do this before adding entries. Adding and removing is done in the order in which the entries are listed.
+
+### sap_maintain_etc_hosts_list
+
+- _Type:_ `list`
+
+ List of nodes to be added or removed in /etc/hosts
+ Possible list options:
+
+#### node_ip
+
+- _Type:_ `string`
+
+ IP address of the node.
+ It is required for adding a node.
+ When deleting a node, use this option only if node_name and node_domain are not defined.
+
+#### node_name
+
+- _Type:_ `string`
+
+ Hostname of the node.
+ It is required for adding a node.
+ When deleting a node, use this option only if node_ip is not defined.
+
+#### node_domain
+
+- _Type:_ `string`
+
+ Domain name of the node.
+ Defaults to sap_domain if set, otherwise to ansible_domain.
+ When deleting a node, use this option only if node_name is defined.
+
+#### aliases
+
+- _Type:_ `list`
+
+ List of aliases for the node
+ Not used when state is absent
+
+#### alias_mode
+
+- _Type:_ `string`
+
+ Options:
+
+ - `merge` : merges the list of aliases with the existing aliases of the node. (default)
+ - `overwrite` : overwrites the aliases of the node.
+
+ Not used when state is absent
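+
+ For example, assuming an existing entry (hypothetical names and addresses):
+
+```[bash]
+1.2.3.4 host1.abc.de host1 oldalias
+```
+
+ Re-adding the node with `aliases: [newalias]` would keep `oldalias` and append `newalias` when `alias_mode` is `merge`, whereas `overwrite` would replace the existing aliases with `newalias`.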
+
+#### node_comment
+
+- _Type:_ `string`
+
+ Default: `managed by ansible sap_maintain_etc_hosts role`
+ String which is appended to the host entry line after the comment sign.
+ Not used when state is absent
+
+#### hana_site
+
+- _Type:_ `string`
+
+ If set (e.g. when configuring a cluster), it is appended to the comment.
+ Not used when state is absent
+
+#### node_role
+
+ Not used. For compatibility reasons only.
+
+#### state
+
+- _Type:_ `string`
+
+ Options:
+
+ - `present` : creates a host entry (default)
+ - `absent` : removes a host entry by IP address or hostname
+
+
+
+Example Playbook
+----------------
+
+If you want to add entries to or remove entries from your /etc/hosts file, you can use a snippet like this:
+
+```[yaml]
+- name: Ensure /etc/hosts is updated
+ ansible.builtin.include_role:
+ name: sap_maintain_etc_hosts
+ vars:
+ sap_maintain_etc_hosts_list:
+ - node_ip: 1.2.3.5
+ state: absent
+ - node_name: host2
+ state: absent
+ - node_ip: 1.2.3.4
+ node_name: host1
+ aliases:
+ - alias1
+ - anotheralias2
+ node_comment: "Here comes text after hashsign"
+ state: present
+```
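+
+Assuming that `node_domain` resolves to `abc.de` (via `sap_domain` or `ansible_domain`), the resulting entry for `host1` would look similar to:
+
+```[bash]
+1.2.3.4 host1.abc.de host1 alias1 anotheralias2 # Here comes text after hashsign
+```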
+
+If you have defined a cluster and the variable `sap_ha_pacemaker_cluster_cluster_nodes` or `sap_hana_cluster_nodes` is set, you can use the following play:
+
+```[yaml]
+- name: Ensure all cluster nodes are in /etc/hosts
+ ansible.builtin.include_role:
+ name: sap_maintain_etc_hosts
+ vars:
+ sap_maintain_etc_hosts_list: "{{ sap_hana_cluster_nodes }}"
+```
+
+License
+-------
+
+Apache-2.0
+
+Author Information
+------------------
+
+@rhmk 10/10/23
diff --git a/roles/sap_maintain_etc_hosts/defaults/main.yml b/roles/sap_maintain_etc_hosts/defaults/main.yml
new file mode 100644
index 000000000..bd6c1cf5b
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/defaults/main.yml
@@ -0,0 +1,47 @@
+---
+# defaults file for sap_maintain_etc_hosts
+# BEGIN: Default Variables for sap_maintain_etc_hosts
+
+# sap_maintain_etc_hosts_list: (not defined by default)
+# List of nodes to be added or removed in /etc/hosts
+# Possible options:
+# - node_ip:
+# IP address of the node.
+# It is required for adding a node.
+# When deleting a node, use this option only if node_name and node_domain are not defined
+# - node_name
+# Hostname of the node
+# It is required for adding a node.
+# When deleting a node, use this option only if node_ip is not defined
+# - node_domain
+# Domain name of the node
+# Defaults to sap_domain if set, otherwise to ansible_domain
+# When deleting a node, use this option only if node_name is defined
+# - aliases
+# List of aliases for the node
+# Not used when state is absent
+# - alias_mode
+# `merge` : merges the list of aliases with the existing aliases of the node. (default)
+# `overwrite`: overwrites the aliases of the node.
+# Not used when state is absent
+# - node_comment
+# Default: `managed by ansible sap_maintain_etc_hosts role`
+# String which is appended to the host entry line after the comment sign
+# Not used when state is absent
+# - hana_site
+# if set (e.g. for configuring cluster) it is appended to the comment
+# Not used when state is absent
+# - node_role
+# Not used. For compatibility reasons only.
+# - state
+# default: present
+# Defines whether an entry is added or removed from /etc/hosts
+#
+# Example: See README.md
+
+# END: Default Variables for sap_maintain_etc_hosts
+
+# The default value is the definition of the cluster nodes in sap_ha_pacemaker_cluster. If that is not defined,
+# the role creates a default value from ansible_facts.
+
+sap_maintain_etc_hosts_list: "{{ sap_hana_cluster_nodes | default(sap_ha_pacemaker_cluster_cluster_nodes) | default(omit) }}"
diff --git a/roles/sap_maintain_etc_hosts/meta/argument_specs.yml b/roles/sap_maintain_etc_hosts/meta/argument_specs.yml
new file mode 100644
index 000000000..4244e41ed
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/meta/argument_specs.yml
@@ -0,0 +1,84 @@
+---
+# Requires: ansible 2.11
+# Argument specifications in this separate file maintain backwards compatibility.
+argument_specs:
+
+# For required variables, using aliases does not work and fails the argument validation.
+
+ main:
+ short_description: Configure /etc/hosts
+ description: This role is used for proper local /etc/hosts configuration.
+ options:
+ sap_maintain_etc_hosts_list:
+ type: list
+ description:
+ - List of nodes to be added or removed in /etc/hosts
+ elements: dict
+ options:
+ node_ip:
+ type: str
+ description:
+ - IP address of the node
+ - It is required for adding a node. When deleting a node, use this option only if node_name and node_domain are not defined
+ node_name:
+ type: str
+ description:
+ - Hostname of the node
+ - It is required for adding a node. When deleting a node, use this option only if node_ip is not defined
+ node_domain:
+ type: str
+ description:
+ - Domain name of the node
+ - _Optional. Defaults to sap_domain if set, otherwise to ansible_domain_
+ - _Optional. When deleting a node, use this option only if node_name is defined_
+ aliases:
+ type: list
+ elements: str
+ description:
+ - List of aliases for the node
+ - Not used when state is absent
+ alias_mode:
+ type: str
+ default: merge
+ choices:
+ - merge
+ - overwrite
+ description:
+ - merges or overwrites the aliases of the node
+ - Not used when state is absent
+ node_comment:
+ type: str
+ default: "managed by ansible sap_maintain_etc_hosts role"
+ description:
+ - String which is appended to the host entry line after the comment sign
+ - Not used when state is absent
+ hana_site:
+ type: str
+ description:
+ - If set (e.g. when configuring a cluster), it is appended to the comment
+ - Not used when state is absent
+ node_role:
+ description:
+ - Not used. For compatibility reasons only.
+ state:
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ description:
+ - Defines whether an entry is added or removed from /etc/hosts
+ example:
+ sap_maintain_etc_hosts_list:
+ - node_ip: 1.2.3.4
+ node_name: host1
+ node_domain: abc.de
+ aliases:
+ - alias1
+ - anotheralias2
+ node_comment: "Here comes text after hashsign"
+ state: present
+ - node_ip: 1.2.3.5
+ state: absent
+ - node_name: host2
+ state: absent
diff --git a/roles/sap_maintain_etc_hosts/meta/main.yml b/roles/sap_maintain_etc_hosts/meta/main.yml
new file mode 100644
index 000000000..ec0dd8471
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/meta/main.yml
@@ -0,0 +1,28 @@
+---
+galaxy_info:
+ namespace: community
+ author: Red Hat for SAP Community of Practice, Markus Koch
+ description: Configuration of /etc/hosts
+ company: Red Hat, Inc.
+
+ license: Apache-2.0
+
+ min_ansible_version: "2.11"
+
+ platforms:
+ - name: "EL"
+ versions:
+ - "7"
+ - "8"
+ - "9"
+ - name: "SLES"
+ versions:
+ - "15"
+
+ galaxy_tags:
+ - rhel
+ - redhat
+ - sles
+ - suse
+
+dependencies: []
diff --git a/roles/sap_maintain_etc_hosts/tasks/main.yml b/roles/sap_maintain_etc_hosts/tasks/main.yml
new file mode 100644
index 000000000..0fc42f203
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/tasks/main.yml
@@ -0,0 +1,79 @@
+---
+# tasks file for sap_maintain_etc_hosts
+#- name: Double check that list entries with state present do not have duplicate IP addresses
+# ansible.builtin.assert:
+# that:
+# - sap_maintain_etc_hosts_list | selectattr('state')
+- name: Get list of hosts to be added with no state definition
+ ansible.builtin.set_fact:
+ __sap_maintain_etc_hosts_present: "{{ sap_maintain_etc_hosts_list | selectattr('state', 'undefined') | list }}"
+ when: sap_maintain_etc_hosts_list is defined
+
+- name: Add list of hosts with state=present
+ ansible.builtin.set_fact:
+ __sap_maintain_etc_hosts_present: "{{ __sap_maintain_etc_hosts_present +
+ (sap_maintain_etc_hosts_list | difference(__sap_maintain_etc_hosts_present) | selectattr('state', 'eq', 'present') | list) }}"
+
+- name: Debug list of hosts to be present
+ ansible.builtin.debug:
+ msg:
+ - "{{ __sap_maintain_etc_hosts_present }}"
+ verbosity: 2
+ when:
+ - __sap_maintain_etc_hosts_present is defined
+
+- name: Ensure no duplicate IPs are in the list for adding to hosts
+ ansible.builtin.assert:
+ that:
+ (__sap_maintain_etc_hosts_present | map(attribute='node_ip') | length) ==
+ (__sap_maintain_etc_hosts_present | map(attribute='node_ip') | unique | length)
+ msg: "You have defined duplicate ip addresses to be created!"
+ when:
+ - __sap_maintain_etc_hosts_present is defined
+
+- name: Ensure no duplicate hostnames are in the list for adding to hosts
+ ansible.builtin.assert:
+ that:
+ (__sap_maintain_etc_hosts_present | map(attribute='node_name') | length) ==
+ (__sap_maintain_etc_hosts_present | map(attribute='node_name') | unique | length)
+ msg: "You have defined duplicate hostnames to be created!"
+ when:
+ - __sap_maintain_etc_hosts_present is defined
+
+- name: Ensure required defaults are read from setup module
+ when: >
+ not ((ansible_hostname is defined) and
+ (ansible_domain is defined) and
+ (ansible_default_ipv4 is defined))
+ ## this is equivalent to
+ # (ansible_hostname is undefined) or
+ # (ansible_domain is undefined) or
+ # (ansible_default_ipv4 is undefined)
+ ansible.builtin.setup:
+ gather_subset: "{{ __sap_maintain_etc_hosts_gather_subset }}"
+ filter:
+ - "ansible_hostname"
+ - "ansible_domain"
+ - "ansible_default_ipv4"
+
+# when sap_maintain_etc_hosts_list is empty, set defaults for local host
+- name: Ensure sap_maintain_etc_hosts_list is filled with defaults
+ when: >
+ ( sap_maintain_etc_hosts_list is undefined) or
+ ( sap_maintain_etc_hosts_list is none) or
+ ( sap_maintain_etc_hosts_list | trim == '')
+ ansible.builtin.set_fact:
+ sap_maintain_etc_hosts_list:
+ - node_ip: "{{ ansible_default_ipv4.address }}"
+ node_name: "{{ ansible_hostname }}"
+ node_domain: "{{ sap_domain | default(ansible_domain) }}"
+ node_comment: "managed by ansible sap_maintain_etc_hosts role"
+ state: present
+
+- name: Add or remove entry in "{{ __sap_maintain_etc_hosts_file }}"
+ loop: "{{ sap_maintain_etc_hosts_list }}"
+ loop_control:
+ label: "{{ thishost.node_name | d(thishost.node_ip) }}"
+ loop_var: thishost
+ ansible.builtin.include_tasks:
+ file: update_host_{{ thishost.state | default('present') }}.yml
diff --git a/roles/sap_maintain_etc_hosts/tasks/update_host_absent.yml b/roles/sap_maintain_etc_hosts/tasks/update_host_absent.yml
new file mode 100644
index 000000000..dddf8f467
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/tasks/update_host_absent.yml
@@ -0,0 +1,55 @@
+---
+- name: Verify that variable node_ip is in the correct format
+ ansible.builtin.assert:
+ that: thishost.node_ip is ansible.utils.ip
+ msg: "Variable 'node_ip' is not an IP address. Please use the correct format"
+ when: thishost.node_ip is defined
+
+- name: Ensure that either IP address or hostname is defined
+ ansible.builtin.assert:
+ that: >
+ ((thishost.node_ip is defined) and (thishost.node_name is undefined) and (thishost.node_domain is undefined)) or
+ ((thishost.node_ip is undefined) and (thishost.node_name is defined))
+ msg: "Invalid delete item. Please define either node_ip only or node_name. In the latter case node_domain is optional."
+
+- name: Ensure that all entries in the hosts file with IP {{ thishost.node_ip | d('undefined') }} are removed
+ ansible.builtin.lineinfile:
+ path: "{{ __sap_maintain_etc_hosts_file }}"
+ regexp: '^{{ thishost.node_ip }}\s'
+ state: absent
+ backup: true
+ when:
+ - thishost.node_ip is defined
+ - thishost.node_name is undefined
+ - not ansible_check_mode
+ become_user: root
+ become: true
+
+- name: Ensure that all entries in the hosts file with name {{ thishost.node_name | d('undefined') }} are removed
+ ansible.builtin.lineinfile:
+ path: "{{ __sap_maintain_etc_hosts_file }}"
+ regexp: '^.*\s{{ thishost.node_name }}\s'
+ state: absent
+ backup: true
+ when:
+ - thishost.node_name is defined
+ - (thishost.node_domain is undefined) or (thishost.node_domain | length == 0)
+ - thishost.node_ip is undefined
+ - not ansible_check_mode
+ become_user: root
+ become: true
+
+- name: Ensure that all entries in the hosts file with the FQDN are removed
+ ansible.builtin.lineinfile:
+ path: "{{ __sap_maintain_etc_hosts_file }}"
+ regexp: '^.*\s{{ thishost.node_name + "." + thishost.node_domain }}\s'
+ state: absent
+ backup: true
+ when:
+ - thishost.node_name is defined
+ - thishost.node_domain is defined
+ - thishost.node_domain | length > 0
+ - thishost.node_ip is undefined
+ - not ansible_check_mode
+ become_user: root
+ become: true
diff --git a/roles/sap_maintain_etc_hosts/tasks/update_host_present.yml b/roles/sap_maintain_etc_hosts/tasks/update_host_present.yml
new file mode 100644
index 000000000..54714284c
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/tasks/update_host_present.yml
@@ -0,0 +1,181 @@
+---
+- name: Verify that variable node_ip is set
+ ansible.builtin.assert:
+ that: not( ( thishost.node_ip is undefined) or ( thishost.node_ip is none) or ( thishost.node_ip | trim == '') )
+ msg: "Variable 'node_ip' is undefined or empty. Please define it in your host list."
+
+- name: Verify that variable node_ip is in the correct format
+ ansible.builtin.assert:
+ that: thishost.node_ip is ansible.utils.ip
+ msg: "Variable 'node_ip' is not an IP address. Please use the correct format"
+
+- name: Verify that variable node_name is set
+ ansible.builtin.assert:
+ that: not( ( thishost.node_name is undefined) or ( thishost.node_name is none) or ( thishost.node_name | trim == '') )
+ msg: "Variable 'node_name' is undefined or empty. Please define it your host list"
+
+- name: Ensure node_domain is set
+ ansible.builtin.set_fact:
+ __sap_maintain_etc_hosts_domain: "{{ thishost.node_domain | default(sap_domain) | default(ansible_domain) }}"
+
+# Necessary if both defaults are undefined
+- name: Verify that variable node_domain is set
+ ansible.builtin.assert:
+ that: >
+ not( ( __sap_maintain_etc_hosts_domain is undefined) or
+ ( __sap_maintain_etc_hosts_domain is none) or
+ ( __sap_maintain_etc_hosts_domain | trim == '') )
+ msg: "Variable 'domain_name' is undefined or empty. Please define it your host list"
+
+- name: Set default values
+ ansible.builtin.set_fact:
+ __sap_maintain_etc_hosts_comment: "{{ (thishost.node_comment | d('') + ' ' + thishost.hana_site | d('')) | trim }}"
+ __sap_maintain_etc_hosts_alias_mode: "{{ thishost.alias_mode | default('merge') }}"
+
+- name: Prepend Hashtag to comment
+ when: __sap_maintain_etc_hosts_comment|length > 0
+ ansible.builtin.set_fact:
+ __sap_maintain_etc_hosts_comment: "# {{ __sap_maintain_etc_hosts_comment }}"
+
+# The following block reads the existing aliases of a host from /etc/hosts
+# and merges it with the defined aliases in the struct
+#
+# 1. select the line where the first entry is the IP address thishost.node_ip
+# 2. loop over all hostname entries in the selected line (2 to NF = last element in line)
+# 3. stop looping when a comment sign is found (because these are comments)
+# 4. print an element if it is not the hostname or FQDN we want to add
+#
+# => __sap_maintain_etc_hosts_register_aliases.stdout contains a list of aliases of thishost.node_ip
+#
+# trunk-ignore(checkov/CKV2_ANSIBLE_3)
+- name: Merge existing aliases with new alias list
+ when: __sap_maintain_etc_hosts_alias_mode != "overwrite"
+ block:
+ - name: Get all existing hostname aliases of {{ thishost.node_ip }}
+ ansible.builtin.shell: |
+ awk '( $1 == "{{ thishost.node_ip }}" ) {
+ for (i=2; i<=NF; ++i) {
+ if ( $i == "#" ) { break }
+ if (( $i != "{{ thishost.node_name }}" ) && ( $i != "{{ thishost.node_name }}.{{ __sap_maintain_etc_hosts_domain }}" )) { printf("%s ",$i) }
+ }
+ }' "{{ __sap_maintain_etc_hosts_file }}"
+ register: __sap_maintain_etc_hosts_register_aliases
+ changed_when: false
+
+ - name: Add defined aliases
+ ansible.builtin.set_fact:
+ __sap_maintain_etc_hosts_aliases: "{{ (__sap_maintain_etc_hosts_register_aliases.stdout.split(' ')
+ + thishost.aliases | d([])) | unique | join(' ') | trim }}"
+
+- name: Overwrite existing aliases
+ when: __sap_maintain_etc_hosts_alias_mode == "overwrite"
+ ansible.builtin.set_fact:
+ __sap_maintain_etc_hosts_aliases: "{{ thishost.node_aliases | d([]) | unique | join(' ') }}"
+
+- name: Display host and domain name, and IP address before the modification
+ ansible.builtin.debug:
+ msg:
+ - "hostname = '{{ thishost.node_name }}'"
+ - "domain = '{{ __sap_maintain_etc_hosts_domain }}'"
+ - "ip = '{{ thishost.node_ip }}'"
+ - "comment = '{{ __sap_maintain_etc_hosts_comment }}'"
+ - "aliases = '{{ __sap_maintain_etc_hosts_aliases }}'"
+ - "alias mode = '{{ __sap_maintain_etc_hosts_alias_mode }}'"
+
+# We do not allow more than one line containing thishost.node_ip:
+- name: Check for duplicate entries of {{ thishost.node_ip }}
+ ansible.builtin.shell: |
+ n=$(grep "^{{ thishost.node_ip }}\s" {{ __sap_maintain_etc_hosts_file }} | wc -l)
+ if [ $n -gt 1 ]; then
+ echo "Duplicate IP entry in {{ __sap_maintain_etc_hosts_file }}!"
+ exit 1
+ else
+ exit 0
+ fi
+ register: __sap_maintain_etc_hosts_register_duplicate_ip_check
+ changed_when: false
+ ignore_errors: true
+ when: not ansible_check_mode
+
+- name: Report if there is more than one line with the IP address
+ ansible.builtin.debug:
+ msg:
+ - "More than one line containing {{ thishost.node_ip }}. File {{ __sap_maintain_etc_hosts_file }} will not be modified."
+ when:
+ - not ansible_check_mode
+ - __sap_maintain_etc_hosts_register_duplicate_ip_check.rc == 1
+
+- name: Ensure that thishost.node_name is not part of the localhost entry
+ ansible.builtin.replace:
+ path: "{{ __sap_maintain_etc_hosts_file }}"
+ regexp: '^(127.0.0.1 .*)\s{{ line_item }}(\s.*)$'
+ replace: '\1\2'
+ backup: true
+ when:
+ - not ansible_check_mode
+ - __sap_maintain_etc_hosts_register_duplicate_ip_check.rc == 0
+ become_user: root
+ become: true
+ loop:
+ - "{{ thishost.node_name }}.{{ __sap_maintain_etc_hosts_domain }}"
+ - "{{ thishost.node_name }}"
+ loop_control:
+ loop_var: line_item
+
+- name: Ensure that the entry in hosts file is correct
+ ansible.builtin.lineinfile:
+ path: "{{ __sap_maintain_etc_hosts_file }}"
+ regexp: '^{{ thishost.node_ip }}\s'
+ line: "{{ thishost.node_ip }} {{ thishost.node_name }}.{{ __sap_maintain_etc_hosts_domain }} {{ thishost.node_name }} {{ __sap_maintain_etc_hosts_aliases }} {{ __sap_maintain_etc_hosts_comment }}"
+ backup: true
+ when:
+ - not ansible_check_mode
+ - __sap_maintain_etc_hosts_register_duplicate_ip_check.rc == 0
+ become_user: root
+ become: true
+
+# After all nodes are added or deleted, run the consistency check against the hosts file
+- name: Check for duplicate or missing entries of hostname and fqdn in {{ __sap_maintain_etc_hosts_file }}
+ ansible.builtin.shell: |
+ n=$(awk 'BEGIN{a=0}/^{{ line_item }}\s/||/\s{{ line_item }}\s/||/\s{{ line_item }}$/{a++}END{print a}' {{ __sap_maintain_etc_hosts_file }})
+ if [ $n -eq 1 ]; then
+ exit 0
+ else
+ exit 1
+ fi
+ loop:
+ - "{{ thishost.node_name }}.{{ __sap_maintain_etc_hosts_domain }}"
+ - "{{ thishost.node_name }}"
+ changed_when: false
+ loop_control:
+ loop_var: line_item
+ when: not ansible_check_mode
+
+- name: Perform the hosts file completeness check
+ ansible.builtin.command: awk 'BEGIN{a=0}/{{ thishost.node_ip }}/&&/{{ thishost.node_name }}.{{ __sap_maintain_etc_hosts_domain }}/&&/{{ thishost.node_name }}/{a++}END{print a}' {{ __sap_maintain_etc_hosts_file }}
+ register: __sap_maintain_etc_hosts_register_ipv4_fqdn_sap_hostname_once_check
+ changed_when: false
+
+- name: Display the output of the hosts file completeness check
+ ansible.builtin.debug:
+ var:
+ __sap_maintain_etc_hosts_register_ipv4_fqdn_sap_hostname_once_check.stdout_lines,
+ __sap_maintain_etc_hosts_register_ipv4_fqdn_sap_hostname_once_check.stderr_lines
+
+- name: Display the expected output of the hosts file completeness check
+ ansible.builtin.debug:
+ msg:
+ - "The expected entries in {{ __sap_maintain_etc_hosts_file }} are at least only once:"
+ - "{{ thishost.node_ip }} {{ thishost.node_name }}.{{ __sap_maintain_etc_hosts_domain }} {{ thishost.node_name }}"
+ when:
+ - __sap_maintain_etc_hosts_register_ipv4_fqdn_sap_hostname_once_check.stdout != "1"
+
+- name: Fail if IPv4 address, FQDN, or hostname are not in the hosts file
+ ansible.builtin.fail:
+ msg:
+ - "Server's ip4 address, FQDN, or hostname are not in {{ __sap_maintain_etc_hosts_file }}!"
+ - "Expected:"
+ - "{{ thishost.node_ip }} {{ thishost.node_name }}.{{ __sap_maintain_etc_hosts_domain }} {{ thishost.node_name }}"
+ when:
+ - __sap_maintain_etc_hosts_register_ipv4_fqdn_sap_hostname_once_check.stdout != "1"
+ ignore_errors: "{{ ansible_check_mode }}"
diff --git a/roles/sap_maintain_etc_hosts/tests/README.md b/roles/sap_maintain_etc_hosts/tests/README.md
new file mode 100644
index 000000000..02fff336d
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/tests/README.md
@@ -0,0 +1,26 @@
+# Some testing guidelines
+
+For testing the role without tampering with a real /etc/hosts file, it is possible
+to create a test file in the hosts file format, like the following:
+
+```[bash]
+$ cat ./test.hostsfile
+127.0.0.1 localhost host1 localhost.localdomain host1.abc.de
+1.2.3.5 thishost.to.be.deleted
+1.2.3.6 host2
+```
+
+Then you can run the role with
+
+```[bash]
+ansible-playbook -K test.yml -e __sap_maintain_etc_hosts_file=./test.hostsfile
+```
+
+The result should look like:
+
+```[bash]
+127.0.0.1 localhost localhost.localdomain
+1.2.3.4 host1.abc.de host1 alias1 anotheralias2 # Here comes text after hashsign
+```
+
+Please feel free to test with other example hosts files and report errors accordingly.
diff --git a/roles/sap_maintain_etc_hosts/tests/inventory b/roles/sap_maintain_etc_hosts/tests/inventory
new file mode 100644
index 000000000..2fbb50c4a
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/tests/inventory
@@ -0,0 +1 @@
+localhost
diff --git a/roles/sap_maintain_etc_hosts/tests/test.hosts b/roles/sap_maintain_etc_hosts/tests/test.hosts
new file mode 100644
index 000000000..75721cd5a
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/tests/test.hosts
@@ -0,0 +1 @@
+127.0.0.1 localhost
diff --git a/roles/sap_maintain_etc_hosts/tests/test.yml b/roles/sap_maintain_etc_hosts/tests/test.yml
new file mode 100644
index 000000000..6bfeec021
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/tests/test.yml
@@ -0,0 +1,30 @@
+---
+# you can run 'ansible-playbook test.yml -K -e __sap_maintain_etc_hosts_file=./test.hosts' for testing this role
+- name: Test play
+ hosts: localhost
+ gather_facts: false
+
+ vars:
+ sap_maintain_etc_hosts_list:
+ - node_ip: 1.2.3.5
+ state: absent
+ - node_name: host1
+ state: absent
+ - node_ip: 1.2.3.5
+ node_name: host1
+ node_domain: abc.de
+ node_role: primary
+ hana_site: DC1
+ - node_ip: 1.2.3.7
+ node_name: host2
+ node_domain: abc.de
+ - node_ip: 1.2.3.4
+ node_name: host3
+ node_domain: abc.de
+ aliases:
+ - alias1
+ - anotheralias2
+ node_comment: "Here comes text after hashsign"
+ state: present
+ roles:
+ - sap_maintain_etc_hosts
diff --git a/roles/sap_maintain_etc_hosts/vars/main.yml b/roles/sap_maintain_etc_hosts/vars/main.yml
new file mode 100644
index 000000000..c65271322
--- /dev/null
+++ b/roles/sap_maintain_etc_hosts/vars/main.yml
@@ -0,0 +1,5 @@
+---
+# vars file for sap_maintain_etc_hosts
+__sap_maintain_etc_hosts_file: /etc/hosts
+__sap_maintain_etc_hosts_gather_subset:
+ - 'default_ipv4'
diff --git a/roles/sap_netweaver_preconfigure/.ansible-lint b/roles/sap_netweaver_preconfigure/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_netweaver_preconfigure/.ansible-lint
+++ b/roles/sap_netweaver_preconfigure/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_netweaver_preconfigure/README.md b/roles/sap_netweaver_preconfigure/README.md
index d0ad2b26c..ae666ccd8 100644
--- a/roles/sap_netweaver_preconfigure/README.md
+++ b/roles/sap_netweaver_preconfigure/README.md
@@ -13,7 +13,7 @@ system time, before or after running role sap_netweaver_preconfigure.
Note
----
-On RHEL, as per SAP notes 2002167 and 2772999, the role will switch to tuned profile sap-netweaver no matter if another tuned profile
+On RHEL, as per SAP notes 2002167, 2772999, and 3108316, the role will switch to tuned profile sap-netweaver no matter if another tuned profile
(e.g. virtual-guest) had been active before or not.
On SLES, this role will switch the saptune solution to the one specified by the configuration and will override any previously set solution.
diff --git a/roles/sap_netweaver_preconfigure/defaults/main.yml b/roles/sap_netweaver_preconfigure/defaults/main.yml
index dbd6bf16c..b49137da0 100644
--- a/roles/sap_netweaver_preconfigure/defaults/main.yml
+++ b/roles/sap_netweaver_preconfigure/defaults/main.yml
@@ -26,6 +26,6 @@ sap_netweaver_preconfigure_saptune_version: '3.0.2'
#S4HANA-APP+DB
#S4HANA-APPSERVER
#S4HANA-DBSERVER
-# The default vaule is NETWEAVER
+# The default value is NETWEAVER
-sap_netweaver_preconfigure_saptune_solution: 'NETWEAVER'
+sap_netweaver_preconfigure_saptune_solution: NETWEAVER
diff --git a/roles/sap_netweaver_preconfigure/meta/runtime.yml b/roles/sap_netweaver_preconfigure/meta/runtime.yml
deleted file mode 100644
index 2ee3c9fa9..000000000
--- a/roles/sap_netweaver_preconfigure/meta/runtime.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-requires_ansible: '>=2.9.10'
diff --git a/roles/sap_netweaver_preconfigure/tasks/RedHat/assert-configuration.yml b/roles/sap_netweaver_preconfigure/tasks/RedHat/assert-configuration.yml
index 60d2e5ad5..496f5c88d 100644
--- a/roles/sap_netweaver_preconfigure/tasks/RedHat/assert-configuration.yml
+++ b/roles/sap_netweaver_preconfigure/tasks/RedHat/assert-configuration.yml
@@ -21,7 +21,7 @@
- name: Assert that enough swap space is configured
ansible.builtin.assert:
that:
- - "ansible_swaptotal_mb >= {{ sap_netweaver_preconfigure_min_swap_space_mb }}"
+ - (ansible_swaptotal_mb | int) >= (sap_netweaver_preconfigure_min_swap_space_mb | int)
fail_msg: "FAIL: The system has only {{ ansible_swaptotal_mb }} MB of swap space configured,
which is less than the minimum required amount of {{ sap_netweaver_preconfigure_min_swap_space_mb }} MB for SAP NetWeaver!"
success_msg: "PASS: The system has {{ ansible_swaptotal_mb }} MB of swap space configured,
diff --git a/roles/sap_netweaver_preconfigure/tasks/RedHat/assert-installation.yml b/roles/sap_netweaver_preconfigure/tasks/RedHat/assert-installation.yml
index f8767c3c7..14fd4eb25 100644
--- a/roles/sap_netweaver_preconfigure/tasks/RedHat/assert-installation.yml
+++ b/roles/sap_netweaver_preconfigure/tasks/RedHat/assert-installation.yml
@@ -2,8 +2,7 @@
- name: Assert that all required packages are installed
ansible.builtin.assert:
- that:
- - "'{{ line_item }}' in ansible_facts.packages"
+ that: line_item in ansible_facts.packages
fail_msg: "FAIL: Package '{{ line_item }}' is not installed!"
success_msg: "PASS: Package '{{ line_item }}' is installed."
with_items:
diff --git a/roles/sap_netweaver_preconfigure/tasks/SLES/assert-configuration.yml b/roles/sap_netweaver_preconfigure/tasks/SLES/assert-configuration.yml
index 3360e8232..a3a4a87b8 100644
--- a/roles/sap_netweaver_preconfigure/tasks/SLES/assert-configuration.yml
+++ b/roles/sap_netweaver_preconfigure/tasks/SLES/assert-configuration.yml
@@ -25,7 +25,7 @@
- name: Discover active solution
ansible.builtin.command: saptune solution enabled
register: __sap_netweaver_preconfigure_register_saptune_status
- changed_when: no
+ changed_when: false
- name: Set solution fact
ansible.builtin.set_fact:
@@ -34,7 +34,7 @@
- name: Discover active solution
ansible.builtin.command: saptune solution enabled
register: __sap_netweaver_preconfigure_register_saptune_status
- changed_when: no
+ changed_when: false
- name: Set fact for active solution
ansible.builtin.set_fact:
diff --git a/roles/sap_netweaver_preconfigure/tasks/SLES/assert-installation.yml b/roles/sap_netweaver_preconfigure/tasks/SLES/assert-installation.yml
index ed69f0de8..b4e51604f 100644
--- a/roles/sap_netweaver_preconfigure/tasks/SLES/assert-installation.yml
+++ b/roles/sap_netweaver_preconfigure/tasks/SLES/assert-installation.yml
@@ -7,7 +7,7 @@
- name: Ensure required packages for SAP NetWeaver are installed
ansible.builtin.assert:
- that: "'{{ package }}' in ansible_facts.packages"
+ that: package in ansible_facts.packages
loop: "{{ __sap_netweaver_preconfigure_packages }}"
loop_control:
loop_var: package
diff --git a/roles/sap_netweaver_preconfigure/tasks/SLES/configuration.yml b/roles/sap_netweaver_preconfigure/tasks/SLES/configuration.yml
index b5a608c4e..58ca75c3e 100644
--- a/roles/sap_netweaver_preconfigure/tasks/SLES/configuration.yml
+++ b/roles/sap_netweaver_preconfigure/tasks/SLES/configuration.yml
@@ -1,53 +1,75 @@
---
-- name: Ensure saptune is running and enabled
- ansible.builtin.systemd:
- name: saptune
- state: started
- enabled: yes
-
-- name: Ensure saptune_check executes correctly
- ansible.builtin.command: saptune_check
- changed_when: no
-
-- name: Discover active solution
- ansible.builtin.command: saptune solution enabled
- register: __sap_netweaver_preconfigure_register_saptune_status
- changed_when: no
-
-- name: Make sure that sapconf and tuned are stopped and disabled
- ansible.builtin.command: "saptune service takeover"
- register: __sap_saptune_takeover
- changed_when: __sap_saptune_takeover.rc == 0
-
-- name: Set fact for active solution
- ansible.builtin.set_fact:
- __sap_netweaver_preconfigure_fact_solution_configured: "{{ (__sap_netweaver_preconfigure_register_saptune_status.stdout | regex_search('(\\S+)', '\\1'))[0] | default('NONE') }}" # Capture the first block on none whitespace
-
-- name: Check if saptune solution needs to be applied
- ansible.builtin.command: "saptune solution verify {{ sap_netweaver_preconfigure_saptune_solution }}"
- register: __sap_netweaver_preconfigure_register_saptune_verify
- changed_when: no # We're only checking, not changing!
- failed_when: no # We expect this to fail if it has not previously been applied
-
-- name: Ensure no solution is currently applied
- ansible.builtin.command: "saptune solution revert {{ __sap_netweaver_preconfigure_fact_solution_configured }}"
- when:
- - __sap_netweaver_preconfigure_fact_solution_configured != 'NONE'
- - __sap_netweaver_preconfigure_register_saptune_verify.rc != 0
+- name: Take over and enable saptune
+ when: __sap_netweaver_preconfigure_run_saptune
+ block:
+ - name: Make sure that sapconf and tuned are stopped and disabled
+ ansible.builtin.command: "saptune service takeover"
+ register: __sap_saptune_takeover
+ changed_when: __sap_saptune_takeover.rc == 0
+
+ - name: Ensure saptune is running and enabled
+ ansible.builtin.systemd:
+ name: saptune
+ state: started
+ enabled: true
+
+ - name: Ensure saptune_check executes correctly
+ ansible.builtin.command: saptune_check
+ changed_when: false
+
+ - name: Discover active solution
+ ansible.builtin.command: saptune solution enabled
+ register: __sap_netweaver_preconfigure_register_saptune_status
+ changed_when: false
+
+ - name: Set fact for active solution
+ ansible.builtin.set_fact:
+ # Capture the first block of non-whitespace characters
+ __sap_netweaver_preconfigure_fact_solution_configured:
+ "{{ (__sap_netweaver_preconfigure_register_saptune_status.stdout | regex_search('(\\S+)', '\\1'))[0] | default('NONE') }}"
+
+ - name: Check if saptune solution needs to be applied
+ ansible.builtin.command: "saptune solution verify {{ sap_netweaver_preconfigure_saptune_solution }}"
+ register: __sap_netweaver_preconfigure_register_saptune_verify
+ changed_when: false # We're only checking, not changing!
+ failed_when: false # We expect this to fail if it has not previously been applied
+
+ - name: Ensure no solution is currently applied
+ ansible.builtin.command: "saptune solution revert {{ __sap_netweaver_preconfigure_fact_solution_configured }}"
+ changed_when: true
+ when:
+ - __sap_netweaver_preconfigure_fact_solution_configured != 'NONE'
+ - __sap_netweaver_preconfigure_register_saptune_verify.rc != 0
+
+ - name: Ensure saptune solution is applied
+ ansible.builtin.command: "saptune solution apply {{ sap_netweaver_preconfigure_saptune_solution }}"
+ changed_when: true
+ when:
+ - __sap_netweaver_preconfigure_register_saptune_verify.rc != 0
+
+ - name: Ensure solution was successful
+ ansible.builtin.command: "saptune solution verify {{ sap_netweaver_preconfigure_saptune_solution }}"
+ changed_when: false # We're only checking, not changing!
-- name: Ensure saptune solution is applied
- ansible.builtin.command: "saptune solution apply {{ sap_netweaver_preconfigure_saptune_solution }}"
- when: __sap_netweaver_preconfigure_register_saptune_verify.rc != 0
+- name: Enable sapconf
+ when: not __sap_netweaver_preconfigure_run_saptune
+ block:
+ - name: Enable sapconf service
+ ansible.builtin.systemd:
+ name: sapconf
+ state: started
+ enabled: true
-- name: Ensure solution was successful
- ansible.builtin.command: "saptune solution verify {{ sap_netweaver_preconfigure_saptune_solution }}"
- changed_when: no # We're only checking, not changing!
+ - name: Restart sapconf service
+ ansible.builtin.systemd:
+ name: sapconf
+ state: restarted
- name: Warn if not enough swap space is configured
ansible.builtin.fail:
msg: "The system has only {{ ansible_swaptotal_mb }} MB of swap space configured,
which is less than the minimum required amount of {{ sap_netweaver_preconfigure_min_swap_space_mb }} MB for SAP NetWeaver!"
- ignore_errors: yes
+ ignore_errors: true
when:
- ansible_swaptotal_mb < sap_netweaver_preconfigure_min_swap_space_mb|int
- not sap_netweaver_preconfigure_fail_if_not_enough_swap_space_configured|d(true)
diff --git a/roles/sap_netweaver_preconfigure/tasks/SLES/installation.yml b/roles/sap_netweaver_preconfigure/tasks/SLES/installation.yml
index 10590a0a6..feef1ae71 100644
--- a/roles/sap_netweaver_preconfigure/tasks/SLES/installation.yml
+++ b/roles/sap_netweaver_preconfigure/tasks/SLES/installation.yml
@@ -5,10 +5,35 @@
state: present
name: "{{ __sap_netweaver_preconfigure_packages }}"
-#The use of zypper here allows exact saptune version to be declared and used.
+- name: Get contents of /etc/products.d/baseproduct
+ ansible.builtin.stat:
+ path: /etc/products.d/baseproduct
+ register: sles_baseproduct
+ when: ansible_os_family == 'Suse'
+
+- name: Set fact if baseproduct contains SLES without SLES_SAP
+ ansible.builtin.set_fact:
+ __sap_netweaver_preconfigure_run_saptune: false
+ when:
+ - '"SLES_SAP" not in sles_baseproduct.stat.lnk_target'
+ - '"SLES" in sles_baseproduct.stat.lnk_target'
+ - ansible_os_family == 'Suse'
+
+# The use of zypper here allows an exact saptune version to be declared and used.
- name: Ensure saptune is installed
community.general.zypper:
type: package
name: "saptune={{ sap_netweaver_preconfigure_saptune_version }}"
state: present
- force: yes
+ force: true
+ when:
+ - __sap_netweaver_preconfigure_run_saptune
+
+- name: Ensure sapconf is installed
+ community.general.zypper:
+ type: package
+ name: "sapconf"
+ state: present
+ force: true
+ when:
+ - not __sap_netweaver_preconfigure_run_saptune
diff --git a/roles/sap_netweaver_preconfigure/tasks/sapnote/2526952.yml b/roles/sap_netweaver_preconfigure/tasks/sapnote/2526952.yml
index 73efdb98c..19997a328 100644
--- a/roles/sap_netweaver_preconfigure/tasks/sapnote/2526952.yml
+++ b/roles/sap_netweaver_preconfigure/tasks/sapnote/2526952.yml
@@ -14,11 +14,11 @@
ansible.builtin.service:
name: tuned
state: started
- enabled: yes
+ enabled: true
- name: Show currently active tuned profile
ansible.builtin.shell: set -o pipefail && /usr/sbin/tuned-adm active | awk '/:/{print $NF}'
- check_mode: no
+ check_mode: false
register: __sap_netweaver_preconfigure_register_current_tuned_profile
changed_when: false
@@ -39,8 +39,8 @@
changed_when: true
- name: Show new active tuned profile
- ansible.builtin.shell: /usr/sbin/tuned-adm active | awk '{print $NF}'
- check_mode: no
+ ansible.builtin.shell: set -o pipefail && /usr/sbin/tuned-adm active | awk '/:/{print $NF}'
+ check_mode: false
register: __sap_netweaver_preconfigure_register_new_tuned_profile
changed_when: false
diff --git a/roles/sap_netweaver_preconfigure/tasks/sapnote/3119751.yml b/roles/sap_netweaver_preconfigure/tasks/sapnote/3119751.yml
index 6abf886cd..3b4dcb41c 100644
--- a/roles/sap_netweaver_preconfigure/tasks/sapnote/3119751.yml
+++ b/roles/sap_netweaver_preconfigure/tasks/sapnote/3119751.yml
@@ -19,7 +19,7 @@
mode: '0755'
when: __sap_netweaver_preconfigure_register_stat_compat_sap_cpp.stat.exists
-- name: Create a link to '{{ sap_netweaver_preconfigure_rpath }}libstdc++.so.6'
+- name: Create a link to '{{ sap_netweaver_preconfigure_rpath }}/libstdc++.so.6'
ansible.builtin.file:
src: /opt/rh/SAP/lib64/compat-sap-c++-10.so
dest: "{{ sap_netweaver_preconfigure_rpath }}/libstdc++.so.6"
diff --git a/roles/sap_netweaver_preconfigure/tasks/sapnote/assert-2526952.yml b/roles/sap_netweaver_preconfigure/tasks/sapnote/assert-2526952.yml
index fd73c4df0..6fc7a4b61 100644
--- a/roles/sap_netweaver_preconfigure/tasks/sapnote/assert-2526952.yml
+++ b/roles/sap_netweaver_preconfigure/tasks/sapnote/assert-2526952.yml
@@ -39,19 +39,21 @@
when: "'tuned-profiles-sap' in ansible_facts.packages"
- name: Get active tuned profile
- ansible.builtin.shell: set -o pipefail && /usr/sbin/tuned-adm active | grep ":" | cut -d ":" -f 2 | awk '{$1=$1;print}'
- check_mode: no
+ ansible.builtin.shell: set -o pipefail && /usr/sbin/tuned-adm active | awk '/:/{print $NF}'
+ check_mode: false
register: __sap_netweaver_preconfigure_register_current_tuned_profile
+ ignore_errors: true
changed_when: false
- name: Assert - Display the output of the tuned-adm active command
ansible.builtin.debug:
var: __sap_netweaver_preconfigure_register_current_tuned_profile.stdout_lines,
__sap_netweaver_preconfigure_register_current_tuned_profile.stderr_lines
+ ignore_errors: true
-- name: Assert that tuned profile sap-netweaver is currently active
+- name: Assert that tuned profile 'sap-netweaver' is currently active
ansible.builtin.assert:
- that: "__sap_netweaver_preconfigure_register_current_tuned_profile.stdout == 'sap-netweaver'"
+ that: __sap_netweaver_preconfigure_register_current_tuned_profile.stdout == 'sap-netweaver'
fail_msg: "FAIL: The tuned profile 'sap-netweaver' is currently not active!
Currently active profile: '{{ __sap_netweaver_preconfigure_register_current_tuned_profile.stdout }}'."
success_msg: "PASS: The tuned profile 'sap-netweaver' is currently active."
diff --git a/roles/sap_netweaver_preconfigure/vars/SLES_15.yml b/roles/sap_netweaver_preconfigure/vars/SLES_15.yml
index ee37ca633..86ce492f5 100644
--- a/roles/sap_netweaver_preconfigure/vars/SLES_15.yml
+++ b/roles/sap_netweaver_preconfigure/vars/SLES_15.yml
@@ -27,3 +27,7 @@ __sap_netweaver_preconfigure_packages:
- yast2-vpn
- tcsh
- acl
+
+# SLES_SAP uses saptune, but SLES uses sapconf.
+# The default value true runs saptune, but installation.yml auto-detects the base product and adjusts accordingly.
+__sap_netweaver_preconfigure_run_saptune: true
diff --git a/roles/sap_storage_setup/.ansible-lint b/roles/sap_storage_setup/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_storage_setup/.ansible-lint
+++ b/roles/sap_storage_setup/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_storage_setup/README.md b/roles/sap_storage_setup/README.md
index d30109f09..4c4e12746 100644
--- a/roles/sap_storage_setup/README.md
+++ b/roles/sap_storage_setup/README.md
@@ -104,7 +104,7 @@ Describes the filesystems to be configured.
- **nfs_server**
When defining an NFS filesystem, this is the address of the NFS server.
The address must contain the root path, in which the mount directories exist or will be created.
For example, `192.168.1.100:/`.
- **swap_path**
- The path to the swap file.
When this option is defined for a swap filesystem definition, it will create a swap file on an existing filesytem.
+ The path to the swap file.
When this option is defined for a swap filesystem definition, it will create a swap file on an existing filesystem.
Example:
diff --git a/roles/sap_storage_setup/defaults/main.yml b/roles/sap_storage_setup/defaults/main.yml
index 583c2c386..8abea7e81 100644
--- a/roles/sap_storage_setup/defaults/main.yml
+++ b/roles/sap_storage_setup/defaults/main.yml
@@ -67,23 +67,23 @@ sap_storage_setup_cloud_type: 'generic'
# Azure variables
-sap_storage_az_imds_json:
-sap_storage_az_imds_url: 'http://169.254.169.254/metadata/instance/compute?api-version=2020-09-01'
-sap_storage_az_vmsize_url: 'http://169.254.169.254/metadata/instance/compute/vmSize?api-version=2017-08-01&format=text'
+sap_storage_setup_az_imds_json:
+sap_storage_setup_az_imds_url: 'http://169.254.169.254/metadata/instance/compute?api-version=2020-09-01'
+sap_storage_setup_az_vmsize_url: 'http://169.254.169.254/metadata/instance/compute/vmSize?api-version=2017-08-01&format=text'
-sap_storage_az_vmsize:
+sap_storage_setup_az_vmsize:
-sap_storage_az_lun: '/dev/disk/azure/scsi1/lun'
+sap_storage_setup_az_lun: '/dev/disk/azure/scsi1/lun'
# AWS variables
# should probably put into a platform specific vars file, to be included and overwriting the default
#sap_storage_setup_aws_nfs_options: 'nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport,acl'
-sap_storage_aws_imds_url:
-sap_storage_aws_vmsize_url:
+sap_storage_setup_aws_imds_url:
+sap_storage_setup_aws_vmsize_url:
-sap_storage_aws_vmsize:
+sap_storage_setup_aws_vmsize:
# IBM Cloud variables
#
diff --git a/roles/sap_storage_setup/meta/argument_specs.yml b/roles/sap_storage_setup/meta/argument_specs.yml
index 298fb2a47..55fe57cbb 100644
--- a/roles/sap_storage_setup/meta/argument_specs.yml
+++ b/roles/sap_storage_setup/meta/argument_specs.yml
@@ -131,7 +131,7 @@ argument_specs:
swap_path:
description:
- The path to the swap file.
- - When this option is defined for a swap filesystem definition, it will create a swap file on an existing filesytem.
+ - When this option is defined for a swap filesystem definition, it will create a swap file on an existing filesystem.
type: str
diff --git a/roles/sap_storage_setup/meta/runtime.yml b/roles/sap_storage_setup/meta/runtime.yml
deleted file mode 100644
index 2ee3c9fa9..000000000
--- a/roles/sap_storage_setup/meta/runtime.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-requires_ansible: '>=2.9.10'
diff --git a/roles/sap_storage_setup/tasks/generic_tasks/configure_swap.yml b/roles/sap_storage_setup/tasks/generic_tasks/configure_swap.yml
index 14bfe9293..b09f3ee2c 100644
--- a/roles/sap_storage_setup/tasks/generic_tasks/configure_swap.yml
+++ b/roles/sap_storage_setup/tasks/generic_tasks/configure_swap.yml
@@ -29,6 +29,7 @@
- name: SAP Storage Setup - (swap file) Allocate space
ansible.builtin.shell: |
fallocate -l {{ swap_file.disk_size | int * 1024 }}MB {{ swap_file.swap_path }}
+ changed_when: true
when:
- not check_swapfile.stat.exists
@@ -41,6 +42,7 @@
ansible.builtin.shell: |
mkswap {{ swap_file.swap_path }}
swapon {{ swap_file.swap_path }}
+ changed_when: true
when:
- not check_swapfile.stat.exists
@@ -80,7 +82,7 @@
- name: SAP Storage Setup - Check if swap partition exists
ansible.builtin.shell: |
- lsblk | grep SWAP || echo "no active swap"
+ set -o pipefail && lsblk | grep SWAP || echo "no active swap"
register: check_swap_partition
changed_when: false
@@ -95,6 +97,7 @@
- name: SAP Storage Setup - Enable swap
ansible.builtin.shell: |
swapon /dev/{{ swap_volume.lvm_vg_name | default('vg_swap') }}/{{ swap_volume.lvm_lv_name | default('lv_swap') }}
+ changed_when: true
when:
- not ansible_check_mode
- swap_volume.lvm_lv_name | default("lv_swap") not in check_swap_partition.stdout
diff --git a/roles/sap_storage_setup/tasks/generic_tasks/remove_storage.yml b/roles/sap_storage_setup/tasks/generic_tasks/remove_storage.yml
index 571eb89f0..d55ee4738 100644
--- a/roles/sap_storage_setup/tasks/generic_tasks/remove_storage.yml
+++ b/roles/sap_storage_setup/tasks/generic_tasks/remove_storage.yml
@@ -1,7 +1,7 @@
---
## Unmount Filesystem
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ item.value.name }} Unmount Filesystem
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ item.value.name }} Unmount Filesystem
# mount:
# path: "{{ item.value.directory }}"
# state: absent
@@ -9,19 +9,19 @@
## INFO:
## this only works right using community Ansible Galaxy filesystem module
## only interested with native Ansible modules for now
-## - name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ item.value.name }} Filesystem
+## - name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ item.value.name }} Filesystem
## filesystem:
## dev: "/dev/mapper/{{ item.value.vg }}/{{ item.value.lv }}"
## state: absent
#
## Remove Filesystem
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ item.value.name }} Remove Filesystem
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ item.value.name }} Remove Filesystem
# shell: |
# /sbin/wipefs --all -f /dev/mapper/{{ item.value.vg }}-{{ item.value.lv }}
# ignore_errors: yes
#
## Remove Logical Volume
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ item.value.name }} Remove Logical Volume
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ item.value.name }} Remove Logical Volume
# lvol:
# lv: "{{ item.value.lv }}"
# vg: "{{ item.value.vg }}"
@@ -29,7 +29,7 @@
# force: yes
#
## Remove Volume Group
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ item.value.name }} Remove Volume Group
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ item.value.name }} Remove Volume Group
# lvg:
# vg: "{{ item.value.vg }}"
# state: absent
diff --git a/roles/sap_storage_setup/tasks/main.yml b/roles/sap_storage_setup/tasks/main.yml
index d9e3b26ad..91f3bb4af 100644
--- a/roles/sap_storage_setup/tasks/main.yml
+++ b/roles/sap_storage_setup/tasks/main.yml
@@ -50,9 +50,9 @@
label: "{{ new_mounts_item.name }}"
-# TODO: Cloud Specific Pre-Tasks - call cloud specific pre tasks thru {{ sap_storage_cloud_type }}_main.yml
-#- name: SAP Storage Setup - Preparation for '{{ sap_storage_cloud_type }}'
-# include_tasks: "{{ sap_storage_cloud_type }}_main.yml"
+# TODO: Cloud Specific Pre-Tasks - call cloud specific pre tasks thru {{ sap_storage_setup_cloud_type }}_main.yml
+#- name: SAP Storage Setup - Preparation for '{{ sap_storage_setup_cloud_type }}'
+# include_tasks: "{{ sap_storage_setup_cloud_type }}_main.yml"
# TODO: verify that the number of disks matches the fs definition
# TODO: add functionality to work with existing LVM volumes and skip disk assignment
diff --git a/roles/sap_storage_setup/tasks/platform_tasks/az_main.yml b/roles/sap_storage_setup/tasks/platform_tasks/az_main.yml
index e8ea809e2..9c5416bda 100644
--- a/roles/sap_storage_setup/tasks/platform_tasks/az_main.yml
+++ b/roles/sap_storage_setup/tasks/platform_tasks/az_main.yml
@@ -8,22 +8,22 @@
## Create json format of IMDS
## Todo: Ansibilize this
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - Create json format of IMDS
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - Create json format of IMDS
# shell: |
-# curl -H Metadata:true --noproxy "*" "{{ sap_storage_az_imds_url }}" | python3 -mjson.tool
+# curl -H Metadata:true --noproxy "*" "{{ sap_storage_setup_az_imds_url }}" | python3 -mjson.tool
# register: az_imds_reg
# args:
# executable: /bin/bash
## If this fails, that means this VM is not Azure?
#
#- set_fact:
-# sap_storage_az_imds_json: "{{ az_imds_reg.stdout }}"
+# sap_storage_setup_az_imds_json: "{{ az_imds_reg.stdout }}"
#
## Pull VMSize
## Todo: Ansibilize this
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - Pull VMSize
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - Pull VMSize
# shell: |
-# curl -H Metadata:true --noproxy "*" "{{ sap_storage_az_vmsize_url }}"
+# curl -H Metadata:true --noproxy "*" "{{ sap_storage_setup_az_vmsize_url }}"
# register: az_vmsize_reg
# args:
# executable: /bin/bash
@@ -33,8 +33,8 @@
# - "{{ az_vmsize_reg.stdout }}"
#
#- set_fact:
-# sap_storage_az_vmsize: "{{ az_vmsize_reg.stdout }}"
+# sap_storage_setup_az_vmsize: "{{ az_vmsize_reg.stdout }}"
#
## Include vars depending on VM Size
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - Load Variables for {{ sap_storage_az_vmsize }}
-# include_vars: "{{ sap_storage_cloud_type }}_tasks/vmsizes/{{ sap_storage_az_vmsize }}.yml"
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - Load Variables for {{ sap_storage_setup_az_vmsize }}
+# include_vars: "{{ sap_storage_setup_cloud_type }}_tasks/vmsizes/{{ sap_storage_setup_az_vmsize }}.yml"
diff --git a/roles/sap_storage_setup/tasks/platform_tasks/prepare_storage_az.yml b/roles/sap_storage_setup/tasks/platform_tasks/prepare_storage_az.yml
index f9c3d61b8..c3f5f35b4 100644
--- a/roles/sap_storage_setup/tasks/platform_tasks/prepare_storage_az.yml
+++ b/roles/sap_storage_setup/tasks/platform_tasks/prepare_storage_az.yml
@@ -1,18 +1,18 @@
---
## Striped volume
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ sap_storage_az_vmsize }} - {{ item.value.name }} - Striped
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ sap_storage_setup_az_vmsize }} - {{ item.value.name }} - Striped
# block:
#
# # Get LUNs from metadata
-# - name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ sap_storage_az_vmsize }} - {{ item.value.name }} Get LUNs from metadata
+# - name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ sap_storage_setup_az_vmsize }} - {{ item.value.name }} Get LUNs from metadata
# shell: |
# for i in {1..{{ item.value.numluns }}}
# do
-# {{ item.value.vg }}${i}lun="{{ sap_storage_az_lun }} \
+# {{ item.value.vg }}${i}lun="{{ sap_storage_setup_az_lun }} \
# `awk '/caching/ { r=""; f=1 } f { r = (r ? r ORS : "") $0 } \
# /writeAcceleratorEnabled/ \
# { if (f && r ~ /{{ item.value.name }}${i}/) print r; f=0 }' \
-# {{ sap_storage_az_imds_json }} \
+# {{ sap_storage_setup_az_imds_json }} \
# | grep lun | sed 's/[^0-9]*//g'`"
# echo ${{ item.value.vg }}${i}lun
# done
@@ -24,14 +24,14 @@
# pvs_list: "{{ pvs_reg.stdout.split() }}"
#
# # Create Volume Group
-# - name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ sap_storage_az_vmsize }} - {{ item.value.name }} Volume Group Striped
+# - name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ sap_storage_setup_az_vmsize }} - {{ item.value.name }} Volume Group Striped
# lvg:
# vg: "{{ item.value.vg }}"
# pvs: "{{ pvs_list | join(',') }}"
# force: yes
#
# # Create Logical Group
-# - name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ sap_storage_az_vmsize }} - {{ item.value.name }} Logical Volume - Striped
+# - name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ sap_storage_setup_az_vmsize }} - {{ item.value.name }} Logical Volume - Striped
# lvol:
# vg: "{{ item.value.vg }}"
# lv: "{{ item.value.lv }}"
@@ -42,17 +42,17 @@
# - "item.value.numluns != '1'"
#
## Single volume
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ sap_storage_az_vmsize }} - {{ item.value.name }} - Single Volume
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ sap_storage_setup_az_vmsize }} - {{ item.value.name }} - Single Volume
# block:
#
# # Get LUNs from metadata
-# - name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ sap_storage_az_vmsize }} - {{ item.value.name }} Get LUNs from metadata
+# - name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ sap_storage_setup_az_vmsize }} - {{ item.value.name }} Get LUNs from metadata
# shell: |
-# {{ item.value.vg }}lun="{{ sap_storage_az_lun }} \
+# {{ item.value.vg }}lun="{{ sap_storage_setup_az_lun }} \
# `awk '/caching/ { r=""; f=1 } f { r = (r ? r ORS : "") $0 } \
# /writeAcceleratorEnabled/ \
# { if (f && r ~ /{{ item.value.name }}/) print r; f=0 }' \
-# {{ sap_storage_az_imds_json }} \
+# {{ sap_storage_setup_az_imds_json }} \
# | grep lun | sed 's/[^0-9]*//g'`"
# echo ${{ item.value.vg }}lun
# args:
@@ -63,14 +63,14 @@
# pvs_one: "{{ pvs_reg.stdout }}"
#
# # Create Volume Group
-# - name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ sap_storage_az_vmsize }} - {{ item.value.name }} Volume Group One
+# - name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ sap_storage_setup_az_vmsize }} - {{ item.value.name }} Volume Group One
# lvg:
# vg: "{{ item.value.vg }}"
# pvs: "{{ pvs_one }}"
# force: yes
#
# # Create Logical Group
-# - name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ sap_storage_az_vmsize }} - {{ item.value.name }} Logical Volume - One
+# - name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ sap_storage_setup_az_vmsize }} - {{ item.value.name }} Logical Volume - One
# lvol:
# vg: "{{ item.value.vg }}"
# lv: "{{ item.value.lv }}"
@@ -80,13 +80,13 @@
# - "item.value.numluns == '1'"
#
## Create Filesystem
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ sap_storage_az_vmsize }} - {{ item.value.name }} Filesystem
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ sap_storage_setup_az_vmsize }} - {{ item.value.name }} Filesystem
# filesystem:
# fstype: xfs
# dev: "/dev/{{ item.value.vg }}/{{ item.value.lv }}"
#
## Mount Filesystem
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ sap_storage_az_vmsize }} - {{ item.value.name }} Mount
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ sap_storage_setup_az_vmsize }} - {{ item.value.name }} Mount
# mount:
# path: "{{ item.value.directory }}"
# fstype: xfs
diff --git a/roles/sap_storage_setup/tasks/platform_tasks/remove_storage_az.yml b/roles/sap_storage_setup/tasks/platform_tasks/remove_storage_az.yml
index 3468ebb58..e7c8fe9bc 100644
--- a/roles/sap_storage_setup/tasks/platform_tasks/remove_storage_az.yml
+++ b/roles/sap_storage_setup/tasks/platform_tasks/remove_storage_az.yml
@@ -1,18 +1,18 @@
---
## Unmount Filesystem
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ item.value.name }} Unmount Filesystem
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ item.value.name }} Unmount Filesystem
# mount:
# path: "{{ item.value.directory }}"
# state: absent
#
## Remove Filesystem
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ item.value.name }} Remove Filesystem
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ item.value.name }} Remove Filesystem
# shell: |
# /sbin/wipefs --all -f /dev/mapper/{{ item.value.vg }}-{{ item.value.lv }}
#
## Remove Logical Volume
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ item.value.name }} Remove Logical Volume
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ item.value.name }} Remove Logical Volume
# lvol:
# lv: "{{ item.value.lv }}"
# vg: "{{ item.value.vg }}"
@@ -20,7 +20,7 @@
# force: yes
#
## Remove Volume Group
-#- name: SAP Storage Preparation - {{ sap_storage_cloud_type | upper }} - {{ item.value.name }} Remove Volume Group
+#- name: SAP Storage Preparation - {{ sap_storage_setup_cloud_type | upper }} - {{ item.value.name }} Remove Volume Group
# lvg:
# vg: "{{ item.value.vg }}"
# state: absent
diff --git a/roles/sap_swpm/.ansible-lint b/roles/sap_swpm/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_swpm/.ansible-lint
+++ b/roles/sap_swpm/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_swpm/README.md b/roles/sap_swpm/README.md
index 7bcb5f54e..f8a16e789 100644
--- a/roles/sap_swpm/README.md
+++ b/roles/sap_swpm/README.md
@@ -16,7 +16,7 @@ This role has been tested and working for the following scenarios
- Dual Host Installation
- Distributed Installation
- System Restore
-- High Avalability Installation
+- High Availability Installation
This role has been tested and working for the following SAP products
- SAP S/4HANA 1809, 1909, 2020, 2021
diff --git a/roles/sap_swpm/meta/runtime.yml b/roles/sap_swpm/meta/runtime.yml
deleted file mode 100644
index 2ee3c9fa9..000000000
--- a/roles/sap_swpm/meta/runtime.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-requires_ansible: '>=2.9.10'
diff --git a/roles/sap_swpm/tasks/pre_install/create_os_user.yml b/roles/sap_swpm/tasks/pre_install/create_os_user.yml
index ccf72e132..397f81fa5 100644
--- a/roles/sap_swpm/tasks/pre_install/create_os_user.yml
+++ b/roles/sap_swpm/tasks/pre_install/create_os_user.yml
@@ -44,7 +44,7 @@
recurse: yes
mode: '0755'
-# - name: SAP SWPM Pre Install - Purge parameters so it wont populate inifile.params to prevent SWPM from crashing
+# - name: SAP SWPM Pre Install - Purge parameters so it will not populate inifile.params to prevent SWPM from crashing
# ansible.builtin.set_facts:
# sap_swpm_sapadm_uid: ""
# sap_swpm_sapsys_gid: ""
diff --git a/roles/sap_swpm/tasks/pre_install/install_type.yml b/roles/sap_swpm/tasks/pre_install/install_type.yml
index e8d54f10e..1eabb2157 100644
--- a/roles/sap_swpm/tasks/pre_install/install_type.yml
+++ b/roles/sap_swpm/tasks/pre_install/install_type.yml
@@ -11,6 +11,11 @@
# Determine Installation Type
################
+# Ignore Product IDs for SAP NetWeaver 7.5 Java standalone:
+# - Advanced Adapter Engine (AE) with .AE .AEHA
+# - Advanced Adapter Engine Extended (AEX) with .AEX .AEXHA
+# - Process Integration / Process Orchestration (PI/PO) with .PI .PIHA .PIABAP .PIABAPHA .PIJAVA .PIJAVAHA
+
#- name: SAP SWPM Pre Install - Check if general SAP Software installation
# ansible.builtin.set_fact:
# sap_swpm_swpm_installation_type: "general"
@@ -31,7 +36,7 @@
sap_swpm_swpm_installation_type: "ha"
sap_swpm_swpm_installation_header: "High Availability Installation using virtual hostname"
when:
- - "'.ABAPHA' in sap_swpm_product_catalog_id"
+ - "'.ABAPHA' in sap_swpm_product_catalog_id or '.HA' in sap_swpm_product_catalog_id or '.HACP' in sap_swpm_product_catalog_id"
- name: SAP SWPM Pre Install - Check if installation using SAP Maintenance Planner
ansible.builtin.set_fact:
diff --git a/roles/sap_swpm/tasks/pre_install/install_type/ha_maint_plan_stack_install.yml b/roles/sap_swpm/tasks/pre_install/install_type/ha_maint_plan_stack_install.yml
index 9995effc0..a1f7cbb6d 100644
--- a/roles/sap_swpm/tasks/pre_install/install_type/ha_maint_plan_stack_install.yml
+++ b/roles/sap_swpm/tasks/pre_install/install_type/ha_maint_plan_stack_install.yml
@@ -24,6 +24,7 @@
args:
chdir: "{{ sap_swpm_mp_stack_path }}"
register: sap_swpm_mp_stack_file_get
+ changed_when: false
# Test if variable string is not defined or None / blank
when:
(sap_swpm_mp_stack_file_name is not defined) or
diff --git a/roles/sap_swpm/tasks/pre_install/install_type/maint_plan_stack_install.yml b/roles/sap_swpm/tasks/pre_install/install_type/maint_plan_stack_install.yml
index 3c33b3265..9bf39bded 100644
--- a/roles/sap_swpm/tasks/pre_install/install_type/maint_plan_stack_install.yml
+++ b/roles/sap_swpm/tasks/pre_install/install_type/maint_plan_stack_install.yml
@@ -9,6 +9,7 @@
args:
chdir: "{{ sap_swpm_mp_stack_path }}"
register: sap_swpm_mp_stack_file_get
+ changed_when: false
# Test if variable string is not defined or None / blank
when:
(sap_swpm_mp_stack_file_name is not defined) or
diff --git a/roles/sap_swpm/tasks/swpm.yml b/roles/sap_swpm/tasks/swpm.yml
index 67b255a79..ab9d2fc0b 100644
--- a/roles/sap_swpm/tasks/swpm.yml
+++ b/roles/sap_swpm/tasks/swpm.yml
@@ -23,10 +23,11 @@
### Async method
# Required for Ansible Module pids
-- name: Install Python devel and gcc to system Python
+- name: Install Python devel, Python pip and gcc to system Python
ansible.builtin.package:
name:
- python3-devel
+ - python3-pip
- gcc
state: present
diff --git a/roles/sap_swpm/tasks/swpm/detect_variables.yml b/roles/sap_swpm/tasks/swpm/detect_variables.yml
index 59c08ad8c..7b1d299f9 100644
--- a/roles/sap_swpm/tasks/swpm/detect_variables.yml
+++ b/roles/sap_swpm/tasks/swpm/detect_variables.yml
@@ -8,11 +8,13 @@
END{print product_id}' {{ sap_swpm_tmpdir.path }}/inifile.params
register: sap_swpm_inifile_product_id_detect
changed_when: false
+ when: not sap_swpm_product_catalog_id is defined
# Set fact for product id
- name: SAP SWPM - Set SAP product ID
ansible.builtin.set_fact:
sap_swpm_product_catalog_id: "{{ sap_swpm_inifile_product_id_detect.stdout }}"
+ when: not sap_swpm_product_catalog_id is defined
- name: SAP SWPM - Display SAP product ID
ansible.builtin.debug:
@@ -25,11 +27,13 @@
awk '!/^#/&&/archives.downloadBasket/{print $3}' {{ sap_swpm_tmpdir.path }}/inifile.params
register: sap_swpm_inifile_software_path
changed_when: false
+ when: not sap_swpm_software_path is defined
# Set fact for software path
- name: SAP SWPM - Set Software Path
ansible.builtin.set_fact:
sap_swpm_software_path: "{{ sap_swpm_inifile_software_path.stdout }}"
+ when: not sap_swpm_software_path is defined
- name: SAP SWPM - Display Software Path
ansible.builtin.debug:
@@ -42,11 +46,13 @@
awk '!/^#/&&/NW_GetSidNoProfiles.sid/{print $3}' {{ sap_swpm_tmpdir.path }}/inifile.params
register: sap_swpm_inifile_sid
changed_when: false
+ when: not sap_swpm_sid is defined
# Set fact for SID
- name: SAP SWPM - Set SID
ansible.builtin.set_fact:
sap_swpm_sid: "{{ sap_swpm_inifile_sid.stdout }}"
+ when: not sap_swpm_sid is defined
- name: SAP SWPM - Display SAP SID
ansible.builtin.debug:
@@ -59,11 +65,13 @@
awk '!/^#/&&/NW_getFQDN.FQDN/{print $3}' {{ sap_swpm_tmpdir.path }}/inifile.params
register: sap_swpm_inifile_fqdn
changed_when: false
+ when: not sap_swpm_fqdn is defined
# Set fact for FQDN
- name: SAP SWPM - Set FQDN
ansible.builtin.set_fact:
sap_swpm_fqdn: "{{ sap_swpm_inifile_fqdn.stdout }}"
+ when: not sap_swpm_fqdn is defined
- name: SAP SWPM - Display FQDN
ansible.builtin.debug:
diff --git a/roles/sap_swpm/tasks/swpm/prepare_software.yml b/roles/sap_swpm/tasks/swpm/prepare_software.yml
index 9b1b1ede5..480ded4cd 100644
--- a/roles/sap_swpm/tasks/swpm/prepare_software.yml
+++ b/roles/sap_swpm/tasks/swpm/prepare_software.yml
@@ -101,6 +101,7 @@
register: sap_swpm_swpm_sar_file_name_stat
failed_when: not sap_swpm_swpm_sar_file_name_stat.stat.exists
+
- name: SAP SWPM Pre Install - Full SAP System
when: not sap_swpm_generic | bool
block:
diff --git a/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_advanced.yml b/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_advanced.yml
index b8fbc6d70..3228f97f4 100644
--- a/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_advanced.yml
+++ b/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_advanced.yml
@@ -2,16 +2,18 @@
# Remove Existing inifile.params
- name: SAP SWPM advanced mode - Ensure 'inifile.params' exists
- ansible.builtin.file:
- path: "{{ sap_swpm_tmpdir.path }}/inifile.params"
- state: touch
+ ansible.builtin.copy:
+ dest: "{{ sap_swpm_tmpdir.path }}/inifile.params"
mode: '0640'
+ content: |
+ ### inifile.params generated for SWPM Catalog Product ID {{ sap_swpm_product_catalog_id }}
tags: sap_swpm_generate_inifile
- name: SAP SWPM advanced mode - Loop over the dictionary and output to file
ansible.builtin.lineinfile:
path: "{{ sap_swpm_tmpdir.path }}/inifile.params"
state: present
+ insertafter: EOF
line: "{{ item.key }} = {{ item.value }}"
with_dict: "{{ sap_swpm_inifile_custom_values_dictionary }}"
tags: sap_swpm_generate_inifile
@@ -26,6 +28,7 @@
tags: sap_swpm_generate_inifile
tags: sap_swpm_generate_inifile
+# Requires variables - sap_swpm_software_path (e.g. /software/download_basket), sap_swpm_sapcar_path (e.g. /software/sapcar), sap_swpm_swpm_path (e.g. /software/swpm)
# Prepare Software
- name: SAP SWPM advanced mode - Prepare Software
ansible.builtin.include_tasks: prepare_software.yml
diff --git a/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_advanced_templates.yml b/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_advanced_templates.yml
index 2ada11ade..efeeab6f4 100644
--- a/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_advanced_templates.yml
+++ b/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_advanced_templates.yml
@@ -15,10 +15,11 @@
# Remove Existing inifile.params
- name: SAP SWPM advanced_templates mode - Ensure 'inifile.params' exists
- ansible.builtin.file:
- path: "{{ sap_swpm_tmpdir.path }}/inifile.params"
- state: touch
+ ansible.builtin.copy:
+ dest: "{{ sap_swpm_tmpdir.path }}/inifile.params"
mode: '0640'
+ content: |
+ ### inifile.params generated for SWPM Catalog Product ID {{ sap_swpm_product_catalog_id }}
tags: sap_swpm_generate_inifile
- name: SAP SWPM advanced_templates mode - Loop over the dictionary and output to file
@@ -26,7 +27,7 @@
path: "{{ sap_swpm_tmpdir.path }}/inifile.params"
state: present
insertafter: EOF
- line: "{{ item.key }}={{ item.value }}"
+ line: "{{ item.key }} = {{ item.value }}"
with_dict: "{{ sap_swpm_templates_install_dictionary[sap_swpm_templates_product_input]['sap_swpm_inifile_custom_values_dictionary'] }}"
tags: sap_swpm_generate_inifile
@@ -41,6 +42,7 @@
tags: sap_swpm_generate_inifile
tags: sap_swpm_generate_inifile
+# Requires variables - sap_swpm_software_path (e.g. /software/download_basket), sap_swpm_sapcar_path (e.g. /software/sapcar), sap_swpm_swpm_path (e.g. /software/swpm)
# Prepare Software
- name: SAP SWPM advanced_templates mode - Prepare Software
ansible.builtin.include_tasks: prepare_software.yml
diff --git a/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_default_templates.yml b/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_default_templates.yml
index e5dadf8ad..68abe9de2 100644
--- a/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_default_templates.yml
+++ b/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_default_templates.yml
@@ -13,7 +13,8 @@
when: "'java' in sap_swpm_templates_install_dictionary[sap_swpm_templates_product_input]['sap_swpm_product_catalog_id'] | lower"
tags: sap_swpm_generate_inifile
-- name: SAP SWPM default_templates mode - If not already defined, use the default variable for the template
+# Reason for noqa: We want to define variable names based on what is in the dictionary.
+- name: SAP SWPM default_templates mode - If not already defined, use the default variable for the template # noqa var-naming[no-jinja]
ansible.builtin.set_fact:
"{{ item.key }}": "{{ item.value }}"
with_dict: "{{ sap_swpm_templates_install_dictionary[sap_swpm_templates_product_input]['sap_swpm_inifile_dictionary'] }}"
diff --git a/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_inifile_reuse.yml b/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_inifile_reuse.yml
index 003a24130..2513a7971 100644
--- a/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_inifile_reuse.yml
+++ b/roles/sap_swpm/tasks/swpm/swpm_inifile_generate_inifile_reuse.yml
@@ -32,6 +32,7 @@
tags: sap_swpm_generate_inifile
tags: sap_swpm_generate_inifile
+# Requires variables - sap_swpm_software_path (e.g. /software/download_basket), sap_swpm_sapcar_path (e.g. /software/sapcar), sap_swpm_swpm_path (e.g. /software/swpm)
# Prepare Software
- name: SAP SWPM inifile_reuse mode - Prepare Software
ansible.builtin.include_tasks: prepare_software.yml
diff --git a/roles/sap_swpm/templates/configfile.j2 b/roles/sap_swpm/templates/configfile.j2
index e59f930b6..4e0d8412e 100644
--- a/roles/sap_swpm/templates/configfile.j2
+++ b/roles/sap_swpm/templates/configfile.j2
@@ -164,8 +164,6 @@ NW_GetMasterPassword.masterPwd = {{ sap_swpm_master_password }}
# 'adm' user
nwUsers.sidadmPassword = {{ sap_swpm_sap_sidadm_password }}
-NW_DDIC_Password.ddic000Password = {{ sap_swpm_ddic_000_password }}
-
DiagnosticsAgent.dasidAdmPassword = {{ sap_swpm_diagnostics_agent_password }}
# 'sapadm' user of the SAP Host Agent
@@ -185,6 +183,7 @@ storageBasedCopy.hdb.systemPassword = {{ sap_swpm_db_system_password }}
# credentials_anydb_ibmdb2
######
nwUsers.db6.db2sidPassword = {{ sap_swpm_sap_sidadm_password }}
+# nwUsers.db6.db2sidUid =
{% endif %}
{% if 'credentials_anydb_oracledb' in sap_swpm_inifile_list %}
@@ -250,6 +249,16 @@ HDB_Userstore.useABAPSSFS = false
# NW_HDB_DBClient.clientPathStrategy = LOCAL
{% endif %}
+{% if 'credentials_syscopy' in sap_swpm_inifile_list %}
+######
+# credentials_syscopy
+######
+# Are the passwords for the DDIC users different from the default value?
+NW_DDIC_Password.needDDICPasswords = true
+NW_DDIC_Password.ddic000Password = {{ sap_swpm_ddic_000_password }}
+#NW_DDIC_Password.ddic001Password =
+{% endif %}
+
{% if 'db_config_hana' in sap_swpm_inifile_list %}
######
# db_config_hana
@@ -272,7 +281,6 @@ HDB_Schema_Check_Dialogs.validateSchemaName = false
# db_config_anydb_all
######
NW_ABAP_Import_Dialog.dbCodepage = 4103
-NW_DDIC_Password.needDDICPasswords = true
{% endif %}
{% if 'db_config_anydb_ibmdb2' in sap_swpm_inifile_list %}
@@ -283,8 +291,13 @@ NW_ABAP_Import_Dialog.migmonJobNum = 3
NW_ABAP_Import_Dialog.migmonLoadArgs = -stop_on_error -loadprocedure fast LOAD:COMPRESS_ALL:DEF_CRT
NW_getDBInfoGeneric.dbhost = {{ sap_swpm_db_host }}
NW_getDBInfoGeneric.dbsid = {{ sap_swpm_db_sid }}
+NW_getUnicode.isUnicode = true
# db6.useMcod = true
# db6.useBluSettings = false
+# db6.gid_sysadm =
+# db6.gid_sysctrl =
+# db6.gid_sysmaint =
+# db6.gid_sysmon =
db6.allowUnsignedDatabaseSoftware = true
db6.cluster.ClusterType = HADR (High Availability Disaster Recovery)
db6.createTablespacesUsingSapinst = true
@@ -294,7 +307,6 @@ db6.useAutoStorage = true
db6.useExtraSapdataSaptmpDirLayout = false
db6.UseStandardTablespacePool = true
db6.usesLDAP = false
-# nwUsers.db6.db2sidUid =
storageBasedCopy.db6.CommunicationPortNumber = 5912
storageBasedCopy.db6.PortRangeEnd = 5917
storageBasedCopy.db6.PortRangeStart = 5914
@@ -438,10 +450,17 @@ NW_Recovery_Install_HDB.sidAdmPassword = {{ sap_swpm_db_sidadm_password }}
# db_connection_nw_anydb_ibmdb2
######
# db6.UseDb2SSLClientServerComm = false
-# nwUsers.db6.sapsidUid =
nwUsers.db6.sapsidPassword = {{ sap_swpm_sapadm_password }}
-NW_DB6_DB.db6.abap.connect.user = sap{{ sap_swpm_db_sid | lower }}
-NW_DB6_DB.db6.abap.schema = sap{{ sap_swpm_db_sid | lower }}
+# nwUsers.db6.sapsidUid =
+
+# nwUsers.db6.sapsiddbPassword =
+# nwUsers.db6.sapsiddbUid =
+
+# Database Schema and Database Connect User for ABAP (default is sap<sid> and not sap<dbsid>)
+NW_DB6_DB.db6.abap.connect.user = sap{{ sap_swpm_sid | lower }}
+NW_DB6_DB.db6.abap.schema = sap{{ sap_swpm_sid | lower }}
+# NW_DB6_DB.db6.java.connect.user =
+# NW_DB6_DB.db6.java.schema =
{% endif %}
{% if 'db_connection_nw_anydb_oracledb' in sap_swpm_inifile_list %}
@@ -697,17 +716,29 @@ NW_CI_Instance_ABAP_Reports.enableActivateICFService = true
# SAP INTERNAL USE ONLY
# NW_CI_Instance_ABAP_Reports.enableTransportsWithoutStackXml = false
-# Specify new password of the DDIC user in client 000, different from Master Password
-# NW_CI_Instance_ABAP_Reports.ddic000Password =
-
# Need specific new password of the DDIC user in client 000, different from Master Password
# NW_CI_Instance_ABAP_Reports.needNewDDIC000Password = false
+# Need specific new password of the DDIC user in client 001, different from Master Password
+# NW_CI_Instance_ABAP_Reports.needNewDDIC001Password = false
+
# Need specific new password of the SAP* user in client 000, different from Master Password
# NW_CI_Instance_ABAP_Reports.needNewSapStar000Password = false
+# Need specific new password of the SAP* user in client 001, different from Master Password
+# NW_CI_Instance_ABAP_Reports.needNewSapStar001Password = false
+
+# Specify new password of the DDIC user in client 000, different from Master Password
+# NW_CI_Instance_ABAP_Reports.ddic000Password =
+
+# Specify new password of the DDIC user in client 001, different from Master Password
+# NW_CI_Instance_ABAP_Reports.ddic001Password =
+
# Specify new password of the SAP* user in client 000, different from Master Password
# NW_CI_Instance_ABAP_Reports.sapStar000Password =
+
+# Specify new password of the SAP* user in client 001, different from Master Password
+# NW_CI_Instance_ABAP_Reports.sapStar001Password =
{% endif %}
{% if 'nw_config_livecache' in sap_swpm_inifile_list %}
@@ -815,3 +846,114 @@ InitDeclusteringForImport.decluster = false
# DiagnosticsAgent.SolMan.UserName
# DiagnosticsAgent.SolMan.UseSSL
{% endif %}
+
+
+{% if 'syscopy_export_anydb' in sap_swpm_inifile_list %}
+######
+# syscopy_export_anydb
+# Not in use by sap_swpm Ansible Role
+######
+# InitDeclusteringForExport.decluster =
+# NW_ABAP_Export_Dialog.customPackageOrder =
+# NW_ABAP_Export_Dialog.customSortOrderFile =
+# NW_ABAP_Export_Dialog.exportTimeFile =
+# NW_ABAP_Export_Dialog.importTimeFile =
+# NW_ABAP_Export_Dialog.jobNumberGroupLarge = 7
+# NW_ABAP_Export_Dialog.jobNumberGroupSmall = 4
+# NW_ABAP_Export_Dialog.migmonComHost =
+# NW_ABAP_Export_Dialog.migmonComPort =
+# NW_ABAP_Export_Dialog.migmonComType = EXCHANGE
+# NW_ABAP_Export_Dialog.migmonDataTransferType = NET
+# NW_ABAP_Export_Dialog.migmonFtpExchangeDir =
+# NW_ABAP_Export_Dialog.migmonFtpExportDir =
+# NW_ABAP_Export_Dialog.migmonFtpHost =
+# NW_ABAP_Export_Dialog.migmonFtpPassword =
+# NW_ABAP_Export_Dialog.migmonFtpUser =
+NW_ABAP_Export_Dialog.migmonJobNum = 3
+NW_ABAP_Export_Dialog.migmonLoadArgs = -stop_on_error
+# NW_ABAP_Export_Dialog.migmonNetExchangeDir =
+# NW_ABAP_Export_Dialog.migmonTaskArgs =
+# NW_ABAP_Export_Dialog.nonStandardAbapObjectsHandlingSkip = false
+NW_ABAP_Export_Dialog.parallelR3szchkExecution = true
+NW_ABAP_Export_Dialog.r3szchkDetermineSizeValue = DB
+# NW_ABAP_Export_Dialog.r3szchkJobNum = 10
+NW_ABAP_Export_Dialog.repeatExport = COMPLETE
+NW_ABAP_Export_Dialog.splitPackageLimitVal = 5000
+# NW_ABAP_Export_Dialog.splitMaxTableNumVal = 1000
+# NW_ABAP_Export_Dialog.splitTableFile =
+# NW_ABAP_Export_Dialog.splitTableLimitVal = 300
+# NW_ABAP_Export_Dialog.splitTableNumVal = 10
+# NW_ABAP_Export_Dialog.splitUseMaxTableNum = false
+# NW_ABAP_Export_Dialog.splitUsePackageLimit = true
+# NW_ABAP_Export_Dialog.splitUseTableFile = false
+# NW_ABAP_Export_Dialog.splitUseTableLimit = false
+# NW_ABAP_Export_Dialog.splitUseTableNum = true
+# NW_ABAP_Export_Dialog.sqlFileDir =
+NW_ABAP_Export_Dialog.targetHardwarePlatform = LITTLE_ENDIAN
+
+## Target database types for Linux are ADA, HDB, DB6, ORA, SYB
+NW_ABAP_Export_Dialog.targetDbType = {{ sap_swpm_export_target_db_type }}
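+# e.g. sap_swpm_export_target_db_type set to HDB selects SAP HANA as the export target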
+
+# NW_ABAP_Export_Dialog.useAdditionalExportHosts = false
+# NW_ABAP_Export_Dialog.useAdvancedUnloadConfig = false
+# NW_ABAP_Export_Dialog.useCustomPackageOrder = false
+# NW_ABAP_Export_Dialog.useCustomSortOrder = false
+# NW_ABAP_Export_Dialog.useMigMonConfig = false
+# NW_ABAP_Export_Dialog.useParallelExportImport = false
+# NW_ABAP_Export_Dialog.useSplit = true
+NW_ABAP_Export_Dialog.useSqlFiles = NOSQL
+# NW_ABAP_Export_Dialog.useUnicodeTargetSystem =
+NW_ABAP_Export_Dialog.useUnsortedUnload = true
+
+NW_Export.accessLevel4ExportDir = DEFAULT
+NW_Export.choiceSepKernel = false
+NW_Export.mainExportDir = {{ sap_swpm_export_files_path }}
+
+# Do not stop SAP System before data is unloaded
+# SAP SWPM is unable to automatically stop the SAP System (i.e. DB, ASCS, PAS instances),
+# if stop is true then SAP SWPM Unattended will fail on step mainExportParameters
+NW_Export.stopRunningSystem = true
+
+NW_readProfileDir.profileDir = /sapmnt/{{ sap_swpm_sid | upper }}/profile
+
+NW_getLoadType.loadType = {{ sap_swpm_load_type }}
+NW_getLoadType.importManuallyExecuted = false
+{% endif %}
+
+
+{% if 'syscopy_import_anydb_ibmdb2' in sap_swpm_inifile_list %}
+######
+# syscopy_import_anydb_ibmdb2
+# Not in use by sap_swpm Ansible Role
+######
+# db6.Additional_DbServer =
+# db6.cluster.HADRPort1 =
+# db6.cluster.HADRPort2 =
+# db6.cluster.SyncMode =
+# db6.ConfigureCluster = false
+# db6.DropSchemaList =
+# db6.InstallPureScale = false
+# db6.InstallTSA = false
+# db6.minimizeDatabaseSizeCompression =
+# db6.minimizeDatabaseSizeCompressionJava =
+# db6.minimizeDatabaseSizeDeferredTable = true
+# db6.notuseMcod =
+# db6.NumAdditionalPartitions =
+# db6.useDB2ControlFiles = false
+# db6.UseDb2NativeEncryption = false
+# NW_adaptProfile.templateFiles =
+# NW_CreateDBandLoad.movePVCforUsagePiAndDi =
+db6.allowUnsignedDatabaseSoftware = true
+db6.cluster.ClusterType = HADR (High Availability Disaster Recovery)
+db6.usingSystemCopyBRforHADR = true
+NW_getDBInfoGeneric.dbhost = {{ sap_swpm_db_host }}
+NW_getDBInfoGeneric.dbsid = {{ sap_swpm_db_sid }}
+NW_getLoadType.loadType = {{ sap_swpm_load_type }}
+NW_getLoadType.importManuallyExecuted = false
+NW_getUnicode.isUnicode = true
+# NW_getDBInfoGeneric.strictDbSidCheck = true
+storageBasedCopy.db6.CommunicationPortNumber = 5912
+storageBasedCopy.db6.PortRangeEnd = 5917
+storageBasedCopy.db6.PortRangeStart = 5914
+# storageBasedCopy.db6.db6updatedbpath =
+{% endif %}
diff --git a/roles/sap_vm_preconfigure/.ansible-lint b/roles/sap_vm_preconfigure/.ansible-lint
index 63122b8f9..8a5df4d43 100644
--- a/roles/sap_vm_preconfigure/.ansible-lint
+++ b/roles/sap_vm_preconfigure/.ansible-lint
@@ -1,7 +1,16 @@
---
+exclude_paths:
+ - tests/
enable_list:
- yaml
skip_list:
- - ignore-errors # We use ignore_errors for all the assert tasks, which should be acceptable
- - schema # We want to allow single digit version numbers in a role's meta/main.yml file. This is allowed as per https://galaxy.ansible.com/docs/contributing/creating_role.html and https://galaxy.ansible.com/api/v1/platforms/?page=6.
- - name[template] # Allow templating inside name. During dev and qa, it should be possible to identify cases where it doesn't work
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_vm_preconfigure/defaults/main.yml b/roles/sap_vm_preconfigure/defaults/main.yml
index 7835abe27..45de6c6fc 100644
--- a/roles/sap_vm_preconfigure/defaults/main.yml
+++ b/roles/sap_vm_preconfigure/defaults/main.yml
@@ -1,9 +1,9 @@
---
-# For setting Ansible Var sap_vm_platform, unless overriden
+# For setting Ansible Var sap_vm_platform, unless overridden
sap_vm_platform_detect: false
-# For re-use of this Ansible Role to establish which platform, without running any of the preconfigure Ansible Tasks for that platform
+# For reuse of this Ansible Role to establish which platform, without running any of the preconfigure Ansible Tasks for that platform
sap_vm_platform_detect_only: false
# Static definition, required if detection boolean is set to false
diff --git a/roles/sap_vm_preconfigure/handlers/platform/redhat_rhel_kvm_vm/main.yml b/roles/sap_vm_preconfigure/handlers/platform/redhat_rhel_kvm_vm/main.yml
index 158adaf4f..4a89b9f84 100644
--- a/roles/sap_vm_preconfigure/handlers/platform/redhat_rhel_kvm_vm/main.yml
+++ b/roles/sap_vm_preconfigure/handlers/platform/redhat_rhel_kvm_vm/main.yml
@@ -37,14 +37,14 @@
- name: "Set the grub.cfg location RHEL"
set_fact:
__sap_vm_preconfigure_uefi_boot_dir: /boot/efi/EFI/redhat/grub.cfg
- when:
+ when:
- ansible_distribution == 'RedHat'
- name: "Set the grub.cfg location SLES"
set_fact:
__sap_vm_preconfigure_uefi_boot_dir: /boot/efi/EFI/BOOT/grub.cfg
- when:
- - ansible_distribution == 'SLES' or ansible_distribution == 'SLES_SAP'
+ when:
+ - ansible_distribution == 'SLES' or ansible_distribution == 'SLES_SAP'
- name: "Run grub-mkconfig (UEFI mode)"
command: "grub2-mkconfig -o {{ __sap_vm_preconfigure_uefi_boot_dir }}"
diff --git a/roles/sap_vm_preconfigure/tasks/detect_platform/main.yml b/roles/sap_vm_preconfigure/tasks/detect_platform/main.yml
index 095f22892..ee7161ce1 100644
--- a/roles/sap_vm_preconfigure/tasks/detect_platform/main.yml
+++ b/roles/sap_vm_preconfigure/tasks/detect_platform/main.yml
@@ -41,7 +41,8 @@
# TODO: detection based on multiple facts and providing one standard
# name for use as platform type in related include files
-# cloud_aliyun_ecs_vm, cloud_aws_ec2_vs, cloud_gcp_ce_vm, cloud_ibmcloud_powervs, cloud_ibmcloud_vs, cloud_msazure_vm, hyp_ibmpower_lpar, hyp_redhat_ocp_virt_vm, hyp_redhat_rhel_kvm_vm, hyp_vmware_vsphere_vm
+# cloud_aliyun_ecs_vm, cloud_aws_ec2_vs, cloud_gcp_ce_vm, cloud_ibmcloud_powervs, cloud_ibmcloud_vs, cloud_msazure_vm,
+# hyp_ibmpower_lpar, hyp_redhat_ocp_virt_vm, hyp_redhat_rhel_kvm_vm, hyp_vmware_vsphere_vm
- name: "SAP VM Preconfigure - Check if platform is Amazon Web Services EC2 Virtual Server"
when:
@@ -49,7 +50,7 @@
ansible.builtin.set_fact:
sap_vm_platform: cloud_aws_ec2_vs
-#- name: "SAP VM Preconfigure - Check if platform is Google Cloud Compute Engine Virtual Machine"
+# - name: "SAP VM Preconfigure - Check if platform is Google Cloud Compute Engine Virtual Machine"
# when:
# - ansible_product_name == 'Google Compute Engine'
# ansible.builtin.set_fact:
@@ -61,14 +62,14 @@
ansible.builtin.set_fact:
sap_vm_platform: cloud_ibmcloud_vs
-#- name: "SAP VM Preconfigure - Check if platform is Microsoft Azure Virtual Machine"
+# - name: "SAP VM Preconfigure - Check if platform is Microsoft Azure Virtual Machine"
# when:
# - ansible_chassis_vendor == 'Virtual Machine'
# - ansible_product_name == 'Microsoft Corporation'
# ansible.builtin.set_fact:
# sap_vm_platform: cloud_msazure_vm
-#- name: "SAP VM Preconfigure - Check if platform is VMware vSphere"
+# - name: "SAP VM Preconfigure - Check if platform is VMware vSphere"
# when:
# - ansible_virtualization_type == 'VMware'
# ansible.builtin.set_fact:
@@ -79,53 +80,52 @@
when: sap_vm_platform == cloud_aws_ec2_vs
block:
- - name: (AWS) Get instance metadata token
- ansible.builtin.uri:
- headers:
- X-aws-ec2-metadata-token-ttl-seconds: 21600
- method: PUT
- return_content: true
- url: http://169.254.169.254/latest/api/token
- register: detect_cloud_provider_aws_token
- changed_when: false
- ignore_errors: true
-
- - name: (AWS) Get instance metadata ami-id
- ansible.builtin.uri:
- headers:
- X-aws-ec2-metadata-token: "{{ detect_cloud_provider_aws_token.content }}"
- method: GET
- return_content: true
- url: http://169.254.169.254/latest/meta-data/ami-id
- register: detect_cloud_provider_aws_ami_id
- changed_when: false
- ignore_errors: true
-
- - name: (AWS) Fail if cannot reach Instance Metadata Service
- ansible.builtin.fail:
- msg: Detected MS Azure, but could not confirm with the Instance Metadata Service
- when:
- - detect_cloud_provider_aws_ami_id.failed
+ - name: (AWS) Get instance metadata token
+ ansible.builtin.uri:
+ headers:
+ X-aws-ec2-metadata-token-ttl-seconds: 21600
+ method: PUT
+ return_content: true
+ url: http://169.254.169.254/latest/api/token
+ register: detect_cloud_provider_aws_token
+ changed_when: false
+ ignore_errors: true
+
+ - name: (AWS) Get instance metadata ami-id
+ ansible.builtin.uri:
+ headers:
+ X-aws-ec2-metadata-token: "{{ detect_cloud_provider_aws_token.content }}"
+ method: GET
+ return_content: true
+ url: http://169.254.169.254/latest/meta-data/ami-id
+ register: detect_cloud_provider_aws_ami_id
+ changed_when: false
+ ignore_errors: true
+
+ - name: (AWS) Fail if cannot reach Instance Metadata Service
+ ansible.builtin.fail:
+ msg: Detected AWS EC2, but could not confirm with the Instance Metadata Service
+ when:
+ - detect_cloud_provider_aws_ami_id.failed
- name: SAP VM Preconfigure - confirm Microsoft Azure Virtual Machine
when: sap_vm_platform == cloud_msazure_vm
block:
- - name: (Azure) Get instance metadata
- ansible.builtin.uri:
- headers:
- Metadata: true
- method: GET
- url: http://169.254.169.254/metadata/instance/compute?api-version=2021-10-01
- register: detect_cloud_provider_azure_instance_metadata
- changed_when: false
- ignore_errors: true
-
- - name: (Azure) Fail if cannot reach Instance Metadata Service
- ansible.builtin.fail:
- msg: Detected MS Azure, but could not confirm with the Instance Metadata Service
- when:
- - detect_cloud_provider_azure_instance_metadata.json.azEnvironment is not defined
- - detect_cloud_provider_azure_instance_metadata.json.azEnvironment != "AzurePublicCloud"
-
+ - name: (Azure) Get instance metadata
+ ansible.builtin.uri:
+ headers:
+ Metadata: true
+ method: GET
+ url: http://169.254.169.254/metadata/instance/compute?api-version=2021-10-01
+ register: detect_cloud_provider_azure_instance_metadata
+ changed_when: false
+ ignore_errors: true
+
+ - name: (Azure) Fail if cannot reach Instance Metadata Service
+ ansible.builtin.fail:
+ msg: Detected MS Azure, but could not confirm with the Instance Metadata Service
+ when:
+ - detect_cloud_provider_azure_instance_metadata.json.azEnvironment is not defined
+ - detect_cloud_provider_azure_instance_metadata.json.azEnvironment != "AzurePublicCloud"
diff --git a/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/assert-set-tuned-profile.yml b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/assert-set-tuned-profile.yml
index b0c443fab..720fa49f7 100644
--- a/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/assert-set-tuned-profile.yml
+++ b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/assert-set-tuned-profile.yml
@@ -2,11 +2,12 @@
- name: Assert - Check tuned profile
block:
- name: Get tuned profile
- command: tuned-adm active
+ ansible.builtin.command: tuned-adm active
register: __sap_vm_preconfigure_register_tuned_profile_assert
+ changed_when: __sap_vm_preconfigure_register_tuned_profile_assert.rc != 0
- name: Verify tuned profile
- assert:
+ ansible.builtin.assert:
that: "'Current active profile: sap-hana-kvm-host' in __sap_vm_preconfigure_register_tuned_profile_assert.stdout"
fail_msg: "FAIL: tuned profile is not sap-hana-kvm-guest"
success_msg: "PASS: tuned profile is sap-hana-kvm-guest"
diff --git a/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/main.yml b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/main.yml
index 0c14a0015..26a234fdb 100644
--- a/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/main.yml
+++ b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/main.yml
@@ -2,16 +2,17 @@
# tasks file for sap_vm_preconfigure
- name: Trigger tuned profile sap-hana-kvm-guest activation
- include_tasks: set-tuned-profile.yml
+ ansible.builtin.include_tasks: set-tuned-profile.yml
- name: Set filename prefix to empty string if role is run in normal mode
- set_fact:
+ ansible.builtin.set_fact:
__sap_vm_preconfigure_fact_assert_prefix: ""
when: not sap_vm_preconfigure_assert|d(false)
- name: Prepend filename with assert string if role is run in assert mode
- set_fact:
+ ansible.builtin.set_fact:
__sap_vm_preconfigure_fact_assert_prefix: "assert-"
when: sap_hypervisor_node_preconfigure_assert|d(false)
-- include_tasks: '{{ __sap_vm_preconfigure_fact_assert_prefix }}set-tuned-profile.yml'
+- name: Include '{{ __sap_vm_preconfigure_fact_assert_prefix }}set-tuned-profile.yml'
+ ansible.builtin.include_tasks: '{{ __sap_vm_preconfigure_fact_assert_prefix }}set-tuned-profile.yml'
diff --git a/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/set-tuned-profile.yml b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/set-tuned-profile.yml
index 9bbceea27..080f207c3 100644
--- a/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/set-tuned-profile.yml
+++ b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/set-tuned-profile.yml
@@ -1,12 +1,14 @@
---
- name: Create tuned profile directory /usr/lib/tuned/sap-hana
- file:
+ ansible.builtin.file:
path: /usr/lib/tuned/sap-hana
state: directory
+ mode: "0755"
- name: Create sap-hana tuned profile
- copy:
+ ansible.builtin.copy:
dest: "/usr/lib/tuned/sap-hana/tuned.conf"
+ mode: "0644"
content: |
#
# tuned configuration
@@ -34,14 +36,15 @@
- name: Create tuned profile directory /usr/lib/tuned/sap-hana-kvm-guest
- file:
+ ansible.builtin.file:
path: /usr/lib/tuned/sap-hana-kvm-guest
+ mode: "0755"
state: directory
- name: Add haltpoll.sh for tuned sap-hana-kvm-guest
- copy:
+ ansible.builtin.copy:
dest: "/usr/lib/tuned/sap-hana-kvm-guest/haltpoll.sh"
- mode: 0744
+ mode: "0744"
content: |
#!/bin/bash
@@ -53,8 +56,9 @@
- name: Create sap-hana-kvm-guest tuned profile
- copy:
+ ansible.builtin.copy:
dest: "/usr/lib/tuned/sap-hana-kvm-guest/tuned.conf"
+ mode: "0644"
content: |
#
# tuned configuration
@@ -82,4 +86,6 @@
cmdline_saphana=skew_tick=1
- name: Activate tuned profile
- command: tuned-adm profile sap-hana-kvm-guest
+ ansible.builtin.command: tuned-adm profile sap-hana-kvm-guest
+ register: __sap_provision_vm_register_tuned_sap_hana_kvm_guest_status
+ changed_when: __sap_provision_vm_register_tuned_sap_hana_kvm_guest_status.rc != 0
diff --git a/tools/swpm2_parameters_inifile_generate.py b/tools/swpm2_parameters_inifile_generate.py
index aea54fae2..af8d4a24b 100755
--- a/tools/swpm2_parameters_inifile_generate.py
+++ b/tools/swpm2_parameters_inifile_generate.py
@@ -117,7 +117,7 @@ def control_xml_to_inifile_params(filepath):
#
# All parameters are commented-out, each hash # before the parameter is removed to activate the parameter.
# When running SWPM in Unattended Mode, the activated parameters will create a new SWPM file in the sapinst directory.
- # If any parameter is marked as 'encode', the plaintext value will be coverted to DES hash for this parameter
+ # If any parameter is marked as 'encode', the plaintext value will be converted to DES hash for this parameter
# in the new SWPM file (in the sapinst directory).
#
# An inifile.params is otherwise obtained after running SWPM as GUI or Unattended install,
@@ -151,7 +151,7 @@ def control_xml_to_inifile_params(filepath):
if component_parameter_key_inifile_name is not None:
inifile_output.write("\n# %s" % (component_parameter_contents_doclong_text))
if component_parameter_key_encode == "true":
- inifile_output.write("\n# Encoded parameter. Plaintext values will be coverted to DES hash")
+ inifile_output.write("\n# Encoded parameter. Plaintext values will be converted to DES hash")
inifile_output.write("\n# %s = %s\n" % (component_parameter_key_inifile_name, component_parameter_key_defval))
inifile_output.close()