@@ -961,13 +961,13 @@ sudo crm configure property maintenance-mode=true
```bash
# Replace <placeholders> with your instance number and HANA system ID

- sudo crm configure primitive rsc_SAPHanaTopology_<SID>_HDB<InstanceNumber> ocf:suse:SAPHanaTopology \
+ sudo crm configure primitive rsc_SAPHanaTopology_<SID>_HDB<InstNum> ocf:suse:SAPHanaTopology \
   op monitor interval="50" timeout="600" \
   op start interval="0" timeout="600" \
   op stop interval="0" timeout="300" \
- params SID="<SID>" InstanceNumber="<InstanceNumber>"
+ params SID="<SID>" InstanceNumber="<InstNum>"

- sudo crm configure clone cln_SAPHanaTopology_<SID>_HDB<InstanceNumber> rsc_SAPHanaTopology_<SID>_HDB<InstanceNumber> \
+ sudo crm configure clone cln_SAPHanaTopology_<SID>_HDB<InstNum> rsc_SAPHanaTopology_<SID>_HDB<InstNum> \
   meta clone-node-max="1" interleave="true"
```
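For reference, here is how the updated commands expand with SID `HN1` and instance number `03` (the example values used elsewhere in this article). This is an editor's illustration, not part of the commit:

```bash
# Hypothetical expansion with SID=HN1, instance number 03
sudo crm configure primitive rsc_SAPHanaTopology_HN1_HDB03 ocf:suse:SAPHanaTopology \
  op monitor interval="50" timeout="600" \
  op start interval="0" timeout="600" \
  op stop interval="0" timeout="300" \
  params SID="HN1" InstanceNumber="03"

sudo crm configure clone cln_SAPHanaTopology_HN1_HDB03 rsc_SAPHanaTopology_HN1_HDB03 \
  meta clone-node-max="1" interleave="true"
```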
@@ -978,13 +978,13 @@ sudo crm configure property maintenance-mode=true
```bash
# Replace <placeholders> with your instance number and HANA system ID

- sudo crm configure primitive rsc_SAPHanaTopology_<SID>_HDB<InstanceNumber> ocf:suse:SAPHanaTopology \
+ sudo crm configure primitive rsc_SAPHanaTopology_<SID>_HDB<InstNum> ocf:suse:SAPHanaTopology \
   op monitor interval="10" timeout="600" \
   op start interval="0" timeout="600" \
   op stop interval="0" timeout="300" \
- params SID="<SID>" InstanceNumber="<InstanceNumber>"
+ params SID="<SID>" InstanceNumber="<InstNum>"

- sudo crm configure clone cln_SAPHanaTopology_<SID>_HDB<InstanceNumber> rsc_SAPHanaTopology_<SID>_HDB<InstanceNumber> \
+ sudo crm configure clone cln_SAPHanaTopology_<SID>_HDB<InstNum> rsc_SAPHanaTopology_<SID>_HDB<InstNum> \
   meta clone-node-max="1" target-role="Started" interleave="true"
```
@@ -997,18 +997,18 @@ sudo crm configure clone cln_SAPHanaTopology_<SID>_HDB<InstanceNumber> rsc_SAPHa
```bash
# Replace <placeholders> with your instance number and HANA system ID

- sudo crm configure primitive rsc_SAPHanaController_<SID>_HDB<InstanceNumber> ocf:suse:SAPHanaController \
+ sudo crm configure primitive rsc_SAPHanaController_<SID>_HDB<InstNum> ocf:suse:SAPHanaController \
   op start interval="0" timeout="3600" \
   op stop interval="0" timeout="3600" \
   op promote interval="0" timeout="900" \
   op demote interval="0" timeout="320" \
   op monitor interval="60" role="Promoted" timeout="700" \
   op monitor interval="61" role="Unpromoted" timeout="700" \
- params SID="<SID>" InstanceNumber="<InstanceNumber>" PREFER_SITE_TAKEOVER="true" \
+ params SID="<SID>" InstanceNumber="<InstNum>" PREFER_SITE_TAKEOVER="true" \
   DUPLICATE_PRIMARY_TIMEOUT="7200" AUTOMATED_REGISTER="false" \
   HANA_CALL_TIMEOUT="120"

- sudo crm configure clone msl_SAPHanaController_<SID>_HDB<InstanceNumber> rsc_SAPHanaController_<SID>_HDB<InstanceNumber> \
+ sudo crm configure clone msl_SAPHanaController_<SID>_HDB<InstNum> rsc_SAPHanaController_<SID>_HDB<InstNum> \
   meta clone-node-max="1" interleave="true" promotable="true"
```
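To confirm that the promotable clone was accepted as configured, `crm configure show` also accepts a single object ID. An editor's example, assuming the hypothetical HN1/03 values:

```bash
# Display just the promotable clone definition (example values)
sudo crm configure show msl_SAPHanaController_HN1_HDB03
```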
@@ -1020,18 +1020,19 @@ sudo crm configure clone msl_SAPHanaController_<SID>_HDB<InstanceNumber> rsc_SAP
```bash
# Replace <placeholders> with your instance number and HANA system ID

- sudo crm configure primitive rsc_SAPHana_<SID>_HDB<InstanceNumber> ocf:suse:SAPHanaController \
+ sudo crm configure primitive rsc_SAPHana_<SID>_HDB<InstNum> ocf:suse:SAPHanaController \
   op start interval="0" timeout="3600" \
   op stop interval="0" timeout="3600" \
   op promote interval="0" timeout="3600" \
   op monitor interval="60" role="Master" timeout="700" \
   op monitor interval="61" role="Slave" timeout="700" \
- params SID="<SID>> " InstanceNumber="<InstanceNumber>" PREFER_SITE_TAKEOVER="true" \
+ params SID="<SID>" InstanceNumber="<InstNum>" PREFER_SITE_TAKEOVER="true" \
   DUPLICATE_PRIMARY_TIMEOUT="7200" AUTOMATED_REGISTER="false"

- sudo crm configure ms msl_SAPHana_<SID>_HDB<InstanceNumber> rsc_SAPHana_<SID>_HDB<InstanceNumber> \
+ sudo crm configure ms msl_SAPHana_<SID>_HDB<InstNum> rsc_SAPHana_<SID>_HDB<InstNum> \
   meta clone-node-max="1" master-max="1" interleave="true"
```
+

---

> [!IMPORTANT]
@@ -1050,12 +1051,13 @@ sudo crm configure primitive rsc_SAPHanaFilesystem_SA5_HDB10 ocf:suse:SAPHanaFil
  op start interval="0" timeout="10" \
  op stop interval="0" timeout="20" \
  op monitor interval="120" timeout="120" \
- params SID="<SID>" InstanceNumber="<InstanceNumber>" ON_FAIL_ACTION="fence"
+ params SID="<SID>" InstanceNumber="<InstNum>" ON_FAIL_ACTION="fence"

- sudo crm configure clone cln_SAPHanaFilesystem_<SID>_HDB<InstanceNumber> rsc_SAPHanaFilesystem_<SID>_HDB<InstanceNumber> \
+ sudo crm configure clone cln_SAPHanaFilesystem_<SID>_HDB<InstNum> rsc_SAPHanaFilesystem_<SID>_HDB<InstNum> \
   meta clone-node-max="1" interleave="true"

- sudo crm configure location SAPHanaFilesystem_not_on_majority_maker cln_SAPHanaFilesystem_<SID>>_HDB<InstanceNumber> -inf: hana-s-mm
+ # Add a location constraint so that the filesystem check doesn't run on the majority maker VM
+ sudo crm configure location loc_SAPHanaFilesystem_not_on_majority_maker cln_SAPHanaFilesystem_<SID>_HDB<InstNum> -inf: hana-s-mm
```

### [SAPHanaSR-ScaleOut](#tab/saphanasr-scaleout)
@@ -1079,19 +1081,19 @@ Create a dummy file system cluster resource, which will monitor and report failu
```bash
# Replace <placeholders> with your instance number and HANA system ID

- crm configure primitive fs_HN1_HDB03_fscheck Filesystem \
-   params device="/hana/shared/HN1/check" \
+ crm configure primitive fs_<SID>_HDB<InstNum>_fscheck Filesystem \
+   params device="/hana/shared/<SID>/check" \
    directory="/hana/check" fstype=nfs4 \
    options="bind,defaults,rw,hard,proto=tcp,noatime,nfsvers=4.1,lock" \
    op monitor interval=120 timeout=120 on-fail=fence \
    op_params OCF_CHECK_LEVEL=20 \
    op start interval=0 timeout=120 op stop interval=0 timeout=120

- crm configure clone cln_fs_HN1_HDB03_fscheck fs_HN1_HDB03_fscheck \
+ crm configure clone cln_fs_<SID>_HDB<InstNum>_fscheck fs_<SID>_HDB<InstNum>_fscheck \
    meta clone-node-max=1 interleave=true
-
- crm configure location loc_cln_fs_HN1_HDB03_fscheck_not_on_mm \
-   cln_fs_HN1_HDB03_fscheck -inf: hana-s-mm
+ # Add a location constraint so that the filesystem check doesn't run on the majority maker VM
+ crm configure location loc_cln_fs_<SID>_HDB<InstNum>_fscheck_not_on_mm \
+   cln_fs_<SID>_HDB<InstNum>_fscheck -inf: hana-s-mm
```

The `OCF_CHECK_LEVEL=20` attribute is added to the monitor operation so that monitor operations perform a read/write test on the file system. Without this attribute, the monitor operation only verifies that the file system is mounted. This can be a problem because, when connectivity is lost, the file system may remain mounted despite being inaccessible.
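To see what this deeper check guards against, you can approximate it by hand. The following is an editor's sketch that assumes the `/hana/check` mount configured above; it isn't a command from the original article:

```bash
# Manually verify that the file system is writable, not merely mounted
sudo touch /hana/check/.rw_probe && sudo rm /hana/check/.rw_probe \
  && echo "read/write OK" || echo "file system inaccessible"
```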
@@ -1100,81 +1102,112 @@ Create a dummy file system cluster resource, which will monitor and report failu
---

- 4. **[1]** Continue with cluster resources for virtual IPs, defaults, and constraints.
+ 4. **[1]** Continue with cluster resources for virtual IPs and constraints.

- ### [SAPHanaSR-angi](#tab/saphanasr-angi)
+ ### [SAPHanaSR-angi](#tab/saphanasr-angi)

- text 123
+ ```bash
+ # Replace <placeholders> with your instance number and HANA system ID, and the respective IP address and load balancer port

- ### [SAPHanaSR-ScaleOut](#tab/saphanasr-scaleout)
-
- ```bash
- sudo crm configure primitive rsc_ip_HN1_HDB03 ocf:heartbeat:IPaddr2 \
-   op start timeout=60s on-fail=fence \
-   op monitor interval="10s" timeout="20s" \
-   params ip="10.23.0.27"
-
- sudo crm configure primitive rsc_nc_HN1_HDB03 azure-lb port=62503 \
-   op monitor timeout=20s interval=10 \
-   meta resource-stickiness=0
-
- sudo crm configure group g_ip_HN1_HDB03 rsc_ip_HN1_HDB03 rsc_nc_HN1_HDB03
- ```
+ sudo crm configure primitive rsc_ip_<SID>_HDB<InstNum> ocf:heartbeat:IPaddr2 \
+   op start timeout=60s on-fail=fence \
+   op monitor interval="10s" timeout="20s" \
+   params ip="10.23.0.27"
+
+ sudo crm configure primitive rsc_nc_<SID>_HDB<InstNum> azure-lb port=62503 \
+   op monitor timeout=20s interval=10 \
+   meta resource-stickiness=0
+
+ sudo crm configure group g_ip_<SID>_HDB<InstNum> rsc_ip_<SID>_HDB<InstNum> rsc_nc_<SID>_HDB<InstNum>
+ ```

- Create the cluster constraints
+ Create the cluster constraints

- ```bash
- # Replace <placeholders> with your instance number and HANA system ID
+ ```bash
+ # Colocate the IP with the primary HANA node
+ sudo crm configure colocation col_saphana_ip_<SID>_HDB<InstNum> 4000: g_ip_<SID>_HDB<InstNum>:Started \
+   msl_SAPHanaController_<SID>_HDB<InstNum>:Promoted
+
+ # Start HANA Topology before the HANA instance
+ sudo crm configure order ord_SAPHana_<SID>_HDB<InstNum> Optional: cln_SAPHanaTopology_<SID>_HDB<InstNum> \
+   msl_SAPHanaController_<SID>_HDB<InstNum>
+
+ # HANA resources don't run on the majority maker node
+ sudo crm configure location loc_SAPHanaController_not_on_majority_maker msl_SAPHanaController_<SID>_HDB<InstNum> -inf: hana-s-mm
+ sudo crm configure location loc_SAPHanaTopology_not_on_majority_maker cln_SAPHanaTopology_<SID>_HDB<InstNum> -inf: hana-s-mm
+ ```

- # Colocate the IP with HANA master
- sudo crm configure colocation col_saphana_ip_HN1_HDB03 4000: g_ip_HN1_HDB03:Started \
-   msl_SAPHana_HN1_HDB03:Master
-
- # Start HANA Topology before HANA instance
- sudo crm configure order ord_SAPHana_HN1_HDB03 Optional: cln_SAPHanaTopology_HN1_HDB03 \
-   msl_SAPHana_HN1_HDB03
-
- # HANA resources don't run on the majority maker node
- sudo crm configure location loc_SAPHanaCon_not_on_majority_maker msl_SAPHana_HN1_HDB03 -inf: hana-s-mm
- sudo crm configure location loc_SAPHanaTop_not_on_majority_maker cln_SAPHanaTopology_HN1_HDB03 -inf: hana-s-mm
- ```
+ ### [SAPHanaSR-ScaleOut](#tab/saphanasr-scaleout)
+
+ ```bash
+ # Replace <placeholders> with your instance number and HANA system ID, and the respective IP address and load balancer port
+
+ sudo crm configure primitive rsc_ip_<SID>_HDB<InstNum> ocf:heartbeat:IPaddr2 \
+   op start timeout=60s on-fail=fence \
+   op monitor interval="10s" timeout="20s" \
+   params ip="10.23.0.27"
+
+ sudo crm configure primitive rsc_nc_<SID>_HDB<InstNum> azure-lb port=62503 \
+   op monitor timeout=20s interval=10 \
+   meta resource-stickiness=0
+
+ sudo crm configure group g_ip_<SID>_HDB<InstNum> rsc_ip_<SID>_HDB<InstNum> rsc_nc_<SID>_HDB<InstNum>
+ ```
+
+ Create the cluster constraints
+
+ ```bash
+ # Replace <placeholders> with your instance number and HANA system ID
+
+ # Colocate the IP with the primary HANA node
+ sudo crm configure colocation col_saphana_ip_<SID>_HDB<InstNum> 4000: g_ip_<SID>_HDB<InstNum>:Started \
+   msl_SAPHana_<SID>_HDB<InstNum>:Master
+
+ # Start HANA Topology before the HANA instance
+ sudo crm configure order ord_SAPHana_<SID>_HDB<InstNum> Optional: cln_SAPHanaTopology_<SID>_HDB<InstNum> \
+   msl_SAPHana_<SID>_HDB<InstNum>
+
+ # HANA resources don't run on the majority maker node
+ sudo crm configure location loc_SAPHanaCon_not_on_majority_maker msl_SAPHana_<SID>_HDB<InstNum> -inf: hana-s-mm
+ sudo crm configure location loc_SAPHanaTop_not_on_majority_maker cln_SAPHanaTopology_<SID>_HDB<InstNum> -inf: hana-s-mm
+ ```

---

5. **[1]** Configure additional cluster properties

- ```bash
- sudo crm configure rsc_defaults resource-stickiness=1000
- sudo crm configure rsc_defaults migration-threshold=50
- ```
+ ```bash
+ sudo crm configure rsc_defaults resource-stickiness=1000
+ sudo crm configure rsc_defaults migration-threshold=50
+ ```

6. **[1]** Place the cluster out of maintenance mode. Make sure that the cluster status is OK and that all of the resources are started.

- ```bash
- # Cleanup any failed resources - the following command is an example
- crm resource cleanup rsc_SAPHana_HN1_HDB03
-
- # Place the cluster out of maintenance mode
- sudo crm configure property maintenance-mode=false
- ```
+ ```bash
+ # Cleanup any failed resources - the following command is an example
+ sudo crm resource cleanup rsc_SAPHana_HN1_HDB03
+
+ # Place the cluster out of maintenance mode
+ sudo crm configure property maintenance-mode=false
+ ```

7. **[1]** Verify the communication between the HANA HA hook and the cluster. The output should show status SOK for the SID, and both replication sites with status P(rimary) or S(econdary).

- ```bash
- sudo /usr/sbin/SAPHanaSR-showAttr
- # Expected result
- # Global cib-time                 maintenance prim    sec sync_state upd
- # ---------------------------------------------------------------------
- # HN1    Fri Jan 27 10:38:46 2023 false       HANA_S1 -   SOK        ok
- #
- # Sites   lpt        lss mns         srHook srr
- # -----------------------------------------------
- # HANA_S1 1674815869 4   hana-s1-db1 PRIM   P
- # HANA_S2 30         4   hana-s2-db1 SWAIT  S
- ```
-
- > [!NOTE]
- > The timeouts in the above configuration are just examples and may need to be adapted to the specific HANA setup. For instance, you may need to increase the start timeout if it takes longer to start the SAP HANA database.
+ ```bash
+ sudo /usr/sbin/SAPHanaSR-showAttr
+ # Expected result
+ # Global cib-time                 maintenance prim    sec sync_state upd
+ # ---------------------------------------------------------------------
+ # HN1    Fri Jan 27 10:38:46 2023 false       HANA_S1 -   SOK        ok
+ #
+ # Sites   lpt        lss mns         srHook srr
+ # -----------------------------------------------
+ # HANA_S1 1674815869 4   hana-s1-db1 PRIM   P
+ # HANA_S2 30         4   hana-s2-db1 SWAIT  S
+ ```
+
+ > [!NOTE]
+ > The timeouts in the above configuration are just examples and may need to be adapted to the specific HANA setup. For instance, you may need to increase the start timeout if it takes longer to start the SAP HANA database.

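As an editor's aside, not part of the original walkthrough: once the cluster is out of maintenance mode, a quick way to review what was built is to dump the configuration and take a one-shot status snapshot:

```bash
# Show the full CIB configuration created in the steps above
sudo crm configure show

# One-shot cluster status, including inactive resources
sudo crm_mon -r -1
```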
@@ -1352,7 +1385,7 @@ Create a dummy file system cluster resource, which will monitor and report failu
1352
1385
# site name: HANA_S1
1353
1386
` ` `
1354
1387
1355
- 2. We recommend to thoroughly validate the SAP HANA cluster configuration, by performing the tests, documented in [HA for SAP HANA on Azure VMs on SLES](./sap-hana-high-availability.md#test-the-cluster-setup) and in [SLES Replication scale-out Performance Optimized Scenario](https://documentation.suse.com/sbp/all /html/SLES4SAP-hana-scaleOut-PerfOpt-12 /index.html#_testing_the_cluster ).
1388
+ 2. We recommend to thoroughly validate the SAP HANA cluster configuration, by performing the tests, documented in [HA for SAP HANA on Azure VMs on SLES](./sap-hana-high-availability.md#test-the-cluster-setup) and in [SLES Replication scale-out Performance Optimized Scenario](https://documentation.suse.com/sbp/sap-15 /html/SLES4SAP-hana-angi-scaleout-perfopt-15 /index.html#id-testing-the-cluster ).
1356
1389
1357
1390
3. Verify the cluster configuration for a failure scenario, when a node loses access to the NFS share (` /hana/shared` ).
1358
1391
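One common way to provoke that failure on a test node is to block traffic to the NFS endpoint. The following is an editor's sketch with a hypothetical NFS server address (10.23.1.7); it isn't a procedure from the original article:

```bash
# Hypothetical example: drop outbound NFS traffic to simulate losing /hana/shared
# Run on one HANA node only, and remove the rule after the test
sudo iptables -A OUTPUT -p tcp -d 10.23.1.7 --dport 2049 -j DROP

# Undo after validating the cluster behavior
sudo iptables -D OUTPUT -p tcp -d 10.23.1.7 --dport 2049 -j DROP
```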