Once data replication is configured on the nodes, we can now configure LVM on top of the DRBD disk /dev/drbd0. The following Ansible snippet configures an LVM volume group named nfsvg and a logical volume named nfsvol of size 45 GB.

```
---
- hosts: sglx-storage-flex-add-on-hosts
  tasks:
    - name: Modify lvm configuration
      become: True
      lineinfile:
        path: /etc/lvm/lvm.conf
        regexp: "# volume_list"
        line: volume_list=["nfsvg","nfsvg/nfsvol","@tag1","@*"]
        state: present
        backup: True

    - name: reject disk in lvm configuration
      become: True
      lineinfile:
        path: /etc/lvm/lvm.conf
        regexp: ".*/dev/cdrom.*"
        line: ' filter = [ "r|/dev/sdb|", "a|/dev/drbd0|" ] '
        state: present
        backup: True

- hosts: primary
  tasks:
    - name: Create a volume group on /dev/drbd0
      become: True
      lvg:
        vg: nfsvg
        pvs: /dev/drbd0

    - name: create logical volume for nfs
      become: True
      lvol:
        vg: nfsvg
        lv: nfsvol
        size: 45g
        force: True

    - name: Format filesystem
      become: True
      filesystem:
        dev: /dev/nfsvg/nfsvol
        fstype: xfs
```
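
Before moving on, it can be worth confirming on the primary node that the replicated device and the new volume group look healthy. A minimal manual check, assuming the DRBD resource is Primary on this node and the standard drbd-utils and LVM tools are installed, could look like this:

```
# On the primary node: confirm /dev/drbd0 is Primary and UpToDate
drbdadm status

# Confirm the volume group and logical volume created above are visible
vgs nfsvg
lvs nfsvg
```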

## Setting up the NFS server

Now we will start the NFS service and export the NFS share from the primary node using the Ansible snippet below.

```
---
- hosts: sglx-storage-flex-add-on-hosts
  tasks:
    - name: Install NFS Server and related components
      become: True
      ansible.builtin.yum:
        name:
          - nfs-utils
        state: present
      ignore_errors: True

    - name: Enable NFS related services
      become: True
      systemd:
        name: "{{ item }}"
        enabled: True
      with_items:
        - rpcbind
        - nfs-server

    - name: Start NFS related services
      become: True
      systemd:
        name: "{{ item }}"
        state: started
      with_items:
        - rpcbind
        - nfs-server

    - name: Add /etc/exports entry and create NFS mount point
      become: True
      shell: |
        mkdir -p /nfs
        chmod go+rwx /nfs
        echo '/nfs *(rw,sync,no_root_squash)' > /etc/exports

- hosts: primary
  tasks:
    - name: mount nfs on primary
      become: True
      shell: |
        mount /dev/nfsvg/nfsvol /nfs
        exportfs -a
```
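
At this point the share should be exported from the primary node. As a quick sanity check before handing control over to Serviceguard (the client hostname below is a placeholder), you can verify the mount and the export:

```
# On the primary node: confirm the filesystem is mounted and exported
df -h /nfs
exportfs -v

# From any client that can reach the primary node (placeholder hostname)
showmount -e primary-node.example.com
```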

## Creating an SGLX cluster and providing HA to the NFS workload

Once the NFS share is configured, we will create an SGLX cluster and deploy the NFS workload in it to make it highly available. The snippet below helps us achieve this.

```
---
- hosts: primary
  tasks:
    - name: Build string of primary nodes
      set_fact:
        primary_nodes: "{{ primary_nodes | default('') + ' -n ' + hostvars[item].ansible_hostname }}"
      with_items:
        - "{{ groups['primary'] }}"

    - name: Build string of secondary nodes
      set_fact:
        secondary_nodes: "{{ secondary_nodes | default('') + ' -n ' + hostvars[item].ansible_hostname }}"
      with_items:
        - "{{ groups['secondary'] }}"

    - name: Build string of quorum nodes
      set_fact:
        quorum_nodes: "{{ quorum_nodes | default('') + ' -q ' + hostvars[item].ansible_hostname }}"
      with_items:
        - "{{ groups['quorum-server-hosts'] }}"

    - name: Run cmdeploycl command
      become: True
      ansible.builtin.expect:
        command: "$SGSBIN/cmdeploycl {{ primary_nodes }} {{ secondary_nodes }} {{ quorum_nodes }}"
        responses:
          password: "{{ root_pass }}"
        timeout: 300

    - name: Update cluster config
      become: True
      shell: |
        rm -rf /tmp/cluster.txt
        $SGSBIN/cmgetconf > /tmp/cluster.txt
        echo "GENERIC_RESOURCE_NAME CGR_SGeNSS_drbd" >> /tmp/cluster.txt
        echo "GENERIC_RESOURCE_TYPE simple" >> /tmp/cluster.txt
        echo "GENERIC_RESOURCE_CMD $SGSBIN/scripts/sgenss/replication_software/drbd/cluster_generic_resource.sh" >> /tmp/cluster.txt
        echo "GENERIC_RESOURCE_SCOPE node" >> /tmp/cluster.txt
        echo "GENERIC_RESOURCE_RESTART none" >> /tmp/cluster.txt
        echo "GENERIC_RESOURCE_HALT_TIMEOUT 10000000" >> /tmp/cluster.txt

    - name: Run cmapplyconf command
      become: True
      shell: |
        $SGSBIN/cmapplyconf -v -C /tmp/cluster.txt -f

    - name: Create a DRBD and NFS package
      become: True
      shell: |
        rm -rf /tmp/nfs_drbd.conf
        $SGSBIN/cmmakepkg -m sgenss/rf_drbd -m tkit/nfs/nfs /tmp/nfs_drbd.conf

    - name: update the drbd resource name
      become: True
      replace:
        path: /tmp/nfs_drbd.conf
        regexp: "{{ item.regexp }}"
        replace: "{{ item.rep }}"
      with_items:
        - { regexp: 'res0', rep: 'drbd0' }

    - name: Make change to package configuration
      become: True
      lineinfile:
        path: /tmp/nfs_drbd.conf
        regexp: "{{ item.regexp }}"
        line: "{{ item.line }}"
        state: present
      with_items:
        - { regexp: '^package_name', line: 'package_name nfs_drbd' }
        - { regexp: '^#vg', line: 'vg nfsvg' }
        - { regexp: '^tkit/nfs/nfs/XFS', line: 'tkit/nfs/nfs/XFS "-o rw,sync,no_root_squash *:/nfs"' }
        - { regexp: '^tkit/nfs/nfs/QUOTA_MON', line: 'tkit/nfs/nfs/QUOTA_MON no' }

    - name: Add additional NFS configuration
      become: True
      lineinfile:
        path: /tmp/nfs_drbd.conf
        insertafter: EOF
        line: |
          fs_name /dev/nfsvg/nfsvol
          fs_directory /nfs
          fs_type "xfs"
          fs_mount_opt "-o rw"
          ip_subnet 10.10.180.0
          ip_address 10.10.180.99

    - name: check the package and apply it
      become: True
      shell: |
        $SGSBIN/cmcheckconf -P /tmp/nfs_drbd.conf
        $SGSBIN/cmapplyconf -P /tmp/nfs_drbd.conf -f

    - name: enable the package
      become: True
      shell: |
        $SGSBIN/cmmodpkg -e nfs_drbd
```
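
For illustration, the three `set_fact` loops above simply assemble `-n` and `-q` arguments from the inventory groups. With one primary, one secondary, and one quorum host (the hostnames below are placeholders), the `cmdeploycl` invocation that the playbook builds resolves to something like:

```
$SGSBIN/cmdeploycl -n sglx-node1 -n sglx-node2 -q sglx-qs1
```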

Now we have the NFS server deployed in a Serviceguard cluster with high availability.
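
To verify, you can check the cluster and package state with Serviceguard's `cmviewcl` and mount the share from a client through the package IP configured above (the client-side mount point is a placeholder):

```
# On any cluster node: confirm the cluster is up and the nfs_drbd package is running
$SGSBIN/cmviewcl -v

# From an NFS client on the 10.10.180.0 subnet: mount via the package IP
mkdir -p /mnt/nfs
mount -t nfs 10.10.180.99:/nfs /mnt/nfs
```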

# Conclusion

In this blog we looked at how we could use platforms like Terraform and Ansible to easily provision and deploy a highly available NFS server solution with Serviceguard for Linux on an HPE GreenLake for Private Cloud Enterprise environment.