diff --git a/playbooks/PBR/Create_mTLS.yml b/playbooks/PBR/Create_mTLS.yml
new file mode 100644
index 0000000..afc3563
--- /dev/null
+++ b/playbooks/PBR/Create_mTLS.yml
@@ -0,0 +1,41 @@
+- name: Generate certificate
+  ibm_svctask_command:
+    command: "svctask chsystemcert -mkselfsigned"
+    clustername: "{{item.cluster_ip}}"
+    username: "{{item.cluster_username}}"
+    password: "{{item.cluster_password}}"
+    log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+  loop: "{{users_data}}"
+
+- name: Export SSL certificate internally
+  ibm_sv_manage_ssl_certificate:
+    clustername: "{{item.cluster_ip}}"
+    username: "{{item.cluster_username}}"
+    password: "{{item.cluster_password}}"
+    log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+    certificate_type: "system"
+  loop: "{{users_data}}"
+
+- name: Create truststore on primary
+  ibm_sv_manage_truststore_for_replication:
+    clustername: "{{users_data[0].cluster_ip}}"
+    username: "{{users_data[0].cluster_username}}"
+    password: "{{users_data[0].cluster_password}}"
+    log_path: "{{ log_path | default('/tmp/ansiblePB.debug') }}"
+    name: trust
+    remote_clustername: "{{users_data[1].cluster_ip}}"
+    remote_username: "{{users_data[1].cluster_username}}"
+    remote_password: "{{users_data[1].cluster_password}}"
+    state: "present"
+
+- name: Create truststore on secondary
+  ibm_sv_manage_truststore_for_replication:
+    clustername: "{{users_data[1].cluster_ip}}"
+    username: "{{users_data[1].cluster_username}}"
+    password: "{{users_data[1].cluster_password}}"
+    log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+    name: trust
+    remote_clustername: "{{users_data[0].cluster_ip}}"
+    remote_username: "{{users_data[0].cluster_username}}"
+    remote_password: "{{users_data[0].cluster_password}}"
+    state: "present"
diff --git a/playbooks/PBR/Create_mdiskgrp_drp_proviPolicy.yml b/playbooks/PBR/Create_mdiskgrp_drp_proviPolicy.yml
new file mode 100644
index 0000000..48dfb56
--- /dev/null
+++ b/playbooks/PBR/Create_mdiskgrp_drp_proviPolicy.yml
@@ -0,0 +1,156 @@
+- name: create mdiskgrp on both clusters
+  ibm_svc_mdiskgrp:
+    clustername: "{{item.cluster_ip}}"
+    username: "{{item.cluster_username}}"
+    password: "{{item.cluster_password}}"
+    log_path: "{{ log_path | default('/tmp/ansiblePB.debug') }}"
+    name: mdg0
+    state: present
+    datareduction: yes
+    ext: 1024
+  loop: "{{users_data}}"
+
+- name: Get drive info
+  register: results
+  ibm_svcinfo_command:
+    command: "svcinfo lsdrive"
+    clustername: "{{users_data[0].cluster_ip}}"
+    username: "{{users_data[0].cluster_username}}"
+    password: "{{users_data[0].cluster_password}}"
+    log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+
+- name: set drive id
+  set_fact:
+    drive_id: "{{item['id']}}"
+  loop: "{{(results['stdout'])}}"
+
+- name: set drive status
+  set_fact:
+    drive_status: "{{item['use']}}"
+  loop: "{{(results['stdout'])}}"
+
+- name: Set drive count
+  set_fact:
+    TotalDrive: "{{drive_id|int + 1|int}}"
+
+- name: set level
+  set_fact:
+    Level:
+
+- name: Decide Level
+  set_fact:
+    Level: raid1
+  when: (TotalDrive|int <= 3)
+
+- name: Decide Level
+  set_fact:
+    Level: raid6
+  when: (TotalDrive|int > 3)
+
+- name: Create a List of variable
+  set_fact:
+    list1: []
+
+- name: set variable
+  set_fact:
+    member: member
+
+- name: Make drive in candidate state
+  ibm_svctask_command:
+    command: [ "svctask chdrive -use candidate {{item}}" ]
+    clustername: "{{users_data[0].cluster_ip}}"
+    username: "{{users_data[0].cluster_username}}"
+    password: "{{users_data[0].cluster_password}}"
+    log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+  with_sequence: start=0 end="{{drive_id}}"
+  when: drive_status != member
+
+- name: create distributed array on primary
+  ibm_svc_mdisk:
+    clustername: "{{users_data[0].cluster_ip}}"
+    username: "{{users_data[0].cluster_username}}"
+    password: "{{users_data[0].cluster_password}}"
+    log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+    name: mdisk0
+    state: present
+    level: "{{Level}}"
+    drivecount: "{{TotalDrive|int}}"
+    driveclass: 0
+    encrypt: no
+    mdiskgrp: mdg0
+
+- name: Get drive info
+  register: results
+  ibm_svcinfo_command:
+    command: "svcinfo lsdrive"
+    clustername: "{{users_data[1].cluster_ip}}"
+    username: "{{users_data[1].cluster_username}}"
+    password: "{{users_data[1].cluster_password}}"
+    log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+
+- name: set drive id
+  set_fact:
+    drive_id: "{{item['id']}}"
+  loop: "{{(results['stdout'])}}"
+
+- name: set drive status
+  set_fact:
+    drive_status1: "{{item['use']}}"
+  loop: "{{(results['stdout'])}}"
+
+- name: Drive count
+  set_fact:
+    TotalDrive2: "{{drive_id|int + 1|int}}"
+
+- name: set level
+  set_fact:
+    Level2:
+
+- name: Decide Level
+  set_fact:
+    Level2: raid1
+  when: (TotalDrive2|int <= 3)
+
+- name: Decide Level
+  set_fact:
+    Level2: raid6
+  when: (TotalDrive2|int > 3)
+
+- name: set variable as a member
+  set_fact:
+    member: member
+
+- name: Make drive in candidate state
+  ibm_svctask_command:
+    command: [ "svctask chdrive -use candidate {{item}}" ]
+    clustername: "{{users_data[1].cluster_ip}}"
+    username: "{{users_data[1].cluster_username}}"
+    password: "{{users_data[1].cluster_password}}"
+    log_path: "{{ log_path | default('/tmp/ansiblePB.debug') }}"
+  with_sequence: start=0 end="{{drive_id}}"
+  when: drive_status1 != member
+
+- name: create distributed array on secondary
+  ibm_svc_mdisk:
+    clustername: "{{users_data[1].cluster_ip}}"
+    username: "{{users_data[1].cluster_username}}"
+    password: "{{users_data[1].cluster_password}}"
+    log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+    name: mdisk0
+    state: present
+    level: "{{Level2}}"
+    drivecount: "{{TotalDrive2|int}}"
+    driveclass: 0
+    encrypt: no
+    mdiskgrp: mdg0
+
+- name: Create provisioning policy on both the clusters
+  ibm_sv_manage_provisioning_policy:
+    clustername: "{{item.cluster_ip}}"
+    username: "{{item.cluster_username}}"
+    password: "{{item.cluster_password}}"
+    log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+    name: provisioning_policy0
+    capacitysaving: "drivebased"
+    state: present
+  loop: "{{users_data}}"
diff --git a/playbooks/PBR/PBR_variable.yml b/playbooks/PBR/PBR_variable.yml
new file mode 100644
index 0000000..e9de47a
--- /dev/null
+++ b/playbooks/PBR/PBR_variable.yml
@@ -0,0 +1,17 @@
+users_data:
+  - cluster_name: <primary_cluster_name>
+    cluster_ip: <primary_cluster_ip>
+    cluster_username: <primary_cluster_username>
+    cluster_password: <primary_cluster_password>
+
+  - cluster_name: <secondary_cluster_name>
+    cluster_ip: <secondary_cluster_ip>
+    cluster_username: <secondary_cluster_username>
+    cluster_password: <secondary_cluster_password>
+
+host_name: <host_name>
+volume_size: <volume_size>
+volume_prefix: <volume_prefix>
+volume_group_name: <volume_group_name>
+number_of_volumes: <number_of_volumes>
+log_path: <log_path>
diff --git a/playbooks/PBR/README.txt b/playbooks/PBR/README.txt
new file mode 100644
index 0000000..dcf92b3
--- /dev/null
+++ b/playbooks/PBR/README.txt
@@ -0,0 +1,32 @@
+Objective:
+Set up mTLS and configure Policy Based Replication.
+
+Prerequisite:
+- The IBM Storage Virtualize Ansible collection must be installed.
+
+These playbooks set up mTLS and configure Policy Based Replication between a primary cluster and a secondary cluster.
+  - They use IBM Storage Virtualize Ansible modules.
+  - They set up mTLS on both sites and configure Policy Based Replication from the source cluster to the destination cluster. Together they create the Data Reduction Pools, link them, and create the provisioning and replication policies.
+  - They also create multiple volumes with the specified prefix, place them in a volume group, and map all of them to the specified host.
+
+There are 4 files in total used for this use-case.
+  1. main.yml:
+     This is the main playbook file; the user needs to execute only this playbook, and it leverages the other 3 files (see the run command below the file list).
+     It first runs 'Create_mTLS.yml' and 'Create_mdiskgrp_drp_proviPolicy.yml', and then creates the volume group and its associated volumes with the volume_prefix name specified in the variable file 'PBR_variable.yml'. It also maps all the volumes to the specified host.
+     After the first execution of this playbook, subsequent runs can add volumes to an existing or new volume group using the existing replication and provisioning policies. The newly added volumes are mapped to the existing host object.
+
+  2. PBR_variable.yml:
+     This file has all the variables required by the playbooks (see the example below the file list).
+     - users_data : contains the primary cluster details (the cluster from which data is replicated) as well as the secondary cluster details (the cluster to which the volumes are replicated).
+     - host_name : the host to which all the volumes are mapped after creation. It is assumed that the host already exists on the primary cluster.
+     - volume* : parameters starting with 'volume' contain the volume details, such as the name prefix and the size (in GB) of the volumes to be created, as well as the volume group name.
+     - number_of_volumes : the number of volumes to be created and replicated between the clusters.
+     - log_path : the log path for the playbooks. If not specified, logs are generated at the default path '/tmp/ansiblePB.debug'.
+
+  3. Create_mTLS.yml:
+     This playbook sets up mTLS (mutual Transport Layer Security): it generates a certificate on each cluster, exports it, and creates the certificate truststore that contains the certificate bundle. This operation is performed on the primary as well as the secondary site. This playbook is called from 'main.yml'.
+
+  4. Create_mdiskgrp_drp_proviPolicy.yml:
+     This playbook checks the drive status and drive count and, based on that, creates the mdiskgrp (Data Reduction Pool) and a distributed array with the appropriate RAID level on both sites, and creates the provisioning policy. The pools are then linked and the replication policy is created by 'main.yml', which calls this playbook.
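+
+  Example PBR_variable.yml (the values below are only illustrative placeholders, not defaults; replace them with the details of your own clusters, host and volumes):
+
+    users_data:
+      - cluster_name: cluster_A
+        cluster_ip: 192.0.2.10
+        cluster_username: superuser
+        cluster_password: passw0rd
+      - cluster_name: cluster_B
+        cluster_ip: 192.0.2.20
+        cluster_username: superuser
+        cluster_password: passw0rd
+    host_name: host0
+    volume_size: 5
+    volume_prefix: vol_
+    volume_group_name: volumegroup0
+    number_of_volumes: 10
+    log_path: /tmp/ansiblePB.debug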
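+
+  To run the use-case, fill in PBR_variable.yml and execute only the main playbook from this directory, for example:
+
+    ansible-playbook main.yml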
+
+  Authors: Akshada Thorat (akshada.thorat@ibm.com), Sandip Rajbanshi (Sandip.Rajbanshi@ibm.com)
diff --git a/playbooks/PBR/main.yml b/playbooks/PBR/main.yml
new file mode 100644
index 0000000..da7f146
--- /dev/null
+++ b/playbooks/PBR/main.yml
@@ -0,0 +1,169 @@
+- name: Using the IBM Storage Virtualize collection for PBR configuration
+  hosts: localhost
+  collections:
+    - ibm.storage_virtualize
+  gather_facts: no
+  connection: local
+  vars_files:
+    - PBR_variable.yml
+  vars:
+    volume_count: "{{number_of_volumes | default(10)}}"
+    vol_prefix: "{{volume_prefix | default('vol_')}}"
+  tasks:
+    - name: Create mTLS on both the clusters
+      include_tasks: Create_mTLS.yml
+      no_log: true
+
+    - name: Create mdiskgrp_drp and provisionpolicy on both the clusters
+      include_tasks: Create_mdiskgrp_drp_proviPolicy.yml
+      no_log: true
+
+    - name: Get mdisk info
+      register: results
+      ibm_svcinfo_command:
+        command: "svcinfo lsmdiskgrp"
+        clustername: "{{users_data[0].cluster_ip}}"
+        username: "{{users_data[0].cluster_username}}"
+        password: "{{users_data[0].cluster_password}}"
+        log_path: "{{ log_path | default('/tmp/ansiblePB.debug') }}"
+
+    - name: show mdiskgrp status
+      set_fact:
+        uid: "{{item['replication_pool_link_uid']}}"
+      loop: "{{ (results['stdout']) }}"
+
+    - name: Get primary cluster id info
+      register: results
+      ibm_svcinfo_command:
+        command: "svcinfo lspartnership"
+        clustername: "{{users_data[1].cluster_ip}}"
+        username: "{{users_data[1].cluster_username}}"
+        password: "{{users_data[1].cluster_password}}"
+        log_path: "{{ log_path | default('/tmp/ansiblePB.debug') }}"
+
+    - name: show cluster id
+      set_fact:
+        uid1: "{{item['id']}}"
+      loop: "{{ (results['stdout']) }}"
+
+    - name: Get secondary cluster id info
+      register: results
+      ibm_svcinfo_command:
+        command: "svcinfo lspartnership"
+        clustername: "{{users_data[0].cluster_ip}}"
+        username: "{{users_data[0].cluster_username}}"
+        password: "{{users_data[0].cluster_password}}"
+        log_path: "{{ log_path | default('/tmp/ansiblePB.debug') }}"
+
+    - name: show cluster id
+      set_fact:
+        uid2: "{{item['id']}}"
+      loop: "{{ (results['stdout']) }}"
+
+    - name: set provisionpolicy and replicationpoollinkuid on primary site
+      ibm_svc_mdiskgrp:
+        clustername: "{{users_data[0].cluster_ip}}"
+        username: "{{users_data[0].cluster_username}}"
+        password: "{{users_data[0].cluster_password}}"
+        log_path: "{{ log_path | default('/tmp/ansiblePB.debug') }}"
+        name: mdg0
+        state: present
+        replicationpoollinkuid: "{{uid}}"
+        replication_partner_clusterid: "{{uid2}}"
+        provisioningpolicy: provisioning_policy0
+
+    - name: set provisionpolicy and replicationpoollinkuid on secondary site
+      ibm_svc_mdiskgrp:
+        clustername: "{{users_data[1].cluster_ip}}"
+        username: "{{users_data[1].cluster_username}}"
+        password: "{{users_data[1].cluster_password}}"
+        log_path: "{{ log_path | default('/tmp/ansiblePB.debug') }}"
+        name: mdg0
+        state: present
+        replicationpoollinkuid: "{{uid}}"
+        replication_partner_clusterid: "{{uid1}}"
+        provisioningpolicy: provisioning_policy0
+
+    - name: Create replication policy
+      ibm_sv_manage_replication_policy:
+        clustername: "{{users_data[0].cluster_ip}}"
+        username: "{{users_data[0].cluster_username}}"
+        password: "{{users_data[0].cluster_password}}"
+        log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+        name: replication_policy_1
+        topology: 2-site-async-dr
+        location1system: "{{uid1}}"
+        location1iogrp: 0
+        location2system: "{{uid2}}"
+        location2iogrp: 0
+        rpoalert: 300
+        state: present
+
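+    # The remaining tasks create the volume group with replication_policy_1 attached,
+    # create and later remove a temporary volume named 'vol', create the requested number
+    # of prefixed volumes in the group, and map them to the host from PBR_variable.yml.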
+    - name: Create volume group
+      ibm_svc_manage_volumegroup:
+        clustername: "{{users_data[0].cluster_ip}}"
+        username: "{{users_data[0].cluster_username}}"
+        password: "{{users_data[0].cluster_password}}"
+        log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+        name: "{{volume_group_name}}"
+        replicationpolicy: replication_policy_1
+        state: present
+
+    - name: Create master volume
+      ibm_svc_manage_volume:
+        clustername: "{{users_data[0].cluster_ip}}"
+        username: "{{users_data[0].cluster_username}}"
+        password: "{{users_data[0].cluster_password}}"
+        log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+        name: vol
+        state: present
+        pool: "mdg0"
+        size: "2"
+        unit: gb
+
+    - name: Create volume
+      ibm_svc_manage_volume:
+        clustername: "{{users_data[0].cluster_ip}}"
+        username: "{{users_data[0].cluster_username}}"
+        password: "{{users_data[0].cluster_password}}"
+        log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+        name: vol
+        state: present
+        volumegroup: "{{volume_group_name}}"
+
+    - name: Create volumes
+      ibm_svc_manage_volume:
+        clustername: "{{users_data[0].cluster_ip}}"
+        username: "{{users_data[0].cluster_username}}"
+        password: "{{users_data[0].cluster_password}}"
+        log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+        name: "{{vol_prefix}}{{item}}"
+        state: "present"
+        pool: "mdg0"
+        size: "{{volume_size}}"
+        unit: "gb"
+        volumegroup: "{{volume_group_name}}"
+      with_sequence: start=1 end="{{volume_count}}"
+
+    - name: Delete volume
+      ibm_svc_manage_volume:
+        clustername: "{{users_data[0].cluster_ip}}"
+        username: "{{users_data[0].cluster_username}}"
+        password: "{{users_data[0].cluster_password}}"
+        log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+        name: vol
+        state: absent
+
+    - name: Map volumes to host
+      register: results
+      ibm_svc_vol_map:
+        clustername: "{{users_data[0].cluster_ip}}"
+        username: "{{users_data[0].cluster_username}}"
+        password: "{{users_data[0].cluster_password}}"
+        log_path: "{{log_path | default('/tmp/ansiblePB.debug')}}"
+        state: present
+        volname: "{{vol_prefix}}{{item}}"
+        host: "{{host_name}}"
+      with_sequence: start=1 end="{{volume_count}}"