|
1 | 1 | --- |
2 | | -### This standalone playbook can be used to prep a COBBLER-IMAGED testnode |
| 2 | +### This role is used to prep a {FOG|MAAS}-IMAGED testnode |
3 | 3 | ### so that it can be used to capture an OS image for FOG. |
4 | 4 | ### This playbook is needed for a few reasons:
5 | 5 | ### - NIC configs get hard-coded into the captured FOG images, so nodes reimaged by FOG come up without working network config
| 6 | +### - SSH host keys need to be deleted |
| 7 | +### - apt and cloud-init services need to be disabled |
6 | 8 |
|
7 | 9 | - hosts: |
8 | 10 | - testnodes |
9 | | - become: true |
| 11 | + roles: |
| 12 | + - prep-fog-capture |
10 | 13 | gather_facts: false |
11 | | - tasks: |
12 | | - |
13 | | - # (Missing in RHEL8) |
14 | | - - name: Check for /usr/bin/python |
15 | | - shell: echo marco |
16 | | - register: polo |
17 | | - ignore_errors: true |
18 | | - |
19 | | - - name: Set ansible_python_interpreter=/usr/bin/python3 |
20 | | - set_fact: |
21 | | - ansible_python_interpreter: /usr/bin/python3 |
22 | | - when: polo is failed |
23 | | - |
24 | | - # Now that we know where python is, we can gather_facts |
25 | | - - setup: |
26 | | - |
27 | | - # We need to leave /.cephlab_rc_local or else each FOG reimage would tell Cobbler to run ceph-cm-ansible |
28 | | - - name: Remove lock files and udev rules |
29 | | - file: |
30 | | - path: "{{ item }}" |
31 | | - state: absent |
32 | | - with_items: |
33 | | - - /etc/udev/rules.d/70-persistent-net.rules |
34 | | - - /.cephlab_net_configured |
35 | | - - /ceph-qa-ready |
36 | | - |
37 | | - - name: Get list of ifcfg scripts from host used to capture image |
38 | | - shell: "ls -1 /etc/sysconfig/network-scripts/ifcfg-* | grep -v ifcfg-lo" |
39 | | - register: ifcfg_scripts |
40 | | - when: ansible_os_family == "RedHat" |
41 | | - ignore_errors: true |
42 | | - |
43 | | - - name: Get list of ifcfg scripts from host used to capture image |
44 | | - shell: "ls -1 /etc/sysconfig/network/ifcfg-* | grep -v ifcfg-lo" |
45 | | - register: ifcfg_scripts |
46 | | - when: ansible_os_family == "Suse" |
47 | | - ignore_errors: true |
48 | | - |
49 | | - - name: Delete ifcfg scripts |
50 | | - file: |
51 | | - path: "{{ item }}" |
52 | | - state: absent |
53 | | - with_items: "{{ ifcfg_scripts.stdout_lines|default([]) }}" |
54 | | - when: ifcfg_scripts is defined |
55 | | - |
56 | | - - name: Remove /var/lib/ceph mountpoint from fstab |
57 | | - shell: sed -i '/\/var\/lib\/ceph/d' /etc/fstab |
58 | | - |
59 | | - - name: Unmount /var/lib/ceph |
60 | | - ansible.posix.mount: |
61 | | - path: /var/lib/ceph |
62 | | - state: unmounted |
63 | | - |
64 | | - - name: Install one-shot service to regenerate SSH host keys on first boot |
65 | | - copy: |
66 | | - dest: /etc/systemd/system/regen-ssh-hostkeys.service |
67 | | - owner: root |
68 | | - group: root |
69 | | - mode: '0644' |
70 | | - content: | |
71 | | - [Unit] |
72 | | - Description=Regenerate SSH host keys on first boot |
73 | | - ConditionPathExists=!/etc/ssh/ssh_host_ed25519_key |
74 | | - Before=ssh.service |
75 | | - |
76 | | - [Service] |
77 | | - Type=oneshot |
78 | | - ExecStart=/usr/bin/ssh-keygen -A |
79 | | - ExecStartPost=/bin/systemctl disable regen-ssh-hostkeys.service |
80 | | - |
81 | | - [Install] |
82 | | - WantedBy=multi-user.target |
83 | | - |
84 | | - - name: Reload systemd daemon |
85 | | - systemd: |
86 | | - daemon_reload: true |
87 | | - |
88 | | - - name: Enable regen-ssh-hostkeys.service |
89 | | - systemd: |
90 | | - name: regen-ssh-hostkeys.service |
91 | | - enabled: true |
92 | | - |
93 | | - - name: Get list of SSH host keys |
94 | | - shell: "ls -1 /etc/ssh/ssh_host_*" |
95 | | - register: ssh_host_keys |
96 | | - ignore_errors: true |
97 | | - |
98 | | - # Key regeneration is done automatically on CentOS firstboot. |
99 | | - # For Ubuntu, we'll add `dpkg-reconfigure openssh-server` to rc.local |
100 | | - - name: Delete SSH host keys so they're generated during firstboot on cloned machines |
101 | | - file: |
102 | | - path: "{{ item }}" |
103 | | - state: absent |
104 | | - with_items: "{{ ssh_host_keys.stdout_lines|default([]) }}" |
105 | | - when: ssh_host_keys is defined |
106 | | - |
107 | | - - name: Unsubscribe RHEL |
108 | | - command: subscription-manager unregister |
109 | | - when: ansible_distribution == "RedHat" |
110 | | - failed_when: false |
111 | | - |
112 | | - # A file gets leftover when a testnode is registered with Satellite that caused |
113 | | - # each registered subsequent testnode to report the wrong hostname |
114 | | - - name: Clean up katello facts |
115 | | - file: |
116 | | - path: /etc/rhsm/facts/katello.facts |
117 | | - state: absent |
118 | | - when: ansible_distribution == "RedHat" |
119 | | - |
120 | | - # https://bugzilla.redhat.com/show_bug.cgi?id=1814337 |
121 | | - - name: Disable dnf-makecache service |
122 | | - service: |
123 | | - name: dnf-makecache.timer |
124 | | - state: stopped |
125 | | - enabled: no |
126 | | - when: |
127 | | - - ansible_os_family == "RedHat" |
128 | | - - ansible_distribution_major_version|int >= 8 |
129 | | - |
130 | | - # Hopefully fixes https://github.com/ceph/ceph-cm-ansible/pull/544#issuecomment-599076564 |
131 | | - - name: Clean DNF cache |
132 | | - shell: "dnf clean all && rm -rf /var/cache/dnf/*" |
133 | | - when: |
134 | | - - ansible_os_family == "RedHat" |
135 | | - - ansible_distribution_major_version|int >= 8 |
136 | | - |
137 | | - - set_fact: |
138 | | - ntp_service: ntp |
139 | | - when: ansible_os_family == "Debian" |
140 | | - |
141 | | - - set_fact: |
142 | | - ntp_service: ntpd |
143 | | - when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int <= 7 |
144 | | - |
145 | | - - set_fact: |
146 | | - ntp_service: chronyd |
147 | | - when: (ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 8) or |
148 | | - ansible_os_family == "Suse" |
149 | | - |
150 | | - - name: "Stop {{ ntp_service }} service" |
151 | | - service: |
152 | | - name: "{{ ntp_service }}" |
153 | | - state: stopped |
154 | | - when: '"ntp" in ntp_service' |
155 | | - |
156 | | - # The theory here is although we do have the ntp service running on boot, |
157 | | - # if the time is off, it slowly drifts back in sync. Since our testnodes |
158 | | - # are ephemeral, they don't ever have enough time to correctly drift |
159 | | - # back to the correct time. So we'll force it in the captured OS images. |
160 | | - - name: Install ntpdate command if missing |
161 | | - package: |
162 | | - name: ntpdate |
163 | | - state: present |
164 | | - when: '"ntp" in ntp_service' |
165 | | - |
166 | | - - name: Force time synchronization using stepping | ntp |
167 | | - command: "ntpdate -b {{ ntp_servers|join(' ') }}" |
168 | | - when: '"ntp" in ntp_service' |
169 | | - |
170 | | - - name: "Start {{ ntp_service }}" |
171 | | - service: |
172 | | - name: "{{ ntp_service }}" |
173 | | - state: started |
174 | | - |
175 | | - # chronyd needs to be started in order to force time sync. This differs from ntpd. |
176 | | - - name: Force time synchronization using stepping | chrony |
177 | | - command: chronyc -a makestep |
178 | | - when: '"chrony" in ntp_service' |
179 | | - |
180 | | - - name: Sync the hardware clock |
181 | | - command: "hwclock --systohc" |
| 14 | + become: true |
0 commit comments