From 23815b540da598b2c3c2149703aa661411594205 Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Tue, 3 Dec 2024 15:39:05 +0330 Subject: [PATCH 01/25] feat(compose): comelete compose prompt --- app/directory_generators/compose_generator.py | 1 - .../MyAnsible/roles/init_k8s/tasks/cni.yml | 20 +++ .../roles/init_k8s/tasks/initk8s.yml | 61 ++++++++ .../MyAnsible/roles/init_k8s/tasks/main.yml | 8 ++ .../MyAnsible/roles/k8s/handlers/main.yml | 12 ++ app/media/MyAnsible/roles/k8s/tasks/k8s.yml | 130 ++++++++++++++++++ app/media/MyAnsible/roles/k8s/tasks/main.yml | 3 + app/media/MyCompose/docker-compose.yaml | 4 + app/models/compose_models.py | 23 +++- app/template_generators/docker/compose.py | 39 +++++- 10 files changed, 298 insertions(+), 3 deletions(-) create mode 100644 app/media/MyAnsible/roles/init_k8s/tasks/cni.yml create mode 100644 app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml create mode 100644 app/media/MyAnsible/roles/init_k8s/tasks/main.yml create mode 100644 app/media/MyAnsible/roles/k8s/handlers/main.yml create mode 100644 app/media/MyAnsible/roles/k8s/tasks/k8s.yml create mode 100644 app/media/MyAnsible/roles/k8s/tasks/main.yml create mode 100644 app/media/MyCompose/docker-compose.yaml diff --git a/app/directory_generators/compose_generator.py b/app/directory_generators/compose_generator.py index acd5aaec..e69de29b 100644 --- a/app/directory_generators/compose_generator.py +++ b/app/directory_generators/compose_generator.py @@ -1 +0,0 @@ -Hello! It looks like you entered just the letter "M." How can I assist you today? \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml new file mode 100644 index 00000000..516dbff3 --- /dev/null +++ b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml @@ -0,0 +1,20 @@ +- block: + - name: Check if Calico CRDs exist + command: kubectl get crd felixconfigurations.crd.projectcalico.org + register: calico_crd_check + ignore_errors: true + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_operator_url }} + retries: 3 + delay: 3 + + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_crd_url }} + retries: 3 + delay: 3 + delegate_to: "{{ groups['k8s_masters'][0] }}" + when: calico_crd_check.rc != 0 + run_once: true diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml new file mode 100644 index 00000000..ff134a27 --- /dev/null +++ b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml @@ -0,0 +1,61 @@ +- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Init cluster | Copy kubeadmcnf.yaml + template: + src: kubeadmcnf.yml.j2 + dest: /root/kubeadmcnf.yaml + + - name: Init cluster | Initiate cluster on node groups['kube_master'][0] + shell: kubeadm init --config=/root/kubeadmcnf.yaml + register: kubeadm_init + until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr + notify: Restart kubelet + + when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: 
/etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- name: Sleep for 300 seconds and reboot the Master1 server + wait_for: + timeout: 300 + delegate_to: localhost + +- name: Reboot the servers + command: reboot + async: 1 + poll: 0 + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- name: Sleep for 300 seconds to Master1 up and running + wait_for: + timeout: 300 + delegate_to: localhost + +- name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/main.yml b/app/media/MyAnsible/roles/init_k8s/tasks/main.yml new file mode 100644 index 00000000..bb40ddec --- /dev/null +++ b/app/media/MyAnsible/roles/init_k8s/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# tasks file for init_k8s + +- name: Initialize kubernetes cluster + include_tasks: initk8s.yml + +- name: Initialize Calico CNI + include_tasks: cni.yml diff --git a/app/media/MyAnsible/roles/k8s/handlers/main.yml b/app/media/MyAnsible/roles/k8s/handlers/main.yml new file mode 100644 index 00000000..de036f51 --- /dev/null +++ b/app/media/MyAnsible/roles/k8s/handlers/main.yml @@ -0,0 +1,12 @@ +--- +# handlers file for k8s + +- name: Remove temporary GPG key file + file: + path: "/tmp/docker.list" + state: absent + +- name: Restart kubelet + service: + name: kubelet + state: restarted diff --git a/app/media/MyAnsible/roles/k8s/tasks/k8s.yml b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml new file mode 100644 index 00000000..a346e99b --- /dev/null +++ b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml @@ -0,0 +1,130 @@ +- name: Disable SWAP since kubernetes can't work with swap enabled + shell: | + swapoff -a + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled + replace: + path: /etc/fstab + regexp: '^([^#].*?\sswap\ssw\s+.*)$' + replace: '# \1' + +- name: Check if ufw is installed + package_facts: + manager: "auto" + +- name: Disable ufw # just in Ubuntu + ufw: + state: disabled + when: "'ufw' in ansible_facts.packages" + +- name: Ensure kernel modules for containerd are enabled + lineinfile: + path: /etc/modules-load.d/containerd.conf + line: "{{ item }}" + create: yes + state: present + loop: + - overlay + - br_netfilter + +- name: Load kernel modules + command: + cmd: "modprobe {{ item }}" + loop: + - overlay + - br_netfilter + +- name: Ensure sysctl settings for Kubernetes are present + blockinfile: + path: /etc/sysctl.d/kubernetes.conf + block: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + create: yes + marker: "# {mark} ANSIBLE MANAGED BLOCK" + owner: root + group: root + mode: '0644' + +- name: Reload sysctl settings + command: + cmd: sysctl --system + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install required packages + apt: + pkg: + - ca-certificates + - curl + - gnupg + - lsb-release + - gpg + state: present + update_cache: yes + +- name: Ensure the /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' # Adjust the permissions as necessary + owner: root # Set the owner, if required + group: root + +- name: Remove existing Docker GPG key if it exists + file: + path: '{{ docker_gpg_key_path }}' + state: absent + +- name: Download Docker GPG key + shell: | + curl -fsSL {{ 
docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} + +- name: Determine the architecture + command: dpkg --print-architecture + register: architecture + +- name: Determine the distribution codename + command: lsb_release -cs + register: distribution_codename + +- name: Add Docker APT repository + lineinfile: + path: /etc/apt/sources.list.d/docker.list + create: yes + line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" + state: present + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install required packages (containerd) + apt: + pkg: + - containerd.io + state: present + +- name: Generate default containerd configuration + shell: + cmd: containerd config default > /etc/containerd/config.toml + +- name: Replace SystemdCgroup from false to true in containerd config + replace: + path: /etc/containerd/config.toml + regexp: 'SystemdCgroup = false' + replace: 'SystemdCgroup = true' + +- name: Restart containerd service + systemd: + name: containerd + state: restarted + daemon_reload: yes + +- name: Enable containerd service + systemd: + name: containerd + enabled: yes diff --git a/app/media/MyAnsible/roles/k8s/tasks/main.yml b/app/media/MyAnsible/roles/k8s/tasks/main.yml new file mode 100644 index 00000000..a0ac6054 --- /dev/null +++ b/app/media/MyAnsible/roles/k8s/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: Install kubernetes packages + include_tasks: k8s.yml diff --git a/app/media/MyCompose/docker-compose.yaml b/app/media/MyCompose/docker-compose.yaml new file mode 100644 index 00000000..e48d178b --- /dev/null +++ b/app/media/MyCompose/docker-compose.yaml @@ -0,0 +1,4 @@ +services: + web_server: + image: nginx:latest + build: diff --git a/app/models/compose_models.py b/app/models/compose_models.py index 163ee3cb..4294f751 100644 --- a/app/models/compose_models.py +++ b/app/models/compose_models.py @@ -1,5 +1,5 @@ from typing import List, Optional -from pydantic import BaseModel, validator, ValidationError +from pydantic import BaseModel, validator, ValidationError, computed_field class Port(BaseModel): machine_port:int = 80 @@ -12,9 +12,19 @@ class EnvironmentVariable(BaseModel): name:str = 'foo' value:str = "bar" + @computed_field + @property + def env_full(self) -> int: + return f"{self.name}:{self.value}" + class Volume(BaseModel): local_dir: str = './nginx/nginx.conf' container_dir:str = '/etc/nginx/nginx.conf' + + @computed_field + @property + def volume(self) -> int: + return f"{self.local_dir}:{self.container_dir}" class Build(BaseModel): context:str @@ -30,6 +40,17 @@ class Service(BaseModel): networks:List[Network] environments:List[EnvironmentVariable] + @computed_field + @property + def image_full(self) -> int: + return f"{self.image}:{self.version}" + + @computed_field + @property + def volumes_full(self) -> int: + return [i.volume for i in self.volumes] + + class DockerCompose(BaseModel): services: List[Service] diff --git a/app/template_generators/docker/compose.py b/app/template_generators/docker/compose.py index ae8e052d..79b739ff 100644 --- a/app/template_generators/docker/compose.py +++ b/app/template_generators/docker/compose.py @@ -1,3 +1,40 @@ def docker_compose_generator(input): - prompt = """M""" + compose_services = input.services + services = [i.container_name for i in compose_services] + images = [{i.container_name:i.image_full} for i in compose_services] + volumes = [{i.container_name:i.volumes_full} for i in compose_services] + depends_on = 
[{i.container_name:i.depends_on} for i in compose_services] + ports = [{i.container_name:i.ports} for i in compose_services] + env = [{i.container_name:i.environments} for i in compose_services] + networks = [{i.container_name:i.networks} for i in compose_services] + + + prompt = f""" + + generate a python code (with out any ```python entry or additionals) with generates a docker-compose.yaml file in the directory 'app/media/MyCompose' + + + + + finally just give me a python code without any note that can generate a project folder with the + given schema without ```python entry. and we dont need any base directory in the python code. + the final ansible template must work very well without any error! + + the python code you give me, must have structure like that: + + import os + project_name = "app/media/MyCompose" + foo_dir = os.path.join(project_name, "bar") + x_dir = os.path.join(modules_dir, "y") + + # Create project directories + os.makedirs(compose_dir, exist_ok=True) + + # Create main.tf + with open(os.path.join(project_name, "main.tf"), "w") as main_file: + # any thing you need + + + + """ return prompt \ No newline at end of file From 7c593ab1266599c8d7bca6634a84a637b6020045 Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Tue, 3 Dec 2024 15:40:19 +0330 Subject: [PATCH 02/25] fix(kuber): remove lb --- app/models/ansible_models.py | 1 - app/routes/ansible.py | 5 +---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/app/models/ansible_models.py b/app/models/ansible_models.py index 748da389..7d80f39c 100644 --- a/app/models/ansible_models.py +++ b/app/models/ansible_models.py @@ -37,7 +37,6 @@ class AnsibleInstallKuber(AnsibleBase): os: str = 'ubuntu' k8s_worker_nodes: List[str] k8s_master_nodes: List[str] - lb_nodes: List[str] version:str = "1.31" diff --git a/app/routes/ansible.py b/app/routes/ansible.py index a3c61656..f24fedf9 100644 --- a/app/routes/ansible.py +++ b/app/routes/ansible.py @@ -48,9 +48,6 @@ async def ansible_install_generation_kuber(request:AnsibleInstallKuber) -> Outpu add_files_to_folder(files = ['app/media/kuber_configs/resolv.conf.j2'] , folder='app/media/MyAnsible/roles/preinstall/templates/') add_files_to_folder(files = ['app/media/kuber_configs/kubeadmcnf.yml.j2'] , folder='app/media/MyAnsible/roles/init_k8s/templates/') add_files_to_folder(files = ['app/media/kuber_configs/kubeadmcnf-join.yml.j2'] , folder='app/media/MyAnsible/roles/join_master/templates/') - add_files_to_folder(files = ['app/media/kuber_configs/check_apiserveer.sh.j2', - 'app/media/kuber_configs/haproxy.cfg.j2', - 'app/media/kuber_configs/keepalived.conf.j2' - ] , folder='app/media/MyAnsible/roles/lb/templates/') + return Output(output='output') \ No newline at end of file From 91a80bdf1dd14a44d35eb54c1c8cf13d4cf8d5e4 Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Tue, 3 Dec 2024 17:33:38 +0330 Subject: [PATCH 03/25] feat(compose): compelete compose prompt --- app/directory_generators/compose_generator.py | 39 +++++++++++++++++++ app/media/MyCompose/docker-compose.yaml | 27 ++++++++++++- app/template_generators/docker/compose.py | 21 +++++++++- 3 files changed, 85 insertions(+), 2 deletions(-) diff --git a/app/directory_generators/compose_generator.py b/app/directory_generators/compose_generator.py index e69de29b..0627c345 100644 --- a/app/directory_generators/compose_generator.py +++ b/app/directory_generators/compose_generator.py @@ -0,0 +1,39 @@ +import os + +project_name = "app/media/MyCompose" +compose_file_path = os.path.join(project_name, "docker-compose.yaml") + 
+# Create project directories +os.makedirs(project_name, exist_ok=True) + +# Create docker-compose.yaml +with open(compose_file_path, "w") as compose_file: + compose_file.write("version: '3'\n") + compose_file.write("services:\n") + compose_file.write(" web_server:\n") + compose_file.write(" image: nginx:latest\n") + compose_file.write(" volumes:\n") + compose_file.write(" - ./nginx/nginx.conf:/etc/nginx/nginx.conf\n") + compose_file.write(" depends_on:\n") + compose_file.write(" - string\n") + compose_file.write(" ports:\n") + compose_file.write(" - '80:80'\n") + compose_file.write(" environment:\n") + compose_file.write(" - foo=bar\n") + compose_file.write(" networks:\n") + compose_file.write(" - app_network\n") + compose_file.write(" monitoring_server:\n") + compose_file.write(" image: grafana:latest\n") + compose_file.write(" volumes:\n") + compose_file.write(" - ./nginx/nginx.conf:/etc/nginx/nginx.conf\n") + compose_file.write(" depends_on:\n") + compose_file.write(" - string\n") + compose_file.write(" ports:\n") + compose_file.write(" - '82:80'\n") + compose_file.write(" environment:\n") + compose_file.write(" - foo=bar\n") + compose_file.write(" networks:\n") + compose_file.write(" - app_network\n") + compose_file.write("networks:\n") + compose_file.write(" app_network:\n") + compose_file.write(" driver: bridge\n") \ No newline at end of file diff --git a/app/media/MyCompose/docker-compose.yaml b/app/media/MyCompose/docker-compose.yaml index e48d178b..5a225be0 100644 --- a/app/media/MyCompose/docker-compose.yaml +++ b/app/media/MyCompose/docker-compose.yaml @@ -1,4 +1,29 @@ +version: '3' services: web_server: image: nginx:latest - build: + volumes: + - ./nginx/nginx.conf:/etc/nginx/nginx.conf + depends_on: + - string + ports: + - '80:80' + environment: + - foo=bar + networks: + - app_network + monitoring_server: + image: grafana:latest + volumes: + - ./nginx/nginx.conf:/etc/nginx/nginx.conf + depends_on: + - string + ports: + - '82:80' + environment: + - foo=bar + networks: + - app_network +networks: + app_network: + driver: bridge diff --git a/app/template_generators/docker/compose.py b/app/template_generators/docker/compose.py index 79b739ff..f2589073 100644 --- a/app/template_generators/docker/compose.py +++ b/app/template_generators/docker/compose.py @@ -1,4 +1,5 @@ def docker_compose_generator(input): + compose_network = input.network.name compose_services = input.services services = [i.container_name for i in compose_services] images = [{i.container_name:i.image_full} for i in compose_services] @@ -13,13 +14,31 @@ def docker_compose_generator(input): generate a python code (with out any ```python entry or additionals) with generates a docker-compose.yaml file in the directory 'app/media/MyCompose' - + the docker-compose.yaml, must following there instructions: + the version must be = 3 + set services following this list: {services} + set images to serivce following this dict : {images} + set volumes to service following this dict : {volumes} + set depends_on to service following this dict : {depends_on} + set ports to service following this dict : {ports} + set environment to service following this dict : {env} + set netwotks to service following this dict : {networks} + + + finally, at the end of docker-compose file, add following block: + ``` + networks: + {compose_network}: + driver: bridge + + ``` finally just give me a python code without any note that can generate a project folder with the given schema without ```python entry. 
and we dont need any base directory in the python code. the final ansible template must work very well without any error! + the python code you give me, must have structure like that: import os From 72814e8115d56c779f43a8a33539875af855b9ff Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Tue, 3 Dec 2024 17:51:05 +0330 Subject: [PATCH 04/25] nothing --- app/media/MyCompose/docker-compose.yaml | 2 + .../ansible/install/kuber.py | 1677 ----------------- 2 files changed, 2 insertions(+), 1677 deletions(-) diff --git a/app/media/MyCompose/docker-compose.yaml b/app/media/MyCompose/docker-compose.yaml index 5a225be0..1bbac8cc 100644 --- a/app/media/MyCompose/docker-compose.yaml +++ b/app/media/MyCompose/docker-compose.yaml @@ -12,6 +12,7 @@ services: - foo=bar networks: - app_network + monitoring_server: image: grafana:latest volumes: @@ -24,6 +25,7 @@ services: - foo=bar networks: - app_network + networks: app_network: driver: bridge diff --git a/app/template_generators/ansible/install/kuber.py b/app/template_generators/ansible/install/kuber.py index 356d0a12..e69de29b 100644 --- a/app/template_generators/ansible/install/kuber.py +++ b/app/template_generators/ansible/install/kuber.py @@ -1,1677 +0,0 @@ -def ansible_kuber_install(input): - - kubernetes_ansible_port = input.ansible_port - kubernetes_ansible_user = input.ansible_user - k8s_master_nodes = input.k8s_master_nodes - k8s_worker_nodes = input.k8s_worker_nodes - k8s_version = input.version - sections = { - "[all]": [f"{name} private_ip=x.x.x.x" for name in k8s_master_nodes + k8s_worker_nodes], - "[k8s]": k8s_master_nodes + k8s_worker_nodes, - "[k8s_masters]": k8s_master_nodes, - "[k8s_workers]": k8s_worker_nodes, - } - kubernetes_inventory = "\n\n".join(f"{section}\n" + "\n".join(entries) for section, entries in sections.items()) - - inventory_hostname = "{{ inventory_hostname }}" - item_in_task = "{{ item }}" - ufw_in_task = "'ufw'" - docker_gpg_key_path_in_task = "{{ docker_gpg_key_path }}" - docker_gpg_key_url_in_task = "{{ docker_gpg_key_url }}" - architecture_stdout_in_task = "{{ architecture.stdout }}" - docker_apt_repo_in_task = "{{ docker_apt_repo }}" - distribution_codename_stdout_in_task = "{{ distribution_codename.stdout }}" - kubernetes_gpg_keyring_path_in_task = "{{ kubernetes_gpg_keyring_path }}" - kubernetes_gpg_key_url_in_task = "{{ kubernetes_gpg_key_url }}" - kubernetes_apt_repo_in_task = "{{ kubernetes_apt_repo }}" - private_ip_in_task = "{{ private_ip }}" - hostvars_private_ip_in_task = "{{ hostvars[item].private_ip }}" - domain_in_task = "{{ domain }}" - groups_all_in_task = "{{ groups['all'] }}" - hostvars_groups_k8s_masters_private_ip_in_task = "{{ hostvars[groups['k8s_masters'][0]].private_ip }}" - apiserver_url_in_task = "{{ apiserver_url }}" - groups_k8s_masters_in_task = "{{ groups['k8s_masters'][0] }}" - calico_operator_url_in_task = "{{ calico_operator_url }}" - calico_crd_url_in_task = "{{ calico_crd_url }}" - join_command_stdout_lines_in_task = "{{ join_command.stdout_lines[0] }}" - kubeadm_cert_key_stdout_lines_in_task = "{{ kubeadm_cert_key.stdout_lines[2] }}" - hostvars_k8s_masters_control_plane_certkey_in_task = "{{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }}" - cri_socket_in_task = "{{ cri_socket }}" - - - - prompt = f""" - Generate a Python code to generate an Ansible project (project name is app/media/MyAnsible) - that dynamically provisions Ansible resources ensuring a modular, flexible structure. 
Only provide - Python code, no explanations or markdown formatting, without ```python entry. - The project should be organized as follows: - - The structure of this project must be as follows: - ``` - ├── ansible.cfg - ├── group_vars - │   |── all - │   - ├── hosts - ├── host_vars - ├── kubernetes_playbook.yml - └── roles - └── preinstall - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── basic.yml - │   └── main.yml - ├── templates - │   └── resolv.conf.j2 - └── vars - | └── main.yml - k8s - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── k8s.yml - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - | └── main.yml - init_k8s - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── cni.yml - │   └── initk8s.yml - │   └── main.yml - ├── templates - │   └── kubeadmcnf.yml.j2 - └── vars - | └── main.yml - join_master - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── join_master.yml - │   └── main.yml - ├── templates - │   └── kubeadmcnf-join.yml.j2 - └── vars - | └── main.yml - join_worker - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── join_worker.yml - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - └── main.yml - ``` - - The content of ansible.cfg must be as follows: - ``` - [defaults] - host_key_checking=false - ``` - - group_vars directory includes a single file called "all" and the content of this file must be as follows: - ``` - # General - install_ansible_modules: "true" - disable_transparent_huge_pages: "true" - - setup_interface: "false" - - # Network Calico see here for more details https://github.com/projectcalico/calico/releases - calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" - calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" - pod_network_cidr: "192.168.0.0/16" - - # DNS - resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online - - # Sanction shekan - use_iran: "true" # change it to "false" if you are outside of iran - - # Docker - docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" - docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" - docker_apt_repo: "https://download.docker.com/linux/ubuntu" - - # Kubernetes - kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" - kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v{k8s_version}/deb/Release.key" - kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v{k8s_version}/deb/" - k8s_version: "{k8s_version}.2" # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases - - # CRI - cri_socket: unix:///var/run/containerd/containerd.sock - - # Ansible Connection - - ansible_user: {kubernetes_ansible_user} - ansible_port: {kubernetes_ansible_port} - ansible_python_interpreter: "/usr/bin/python3" - domain: "devopsgpt.com" - apiserver_url: "devopsgpt.com" - ``` - - there is file called "hosts" which its content must be as follows: - ``` - {kubernetes_inventory} - ``` - - There is an empty directory called "host_vars" with no files included - - There is a file called "kubernetes_playbook.yml" which its content must be as follows: - 
``` - - hosts: all - roles: - - role: preinstall - gather_facts: yes - any_errors_fatal: true - tags: [preinstall] - - - hosts: k8s - roles: - - role: k8s - gather_facts: yes - any_errors_fatal: true - tags: [k8s] - - - hosts: k8s - roles: - - role: init_k8s - gather_facts: yes - any_errors_fatal: true - tags: [init_k8s] - - - hosts: k8s_masters - roles: - - role: preinstall - - role: k8s - - role: join_master - gather_facts: yes - any_errors_fatal: true - tags: [join_master] - - - hosts: k8s_workers - roles: - - role: preinstall - - role: k8s - - role: join_worker - gather_facts: yes - any_errors_fatal: true - tags: [join_worker] - ``` - - There is a directory called "roles" which a sub-directory called "preinstall" (roles/preinstall): - "preinstall" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (preinstall/tasks): This path has two files called "basic.yml" and "main.yml". - - 1. Create "preinstall/tasks/basic.yml" and it must be as follows:" - ``` - - name: Set timezone to UTC - timezone: - name: Etc/UTC - - - name: Set hostname - command: hostnamectl set-hostname {inventory_hostname} - - - name: Remove symlink resolve.conf - file: - path: "/etc/resolv.conf" - state: absent - ignore_errors: true - when: use_iran == "true" - - - name: Configure resolv.conf - template: - src: "resolv.conf.j2" - dest: "/etc/resolv.conf" - mode: "0644" - when: use_iran == "true" - - - name: Add hostname - lineinfile: - path: /etc/hosts - regexp: '^127\.0\.0\.1' - line: "127.0.0.1 {inventory_hostname} localhost" - owner: root - group: root - mode: 0644 - - - name: Install necessary tools - apt: - state: latest - update_cache: true - name: - - vim - - sudo - - wget - - curl - - telnet - - nload - - s3cmd - - cron - - ipset - - lvm2 - - python3 - - python3-setuptools - - python3-pip - - python3-apt - - intel-microcode - - htop - - tcpdump - - net-tools - - screen - - tmux - - byobu - - iftop - - bmon - - iperf - - sysstat - - ethtool - - plocate - - thin-provisioning-tools - - conntrack - - stress - - cpufrequtils - - rsync - - xz-utils - - build-essential - - apt-transport-https - - ca-certificates - - software-properties-common - - gnupg-agent - - iptables-persistent - - open-iscsi - - nfs-common - - tzdata - - tree - - - name: Fix broken packages - apt: - state: fixed - ``` - - 2. Create preinstall/tasks/main.yml and it must be as follows:" - ``` - --- - - name: basic setup - include_tasks: basic.yml - ``` - - There is a directory called "roles" which a sub-directory called "k8s" (roles/k8s): - "k8s" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (k8s/tasks): This path has two files called "k8s.yml" and "main.yml". - - 1. 
Create k8s/tasks/k8s.yml and it must be as follows:" - ``` - - name: Disable SWAP since kubernetes can't work with swap enabled - shell: | - swapoff -a - - - name: Disable SWAP in fstab since kubernetes can't work with swap enabled - replace: - path: /etc/fstab - regexp: '^([^#].*?\sswap\s+sw\s+.*)$' - replace: '# \\1' - - - name: Check if ufw is installed - package_facts: - manager: "auto" - - - name: Disable ufw # just in Ubuntu - ufw: - state: disabled - when: "{ufw_in_task} in ansible_facts.packages" - - - name: Ensure kernel modules for containerd are enabled - lineinfile: - path: /etc/modules-load.d/containerd.conf - line: "{item_in_task}" - create: yes - state: present - loop: - - overlay - - br_netfilter - - - name: Load kernel modules - command: - cmd: "modprobe {item_in_task}" - loop: - - overlay - - br_netfilter - - - name: Ensure sysctl settings for Kubernetes are present - blockinfile: - path: /etc/sysctl.d/kubernetes.conf - block: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - net.ipv4.ip_forward = 1 - create: yes - marker: "# {{mark}} ANSIBLE MANAGED BLOCK" - owner: root - group: root - mode: '0644' - - - name: Reload sysctl settings - command: - cmd: sysctl --system - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install required packages - apt: - pkg: - - ca-certificates - - curl - - gnupg - - lsb-release - - gpg - - state: present - update_cache: yes - - - name: Ensure the /etc/apt/keyrings directory exists - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' # Adjust the permissions as necessary - owner: root # Set the owner, if required - group: root - - - name: Remove existing Docker GPG key if it exists - file: - path: '{docker_gpg_key_path_in_task}' - state: absent - - - name: Download Docker GPG key - shell: | - curl -fsSL {docker_gpg_key_url_in_task} | gpg --dearmor -o {docker_gpg_key_path_in_task} - - - name: Determine the architecture - command: dpkg --print-architecture - register: architecture - - - name: Determine the distribution codename - command: lsb_release -cs - register: distribution_codename - - - name: Add Docker APT repository - lineinfile: - path: /etc/apt/sources.list.d/docker.list - create: yes - line: "deb [arch={architecture_stdout_in_task} signed-by={docker_gpg_key_path_in_task}] {docker_apt_repo_in_task} {distribution_codename_stdout_in_task} stable" - state: present - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install required packages (containerd) - apt: - pkg: - - containerd.io - state: present - - - name: Generate default containerd configuration - shell: - cmd: containerd config default > /etc/containerd/config.toml - - - name: Replace SystemdCgroup from false to true in containerd config - replace: - path: /etc/containerd/config.toml - regexp: 'SystemdCgroup = false' - replace: 'SystemdCgroup = true' - - - name: Restart containerd service - systemd: - name: containerd - state: restarted - daemon_reload: yes - - - name: Enable containerd service - systemd: - name: containerd - enabled: yes - - name: Delete the existing Kubernetes APT keyring file if it exists - file: - path: '{kubernetes_gpg_keyring_path_in_task}' - state: absent - - - name: Download Kubernetes GPG key - shell: | - curl -fsSL '{kubernetes_gpg_key_url_in_task}' | gpg --dearmor -o '{kubernetes_gpg_keyring_path_in_task}' - - - name: Add Kubernetes repo - apt_repository: - repo: "deb [signed-by={kubernetes_gpg_keyring_path_in_task}] {kubernetes_apt_repo_in_task} /" - state: 
present - filename: kubernetes.list - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install Kubernetes packages - apt: - name: "{item_in_task}" - state: present - loop: - - kubeadm={k8s_version}.2-1.1 - - kubelet={k8s_version}.2-1.1 - - kubectl={k8s_version}.2-1.1 - - - name: Hold Kubernetes packages - dpkg_selections: - name: "{item_in_task}" - selection: hold - loop: - - kubeadm - - kubelet - - kubectl - - containerd.io - - - name: Configure node ip - lineinfile: - path: /etc/default/kubelet - line: KUBELET_EXTRA_ARGS=--node-ip={private_ip_in_task} - create: yes - state: present - notify: Restart kubelet - - - name: Add hosts to /etc/hosts - lineinfile: - path: /etc/hosts - line: "{hostvars_private_ip_in_task} {item_in_task} {item_in_task}.{domain_in_task}" - state: present - create: no - loop: "{groups_all_in_task}" - when: hostvars[item].private_ip is defined - - - name: Add apiserver_url to point to the masters temporary" - lineinfile: - dest: /etc/hosts - line: "{hostvars_groups_k8s_masters_private_ip_in_task} {apiserver_url_in_task}" - state: present - - - name: Pull Kubernetes images | If you got error check your dns and sanction - command: - cmd: kubeadm config images pull - ``` - 2. Create k8s/tasks/main.yml and it must be as follows:" - ``` - --- - - name: Install kubernetes packages - include_tasks: k8s.yml - ``` - - (k8s/handlers): This path has a file called "main.yml". - - 3. Create k8s/handlers/main.yml and it must be as follows:" - ``` - --- - # handlers file for k8s - - - name: Remove temporary GPG key file - file: - path: "/tmp/docker.list" - state: absent - - - name: Restart kubelet - service: - name: kubelet - state: restarted - ``` - - There is a directory called "roles" which a sub-directory called "init_k8s" (roles/init_k8s): - "init_k8s" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (init_k8s/tasks): This path has three files called "cni.yml", "initk8s.yml" and "main.yml". - - 1. Create init_k8s/tasks/cni.yml and it must be as follows:" - ``` - - block: - - name: Check if Calico CRDs exist - command: kubectl get crd felixconfigurations.crd.projectcalico.org - register: calico_crd_check - ignore_errors: true - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Apply CNI plugin (Calico) - command: kubectl create -f {calico_operator_url_in_task} - retries: 3 - delay: 3 - - - name: Apply CNI plugin (Calico) - command: kubectl create -f {calico_crd_url_in_task} - retries: 3 - delay: 3 - delegate_to: "{groups_k8s_masters_in_task}" - when: calico_crd_check.rc != 0 - run_once: true - ``` - 2. 
Create init_k8s/tasks/initk8s.yml and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Init cluster | Copy kubeadmcnf.yaml - template: - src: kubeadmcnf.yml.j2 - dest: /root/kubeadmcnf.yaml - - - name: Init cluster | Initiate cluster on node groups['kube_master'][0] - shell: kubeadm init --config=/root/kubeadmcnf.yaml - register: kubeadm_init - # Retry is because upload config sometimes fails - until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - notify: Restart kubelet - - when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 - delegate_to: localhost - - - name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes - delegate_to: "{groups_k8s_masters_in_task}" - - - name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 - delegate_to: localhost - # when: use_iran == "true" - - - name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." - ``` - 3. Create init_k8s/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for init_k8s - - - name: Initialize kubernetes cluster - include_tasks: initk8s.yml - - - name: Initialize Calico CNI - include_tasks: cni.yml - ``` - - There is a directory called "roles" which a sub-directory called "join_master" (roles/join_master): - "join_master" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (join_master/tasks): This path has two files called "join_master.yml" and "main.yml". - - 1. 
Create "join_master/tasks/join_master.yml" and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{join_command_stdout_lines_in_task}" - - - name: Copy join command to local file - become: false - local_action: copy content="{join_command_stdout_lines_in_task} $@" dest="roles/join_master/files/join-command" - - - name: copy kubeadmcnf.yaml - template: - src: kubeadmcnf-join.yml.j2 - dest: /root/kubeadm-config.yaml - - when: - - inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Copy the join command to server location - copy: - src: roles/join_master/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - block: - - name: get certificate key - shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml - register: kubeadm_cert_key - - - name: Print certificate key - debug: - msg: "{kubeadm_cert_key_stdout_lines_in_task}" - - - name: register the cert key - set_fact: - control_plane_certkey: "{kubeadm_cert_key_stdout_lines_in_task}" - - when: - - inventory_hostname in groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - run_once: false - delegate_facts: true - - - name: Join | Join control-plane to cluster - command: "sh /root/join-command.sh --control-plane --certificate-key={hostvars_k8s_masters_control_plane_certkey_in_task} --cri-socket={cri_socket_in_task}" - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - name: remove apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{hostvars_groups_k8s_masters_private_ip_in_task} {apiserver_url_in_task}" - state: absent - - - - name: Add apiserver_url to point to the masters" - lineinfile: - dest: /etc/hosts - line: "{private_ip_in_task} {apiserver_url_in_task}" - state: present - when: - - inventory_hostname in groups['k8s_masters'] - ``` - 2. Create join_master/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for join_master - - - name: Join master(s) node to cluster - include_tasks: join_master.yml - - ``` - - There is a directory called "roles" which a sub-directory called "join_worker" (roles/join_worker): - "join_worker" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (join_worker/tasks): This path has two files called "join_worker.yml" and "main.yml". - - 1. 
Create "join_worker/tasks/join_worker.yml" and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{join_command_stdout_lines_in_task}" - - - name: Copy join command to local file - become: false - local_action: copy content="{join_command_stdout_lines_in_task} $@" dest="roles/join_worker/files/join-command" - - when: - - inventory_hostname not in groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Copy the join command to server location - copy: - src: roles/join_worker/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - name: Join | Join worker nodes to the cluster - command: sh /root/join-command.sh - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - ``` - 2. Create join_worker/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for join_worker - - - name: Join worker(s) node to cluster - include_tasks: join_worker.yml - ``` - finally just give me a python code without any note that can generate a project folder with the - given schema without ```python entry. and we dont need any base directory in the python code. - the final ansible template must work very well without any error! - - the python code you give me, must have structure like that: - - import os - project_name = "app/media/MyAnsible" - foo_dir = os.path.join(project_name, "bar") - x_dir = os.path.join(modules_dir, "y") - - # Create project directories - os.makedirs(ansible_dir, exist_ok=True) - - # Create main.tf - with open(os.path.join(project_name, "main.tf"), "w") as main_file: - # any thing you need - """ - return prompt -cat: 7: No such file or directory -cat ../../template_generators/ansible/install/kuber.py -def ansible_kuber_install(input): - - - kubernetes_ansible_port = input.ansible_port - kubernetes_ansible_user = input.ansible_user - k8s_master_nodes = input.k8s_master_nodes - k8s_worker_nodes = input.k8s_worker_nodes - k8s_version = input.version - sections = { - "[all]": [f"{name} private_ip=x.x.x.x" for name in k8s_master_nodes + k8s_worker_nodes], - "[k8s]": k8s_master_nodes + k8s_worker_nodes, - "[k8s_masters]": k8s_master_nodes, - "[k8s_workers]": k8s_worker_nodes, - } - kubernetes_inventory = "\n\n".join(f"{section}\n" + "\n".join(entries) for section, entries in sections.items()) - - inventory_hostname = "{{ inventory_hostname }}" - item_in_task = "{{ item }}" - ufw_in_task = "'ufw'" - docker_gpg_key_path_in_task = "{{ docker_gpg_key_path }}" - docker_gpg_key_url_in_task = "{{ docker_gpg_key_url }}" - architecture_stdout_in_task = "{{ architecture.stdout }}" - docker_apt_repo_in_task = "{{ docker_apt_repo }}" - distribution_codename_stdout_in_task = "{{ distribution_codename.stdout }}" - kubernetes_gpg_keyring_path_in_task = "{{ kubernetes_gpg_keyring_path }}" - kubernetes_gpg_key_url_in_task = "{{ kubernetes_gpg_key_url }}" - kubernetes_apt_repo_in_task = "{{ kubernetes_apt_repo }}" - private_ip_in_task = "{{ private_ip }}" - hostvars_private_ip_in_task = "{{ hostvars[item].private_ip }}" - domain_in_task = "{{ domain }}" - groups_all_in_task = "{{ groups['all'] }}" - 
Create "join_worker/tasks/join_worker.yml" and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{join_command_stdout_lines_in_task}" - - - name: Copy join command to local file - become: false - local_action: copy content="{join_command_stdout_lines_in_task} $@" dest="roles/join_worker/files/join-command" - - when: - - inventory_hostname not in groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Copy the join command to server location - copy: - src: roles/join_worker/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - name: Join | Join worker nodes to the cluster - command: sh /root/join-command.sh - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - ``` - 2. Create join_worker/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for join_worker - - - name: Join worker(s) node to cluster - include_tasks: join_worker.yml - ``` - finally just give me a python code without any note that can generate a project folder with the - given schema without ```python entry. and we dont need any base directory in the python code. - the final ansible template must work very well without any error! - - the python code you give me, must have structure like that: - - import os - project_name = "app/media/MyAnsible" - foo_dir = os.path.join(project_name, "bar") - x_dir = os.path.join(modules_dir, "y") - - # Create project directories - os.makedirs(ansible_dir, exist_ok=True) - - # Create main.tf - with open(os.path.join(project_name, "main.tf"), "w") as main_file: - # any thing you need - """ - return prompt From 67e63bb3f1848072529fe0315cabb04bd458529e Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Wed, 4 Dec 2024 22:10:17 +0330 Subject: [PATCH 05/25] fix(compose): totally restructre docker compose generator --- app/directory_generators/compose_generator.py | 39 -------- app/media/MyCompose/docker-compose.yaml | 91 ++++++++++++++----- app/models/ansible_models.py | 2 +- app/models/compose_models.py | 78 +++++----------- app/routes/docker.py | 5 +- app/template_generators/docker/compose.py | 64 ++----------- 6 files changed, 102 insertions(+), 177 deletions(-) diff --git a/app/directory_generators/compose_generator.py b/app/directory_generators/compose_generator.py index 0627c345..e69de29b 100644 --- a/app/directory_generators/compose_generator.py +++ b/app/directory_generators/compose_generator.py @@ -1,39 +0,0 @@ -import os - -project_name = "app/media/MyCompose" -compose_file_path = os.path.join(project_name, "docker-compose.yaml") - -# Create project directories -os.makedirs(project_name, exist_ok=True) - -# Create docker-compose.yaml -with open(compose_file_path, "w") as compose_file: - compose_file.write("version: '3'\n") - compose_file.write("services:\n") - compose_file.write(" web_server:\n") - compose_file.write(" image: nginx:latest\n") - compose_file.write(" volumes:\n") - compose_file.write(" - ./nginx/nginx.conf:/etc/nginx/nginx.conf\n") - compose_file.write(" depends_on:\n") - compose_file.write(" - string\n") - compose_file.write(" ports:\n") - compose_file.write(" - '80:80'\n") - 
compose_file.write(" environment:\n") - compose_file.write(" - foo=bar\n") - compose_file.write(" networks:\n") - compose_file.write(" - app_network\n") - compose_file.write(" monitoring_server:\n") - compose_file.write(" image: grafana:latest\n") - compose_file.write(" volumes:\n") - compose_file.write(" - ./nginx/nginx.conf:/etc/nginx/nginx.conf\n") - compose_file.write(" depends_on:\n") - compose_file.write(" - string\n") - compose_file.write(" ports:\n") - compose_file.write(" - '82:80'\n") - compose_file.write(" environment:\n") - compose_file.write(" - foo=bar\n") - compose_file.write(" networks:\n") - compose_file.write(" - app_network\n") - compose_file.write("networks:\n") - compose_file.write(" app_network:\n") - compose_file.write(" driver: bridge\n") \ No newline at end of file diff --git a/app/media/MyCompose/docker-compose.yaml b/app/media/MyCompose/docker-compose.yaml index 1bbac8cc..f8882cd1 100644 --- a/app/media/MyCompose/docker-compose.yaml +++ b/app/media/MyCompose/docker-compose.yaml @@ -1,31 +1,78 @@ -version: '3' +networks: + additionalProp1: + driver: string + additionalProp2: + driver: string + additionalProp3: + driver: string services: - web_server: - image: nginx:latest - volumes: - - ./nginx/nginx.conf:/etc/nginx/nginx.conf + additionalProp1: + args: + additionalProp1: string + additionalProp2: string + additionalProp3: string + build: + context: string + dockerfile: string + command: string + container_name: string depends_on: - - string - ports: - - '80:80' + - string environment: - - foo=bar + additionalProp1: string + additionalProp2: string + additionalProp3: string + image: string networks: - - app_network - - monitoring_server: - image: grafana:latest + - string + ports: + - string volumes: - - ./nginx/nginx.conf:/etc/nginx/nginx.conf + - string + additionalProp2: + args: + additionalProp1: string + additionalProp2: string + additionalProp3: string + build: + context: string + dockerfile: string + command: string + container_name: string depends_on: - - string + - string + environment: + additionalProp1: string + additionalProp2: string + additionalProp3: string + image: string + networks: + - string ports: - - '82:80' + - string + volumes: + - string + additionalProp3: + args: + additionalProp1: string + additionalProp2: string + additionalProp3: string + build: + context: string + dockerfile: string + command: string + container_name: string + depends_on: + - string environment: - - foo=bar + additionalProp1: string + additionalProp2: string + additionalProp3: string + image: string networks: - - app_network - -networks: - app_network: - driver: bridge + - string + ports: + - string + volumes: + - string +version: string diff --git a/app/models/ansible_models.py b/app/models/ansible_models.py index 7d80f39c..f97c4c09 100644 --- a/app/models/ansible_models.py +++ b/app/models/ansible_models.py @@ -32,7 +32,7 @@ def validator_os(cls, value): return value - + class AnsibleInstallKuber(AnsibleBase): os: str = 'ubuntu' k8s_worker_nodes: List[str] diff --git a/app/models/compose_models.py b/app/models/compose_models.py index daa81ba8..137dc1f1 100644 --- a/app/models/compose_models.py +++ b/app/models/compose_models.py @@ -1,59 +1,27 @@ -from typing import List, Optional -from pydantic import BaseModel, validator, ValidationError, computed_field - -class Port(BaseModel): - machine_port:int = 80 - container_port:int = 80 - -class Network(BaseModel): - name:str = 'app_network' - -class EnvironmentVariable(BaseModel): - name:str = 'foo' - value:str = "bar" - - 
@computed_field - @property - def env_full(self) -> int: - return f"{self.name}:{self.value}" - -class Volume(BaseModel): - local_dir: str = './nginx/nginx.conf' - container_dir:str = '/etc/nginx/nginx.conf' - - @computed_field - @property - def volume(self) -> int: - return f"{self.local_dir}:{self.container_dir}" +from typing import Dict, List, Optional +from pydantic import BaseModel class Build(BaseModel): - context:str - dockerfile:str + context: str + dockerfile: str + class Service(BaseModel): - image:str = 'nginx' - name:str = 'web_server' - container_name:str = 'web_server' - build: Build | None = None - version:str = 'latest' - volumes:List[Volume] | None = None - depends_on:List[str] | None = None - ports:List[Port] - networks:List[Network] | None = None - environments:List[EnvironmentVariable] | None = None - - @computed_field - @property - def image_full(self) -> int: - return f"{self.image}:{self.version}" - - @computed_field - @property - def volumes_full(self) -> int: - return [i.volume for i in self.volumes] - - - + build: Optional[Build] = None + image: Optional[str] = None + container_name: Optional[str] = None + command: Optional[str] = None + volumes: Optional[List[str]] = None + environment: Optional[Dict[str, str]] = None + ports: Optional[List[str]] = None + networks: Optional[List[str]] = None + args: Optional[Dict[str, str]] = None + depends_on: Optional[List[str]] = None + +class Network(BaseModel): + driver: str + class DockerCompose(BaseModel): - services: List[Service] - network:Network - \ No newline at end of file + version: str + services: Dict[str, Service] + networks: Optional[Dict[str, Network]] = None + diff --git a/app/routes/docker.py b/app/routes/docker.py index 1c8b4e65..c62a487c 100644 --- a/app/routes/docker.py +++ b/app/routes/docker.py @@ -10,10 +10,7 @@ async def docker_compose_template(request:DockerCompose) -> Output: if os.environ.get("TEST"): return Output(output='output') - generated_prompt = docker_compose_generator(request) + docker_compose_generator(request) - output = gpt_service(generated_prompt) - edit_directory_generator("compose_generator",output) - execute_pythonfile("MyCompose","compose_generator") return Output(output='output') \ No newline at end of file diff --git a/app/template_generators/docker/compose.py b/app/template_generators/docker/compose.py index 23b7cf92..c2770d57 100644 --- a/app/template_generators/docker/compose.py +++ b/app/template_generators/docker/compose.py @@ -1,59 +1,11 @@ -def docker_compose_generator(input): - compose_network = input.network.name - compose_services = input.services - services = [i.name for i in compose_services] - images = [{i.name:i.image_full} for i in compose_services] - volumes = [{i.name:i.volumes_full} for i in compose_services] - depends_on = [{i.name:i.depends_on} for i in compose_services] - ports = [{i.name:i.ports} for i in compose_services] - env = [{i.name:i.environments} for i in compose_services] - networks = [{i.name:i.networks} for i in compose_services] - - - prompt = f""" - - generate a python code (with out any ```python entry or additionals) with generates a docker-compose.yaml file in the directory 'app/media/MyCompose' - - the docker-compose.yaml, must following there instructions: - the version must be = 3 - set services following this list: {services} - set images to serivce following this dict : {images} - set volumes to service following this dict : {volumes} - set depends_on to service following this dict : {depends_on} - set ports to service following this 
dict : {ports} - set environment to service following this dict : {env} - set netwotks to service following this dict : {networks} - - - finally, at the end of docker-compose file, add following block: - ``` - networks: - {compose_network}: - driver: bridge - - ``` - - - finally just give me a python code without any note that can generate a project folder with the - given schema without ```python entry. and we dont need any base directory in the python code. - the final ansible template must work very well without any error! - - - the python code you give me, must have structure like that: - - import os - project_name = "app/media/MyCompose" - foo_dir = os.path.join(project_name, "bar") - x_dir = os.path.join(modules_dir, "y") +import yaml +from app.models.compose_models import DockerCompose - # Create project directories - os.makedirs(compose_dir, exist_ok=True) - - # Create main.tf - with open(os.path.join(project_name, "main.tf"), "w") as main_file: - # any thing you need - +def docker_compose_generator(input): + compose_total = input.model_dump(mode="json") - """ - return prompt \ No newline at end of file + file=open("app/media/MyCompose/docker-compose.yaml","w") + yaml.dump(compose_total,file) + file.close() + \ No newline at end of file From c61adb30fe14c1f5ef5dfbdc64dbe8453026f446 Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Wed, 4 Dec 2024 23:18:50 +0330 Subject: [PATCH 06/25] fix(compose): directory builder --- app/media/MyCompose/docker-compose.yaml | 2 +- app/template_generators/docker/compose.py | 12 ++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/app/media/MyCompose/docker-compose.yaml b/app/media/MyCompose/docker-compose.yaml index f8882cd1..607ae626 100644 --- a/app/media/MyCompose/docker-compose.yaml +++ b/app/media/MyCompose/docker-compose.yaml @@ -44,7 +44,7 @@ services: environment: additionalProp1: string additionalProp2: string - additionalProp3: string + additionalProp3: shhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh image: string networks: - string diff --git a/app/template_generators/docker/compose.py b/app/template_generators/docker/compose.py index c2770d57..2ef066a4 100644 --- a/app/template_generators/docker/compose.py +++ b/app/template_generators/docker/compose.py @@ -1,10 +1,18 @@ import yaml from app.models.compose_models import DockerCompose +import os def docker_compose_generator(input): - + dir = 'app/media/MyCompose' compose_total = input.model_dump(mode="json") - + if not os.path.exists(dir): + os.makedirs(dir) + os.path.join(dir, 'docker-compose.yaml') + + file=open("app/media/MyCompose/docker-compose.yaml","w") + yaml.dump(compose_total,file) + file.close() + file=open("app/media/MyCompose/docker-compose.yaml","w") yaml.dump(compose_total,file) file.close() From 7db40d2da6f34222d6c454ec32636ed8cc6317a0 Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Thu, 5 Dec 2024 11:04:47 +0330 Subject: [PATCH 07/25] fix(compose): compelete compose generation allgorithm --- app/media/MyCompose/docker-compose.yaml | 94 ++++++++--------------- app/models/compose_models.py | 28 ++++--- app/template_generators/docker/compose.py | 13 +++- m.py | 29 +++++++ 4 files changed, 90 insertions(+), 74 deletions(-) create mode 100644 m.py diff --git a/app/media/MyCompose/docker-compose.yaml b/app/media/MyCompose/docker-compose.yaml index 607ae626..19cb7613 100644 --- a/app/media/MyCompose/docker-compose.yaml +++ b/app/media/MyCompose/docker-compose.yaml @@ -1,78 +1,50 @@ -networks: - additionalProp1: - driver: string - additionalProp2: - 
driver: string - additionalProp3: - driver: string +version: '3' services: - additionalProp1: - args: - additionalProp1: string - additionalProp2: string - additionalProp3: string - build: - context: string - dockerfile: string - command: string - container_name: string - depends_on: - - string + web1: + image: nginx:latest + container_name: web_server environment: - additionalProp1: string - additionalProp2: string - additionalProp3: string - image: string - networks: - - string + foo: bar ports: - - string - volumes: - - string - additionalProp2: - args: - additionalProp1: string - additionalProp2: string - additionalProp3: string + - 80:80 + networks: + - app_network + web2: build: - context: string - dockerfile: string + context: . + dockerfile: DockerFile + image: nginx:latest + container_name: web_server command: string - container_name: string - depends_on: + volumes: - string environment: - additionalProp1: string - additionalProp2: string - additionalProp3: shhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh - image: string - networks: - - string + foo: bar ports: - - string - volumes: - - string - additionalProp3: + - 80:80 + networks: + - app_network args: additionalProp1: string additionalProp2: string additionalProp3: string + depends_on: + - string + web3: build: - context: string - dockerfile: string + context: . + dockerfile: DockerFile + image: nginx:latest + container_name: web_server command: string - container_name: string - depends_on: + volumes: - string environment: - additionalProp1: string - additionalProp2: string - additionalProp3: string - image: string - networks: - - string + foo: bar ports: - - string - volumes: - - string -version: string + - 80:80 + networks: + - app_network +networks: + app: + driver: bridge diff --git a/app/models/compose_models.py b/app/models/compose_models.py index 137dc1f1..62e409f7 100644 --- a/app/models/compose_models.py +++ b/app/models/compose_models.py @@ -1,27 +1,33 @@ from typing import Dict, List, Optional -from pydantic import BaseModel +from pydantic import BaseModel, model_validator class Build(BaseModel): - context: str - dockerfile: str + context: str = "." 
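(A usage sketch of the generator flow this patch settles on — request models dumped to plain dicts, `None` entries pruned, then written with PyYAML. The import paths follow the repo layout shown in these diffs; the `remove_none_values` helper and the `sort_keys=False` dump appear further down in this same patch, and pydantic v2 plus PyYAML are assumed installed:)

```python
import yaml
from app.models.compose_models import DockerCompose, Service, Network
from app.template_generators.docker.compose import remove_none_values

# Build a request the way the FastAPI route receives it
compose = DockerCompose(
    version="3",
    services={"web": Service(image="nginx:latest", ports=["80:80"])},
    networks={"app_network": Network(driver="bridge")},
)

data = compose.model_dump(mode="json")   # nested models become JSON-compatible dicts
data = remove_none_values(data)          # unset Optional fields are dropped before dumping
print(yaml.dump(data, default_flow_style=False, sort_keys=False))
```

A `Service` given neither `build` nor `image` would be rejected by the `model_validator` added in this patch.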
+ dockerfile: str = "DockerFile" class Service(BaseModel): build: Optional[Build] = None - image: Optional[str] = None - container_name: Optional[str] = None + image: Optional[str] = "nginx:latest" + container_name: Optional[str] = "web_server" command: Optional[str] = None volumes: Optional[List[str]] = None - environment: Optional[Dict[str, str]] = None - ports: Optional[List[str]] = None - networks: Optional[List[str]] = None + environment: Optional[Dict[str, str]] = {"foo":"bar"} + ports: Optional[List[str]] = ["80:80"] + networks: Optional[List[str]] = ["app_network"] args: Optional[Dict[str, str]] = None depends_on: Optional[List[str]] = None + @model_validator(mode="after") + def validator(self): + if self.build == None and self.image == None: + raise ValueError(f"one of the build or image sections must be present!") + return self + class Network(BaseModel): - driver: str + driver: str = "bridge" class DockerCompose(BaseModel): - version: str + version: str = "3" services: Dict[str, Service] - networks: Optional[Dict[str, Network]] = None + networks: Optional[Dict[str, Network]] diff --git a/app/template_generators/docker/compose.py b/app/template_generators/docker/compose.py index 2ef066a4..7a11eb31 100644 --- a/app/template_generators/docker/compose.py +++ b/app/template_generators/docker/compose.py @@ -1,19 +1,28 @@ import yaml from app.models.compose_models import DockerCompose import os +def remove_none_values(d): + if isinstance(d, dict): + return {k: remove_none_values(v) for k, v in d.items() if v is not None} + elif isinstance(d, list): + return [remove_none_values(i) for i in d if i is not None] + return d + def docker_compose_generator(input): dir = 'app/media/MyCompose' + compose_total = input.model_dump(mode="json") + compose_total = remove_none_values(compose_total) if not os.path.exists(dir): os.makedirs(dir) os.path.join(dir, 'docker-compose.yaml') file=open("app/media/MyCompose/docker-compose.yaml","w") - yaml.dump(compose_total,file) + yaml.dump(compose_total,file,default_flow_style=False) file.close() file=open("app/media/MyCompose/docker-compose.yaml","w") - yaml.dump(compose_total,file) + yaml.dump(compose_total,file,default_flow_style=False,sort_keys=False) file.close() \ No newline at end of file diff --git a/m.py b/m.py new file mode 100644 index 00000000..976d59bd --- /dev/null +++ b/m.py @@ -0,0 +1,29 @@ +{ + "version": "3", + "services": { + "webserver": { + "build": { + "context": ".", + "dockerfile": "DockerFile" + }, + "image": null, + "container_name": "web_server", + "command": null, + "volumes": null, + "environment": { + "foo": "bar" + }, + "ports": [ + "80:80" + ], + "networks": [ + "app_network" + ], + "args": null, + "depends_on": null + + + } + +} +} \ No newline at end of file From b9e4ed16fb903bb69b4886784d5dc48f52b04cfb Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Thu, 5 Dec 2024 11:22:05 +0330 Subject: [PATCH 08/25] fix(compose): edit default values for documentation --- app/media/MyCompose/docker-compose.yaml | 33 ++++++++++--------------- app/models/compose_models.py | 14 +++++------ 2 files changed, 20 insertions(+), 27 deletions(-) diff --git a/app/media/MyCompose/docker-compose.yaml b/app/media/MyCompose/docker-compose.yaml index 19cb7613..27640329 100644 --- a/app/media/MyCompose/docker-compose.yaml +++ b/app/media/MyCompose/docker-compose.yaml @@ -1,23 +1,14 @@ version: '3' services: - web1: - image: nginx:latest - container_name: web_server - environment: - foo: bar - ports: - - 80:80 - networks: - - app_network - web2: + web: 
build: context: . dockerfile: DockerFile image: nginx:latest container_name: web_server - command: string + command: command... volumes: - - string + - ./foo:bar environment: foo: bar ports: @@ -25,26 +16,28 @@ services: networks: - app_network args: - additionalProp1: string - additionalProp2: string - additionalProp3: string + foo: bar depends_on: - - string - web3: + - service 0 + web2: build: context: . dockerfile: DockerFile image: nginx:latest container_name: web_server - command: string + command: command... volumes: - - string + - ./foo:bar environment: foo: bar ports: - 80:80 networks: - app_network + args: + foo: bar + depends_on: + - service 0 networks: - app: + app_network: driver: bridge diff --git a/app/models/compose_models.py b/app/models/compose_models.py index 62e409f7..9f53a9f5 100644 --- a/app/models/compose_models.py +++ b/app/models/compose_models.py @@ -6,16 +6,16 @@ class Build(BaseModel): dockerfile: str = "DockerFile" class Service(BaseModel): - build: Optional[Build] = None + build: Optional[Build] = Build() image: Optional[str] = "nginx:latest" container_name: Optional[str] = "web_server" - command: Optional[str] = None - volumes: Optional[List[str]] = None + command: Optional[str] = "command..." + volumes: Optional[List[str]] = ["./foo:bar"] environment: Optional[Dict[str, str]] = {"foo":"bar"} ports: Optional[List[str]] = ["80:80"] networks: Optional[List[str]] = ["app_network"] - args: Optional[Dict[str, str]] = None - depends_on: Optional[List[str]] = None + args: Optional[Dict[str, str]] = {"foo":"bar"} + depends_on: Optional[List[str]] = ['service 0'] @model_validator(mode="after") def validator(self): @@ -28,6 +28,6 @@ class Network(BaseModel): class DockerCompose(BaseModel): version: str = "3" - services: Dict[str, Service] - networks: Optional[Dict[str, Network]] + services: Dict[str, Service] = {"web":Service(), "web2":Service()} + networks: Optional[Dict[str, Network]] = {"app_network": {"driver":"bridge"}} From 41c6fdeb533eacee97161f93dfd3ee33cc8182ad Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Thu, 5 Dec 2024 12:06:52 +0330 Subject: [PATCH 09/25] feat(compose): add union type input for networks --- app/media/MyCompose/docker-compose.yaml | 8 ++++---- app/models/compose_models.py | 13 ++++++++----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/app/media/MyCompose/docker-compose.yaml b/app/media/MyCompose/docker-compose.yaml index 27640329..2bd21d8e 100644 --- a/app/media/MyCompose/docker-compose.yaml +++ b/app/media/MyCompose/docker-compose.yaml @@ -4,6 +4,8 @@ services: build: context: . dockerfile: DockerFile + args: + foo: bar image: nginx:latest container_name: web_server command: command... @@ -15,14 +17,14 @@ services: - 80:80 networks: - app_network - args: - foo: bar depends_on: - service 0 web2: build: context: . dockerfile: DockerFile + args: + foo: bar image: nginx:latest container_name: web_server command: command... @@ -34,8 +36,6 @@ services: - 80:80 networks: - app_network - args: - foo: bar depends_on: - service 0 networks: diff --git a/app/models/compose_models.py b/app/models/compose_models.py index 9f53a9f5..907a4ef8 100644 --- a/app/models/compose_models.py +++ b/app/models/compose_models.py @@ -1,10 +1,10 @@ -from typing import Dict, List, Optional +from typing import Dict, List, Optional,Union from pydantic import BaseModel, model_validator class Build(BaseModel): context: str = "." 
dockerfile: str = "DockerFile" - + args: Optional[Dict[str, str]] = {"foo":"bar"} class Service(BaseModel): build: Optional[Build] = Build() image: Optional[str] = "nginx:latest" @@ -14,7 +14,7 @@ class Service(BaseModel): environment: Optional[Dict[str, str]] = {"foo":"bar"} ports: Optional[List[str]] = ["80:80"] networks: Optional[List[str]] = ["app_network"] - args: Optional[Dict[str, str]] = {"foo":"bar"} + depends_on: Optional[List[str]] = ['service 0'] @model_validator(mode="after") @@ -26,8 +26,11 @@ def validator(self): class Network(BaseModel): driver: str = "bridge" +class PreCreatedNetwork(BaseModel): + name:str = "net1" + external:bool = True class DockerCompose(BaseModel): version: str = "3" services: Dict[str, Service] = {"web":Service(), "web2":Service()} - networks: Optional[Dict[str, Network]] = {"app_network": {"driver":"bridge"}} - + networks: Union[Optional[Dict[str, PreCreatedNetwork]],Optional[Dict[str, Network]]] = {"app_network": {"driver":"bridge"}} + From e4f71179f66b37e89dc5d928c59d0ef80d6c613f Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Fri, 6 Dec 2024 21:20:53 +0330 Subject: [PATCH 10/25] fix(routes): add /api to all routes --- app/media/MyCompose/docker-compose.yaml | 39 ++++-------------------- app/media/MyCompose_zip.zip | Bin 0 -> 285 bytes app/models/compose_models.py | 3 +- app/routes/ansible.py | 6 ++-- app/routes/docker.py | 2 +- app/routes/helm.py | 2 +- app/routes/jcasc.py | 2 +- app/routes/terraform.py | 20 ++++++------ app/routes/utils.py | 2 +- 9 files changed, 25 insertions(+), 51 deletions(-) create mode 100644 app/media/MyCompose_zip.zip diff --git a/app/media/MyCompose/docker-compose.yaml b/app/media/MyCompose/docker-compose.yaml index 2bd21d8e..2eafd8d6 100644 --- a/app/media/MyCompose/docker-compose.yaml +++ b/app/media/MyCompose/docker-compose.yaml @@ -3,41 +3,14 @@ services: web: build: context: . - dockerfile: DockerFile - args: - foo: bar + dockerfile: Dockerfile + args: {} image: nginx:latest - container_name: web_server - command: command... - volumes: - - ./foo:bar - environment: - foo: bar + container_name: x ports: - 80:80 - networks: - - app_network - depends_on: - - service 0 - web2: - build: - context: . - dockerfile: DockerFile - args: - foo: bar - image: nginx:latest - container_name: web_server - command: command... 
- volumes: - - ./foo:bar - environment: - foo: bar - ports: - - 80:80 - networks: - - app_network - depends_on: - - service 0 + - 90:90 + - 70:70 networks: - app_network: + p: driver: bridge diff --git a/app/media/MyCompose_zip.zip b/app/media/MyCompose_zip.zip new file mode 100644 index 0000000000000000000000000000000000000000..66b5179797380b71eaa466b0d75c41882d82f9f9 GIT binary patch literal 285 zcmWIWW@Zs#U|`^2khE)ytjR4}GZn~t3dF)doRXiMom!-uoS$2eU!1B}nV6en+iT0$ zY{298ea<22;ME?o4CnW{i*`syHZ;!>nXu~K-YN6_>i+%7@n6!pSV3g_A(cy=(Owx0 zX<2ow3n~lWm$PInPKh!)^p&F~$NG}`_m+bHpAAc-gbw}QywzsM1}~uqu{W{Hrkt(3 zv~hyn5#H4sa$ZW9F8}+QRjYfd?T&2`^JAiP)(V~~zLK~3c Output: if os.environ.get("TEST"): @@ -22,7 +22,7 @@ async def ansible_install_generation_nginx(request:AnsibleInstallNginx) -> Outpu return Output(output='output') -@app.post("/ansible-install/docker/") +@app.post("/api/ansible-install/docker/") async def ansible_install_generation_docker(request:AnsibleInstallDocker) -> Output: if os.environ.get("TEST"): @@ -35,7 +35,7 @@ async def ansible_install_generation_docker(request:AnsibleInstallDocker) -> Out return Output(output='output') -@app.post("/ansible-install/kuber/") +@app.post("/api/ansible-install/kuber/") async def ansible_install_generation_kuber(request:AnsibleInstallKuber) -> Output: if os.environ.get("TEST"): diff --git a/app/routes/docker.py b/app/routes/docker.py index c62a487c..727d9773 100644 --- a/app/routes/docker.py +++ b/app/routes/docker.py @@ -5,7 +5,7 @@ from app.template_generators.docker.compose import docker_compose_generator import os -@app.post("/docker-compose/") +@app.post("/api/docker-compose/") async def docker_compose_template(request:DockerCompose) -> Output: if os.environ.get("TEST"): diff --git a/app/routes/helm.py b/app/routes/helm.py index 7470024c..dde13d44 100644 --- a/app/routes/helm.py +++ b/app/routes/helm.py @@ -4,7 +4,7 @@ from app.models import (HelmTemplateGeneration,Output) from app.prompt_generators import (helm_template_generator) import os -@app.post("/Helm-template/") +@app.post("/api/Helm-template/") async def Helm_template_generation(request:HelmTemplateGeneration) -> Output: if os.environ.get("TEST"): return Output(output='output') diff --git a/app/routes/jcasc.py b/app/routes/jcasc.py index 99802d8f..5ad85ab6 100644 --- a/app/routes/jcasc.py +++ b/app/routes/jcasc.py @@ -5,7 +5,7 @@ from app.template_generators.jenkins.jcasc import jcasc_template_generator import os -@app.post("/jcasc-template/") +@app.post("/api/jcasc-template/") async def jcasc_template_generation(request:Jcasc) -> Output: if os.environ.get("TEST"): return Output(output='output') diff --git a/app/routes/terraform.py b/app/routes/terraform.py index 800e1da4..6efab360 100644 --- a/app/routes/terraform.py +++ b/app/routes/terraform.py @@ -31,7 +31,7 @@ from app.template_generators.terraform.aws.EFS import (IaC_template_generator_efs) import os -@app.post("/IaC-basic/") +@app.post("/api/IaC-basic/") async def IaC_basic_generation(request:IaCBasicInput) -> Output: if os.environ.get("TEST"): return Output(output='Terraform developed by hashicorp and it is very usefull') @@ -39,7 +39,7 @@ async def IaC_basic_generation(request:IaCBasicInput) -> Output: output = gpt_service(generated_prompt) return Output(output=output) -@app.post("/IaC-bugfix/") +@app.post("/api/IaC-bugfix/") async def IaC_bugfix_generation(request:IaCBugfixInput) -> Output: if os.environ.get("TEST"): return Output(output='fix this bug by adding x to the y') @@ -48,7 +48,7 @@ async def 
IaC_bugfix_generation(request:IaCBugfixInput) -> Output: return Output(output=output) -@app.post("/IaC-install/") +@app.post("/api/IaC-install/") async def IaC_install_generation(request:IaCInstallationInput) -> Output: if os.environ.get("TEST"): return Output(output='apt-get install xyz \n apt-get update (covert them to shell file output)') @@ -56,7 +56,7 @@ async def IaC_install_generation(request:IaCInstallationInput) -> Output: output = gpt_service(generated_prompt) return Output(output=output) -@app.post("/IaC-template/docker") +@app.post("/api/IaC-template/docker") async def IaC_template_generation_docker(request:IaCTemplateGenerationDocker) -> Output: if os.environ.get("TEST"): return Output(output='output (nothing special)') @@ -66,7 +66,7 @@ async def IaC_template_generation_docker(request:IaCTemplateGenerationDocker) -> execute_pythonfile("MyTerraform","terraform_generator") return Output(output='output') -@app.post("/IaC-template/aws/ec2") +@app.post("/api/IaC-template/aws/ec2") async def IaC_template_generation_aws_ec2(request:IaCTemplateGenerationEC2) -> Output: if os.environ.get("TEST"): return Output(output='output (nothing special)') @@ -77,7 +77,7 @@ async def IaC_template_generation_aws_ec2(request:IaCTemplateGenerationEC2) -> O execute_pythonfile("MyTerraform","terraform_generator") return Output(output='output') -@app.post("/IaC-template/aws/s3") +@app.post("/api/IaC-template/aws/s3") async def IaC_template_generation_aws_s3(request:IaCTemplateGenerationS3) -> Output: if os.environ.get("TEST"): return Output(output='output (nothing special)') @@ -87,7 +87,7 @@ async def IaC_template_generation_aws_s3(request:IaCTemplateGenerationS3) -> Out execute_pythonfile("MyTerraform","terraform_generator") return Output(output='output') -@app.post("/IaC-template/aws/iam") +@app.post("/api/IaC-template/aws/iam") async def IaC_template_generation_aws_iam(request:IaCTemplateGenerationIAM) -> Output: if os.environ.get("TEST"): return Output(output='output (nothing special)') @@ -98,7 +98,7 @@ async def IaC_template_generation_aws_iam(request:IaCTemplateGenerationIAM) -> O return Output(output='output') -@app.post("/IaC-template/argocd") +@app.post("/api/IaC-template/argocd") async def IaC_template_generation_argocd(request:IaCTemplateGenerationArgoCD) -> Output: if os.environ.get("TEST"): return Output(output='output (nothing special)') @@ -110,7 +110,7 @@ async def IaC_template_generation_argocd(request:IaCTemplateGenerationArgoCD) -> -@app.post("/IaC-template/aws/elb") +@app.post("/api/IaC-template/aws/elb") async def IaC_template_generation_aws_elb(request:IaCTemplateGenerationELB) -> Output: if os.environ.get("TEST"): return Output(output='output (nothing special)') @@ -120,7 +120,7 @@ async def IaC_template_generation_aws_elb(request:IaCTemplateGenerationELB) -> O execute_pythonfile("MyTerraform","terraform_generator") return Output(output='output') -@app.post("/IaC-template/aws/efs") +@app.post("/api/IaC-template/aws/efs") async def IaC_template_generation_aws_efs(request:IaCTemplateGenerationEFS) -> Output: if os.environ.get("TEST"): return Output(output='output (nothing special)') diff --git a/app/routes/utils.py b/app/routes/utils.py index a7a5d7b1..b0a62ef7 100644 --- a/app/routes/utils.py +++ b/app/routes/utils.py @@ -26,7 +26,7 @@ def add_files_to_folder(files:list,folder:str): shutil.copy(filename, destination_file) -@app.get("/download-folder{folder_name}/{source}") +@app.get("/api/download-folder{folder_name}/{source}") async def download_folder_MyHelm(folder_name: 
str,source:str): folder_path = f"app/media/{folder_name}" # Adjust the path as needed if not os.path.exists(folder_path): From fd2efe7fe4563c44f485ee795f5d90a2b090bd9a Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Sun, 8 Dec 2024 11:20:54 +0330 Subject: [PATCH 11/25] fix(installation): fix terraform installation process and model --- Makefile | 6 ++-- .../Terraform/amazon_linux.sh | 8 +++++ .../Installation_base/Terraform/centos.sh | 8 +++++ .../Installation_base/Terraform/fedora.sh | 8 +++++ .../Installation_base/Terraform/ubuntu.sh | 15 ++++++++ app/media/MyCompose_zip.zip | Bin 285 -> 0 bytes app/models/compose_models.py | 1 - app/models/terraform_models.py | 22 ++++++------ app/routes/terraform.py | 6 ++-- .../terraform/Installation/__init__.py | 0 .../terraform/Installation/main.py | 33 ++++++++++++++++++ 11 files changed, 89 insertions(+), 18 deletions(-) create mode 100644 app/media/Installation_base/Terraform/amazon_linux.sh create mode 100644 app/media/Installation_base/Terraform/centos.sh create mode 100644 app/media/Installation_base/Terraform/fedora.sh create mode 100644 app/media/Installation_base/Terraform/ubuntu.sh delete mode 100644 app/media/MyCompose_zip.zip create mode 100644 app/template_generators/terraform/Installation/__init__.py create mode 100644 app/template_generators/terraform/Installation/main.py diff --git a/Makefile b/Makefile index 056f35a1..5b552a6f 100644 --- a/Makefile +++ b/Makefile @@ -4,11 +4,11 @@ releaseName ?= devopsgpt all: build up build: - docker-compose build + docker compose build up: - docker-compose up -d + docker compose up -d down: - docker-compose down -v + docker compose down -v helm-install: helm install $(releaseName) helm/ -f helm/values.yaml -n $(namespace) --create-namespace helm-uninstall: diff --git a/app/media/Installation_base/Terraform/amazon_linux.sh b/app/media/Installation_base/Terraform/amazon_linux.sh new file mode 100644 index 00000000..9b423b8c --- /dev/null +++ b/app/media/Installation_base/Terraform/amazon_linux.sh @@ -0,0 +1,8 @@ +#!bin/bash + + +sudo yum install -y yum-utils + +sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo + +sudo yum -y install terraform \ No newline at end of file diff --git a/app/media/Installation_base/Terraform/centos.sh b/app/media/Installation_base/Terraform/centos.sh new file mode 100644 index 00000000..bf7e5745 --- /dev/null +++ b/app/media/Installation_base/Terraform/centos.sh @@ -0,0 +1,8 @@ +#!bin/bash + + +sudo yum install -y yum-utils + +sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/RHEL/hashicorp.repo + +sudo yum -y install terraform \ No newline at end of file diff --git a/app/media/Installation_base/Terraform/fedora.sh b/app/media/Installation_base/Terraform/fedora.sh new file mode 100644 index 00000000..9851b25e --- /dev/null +++ b/app/media/Installation_base/Terraform/fedora.sh @@ -0,0 +1,8 @@ +#!bin/bash + + +sudo dnf install -y dnf-plugins-core + +sudo dnf config-manager --add-repo https://rpm.releases.hashicorp.com/fedora/hashicorp.repo + +sudo dnf -y install terraform \ No newline at end of file diff --git a/app/media/Installation_base/Terraform/ubuntu.sh b/app/media/Installation_base/Terraform/ubuntu.sh new file mode 100644 index 00000000..d3bd0505 --- /dev/null +++ b/app/media/Installation_base/Terraform/ubuntu.sh @@ -0,0 +1,15 @@ +#!bin/bash + +sudo apt-get update && sudo apt-get install -y gnupg software-properties-common + +wget -O- https://apt.releases.hashicorp.com/gpg | \ +gpg --dearmor | \ 
+sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg > /dev/null + +echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] \ +https://apt.releases.hashicorp.com $(lsb_release -cs) main" | \ +sudo tee /etc/apt/sources.list.d/hashicorp.list + +sudo apt update + +sudo apt-get install terraform \ No newline at end of file diff --git a/app/media/MyCompose_zip.zip b/app/media/MyCompose_zip.zip deleted file mode 100644 index 66b5179797380b71eaa466b0d75c41882d82f9f9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 285 zcmWIWW@Zs#U|`^2khE)ytjR4}GZn~t3dF)doRXiMom!-uoS$2eU!1B}nV6en+iT0$ zY{298ea<22;ME?o4CnW{i*`syHZ;!>nXu~K-YN6_>i+%7@n6!pSV3g_A(cy=(Owx0 zX<2ow3n~lWm$PInPKh!)^p&F~$NG}`_m+bHpAAc-gbw}QywzsM1}~uqu{W{Hrkt(3 zv~hyn5#H4sa$ZW9F8}+QRjYfd?T&2`^JAiP)(V~~zLK~3c Output: async def IaC_install_generation(request:IaCInstallationInput) -> Output: if os.environ.get("TEST"): return Output(output='apt-get install xyz \n apt-get update (covert them to shell file output)') - generated_prompt = IaC_installation_generator(request) - output = gpt_service(generated_prompt) - return Output(output=output) + selected_script = select_install(request) + return Output(output=selected_script) @app.post("/api/IaC-template/docker") async def IaC_template_generation_docker(request:IaCTemplateGenerationDocker) -> Output: diff --git a/app/template_generators/terraform/Installation/__init__.py b/app/template_generators/terraform/Installation/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/template_generators/terraform/Installation/main.py b/app/template_generators/terraform/Installation/main.py new file mode 100644 index 00000000..b4d3bc78 --- /dev/null +++ b/app/template_generators/terraform/Installation/main.py @@ -0,0 +1,33 @@ + +def select_install(input): + + match input.os: + + case "Ubuntu": + with open("app/media/Installation_base/Terraform/ubuntu.sh", 'r') as file: + file_content = file.read() + + return file_content + + case "Fedora": + with open("app/media/Installation_base/Terraform/fedora.sh", 'r') as file: + file_content = file.read() + + return file_content + + case "Centos": + with open("app/media/Installation_base/Terraform/centos.sh", 'r') as file: + file_content = file.read() + + return file_content + + case "Amazon_linux": + with open("app/media/Installation_base/Terraform/amazon_linux.sh", 'r') as file: + file_content = file.read() + + return file_content + case _: + raise ValueError() + + + \ No newline at end of file From 26c3bd89c8053c9677d2e783f7fd621114ad91b9 Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Sun, 8 Dec 2024 12:16:15 +0330 Subject: [PATCH 12/25] fix(installation): create MyBash for scripts --- app/media/MyBash/bash.sh | 15 +++++++ app/routes/terraform.py | 6 +-- .../terraform/Installation/main.py | 44 ++++++++++++------- 3 files changed, 46 insertions(+), 19 deletions(-) create mode 100644 app/media/MyBash/bash.sh diff --git a/app/media/MyBash/bash.sh b/app/media/MyBash/bash.sh new file mode 100644 index 00000000..d3bd0505 --- /dev/null +++ b/app/media/MyBash/bash.sh @@ -0,0 +1,15 @@ +#!bin/bash + +sudo apt-get update && sudo apt-get install -y gnupg software-properties-common + +wget -O- https://apt.releases.hashicorp.com/gpg | \ +gpg --dearmor | \ +sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg > /dev/null + +echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] \ +https://apt.releases.hashicorp.com $(lsb_release -cs) main" | \ +sudo tee 
/etc/apt/sources.list.d/hashicorp.list + +sudo apt update + +sudo apt-get install terraform \ No newline at end of file diff --git a/app/routes/terraform.py b/app/routes/terraform.py index 1a21a437..20ebbe03 100644 --- a/app/routes/terraform.py +++ b/app/routes/terraform.py @@ -52,9 +52,9 @@ async def IaC_bugfix_generation(request:IaCBugfixInput) -> Output: @app.post("/api/IaC-install/") async def IaC_install_generation(request:IaCInstallationInput) -> Output: if os.environ.get("TEST"): - return Output(output='apt-get install xyz \n apt-get update (covert them to shell file output)') - selected_script = select_install(request) - return Output(output=selected_script) + return Output(output='nothing special') + select_install(request) + return Output(output="pk") @app.post("/api/IaC-template/docker") async def IaC_template_generation_docker(request:IaCTemplateGenerationDocker) -> Output: diff --git a/app/template_generators/terraform/Installation/main.py b/app/template_generators/terraform/Installation/main.py index b4d3bc78..ffdb14e1 100644 --- a/app/template_generators/terraform/Installation/main.py +++ b/app/template_generators/terraform/Installation/main.py @@ -1,31 +1,43 @@ +import os +import shutil + +def create_MyBash_directory(): + + dir = 'app/media/MyBash' + + + if not os.path.exists(dir): + os.makedirs(dir) + os.path.join(dir, 'bash.sh') + + + def select_install(input): + create_MyBash_directory() match input.os: + case "Ubuntu": - with open("app/media/Installation_base/Terraform/ubuntu.sh", 'r') as file: - file_content = file.read() - - return file_content + source = 'app/media/Installation_base/Terraform/ubuntu.sh' + dest = 'app/media/MyBash/bash.sh' + shutil.copyfile(source, dest) case "Fedora": - with open("app/media/Installation_base/Terraform/fedora.sh", 'r') as file: - file_content = file.read() - - return file_content + source = 'app/media/Installation_base/Terraform/fedora.sh' + dest = 'app/media/MyBash/bash.sh' + shutil.copyfile(source, dest) case "Centos": - with open("app/media/Installation_base/Terraform/centos.sh", 'r') as file: - file_content = file.read() - - return file_content + source = 'app/media/Installation_base/Terraform/centos.sh' + dest = 'app/media/MyBash/bash.sh' + shutil.copyfile(source, dest) case "Amazon_linux": - with open("app/media/Installation_base/Terraform/amazon_linux.sh", 'r') as file: - file_content = file.read() - - return file_content + source = 'app/media/Installation_base/Terraform/amazon_linux.sh' + dest = 'app/media/MyBash/bash.sh' + shutil.copyfile(source, dest) case _: raise ValueError() From 90bece3e2864b3cfe9fdaa54376e844955b9fdf8 Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Sun, 8 Dec 2024 13:07:20 +0330 Subject: [PATCH 13/25] fix(bash): edit bi/bash --- app/media/Installation_base/Terraform/amazon_linux.sh | 2 +- app/media/Installation_base/Terraform/centos.sh | 2 +- app/media/Installation_base/Terraform/fedora.sh | 2 +- app/media/Installation_base/Terraform/ubuntu.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/app/media/Installation_base/Terraform/amazon_linux.sh b/app/media/Installation_base/Terraform/amazon_linux.sh index 9b423b8c..fa26a32b 100644 --- a/app/media/Installation_base/Terraform/amazon_linux.sh +++ b/app/media/Installation_base/Terraform/amazon_linux.sh @@ -1,4 +1,4 @@ -#!bin/bash +#!/bin/bash sudo yum install -y yum-utils diff --git a/app/media/Installation_base/Terraform/centos.sh b/app/media/Installation_base/Terraform/centos.sh index bf7e5745..094b3079 100644 --- 
a/app/media/Installation_base/Terraform/centos.sh +++ b/app/media/Installation_base/Terraform/centos.sh @@ -1,4 +1,4 @@ -#!bin/bash +#!/bin/bash sudo yum install -y yum-utils diff --git a/app/media/Installation_base/Terraform/fedora.sh b/app/media/Installation_base/Terraform/fedora.sh index 9851b25e..673dbbdf 100644 --- a/app/media/Installation_base/Terraform/fedora.sh +++ b/app/media/Installation_base/Terraform/fedora.sh @@ -1,4 +1,4 @@ -#!bin/bash +#!/bin/bash sudo dnf install -y dnf-plugins-core diff --git a/app/media/Installation_base/Terraform/ubuntu.sh b/app/media/Installation_base/Terraform/ubuntu.sh index d3bd0505..6dc8998a 100644 --- a/app/media/Installation_base/Terraform/ubuntu.sh +++ b/app/media/Installation_base/Terraform/ubuntu.sh @@ -1,4 +1,4 @@ -#!bin/bash +#!/bin/bash sudo apt-get update && sudo apt-get install -y gnupg software-properties-common From 54d507ff495c61a2c0e79f5155b2a18793bed2b1 Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Sun, 8 Dec 2024 14:10:15 +0330 Subject: [PATCH 14/25] fix(docker install): fix it --- app/media/Installation_base/Docker/RHEL.sh | 19 ++++++++ app/media/Installation_base/Docker/centos.sh | 16 +++++++ app/media/Installation_base/Docker/fedora.sh | 19 ++++++++ app/media/Installation_base/Docker/ubuntu.sh | 16 +++++++ app/media/MyBash/bash.sh | 25 ++++++----- app/models/__init__.py | 3 +- app/models/docker_installation_models.py | 20 +++++++++ app/routes/docker.py | 13 ++++-- app/template_generators/docker/__init__.py | 0 .../docker/installation.py | 45 +++++++++++++++++++ 10 files changed, 159 insertions(+), 17 deletions(-) create mode 100644 app/media/Installation_base/Docker/RHEL.sh create mode 100644 app/media/Installation_base/Docker/centos.sh create mode 100644 app/media/Installation_base/Docker/fedora.sh create mode 100644 app/media/Installation_base/Docker/ubuntu.sh create mode 100644 app/models/docker_installation_models.py create mode 100644 app/template_generators/docker/__init__.py create mode 100644 app/template_generators/docker/installation.py diff --git a/app/media/Installation_base/Docker/RHEL.sh b/app/media/Installation_base/Docker/RHEL.sh new file mode 100644 index 00000000..6613df87 --- /dev/null +++ b/app/media/Installation_base/Docker/RHEL.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +sudo dnf remove -y \ + docker \ + docker-client \ + docker-client-latest \ + docker-common \ + docker-latest \ + docker-latest-logrotate \ + docker-logrotate \ + docker-engine \ + podman \ + runc + + +sudo dnf -y install dnf-plugins-core +sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo + +sudo dnf install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin \ No newline at end of file diff --git a/app/media/Installation_base/Docker/centos.sh b/app/media/Installation_base/Docker/centos.sh new file mode 100644 index 00000000..b29d7708 --- /dev/null +++ b/app/media/Installation_base/Docker/centos.sh @@ -0,0 +1,16 @@ +#!/bin/bash +sudo dnf remove -y \ + docker \ + docker-client \ + docker-client-latest \ + docker-common \ + docker-latest \ + docker-latest-logrotate \ + docker-logrotate \ + docker-engine + + +sudo dnf -y install dnf-plugins-core +sudo dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + +sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin \ No newline at end of file diff --git a/app/media/Installation_base/Docker/fedora.sh b/app/media/Installation_base/Docker/fedora.sh new file 
mode 100644 index 00000000..bbd9cff3 --- /dev/null +++ b/app/media/Installation_base/Docker/fedora.sh @@ -0,0 +1,19 @@ +#!/bin/bash +sudo dnf remove -y \ + docker \ + docker-client \ + docker-client-latest \ + docker-common \ + docker-latest \ + docker-latest-logrotate \ + docker-logrotate \ + docker-selinux \ + docker-engine-selinux \ + docker-engine + + +sudo dnf -y install dnf-plugins-core +sudo dnf-3 config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo + + +sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin \ No newline at end of file diff --git a/app/media/Installation_base/Docker/ubuntu.sh b/app/media/Installation_base/Docker/ubuntu.sh new file mode 100644 index 00000000..b223db9f --- /dev/null +++ b/app/media/Installation_base/Docker/ubuntu.sh @@ -0,0 +1,16 @@ +#!/bin/bash +sudo apt-get update -y +sudo apt-get install ca-certificates curl -y +sudo install -m 0755 -d /etc/apt/keyrings +sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc +sudo chmod a+r /etc/apt/keyrings/docker.asc + +# Add the repository to Apt sources: +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo apt-get update -y + + +sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin \ No newline at end of file diff --git a/app/media/MyBash/bash.sh b/app/media/MyBash/bash.sh index d3bd0505..b223db9f 100644 --- a/app/media/MyBash/bash.sh +++ b/app/media/MyBash/bash.sh @@ -1,15 +1,16 @@ -#!bin/bash +#!/bin/bash +sudo apt-get update -y +sudo apt-get install ca-certificates curl -y +sudo install -m 0755 -d /etc/apt/keyrings +sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc +sudo chmod a+r /etc/apt/keyrings/docker.asc -sudo apt-get update && sudo apt-get install -y gnupg software-properties-common +# Add the repository to Apt sources: +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. 
/etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo apt-get update -y -wget -O- https://apt.releases.hashicorp.com/gpg | \ -gpg --dearmor | \ -sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg > /dev/null -echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] \ -https://apt.releases.hashicorp.com $(lsb_release -cs) main" | \ -sudo tee /etc/apt/sources.list.d/hashicorp.list - -sudo apt update - -sudo apt-get install terraform \ No newline at end of file +sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin \ No newline at end of file diff --git a/app/models/__init__.py b/app/models/__init__.py index f206434e..9f97ab17 100644 --- a/app/models/__init__.py +++ b/app/models/__init__.py @@ -3,4 +3,5 @@ from .utils import * from .ansible_models import * from .jcasc import * -from .compose_models import * \ No newline at end of file +from .compose_models import * +from .docker_installation_models import * \ No newline at end of file diff --git a/app/models/docker_installation_models.py b/app/models/docker_installation_models.py new file mode 100644 index 00000000..f64b9f58 --- /dev/null +++ b/app/models/docker_installation_models.py @@ -0,0 +1,20 @@ +from typing import Dict, List, Optional,Union +from pydantic import BaseModel, model_validator,validator + +class DockerInstallationInput(BaseModel): + os:str = "Ubuntu" + environment:str = "Linux" + + @validator("os") + def validate_os(cls, value): + allowed_os = ['Ubuntu', 'Centos', 'Fedora', 'RHEL'] + if value not in allowed_os: + raise ValueError(f"OS must be one of {allowed_os}.") + return value + + @validator("environment") + def validate_environment(cls, value): + allowed_os = ['Linux'] + if value not in allowed_os: + raise ValueError(f"Environment must be one of {allowed_os}.") + return value \ No newline at end of file diff --git a/app/routes/docker.py b/app/routes/docker.py index 727d9773..04b831aa 100644 --- a/app/routes/docker.py +++ b/app/routes/docker.py @@ -1,8 +1,7 @@ from app.app_instance import app -from app.gpt_services import gpt_service -from app.services import (write_installation,edit_directory_generator,execute_pythonfile) -from app.models import (DockerCompose,Output) +from app.models import (DockerCompose,DockerInstallationInput,Output) from app.template_generators.docker.compose import docker_compose_generator +from app.template_generators.docker.installation import docker_installation_selection import os @app.post("/api/docker-compose/") @@ -13,4 +12,10 @@ async def docker_compose_template(request:DockerCompose) -> Output: docker_compose_generator(request) return Output(output='output') - \ No newline at end of file + +@app.post("/api/docker/installation") +async def docker_installation(request:DockerInstallationInput) -> Output: + + docker_installation_selection(request) + + return Output(output='output') \ No newline at end of file diff --git a/app/template_generators/docker/__init__.py b/app/template_generators/docker/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/template_generators/docker/installation.py b/app/template_generators/docker/installation.py new file mode 100644 index 00000000..10adcad5 --- /dev/null +++ b/app/template_generators/docker/installation.py @@ -0,0 +1,45 @@ +import os +import shutil + + +def create_MyBash_directory(): + + dir = 'app/media/MyBash' + + + if not os.path.exists(dir): + os.makedirs(dir) + os.path.join(dir, 
'bash.sh') + + + +def docker_installation_selection(input): + + create_MyBash_directory() + + match input.os: + + case "Ubuntu": + + source = 'app/media/Installation_base/Docker/ubuntu.sh' + dest = 'app/media/MyBash/bash.sh' + + shutil.copyfile(source, dest) + + + case "Fedora": + source = 'app/media/Installation_base/Docker/fedora.sh' + dest = 'app/media/MyBash/bash.sh' + shutil.copyfile(source, dest) + + case "Centos": + source = 'app/media/Installation_base/Docker/centos.sh' + dest = 'app/media/MyBash/bash.sh' + shutil.copyfile(source, dest) + + case "RHEL": + source = 'app/media/Installation_base/Docker/RHEL.sh' + dest = 'app/media/MyBash/bash.sh' + shutil.copyfile(source, dest) + case _: + raise ValueError() \ No newline at end of file From 3dcf0ee96e8bd1199569618877fc3042d821f19d Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Mon, 9 Dec 2024 13:10:03 +0330 Subject: [PATCH 15/25] feat(install): add jenkins and gitlab installation --- app/main.py | 4 +- .../Gitlab/docker-compose.yaml | 31 +++++++++++ app/media/Installation_base/Jenkins/RHEL.sh | 9 +++ .../Jenkins/docker-compose.yml | 15 +++++ app/media/Installation_base/Jenkins/fedora.sh | 9 +++ app/media/Installation_base/Jenkins/ubuntu.sh | 12 ++++ app/media/MyBash/bash.sh | 24 ++++---- app/media/MyCompose/docker-compose.yaml | 31 +++++++++++ app/models/__init__.py | 4 +- app/models/gitlab_models.py | 17 ++++++ app/models/jenkins.py | 24 ++++++++ app/routes/docker.py | 1 + app/routes/gitlab.py | 14 +++++ app/routes/jenkins.py | 14 +++++ .../gitlab/installation.py | 35 ++++++++++++ .../jenkins/installation.py | 55 +++++++++++++++++++ .../terraform/Installation/main.py | 8 +-- 17 files changed, 287 insertions(+), 20 deletions(-) create mode 100644 app/media/Installation_base/Gitlab/docker-compose.yaml create mode 100644 app/media/Installation_base/Jenkins/RHEL.sh create mode 100644 app/media/Installation_base/Jenkins/docker-compose.yml create mode 100644 app/media/Installation_base/Jenkins/fedora.sh create mode 100644 app/media/Installation_base/Jenkins/ubuntu.sh create mode 100644 app/models/gitlab_models.py create mode 100644 app/models/jenkins.py create mode 100644 app/routes/gitlab.py create mode 100644 app/routes/jenkins.py create mode 100644 app/template_generators/gitlab/installation.py create mode 100644 app/template_generators/jenkins/installation.py diff --git a/app/main.py b/app/main.py index ca950b8b..1980475c 100644 --- a/app/main.py +++ b/app/main.py @@ -3,4 +3,6 @@ from app.routes.helm import * from app.routes.ansible import * from app.routes.jcasc import * -from app.routes.docker import * \ No newline at end of file +from app.routes.docker import * +from app.routes.jenkins import * +from app.routes.gitlab import * \ No newline at end of file diff --git a/app/media/Installation_base/Gitlab/docker-compose.yaml b/app/media/Installation_base/Gitlab/docker-compose.yaml new file mode 100644 index 00000000..fe15d044 --- /dev/null +++ b/app/media/Installation_base/Gitlab/docker-compose.yaml @@ -0,0 +1,31 @@ +# sudo mkdir -p /srv/gitlab +# export GITLAB_HOME=/srv/gitlab + +version: '3.6' +services: + gitlab: + image: gitlab/gitlab-ee:-ee.0 + container_name: gitlab + restart: always + hostname: 'gitlab.example.com' + environment: + GITLAB_OMNIBUS_CONFIG: | + # Add any other gitlab.rb configuration here, each on its own line + external_url 'https://gitlab.example.com' + + # you can also use custom HTTP and SSH port. 
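(Aside on the per-OS selection used by the Terraform, Docker and Jenkins installation patches above: each branch of the `match` statement copies one script from `Installation_base` to `app/media/MyBash/bash.sh`. An equivalent sketch using a lookup table — purely illustrative, not part of any commit; the paths are the ones shown in these diffs and the helper name `copy_install_script` is hypothetical:)

```python
import os
import shutil

# Source scripts shipped under Installation_base, keyed by the validated `os` field
DOCKER_SCRIPTS = {
    "Ubuntu": "app/media/Installation_base/Docker/ubuntu.sh",
    "Fedora": "app/media/Installation_base/Docker/fedora.sh",
    "Centos": "app/media/Installation_base/Docker/centos.sh",
    "RHEL": "app/media/Installation_base/Docker/RHEL.sh",
}

def copy_install_script(os_name: str, dest: str = "app/media/MyBash/bash.sh") -> None:
    # Ensure the destination directory exists before copying
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    try:
        source = DOCKER_SCRIPTS[os_name]
    except KeyError:
        raise ValueError(f"unsupported os: {os_name}")
    shutil.copyfile(source, dest)
```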
if you you want to do that, follow the below syntax + + # external_url 'http://gitlab.example.com:8929' + # gitlab_rails['gitlab_shell_ssh_port'] = 2424 + + ports: + # - '8929:8929' # Custom HTTP Port + # - '2424:22' # Custom SSH Port + - '80:80' + - '443:443' + - '22:22' + volumes: + - '$GITLAB_HOME/config:/etc/gitlab' + - '$GITLAB_HOME/logs:/var/log/gitlab' + - '$GITLAB_HOME/data:/var/opt/gitlab' + shm_size: '256m' \ No newline at end of file diff --git a/app/media/Installation_base/Jenkins/RHEL.sh b/app/media/Installation_base/Jenkins/RHEL.sh new file mode 100644 index 00000000..5ee7896f --- /dev/null +++ b/app/media/Installation_base/Jenkins/RHEL.sh @@ -0,0 +1,9 @@ +sudo wget -O /etc/yum.repos.d/jenkins.repo \ + https://pkg.jenkins.io/redhat-stable/jenkins.repo +sudo rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io-2023.key +sudo yum upgrade -y +# Add required dependencies for the jenkins package +sudo yum install -y fontconfig java-17-openjdk +sudo yum install -y jenkins +sudo systemctl daemon-reload +sudo systemctl enable --now jenkins \ No newline at end of file diff --git a/app/media/Installation_base/Jenkins/docker-compose.yml b/app/media/Installation_base/Jenkins/docker-compose.yml new file mode 100644 index 00000000..efc50a8b --- /dev/null +++ b/app/media/Installation_base/Jenkins/docker-compose.yml @@ -0,0 +1,15 @@ +version: '3.8' +services: + jenkins: + image: jenkins/jenkins:lts + privileged: true + user: root + ports: + - 8080:8080 + - 50000:50000 + container_name: jenkins + volumes: + - /home/${myname}/jenkins_compose/jenkins_configuration:/var/jenkins_home + - /var/run/docker.sock:/var/run/docker.sock + +# Replace "/home/${myname}/jenkins_compose/jenkins_configuration" with the path you want to use to store your jenkins data \ No newline at end of file diff --git a/app/media/Installation_base/Jenkins/fedora.sh b/app/media/Installation_base/Jenkins/fedora.sh new file mode 100644 index 00000000..0f2a543e --- /dev/null +++ b/app/media/Installation_base/Jenkins/fedora.sh @@ -0,0 +1,9 @@ +sudo wget -O /etc/yum.repos.d/jenkins.repo \ + https://pkg.jenkins.io/redhat-stable/jenkins.repo +sudo rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io-2023.key +sudo dnf upgrade -y +# Add required dependencies for the jenkins package +sudo dnf install -y fontconfig java-17-openjdk +sudo dnf install -y jenkins +sudo systemctl daemon-reload +sudo systemctl enable --now jenkins \ No newline at end of file diff --git a/app/media/Installation_base/Jenkins/ubuntu.sh b/app/media/Installation_base/Jenkins/ubuntu.sh new file mode 100644 index 00000000..5ff50a32 --- /dev/null +++ b/app/media/Installation_base/Jenkins/ubuntu.sh @@ -0,0 +1,12 @@ +sudo apt update -y +sudo apt install -y fontconfig openjdk-17-jre + + +sudo wget -O /usr/share/keyrings/jenkins-keyring.asc \ + https://pkg.jenkins.io/debian-stable/jenkins.io-2023.key +echo "deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc]" \ + https://pkg.jenkins.io/debian-stable binary/ | sudo tee \ + /etc/apt/sources.list.d/jenkins.list > /dev/null +sudo apt-get update -y +sudo apt-get install -y jenkins +sudo systemctl enable --now jenkins \ No newline at end of file diff --git a/app/media/MyBash/bash.sh b/app/media/MyBash/bash.sh index b223db9f..5ff50a32 100644 --- a/app/media/MyBash/bash.sh +++ b/app/media/MyBash/bash.sh @@ -1,16 +1,12 @@ -#!/bin/bash -sudo apt-get update -y -sudo apt-get install ca-certificates curl -y -sudo install -m 0755 -d /etc/apt/keyrings -sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg 
-o /etc/apt/keyrings/docker.asc -sudo chmod a+r /etc/apt/keyrings/docker.asc - -# Add the repository to Apt sources: -echo \ - "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ - $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ - sudo tee /etc/apt/sources.list.d/docker.list > /dev/null -sudo apt-get update -y +sudo apt update -y +sudo apt install -y fontconfig openjdk-17-jre -sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin \ No newline at end of file +sudo wget -O /usr/share/keyrings/jenkins-keyring.asc \ + https://pkg.jenkins.io/debian-stable/jenkins.io-2023.key +echo "deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc]" \ + https://pkg.jenkins.io/debian-stable binary/ | sudo tee \ + /etc/apt/sources.list.d/jenkins.list > /dev/null +sudo apt-get update -y +sudo apt-get install -y jenkins +sudo systemctl enable --now jenkins \ No newline at end of file diff --git a/app/media/MyCompose/docker-compose.yaml b/app/media/MyCompose/docker-compose.yaml index e69de29b..fe15d044 100644 --- a/app/media/MyCompose/docker-compose.yaml +++ b/app/media/MyCompose/docker-compose.yaml @@ -0,0 +1,31 @@ +# sudo mkdir -p /srv/gitlab +# export GITLAB_HOME=/srv/gitlab + +version: '3.6' +services: + gitlab: + image: gitlab/gitlab-ee:-ee.0 + container_name: gitlab + restart: always + hostname: 'gitlab.example.com' + environment: + GITLAB_OMNIBUS_CONFIG: | + # Add any other gitlab.rb configuration here, each on its own line + external_url 'https://gitlab.example.com' + + # you can also use custom HTTP and SSH port. if you you want to do that, follow the below syntax + + # external_url 'http://gitlab.example.com:8929' + # gitlab_rails['gitlab_shell_ssh_port'] = 2424 + + ports: + # - '8929:8929' # Custom HTTP Port + # - '2424:22' # Custom SSH Port + - '80:80' + - '443:443' + - '22:22' + volumes: + - '$GITLAB_HOME/config:/etc/gitlab' + - '$GITLAB_HOME/logs:/var/log/gitlab' + - '$GITLAB_HOME/data:/var/opt/gitlab' + shm_size: '256m' \ No newline at end of file diff --git a/app/models/__init__.py b/app/models/__init__.py index 9f97ab17..a529a87f 100644 --- a/app/models/__init__.py +++ b/app/models/__init__.py @@ -4,4 +4,6 @@ from .ansible_models import * from .jcasc import * from .compose_models import * -from .docker_installation_models import * \ No newline at end of file +from .docker_installation_models import * +from .jenkins import * +from .gitlab_models import * \ No newline at end of file diff --git a/app/models/gitlab_models.py b/app/models/gitlab_models.py new file mode 100644 index 00000000..44bbe939 --- /dev/null +++ b/app/models/gitlab_models.py @@ -0,0 +1,17 @@ +from typing import List, Optional +from pydantic import BaseModel, validator, ValidationError + + + +class GitLabInstallation(BaseModel): + + + environment:str = 'Docker' + + + @validator("environment") + def validator_environment(cls, value): + env = ['Docker'] + if value not in env: + raise ValueError(f"your selected Environemnt must be in {env}") + return value \ No newline at end of file diff --git a/app/models/jenkins.py b/app/models/jenkins.py new file mode 100644 index 00000000..e84ca17e --- /dev/null +++ b/app/models/jenkins.py @@ -0,0 +1,24 @@ +from typing import List, Optional +from pydantic import BaseModel, validator, ValidationError + + + +class JenkinsInstallation(BaseModel): + + os: str = 'Ubuntu' + + environment:str = 'Linux' + + @validator("os") + def validator_os(cls, value): + valid_oss 
= ['Ubuntu','Fedora','RHEL'] + if value not in valid_oss: + raise ValueError(f"your selected OS must be in {valid_oss}") + return value + + @validator("environment") + def validator_environment(cls, value): + env = ['Linux','Docker'] + if value not in env: + raise ValueError(f"your selected Environemnt must be in {env}") + return value \ No newline at end of file diff --git a/app/routes/docker.py b/app/routes/docker.py index 04b831aa..913a6ded 100644 --- a/app/routes/docker.py +++ b/app/routes/docker.py @@ -13,6 +13,7 @@ async def docker_compose_template(request:DockerCompose) -> Output: return Output(output='output') + @app.post("/api/docker/installation") async def docker_installation(request:DockerInstallationInput) -> Output: diff --git a/app/routes/gitlab.py b/app/routes/gitlab.py new file mode 100644 index 00000000..ec8338bf --- /dev/null +++ b/app/routes/gitlab.py @@ -0,0 +1,14 @@ +from app.app_instance import app +from app.models import (GitLabInstallation,Output) +from app.template_generators.gitlab.installation import select_install_gitlab +import os + + + + +@app.post("/api/gitlab/installation") +async def gitlab_installation(request:GitLabInstallation) -> Output: + + select_install_gitlab(request) + + return Output(output='output') \ No newline at end of file diff --git a/app/routes/jenkins.py b/app/routes/jenkins.py new file mode 100644 index 00000000..5f1aa788 --- /dev/null +++ b/app/routes/jenkins.py @@ -0,0 +1,14 @@ +from app.app_instance import app +from app.models import (DockerCompose,JenkinsInstallation,Output) +from app.template_generators.jenkins.installation import select_install_jenkins +import os + + + + +@app.post("/api/jenkins/installation") +async def jenkins_installation(request:JenkinsInstallation) -> Output: + + select_install_jenkins(request) + + return Output(output='output') \ No newline at end of file diff --git a/app/template_generators/gitlab/installation.py b/app/template_generators/gitlab/installation.py new file mode 100644 index 00000000..a50c333e --- /dev/null +++ b/app/template_generators/gitlab/installation.py @@ -0,0 +1,35 @@ +import os +import shutil + + +def create_directory(folder:str,filename:str): + + dir = f"app/media/{folder}" + + + if not os.path.exists(dir): + os.makedirs(dir) + os.path.join(dir, filename) + + +def select_install_gitlab(input): + + create_directory("MyCompose","docker-compose.yaml") + + + match input.environment: + + case "Docker": + source = 'app/media/Installation_base/Gitlab/docker-compose.yaml' + dest = 'app/media/MyCompose/docker-compose.yaml' + shutil.copyfile(source, dest) + + + case _: + raise ValueError() + + + + + + \ No newline at end of file diff --git a/app/template_generators/jenkins/installation.py b/app/template_generators/jenkins/installation.py new file mode 100644 index 00000000..4177124e --- /dev/null +++ b/app/template_generators/jenkins/installation.py @@ -0,0 +1,55 @@ +import os +import shutil + + +def create_directory(folder:str,filename:str): + + dir = f"app/media/{folder}" + + + if not os.path.exists(dir): + os.makedirs(dir) + os.path.join(dir, filename) + + + +def select_install_jenkins(input): + + create_directory("MyBash",'bash.sh') + create_directory("MyCompose",'docker-compose.yaml') + + if input.environment == 'Docker': + + source = 'app/media/Installation_base/Jenkins/docker-compose.yml' + dest = 'app/media/MyCompose/docker-compose.yaml' + shutil.copyfile(source, dest) + + else: + + match input.os: + + + case "Ubuntu": + source = 'app/media/Installation_base/Jenkins/ubuntu.sh' + dest = 
'app/media/MyBash/bash.sh' + shutil.copyfile(source, dest) + + case "Fedora": + source = 'app/media/Installation_base/Jenkins/fedora.sh' + dest = 'app/media/MyBash/bash.sh' + shutil.copyfile(source, dest) + + + case "RHEL": + source = 'app/media/Installation_base/Jenkins/RHEL.sh' + dest = 'app/media/MyBash/bash.sh' + shutil.copyfile(source, dest) + + case _: + raise ValueError() + + + + + + \ No newline at end of file diff --git a/app/template_generators/terraform/Installation/main.py b/app/template_generators/terraform/Installation/main.py index ffdb14e1..c4c479e4 100644 --- a/app/template_generators/terraform/Installation/main.py +++ b/app/template_generators/terraform/Installation/main.py @@ -2,19 +2,19 @@ import shutil -def create_MyBash_directory(): +def create_directory(folder:str,filename:str): - dir = 'app/media/MyBash' + dir = f"app/media/{folder}" if not os.path.exists(dir): os.makedirs(dir) - os.path.join(dir, 'bash.sh') + os.path.join(dir, filename) def select_install(input): - create_MyBash_directory() + create_directory("MyBash","bash.sh") match input.os: From fddb77c6f1aada3c7e6cee0b9a98e6d178973783 Mon Sep 17 00:00:00 2001 From: Abolfazl Andalib <79583121+abolfazl8131@users.noreply.github.com> Date: Mon, 9 Dec 2024 13:55:26 +0330 Subject: [PATCH 16/25] Update unit-test.yml --- .github/workflows/unit-test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index 4145ba88..529e1fdb 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -13,6 +13,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: + submodules: true ref: ${{ github.event.pull_request.head.ref }} repository: ${{ github.event.pull_request.head.repo.full_name }} From 55deb590c58da7e2f27cfc9bafb9c983840a1aaf Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Tue, 10 Dec 2024 14:14:19 +0330 Subject: [PATCH 17/25] fix(install): fix jenkins and gitlab --- app/media/Installation_base/Gitlab/docker-compose.yaml | 2 +- app/media/Installation_base/Jenkins/RHEL.sh | 1 + app/media/Installation_base/Jenkins/fedora.sh | 1 + app/media/Installation_base/Jenkins/ubuntu.sh | 1 + 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/app/media/Installation_base/Gitlab/docker-compose.yaml b/app/media/Installation_base/Gitlab/docker-compose.yaml index fe15d044..2c3e366a 100644 --- a/app/media/Installation_base/Gitlab/docker-compose.yaml +++ b/app/media/Installation_base/Gitlab/docker-compose.yaml @@ -4,7 +4,7 @@ version: '3.6' services: gitlab: - image: gitlab/gitlab-ee:-ee.0 + image: gitlab/gitlab-ce:-ce.0 container_name: gitlab restart: always hostname: 'gitlab.example.com' diff --git a/app/media/Installation_base/Jenkins/RHEL.sh b/app/media/Installation_base/Jenkins/RHEL.sh index 5ee7896f..5c324160 100644 --- a/app/media/Installation_base/Jenkins/RHEL.sh +++ b/app/media/Installation_base/Jenkins/RHEL.sh @@ -1,3 +1,4 @@ +#!/bin/bash sudo wget -O /etc/yum.repos.d/jenkins.repo \ https://pkg.jenkins.io/redhat-stable/jenkins.repo sudo rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io-2023.key diff --git a/app/media/Installation_base/Jenkins/fedora.sh b/app/media/Installation_base/Jenkins/fedora.sh index 0f2a543e..aeaa6d7b 100644 --- a/app/media/Installation_base/Jenkins/fedora.sh +++ b/app/media/Installation_base/Jenkins/fedora.sh @@ -1,3 +1,4 @@ +#!/bin/bash sudo wget -O /etc/yum.repos.d/jenkins.repo \ https://pkg.jenkins.io/redhat-stable/jenkins.repo sudo rpm --import 
https://pkg.jenkins.io/redhat-stable/jenkins.io-2023.key diff --git a/app/media/Installation_base/Jenkins/ubuntu.sh b/app/media/Installation_base/Jenkins/ubuntu.sh index 5ff50a32..fcb3d783 100644 --- a/app/media/Installation_base/Jenkins/ubuntu.sh +++ b/app/media/Installation_base/Jenkins/ubuntu.sh @@ -1,3 +1,4 @@ +#!/bin/bash sudo apt update -y sudo apt install -y fontconfig openjdk-17-jre From 81895d21d2ddc93974de9b27cba0b9730209bea0 Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Wed, 11 Dec 2024 17:06:55 +0330 Subject: [PATCH 18/25] fix(crawler): fix the crowler to crawl 2 aws urls --- admin-panel | 2 +- crawl/content_parser.py | 105 ------------ ...Amazon EC2 instance types - Amazon EC2.txt | 79 +++++++++ ...on EC2? - Amazon Elastic Compute Cloud.txt | 151 ++++++++++++++++++ crawl/main.py | 131 +++++---------- crawl/readme.md | 39 ----- crawl/urls.csv | 1 - 7 files changed, 272 insertions(+), 236 deletions(-) delete mode 100644 crawl/content_parser.py create mode 100644 crawl/crawled_data/Amazon EC2 instance types - Amazon EC2.txt create mode 100644 crawl/crawled_data/What is Amazon EC2? - Amazon Elastic Compute Cloud.txt delete mode 100644 crawl/readme.md delete mode 100644 crawl/urls.csv diff --git a/admin-panel b/admin-panel index 5b9c0c12..bfa06012 160000 --- a/admin-panel +++ b/admin-panel @@ -1 +1 @@ -Subproject commit 5b9c0c123018e42b185681bb955c7a8b48b6b7f8 +Subproject commit bfa06012cc943bdb1a59fde5fe235be06840005d diff --git a/crawl/content_parser.py b/crawl/content_parser.py deleted file mode 100644 index 9e03e97c..00000000 --- a/crawl/content_parser.py +++ /dev/null @@ -1,105 +0,0 @@ -import requests -from bs4 import BeautifulSoup -from requests.adapters import HTTPAdapter -from requests.packages.urllib3.util.retry import Retry - -class WebContentParser: - def __init__(self, url): - self.url = url - self.headers = { - 'User-Agent': ( - 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) ' - 'AppleWebKit/537.36 (KHTML, like Gecko) ' - 'Chrome/50.0.2661.102 Safari/537.36' - ) - } - self.session = self._initialize_session() - self.main_response = None - self.all_page_data = [] - - def _initialize_session(self): - """Set up the session with retry strategy.""" - retry_strategy = Retry( - total=5, - backoff_factor=8, - ) - adapter = HTTPAdapter(max_retries=retry_strategy) - adapter.max_retries.respect_retry_after_header = False - - session = requests.Session() - session.mount("https://", adapter) - session.mount("http://", adapter) - return session - - def fetch_content(self): - """Fetch the main content from the URL.""" - try: - self.main_response = self.session.get( - self.url, verify=False, timeout=30, headers=self.headers - ) - print(f'URL fetched: {self.url}') - return self.main_response - except requests.RequestException as e: - print(f"Failed to fetch the URL: {e}") - return None - - def parse_content(self): - """Parse the fetched HTML content.""" - if not self.main_response: - print("No response available to parse.") - return [] - - main_soup = BeautifulSoup(self.main_response.content, 'html.parser') - datas = main_soup.find('main', {'id': 'main'}) - if not datas: - print("No 'main' element found.") - return [] - - all_tag = datas.find_all(['h1', 'h2', 'h3', 'p', 'blockquote', 'ul']) - each_title_data = {} - - for tag in all_tag: - if tag.name in ['h1', 'h2']: - if each_title_data: - self.all_page_data.append(each_title_data) - each_title_data = {} - each_title_data['metadata'] = tag.text.strip() - - elif tag.name == 'h3': - if tag.text.strip() == 'Resources': - 
each_title_data[tag.text.strip()] = '' - else: - if each_title_data: - self.all_page_data.append(each_title_data) - each_title_data = {} - each_title_data['metadata'] = tag.text.strip() - - elif tag.name in ['p', 'blockquote']: - num = len(each_title_data) - key = f'content {num}' - if tag.text.strip(): - each_title_data[key] = tag.text.strip() - - elif tag.name == 'ul': - text = ' '.join( - li.text.strip() - for li in tag.find_all('li', {'class': 'mdx-lists_listItem__nkqhg'}) - ) - if 'Resources' in each_title_data: - each_title_data['Resources'] = text - else: - num = len(each_title_data) - key = f'content {num}' - if text: - each_title_data[key] = text - - if each_title_data: - self.all_page_data.append(each_title_data) - - return self.all_page_data - - def get_data(self): - """Main method to fetch and parse content.""" - self.fetch_content() - return self.parse_content() - diff --git a/crawl/crawled_data/Amazon EC2 instance types - Amazon EC2.txt b/crawl/crawled_data/Amazon EC2 instance types - Amazon EC2.txt new file mode 100644 index 00000000..861c4cda --- /dev/null +++ b/crawl/crawled_data/Amazon EC2 instance types - Amazon EC2.txt @@ -0,0 +1,79 @@ +Title: Amazon EC2 instance types - Amazon EC2 + +When you launch an EC2 instance, the instance type that you specify + determines the hardware of the host computer used for your instance. Each instance type + offers different compute, memory, and storage capabilities, and is grouped in an instance + family based on these capabilities. Select an instance type based on the requirements of the + application or software that you plan to run on your instance. +Amazon EC2 dedicates some resources of the host computer, such as CPU, memory, and instance + storage, to a particular instance. Amazon EC2 shares other resources of the host computer, such + as the network and the disk subsystem, among instances. If each instance on a host computer + tries to use as much of one of these shared resources as possible, each receives an equal + share of that resource. However, when a resource is underused, an instance can consume a + higher share of that resource while it's available. +Each instance type provides higher or lower minimum performance from a shared resource. + For example, instance types with high I/O performance have a larger allocation of shared resources. + Allocating a larger share of shared resources also reduces the variance of I/O performance. + For most applications, moderate I/O performance is more than enough. However, for + applications that require greater or more consistent I/O performance, consider + an instance type with higher I/O performance. +Current generation instances +Previous generation instances +Amazon EC2 instance type naming conventions +Amazon EC2 instance type specifications +Instances built on the AWS Nitro System +Amazon EC2 instance type quotas +For the best performance, we recommend that you use the following instance types + when you launch new instances. For more information, see Amazon EC2 Instance Types. 
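(Aside, not part of the crawled page or of this patch: a minimal sketch of how an instance type is specified at launch time. It assumes boto3 is installed and AWS credentials are configured; the region, AMI ID, and instance type below are placeholder values, not taken from this repository.)

```python
import boto3

# Sketch only: launch one instance with an explicitly chosen, current-generation instance type.
ec2 = boto3.client("ec2", region_name="us-east-1")  # region is an assumption

response = ec2.run_instances(
    ImageId="ami-0123456789abcdef0",  # placeholder AMI ID
    InstanceType="t3.micro",          # a burstable general purpose type
    MinCount=1,
    MaxCount=1,
)
print(response["Instances"][0]["InstanceId"])
```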
+General purpose: M5 | M5a | M5ad | M5d | M5dn | M5n | M5zn | M6a | M6g | M6gd | M6i | M6id | M6idn | M6in | M7a | M7g | M7gd | M7i | M7i-flex | M8g | Mac1 | Mac2 | Mac2-m1ultra | Mac2-m2 | Mac2-m2pro | T2 | T3 | T3a | T4g +Compute optimized: C5 | C5a | C5ad | C5d | C5n | C6a | C6g | C6gd | C6gn | C6i | C6id | C6in | C7a | C7g | C7gd | C7gn | C7i | C7i-flex | C8g +Memory optimized: R5 | R5a | R5ad | R5b | R5d | R5dn | R5n | R6a | R6g | R6gd | R6i | R6idn | R6in | R6id | R7a | R7g | R7gd | R7i | R7iz | R8g | U-3tb1 | U-6tb1 | U-9tb1 | U-12tb1 | U-18tb1 | U-24tb1 | U7i-6tb | U7i-8tb | U7i-12tb | U7in-16tb | U7in-24tb | U7in-32tb | X1 | X1e | X2gd | X2idn | X2iedn | X2iezn | X8g | z1d +Storage optimized: D2 | D3 | D3en | H1 | I3 | I3en | I4g | I4i | I7ie | I8g | Im4gn | Is4gen +Accelerated computing: DL1 | DL2q | F1 | G4ad | G4dn | G5 | G5g | G6 | G6e | Gr6 | Inf1 | Inf2 | P2 | P3 | P3dn | P4d | P4de | P5 | P5e | P5en | Trn1 | Trn1n | Trn2 | Trn2u | VT1 +High-performance computing: Hpc6a | Hpc6id | Hpc7a | Hpc7g +Amazon Web Services offers previous generation instance types for users who have optimized their + applications around them and have yet to upgrade. We encourage you to use current generation + instance types to get the best performance, but we continue to support the following previous + generation instance types. For more information about which current + generation instance type would be a suitable upgrade, see + Previous Generation Instances. +General purpose: A1 | M1 | M2 | M3 | M4 | T1 +Compute optimized: C1 | C3 | C4 +Memory optimized: R3 | R4 +Storage optimized: I2 +Accelerated computing: G3 +Fixed performance instances provide fixed CPU resources. These instances can + deliver and sustain full CPU performance at any time, and for as long as a workload + needs it. If you need consistently high CPU performance for applications such as + video encoding, high volume websites, or HPC applications, we recommend that you use + fixed performance instances. +Burstable performance (T) instances provide a baseline level of CPU + performance with the ability to burst above the baseline. The baseline CPU is + designed to meet the needs of the majority of general purpose workloads, such as + large-scale micro-services, web servers, small and medium databases, data logging, + code repositories, virtual desktops, and development and test environments. +The baseline utilization and ability to burst are governed by CPU credits. Each + burstable performance instance continuously earns credits when it stays below the CPU + baseline, and continuously spends credits when it bursts above the baseline. For more + information, see Burstable + performance instances in the Amazon EC2 User Guide. +M7i-flex and C7i-flex instances offer a balance of compute, memory, and network + resources, and they provide the most cost-effective way to run a broad spectrum of + general purpose applications. These instances provide reliable CPU resources to + deliver a baseline CPU performance of 40 percent, which is designed to meet the + compute requirements for a majority of general purpose workloads. When more + performance is needed, these instances provide the ability to exceed the baseline + CPU performance and deliver up to 100 percent CPU performance for 95 percent of the + time over a 24-hour window. +M7i-flex and C7i-flex instances running at a high CPU utilization that is consistently + above the baseline for long periods of time might see a gradual reduction in the maximum + burst CPU throughput. 
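(Aside, not part of the crawled page or of this patch: because burst capacity on T-family and flex instances is governed by CPU credits, a hedged boto3/CloudWatch sketch for reading an instance's CPUCreditBalance over the last hour. boto3 with configured credentials is assumed; the region and instance ID are placeholders.)

```python
import boto3
from datetime import datetime, timedelta, timezone

# Sketch only: fetch the average CPUCreditBalance of one burstable instance for the past hour.
cloudwatch = boto3.client("cloudwatch", region_name="us-east-1")  # region is an assumption

stats = cloudwatch.get_metric_statistics(
    Namespace="AWS/EC2",
    MetricName="CPUCreditBalance",
    Dimensions=[{"Name": "InstanceId", "Value": "i-0123456789abcdef0"}],  # placeholder ID
    StartTime=datetime.now(timezone.utc) - timedelta(hours=1),
    EndTime=datetime.now(timezone.utc),
    Period=300,
    Statistics=["Average"],
)
for point in sorted(stats["Datapoints"], key=lambda p: p["Timestamp"]):
    print(point["Timestamp"], point["Average"])
```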
For more information, see M7i-flex instances and C7i-flex instances. +For pricing information, see Amazon EC2 Pricing. + Javascript is disabled or is unavailable in your browser. +To use the Amazon Web Services Documentation, Javascript must be enabled. Please refer to your browser's Help pages for instructions. +Thanks for letting us know we're doing a good job! +If you've got a moment, please tell us what we did right so we can do more of it. + +Thanks for letting us know this page needs work. We're sorry we let you down. +If you've got a moment, please tell us how we can make the documentation better. + diff --git a/crawl/crawled_data/What is Amazon EC2? - Amazon Elastic Compute Cloud.txt b/crawl/crawled_data/What is Amazon EC2? - Amazon Elastic Compute Cloud.txt new file mode 100644 index 00000000..d0e78fd3 --- /dev/null +++ b/crawl/crawled_data/What is Amazon EC2? - Amazon Elastic Compute Cloud.txt @@ -0,0 +1,151 @@ +Title: What is Amazon EC2? - Amazon Elastic Compute Cloud + +Amazon Elastic Compute Cloud (Amazon EC2) provides on-demand, scalable computing capacity in the Amazon Web + Services (AWS) Cloud. Using Amazon EC2 reduces hardware costs so you can develop and deploy + applications faster. You can use Amazon EC2 to launch as many or as few virtual servers as you + need, configure security and networking, and manage storage. You can add capacity (scale up) + to handle compute-heavy tasks, such as monthly or yearly processes, or spikes in website + traffic. When usage decreases, you can reduce capacity (scale down) again. +An EC2 instance is a virtual server in the AWS Cloud. When you launch an EC2 instance, + the instance type that you specify determines the hardware available to your instance. + Each instance type offers a different balance of compute, memory, network, and storage + resources. For more information, see the Amazon EC2 Instance Types Guide. +Amazon EC2 provides the following high-level features: +Virtual servers. +Preconfigured templates for your instances that package the components you + need for your server (including the operating system and additional + software). +Various configurations of CPU, memory, storage, networking capacity, and + graphics hardware for your instances. +Persistent storage volumes for your data using Amazon Elastic Block Store (Amazon EBS). +Storage volumes for temporary data that is deleted when you stop, + hibernate, or terminate your instance. +Secure login information for your instances. AWS stores the public key + and you store the private key in a secure place. +A virtual firewall that allows you to specify the protocols, ports, and + source IP ranges that can reach your instances, and the destination IP + ranges to which your instances can connect. +Amazon EC2 supports the processing, storage, and transmission +of credit card data by a merchant or service provider, and has been +validated as being compliant with Payment Card Industry (PCI) Data Security Standard (DSS). +For more information about PCI DSS, including how to request a copy of the AWS PCI Compliance Package, +see PCI DSS Level 1. + +You can use other AWS services with the instances that you deploy using Amazon EC2. +Helps ensure you have the correct number of Amazon EC2 instances available to + handle the load for your application. +Automate backing up your Amazon EC2 instances and the Amazon EBS volumes attached to + them. +Monitor your instances and Amazon EBS volumes. +Automatically distribute incoming application traffic across multiple + instances. 
+Detect potentially unauthorized or malicious use of your EC2 instances. +Automate the creation, management, and deployment of customized, secure, and + up-to-date server images. +Size, configure, and deploy AWS resources for third-party applications + without having to manually identify and provision individual AWS + resources. +Perform operations at scale on EC2 instances with this secure end-to-end + management solution. +You can launch instances using another AWS compute service instead of using Amazon EC2. +Build websites or web applications using Amazon Lightsail, a cloud platform + that provides the resources that you need to deploy your project quickly, for + a low, predictable monthly price. To compare Amazon EC2 and Lightsail, see + Amazon Lightsail or Amazon EC2. +Deploy, manage, and scale containerized applications on a cluster of EC2 + instances. For more information, see Choosing an AWS container service. +Run your Kubernetes applications on AWS. For more information, see + Choosing an AWS container service. +You can create and manage your Amazon EC2 instances using the following interfaces: +A simple web interface to create and manage Amazon EC2 instances and resources. + If you've signed up for an AWS account, you can access the Amazon EC2 console + by signing into the AWS Management Console and selecting EC2 from + the console home page. +Enables you to interact with AWS services using commands in your command-line shell. It + is supported on Windows, Mac, and Linux. For more information about the + AWS CLI , see AWS Command Line Interface User Guide. You can find the Amazon EC2 commands in the AWS CLI Command Reference. +Amazon EC2 supports creating resources using AWS CloudFormation. You create a template, in JSON or YAML + format, that describes your AWS resources, and AWS CloudFormation provisions and + configures those resources for you. You can reuse your CloudFormation + templates to provision the same resources multiple times, whether in the + same Region and account or in multiple Regions and accounts. For more + information about supported resource types and properties for Amazon EC2, see + EC2 resource type + reference in the AWS CloudFormation User Guide. +If you prefer to build applications using language-specific APIs instead + of submitting a request over HTTP or HTTPS, AWS provides libraries, sample + code, tutorials, and other resources for software developers. These + libraries provide basic functions that automate tasks such as + cryptographically signing your requests, retrying requests, and handling + error responses, making it easier for you to get started. For more + information, see + Tools to Build + on AWS. +A set of PowerShell modules that are built on the functionality exposed by + the AWS SDK for .NET. The Tools for PowerShell enable you to script operations on your AWS + resources from the PowerShell command line. To get started, see the + AWS Tools for Windows PowerShell User Guide. You can find the cmdlets for Amazon EC2, in the AWS Tools for PowerShell Cmdlet Reference. +Amazon EC2 provides a Query API. These requests are HTTP or HTTPS requests that + use the HTTP verbs GET or POST and a Query parameter named + Action. For more information about the API actions for + Amazon EC2, see Actions in the + Amazon EC2 API Reference. +Amazon EC2 provides the following pricing options: +You can get started with Amazon EC2 for free. To explore the Free Tier options, + see AWS Free Tier. 
+Pay for the instances that you use by the second, with a minimum of 60 + seconds, with no long-term commitments or upfront payments. +You can reduce your Amazon EC2 costs by making a commitment to a consistent + amount of usage, in USD per hour, for a term of 1 or 3 years. +You can reduce your Amazon EC2 costs by making a commitment to a specific + instance configuration, including instance type and Region, for a term of 1 + or 3 years. +Request unused EC2 instances, which can reduce your Amazon EC2 costs + significantly. +Reduce costs by using a physical EC2 server that is fully dedicated for + your use, either On-Demand or as part of a Savings Plan. You can use your + existing server-bound software licenses and get help meeting compliance + requirements. +Reserve compute capacity for your EC2 instances in a specific Availability + Zone for any duration of time. +Removes the cost of unused minutes and seconds from your bill. +For a complete list of charges and prices for Amazon EC2 and more information about the purchase + models, see Amazon EC2 pricing. +To create estimates for your AWS use cases, use the AWS Pricing Calculator. +To estimate the cost of transforming Microsoft + workloads to a modern architecture that uses open source and + cloud-native services deployed on AWS, use the AWS + Modernization Calculator for Microsoft Workloads. +To see your bill, go to the Billing and Cost Management + Dashboard in the AWS Billing and Cost Management + console. Your bill contains links to usage reports that provide details + about your bill. To learn more about AWS account billing, see AWS Billing and Cost Management User + Guide. +If you have questions concerning AWS billing, accounts, and events, contact AWS Support. +To calculate the cost of a sample provisioned + environment, see Cloud Economics + Center. When calculating the cost of a provisioned + environment, remember to include incidental costs such as snapshot storage for EBS + volumes. +You can optimize the cost, security, and performance of your AWS environment + using AWS Trusted Advisor. +You can use AWS Cost Explorer to analyze the cost and usage of your EC2 instances. You can view + data up to the last 13 months, and forecast how much you are likely to spend for the next + 12 months. For more information, see + Analyzing your costs with + AWS Cost Explorer in the AWS Cost Management User Guide. +Amazon EC2 features +AWS re:Post +AWS Skill Builder +AWS Support +Hands-on Tutorials +Web Hosting +Windows on AWS + Javascript is disabled or is unavailable in your browser. +To use the Amazon Web Services Documentation, Javascript must be enabled. Please refer to your browser's Help pages for instructions. +Thanks for letting us know we're doing a good job! +If you've got a moment, please tell us what we did right so we can do more of it. + +Thanks for letting us know this page needs work. We're sorry we let you down. +If you've got a moment, please tell us how we can make the documentation better. 
+ diff --git a/crawl/main.py b/crawl/main.py index f86e632e..3a4621e3 100644 --- a/crawl/main.py +++ b/crawl/main.py @@ -1,92 +1,43 @@ - -import argparse -import csv -import logging import requests from bs4 import BeautifulSoup -from requests.adapters import HTTPAdapter -from requests.packages.urllib3.util.retry import Retry -from content_parser import WebContentParser - - -def setup_logging(): - logging.basicConfig( - level=logging.INFO, - format='%(asctime)s - %(levelname)s - %(message)s', - handlers=[logging.StreamHandler()] - ) - - -def setup_http_session(): - retry_strategy = Retry( - total=5, - backoff_factor=8, - ) - adapter = HTTPAdapter(max_retries=retry_strategy) - adapter.max_retries.respect_retry_after_header = False - session = requests.Session() - session.mount("https://", adapter) - session.mount("http://", adapter) - return session - - -def process_urls(file_path, save_result): - http = setup_http_session() - headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'} - - with open(file_path, 'r') as file: - csv_reader = csv.reader(file) - for row in csv_reader: - if row: # Check if the row is not empty - main_url = row[0] - try: - main_response = http.get(main_url, verify=False, timeout=30, headers=headers) - logging.info(f'Fetched URL: {main_url}') - except requests.RequestException as e: - logging.error(f"Failed to fetch URL {main_url}: {e}") - continue - - main_soup = BeautifulSoup(main_response.content, 'html.parser') - products = main_soup.find('div', {'class': 'marketing-content_root__DE3hU'}).find_all('div', {'class': 'card-grid-block_root__yDdm_'}) - logging.info(f'Found {len(products)} products on page: {main_url}') - all_data = [] - for product in products: - # Get org title - title = product.find('h2').text - sub_content_link=[] - all_sub_title = product.find_all('li') - for res in all_sub_title: - sub_part_content = {} - sub_part_content['main_title'] = title - sub_title = res.find('span', {'class': 'card-title_text__F97Wj'}).get_text() - sub_part_content['sub_title'] = sub_title - sub_title_link = 'https://developer.hashicorp.com' + res.find('a').attrs['href'] - sub_part_content['sub_title_link'] = sub_title_link - - parser = WebContentParser(sub_title_link) - data = parser.get_data() - sub_part_content['all_data_info'] = data - - logging.info(f'Parsed content for sub-title: {sub_title}') - sub_content_link.append(sub_part_content) - all_data.append(sub_content_link) - if save_result: - # Logic to save sub_part_content goes here (e.g., writing to a file or database) - logging.info(f'Saving result for: {all_data}') - else: - print(all_data) - - -def main(): - setup_logging() - - parser = argparse.ArgumentParser(description='Process URLs from a CSV file.') - parser.add_argument('--csv_path', type=str, default='./urls.csv', help='Path to the CSV file containing URLs') - parser.add_argument('--save_result', type=bool, default=False, help='Flag to indicate if the results should be saved') - args = parser.parse_args() - - process_urls(args.csv_path, args.save_result) - - -if __name__ == '__main__': - main() +import os + +# List of URLs to crawl +urls = [ + "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts.html", + "https://docs.aws.amazon.com/ec2/latest/instancetypes/instance-types.html#current-gen-instances" +] + +# Directory to save the files +save_dir = "crawled_data" +os.makedirs(save_dir, exist_ok=True) + +def fetch_and_save(url): + try: + response = 
requests.get(url) + response.raise_for_status() # Check if the request was successful + + # Parse the HTML content + soup = BeautifulSoup(response.text, 'html.parser') + + # For demonstration, we are fetching the page title and all paragraphs + title = soup.title.string if soup.title else "no_title" + paragraphs = soup.find_all('p') + + # Prepare the file name + file_name = os.path.join(save_dir, f"{title}.txt") + + # Write the content to the file + with open(file_name, 'w', encoding='utf-8') as file: + file.write(f"Title: {title}\n\n") + for para in paragraphs: + file.write(para.get_text() + "\n") + + print(f"Saved content from {url} to {file_name}") + + except requests.RequestException as e: + print(f"Failed to fetch {url}: {e}") + +# Fetch and save data from each URL +for url in urls: + fetch_and_save(url) diff --git a/crawl/readme.md b/crawl/readme.md deleted file mode 100644 index 93e44d57..00000000 --- a/crawl/readme.md +++ /dev/null @@ -1,39 +0,0 @@ -# Documentation for Web Content Scraper - -## Overview -This script is designed to scrape data from a list of URLs provided in a CSV file. It fetches the content, extracts specific product information, and logs the operations performed. Optionally, the extracted content can also be saved. The script utilizes various libraries such as `requests`, `BeautifulSoup`, and `argparse` to ensure efficient and robust operation. - -## Prerequisites -Make sure the following Python packages are installed: -- `requests` -- `beautifulsoup4` -- `urllib3` - -To install the dependencies, run the following command: -```sh -pip install requests beautifulsoup4 -``` -## How to Use -Arguments -The script accepts command-line arguments that allow customization of behavior: ---csv_path: The path to the CSV file containing URLs to scrape. The default value is ./urls.csv. ---save_result: A boolean flag indicating whether to save the scraped results. The default value is False. -## Running the Script -You can run the script by using the following command: - -```sh -Copy code -python main.py --csv_path --save_result -``` -For example: -```sh -Copy code -python main.py --csv_path ./urls.csv --save_result True -``` -## CSV File Format -The CSV file should contain a list of URLs, with each URL on a new line. 
Here is an example:
-```
-https://example.com/page1
-https://example.com/page2
-```
-
diff --git a/crawl/urls.csv b/crawl/urls.csv
deleted file mode 100644
index 46e1afd1..00000000
--- a/crawl/urls.csv
+++ /dev/null
@@ -1 +0,0 @@
-https://developer.hashicorp.com/terraform/docs
\ No newline at end of file

From b328af17028cfd3f1c09da22a8fd46ea15d5876e Mon Sep 17 00:00:00 2001
From: abolfazl1381 
Date: Thu, 12 Dec 2024 12:14:51 +0330
Subject: [PATCH 19/25] fix(ansible_nginx): fix nginx file generation

---
 app/directory_generators/ansible_generator.py | 864 ++----------------
 app/media/MyAnsible/group_vars/all | 37 -
 app/media/MyAnsible/group_vars/nginx_nodes | 2 +
 app/media/MyAnsible/hosts | 16 +-
 app/media/MyAnsible/kubernetes_playbook.yml | 38 -
 app/media/MyAnsible/nginx_playbook.yml | 3 +
 .../roles/init_k8s/defaults/main.yml | 0
 .../MyAnsible/roles/init_k8s/files/sample.sh | 0
 .../roles/init_k8s/handlers/main.yml | 0
 .../MyAnsible/roles/init_k8s/tasks/cni.yml | 20 -
 .../roles/init_k8s/tasks/initk8s.yml | 64 --
 .../MyAnsible/roles/init_k8s/tasks/main.yml | 8 -
 .../init_k8s/templates/kubeadmcnf.yml.j2 | 13 -
 .../roles/install_nginx/tasks/main.yml | 31 +
 .../roles/install_nginx/vars/main.yml | 3 +
 .../roles/join_master/defaults/main.yml | 0
 .../roles/join_master/files/join-command | 0
 .../roles/join_master/handlers/main.yml | 0
 .../roles/join_master/tasks/join_master.yml | 100 --
 .../roles/join_master/tasks/main.yml | 5 -
 .../templates/kubeadmcnf-join.yml.j2 | 12 -
 .../roles/join_worker/defaults/main.yml | 0
 .../roles/join_worker/files/join-command | 0
 .../roles/join_worker/handlers/main.yml | 0
 .../roles/join_worker/tasks/join_worker.yml | 38 -
 .../roles/join_worker/tasks/main.yml | 5 -
 .../MyAnsible/roles/k8s/defaults/main.yml | 0
 app/media/MyAnsible/roles/k8s/files/sample.sh | 0
 .../MyAnsible/roles/k8s/handlers/main.yml | 12 -
 app/media/MyAnsible/roles/k8s/tasks/k8s.yml | 195 ----
 app/media/MyAnsible/roles/k8s/tasks/main.yml | 3 -
 .../roles/preinstall/defaults/main.yml | 0
 .../roles/preinstall/files/sample.sh | 0
 .../roles/preinstall/handlers/main.yml | 0
 .../roles/preinstall/tasks/basic.yml | 82 --
 .../MyAnsible/roles/preinstall/tasks/main.yml | 3 -
 .../roles/preinstall/templates/resolv.conf.j2 | 20 -
 app/routes/ansible.py | 4 +-
 .../ansible/install/nginx.py | 185 ++--
 39 files changed, 188 insertions(+), 1575 deletions(-)
 delete mode 100644 app/media/MyAnsible/group_vars/all
 create mode 100644 app/media/MyAnsible/group_vars/nginx_nodes
 delete mode 100644 app/media/MyAnsible/kubernetes_playbook.yml
 create mode 100644 app/media/MyAnsible/nginx_playbook.yml
 delete mode 100644 app/media/MyAnsible/roles/init_k8s/defaults/main.yml
 delete mode 100644 app/media/MyAnsible/roles/init_k8s/files/sample.sh
 delete mode 100644 app/media/MyAnsible/roles/init_k8s/handlers/main.yml
 delete mode 100644 app/media/MyAnsible/roles/init_k8s/tasks/cni.yml
 delete mode 100644 app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml
 delete mode 100644 app/media/MyAnsible/roles/init_k8s/tasks/main.yml
 delete mode 100644 app/media/MyAnsible/roles/init_k8s/templates/kubeadmcnf.yml.j2
 create mode 100644 app/media/MyAnsible/roles/install_nginx/tasks/main.yml
 create mode 100644 app/media/MyAnsible/roles/install_nginx/vars/main.yml
 delete mode 100644 app/media/MyAnsible/roles/join_master/defaults/main.yml
 delete mode 100644 app/media/MyAnsible/roles/join_master/files/join-command
 delete mode 100644 app/media/MyAnsible/roles/join_master/handlers/main.yml
 delete mode 100644 
app/media/MyAnsible/roles/join_master/tasks/join_master.yml delete mode 100644 app/media/MyAnsible/roles/join_master/tasks/main.yml delete mode 100644 app/media/MyAnsible/roles/join_master/templates/kubeadmcnf-join.yml.j2 delete mode 100644 app/media/MyAnsible/roles/join_worker/defaults/main.yml delete mode 100644 app/media/MyAnsible/roles/join_worker/files/join-command delete mode 100644 app/media/MyAnsible/roles/join_worker/handlers/main.yml delete mode 100644 app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml delete mode 100644 app/media/MyAnsible/roles/join_worker/tasks/main.yml delete mode 100644 app/media/MyAnsible/roles/k8s/defaults/main.yml delete mode 100644 app/media/MyAnsible/roles/k8s/files/sample.sh delete mode 100644 app/media/MyAnsible/roles/k8s/handlers/main.yml delete mode 100644 app/media/MyAnsible/roles/k8s/tasks/k8s.yml delete mode 100644 app/media/MyAnsible/roles/k8s/tasks/main.yml delete mode 100644 app/media/MyAnsible/roles/preinstall/defaults/main.yml delete mode 100644 app/media/MyAnsible/roles/preinstall/files/sample.sh delete mode 100644 app/media/MyAnsible/roles/preinstall/handlers/main.yml delete mode 100644 app/media/MyAnsible/roles/preinstall/tasks/basic.yml delete mode 100644 app/media/MyAnsible/roles/preinstall/tasks/main.yml delete mode 100644 app/media/MyAnsible/roles/preinstall/templates/resolv.conf.j2 diff --git a/app/directory_generators/ansible_generator.py b/app/directory_generators/ansible_generator.py index e3965501..1e5f7dd4 100644 --- a/app/directory_generators/ansible_generator.py +++ b/app/directory_generators/ansible_generator.py @@ -5,812 +5,80 @@ group_vars_dir = os.path.join(ansible_dir, "group_vars") host_vars_dir = os.path.join(ansible_dir, "host_vars") roles_dir = os.path.join(ansible_dir, "roles") +install_nginx_dir = os.path.join(roles_dir, "install_nginx") +tasks_dir = os.path.join(install_nginx_dir, "tasks") +vars_dir = os.path.join(install_nginx_dir, "vars") +defaults_dir = os.path.join(install_nginx_dir, "defaults") +files_dir = os.path.join(install_nginx_dir, "files") +handlers_dir = os.path.join(install_nginx_dir, "handlers") +templates_dir = os.path.join(install_nginx_dir, "templates") # Create project directories os.makedirs(group_vars_dir, exist_ok=True) os.makedirs(host_vars_dir, exist_ok=True) os.makedirs(roles_dir, exist_ok=True) - -preinstall_dir = os.path.join(roles_dir, "preinstall") -k8s_dir = os.path.join(roles_dir, "k8s") -init_k8s_dir = os.path.join(roles_dir, "init_k8s") -join_master_dir = os.path.join(roles_dir, "join_master") -join_worker_dir = os.path.join(roles_dir, "join_worker") - -os.makedirs(preinstall_dir, exist_ok=True) -os.makedirs(k8s_dir, exist_ok=True) -os.makedirs(init_k8s_dir, exist_ok=True) -os.makedirs(join_master_dir, exist_ok=True) -os.makedirs(join_worker_dir, exist_ok=True) +os.makedirs(install_nginx_dir, exist_ok=True) +os.makedirs(tasks_dir, exist_ok=True) +os.makedirs(vars_dir, exist_ok=True) +os.makedirs(defaults_dir, exist_ok=True) +os.makedirs(files_dir, exist_ok=True) +os.makedirs(handlers_dir, exist_ok=True) +os.makedirs(templates_dir, exist_ok=True) # Create ansible.cfg -with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as ansible_cfg_file: - ansible_cfg_file.write("[defaults]\nhost_key_checking=false\n") - -# Create group_vars/all -with open(os.path.join(group_vars_dir, "all"), "w") as group_vars_file: - group_vars_file.write("""# General -install_ansible_modules: "true" -disable_transparent_huge_pages: "true" - -setup_interface: "false" - -# Network Calico see here 
for more details https://github.com/projectcalico/calico/releases -calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" -calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" -pod_network_cidr: "192.168.0.0/16" - -# DNS -resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online - -# Sanction shekan -use_iran: "true" # change it to "false" if you are outside of iran - -# Docker -docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" -docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" -docker_apt_repo: "https://download.docker.com/linux/ubuntu" - -# Kubernetes -kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" -kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" -kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" -k8s_version: "1.31.2" # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases +with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as ansible_cfg: + ansible_cfg.write("[defaults]\n") + ansible_cfg.write("host_key_checking=false\n") -# CRI -cri_socket: unix:///var/run/containerd/containerd.sock - -# Ansible Connection -ansible_user: root -ansible_port: 22 -ansible_python_interpreter: "/usr/bin/python3" -domain: "devopsgpt.com" -apiserver_url: "devopsgpt.com" -""") +# Create group_vars/nginx_nodes +with open(os.path.join(group_vars_dir, "nginx_nodes"), "w") as nginx_nodes: + nginx_nodes.write("ansible_port: 22\n") + nginx_nodes.write("ansible_user: root\n") # Create hosts with open(os.path.join(ansible_dir, "hosts"), "w") as hosts_file: - hosts_file.write("""[all] -string private_ip=x.x.x.x -string private_ip=x.x.x.x - -[k8s] -string -string - -[k8s_masters] -string - -[k8s_workers] -string -""") - -# Create kubernetes_playbook.yml -with open(os.path.join(ansible_dir, "kubernetes_playbook.yml"), "w") as playbook_file: - playbook_file.write("""- hosts: all - roles: - - role: preinstall - gather_facts: yes - any_errors_fatal: true - tags: [preinstall] - -- hosts: k8s - roles: - - role: k8s - gather_facts: yes - any_errors_fatal: true - tags: [k8s] - -- hosts: k8s - roles: - - role: init_k8s - gather_facts: yes - any_errors_fatal: true - tags: [init_k8s] - -- hosts: k8s_masters - roles: - - role: preinstall - - role: k8s - - role: join_master - gather_facts: yes - any_errors_fatal: true - tags: [join_master] - -- hosts: k8s_workers - roles: - - role: preinstall - - role: k8s - - role: join_worker - gather_facts: yes - any_errors_fatal: true - tags: [join_worker] -""") - -# Create preinstall files -preinstall_defaults_dir = os.path.join(preinstall_dir, "defaults") -preinstall_files_dir = os.path.join(preinstall_dir, "files") -preinstall_handlers_dir = os.path.join(preinstall_dir, "handlers") -preinstall_tasks_dir = os.path.join(preinstall_dir, "tasks") -preinstall_templates_dir = os.path.join(preinstall_dir, "templates") -preinstall_vars_dir = os.path.join(preinstall_dir, "vars") - -os.makedirs(preinstall_defaults_dir, exist_ok=True) -os.makedirs(preinstall_files_dir, exist_ok=True) -os.makedirs(preinstall_handlers_dir, exist_ok=True) -os.makedirs(preinstall_tasks_dir, exist_ok=True) -os.makedirs(preinstall_templates_dir, exist_ok=True) -os.makedirs(preinstall_vars_dir, exist_ok=True) - -with open(os.path.join(preinstall_defaults_dir, "main.yml"), "w") as defaults_file: - defaults_file.write("") - -with 
open(os.path.join(preinstall_files_dir, "sample.sh"), "w") as files_file: - files_file.write("") - -with open(os.path.join(preinstall_handlers_dir, "main.yml"), "w") as handlers_file: - handlers_file.write("") - -with open(os.path.join(preinstall_tasks_dir, "basic.yml"), "w") as basic_tasks_file: - basic_tasks_file.write("""- name: Set timezone to UTC - timezone: - name: Etc/UTC - -- name: Set hostname - command: hostnamectl set-hostname {{ inventory_hostname }} - -- name: Remove symlink resolve.conf - file: - path: "/etc/resolv.conf" - state: absent - ignore_errors: true - when: use_iran == "true" - -- name: Configure resolv.conf - template: - src: "resolv.conf.j2" - dest: "/etc/resolv.conf" - mode: "0644" - when: use_iran == "true" - -- name: Add hostname - lineinfile: - path: /etc/hosts - regexp: '^127\\.0\\.0\\.1' - line: "127.0.0.1 {{ inventory_hostname }} localhost" - owner: root - group: root - mode: 0644 - -- name: Install necessary tools - apt: - state: latest - update_cache: true - name: - - vim - - sudo - - wget - - curl - - telnet - - nload - - s3cmd - - cron - - ipset - - lvm2 - - python3 - - python3-setuptools - - python3-pip - - python3-apt - - intel-microcode - - htop - - tcpdump - - net-tools - - screen - - tmux - - byobu - - iftop - - bmon - - iperf - - sysstat - - ethtool - - plocate - - thin-provisioning-tools - - conntrack - - stress - - cpufrequtils - - rsync - - xz-utils - - build-essential - - apt-transport-https - - ca-certificates - - software-properties-common - - gnupg-agent - - iptables-persistent - - open-iscsi - - nfs-common - - tzdata - - tree - -- name: Fix broken packages - apt: - state: fixed -""") - -with open(os.path.join(preinstall_tasks_dir, "main.yml"), "w") as tasks_main_file: - tasks_main_file.write("""--- -- name: basic setup - include_tasks: basic.yml -""") - -# Create k8s files -k8s_defaults_dir = os.path.join(k8s_dir, "defaults") -k8s_files_dir = os.path.join(k8s_dir, "files") -k8s_handlers_dir = os.path.join(k8s_dir, "handlers") -k8s_tasks_dir = os.path.join(k8s_dir, "tasks") -k8s_templates_dir = os.path.join(k8s_dir, "templates") -k8s_vars_dir = os.path.join(k8s_dir, "vars") - -os.makedirs(k8s_defaults_dir, exist_ok=True) -os.makedirs(k8s_files_dir, exist_ok=True) -os.makedirs(k8s_handlers_dir, exist_ok=True) -os.makedirs(k8s_tasks_dir, exist_ok=True) -os.makedirs(k8s_templates_dir, exist_ok=True) -os.makedirs(k8s_vars_dir, exist_ok=True) - -with open(os.path.join(k8s_defaults_dir, "main.yml"), "w") as k8s_defaults_file: - k8s_defaults_file.write("") - -with open(os.path.join(k8s_files_dir, "sample.sh"), "w") as k8s_files_file: - k8s_files_file.write("") - -with open(os.path.join(k8s_handlers_dir, "main.yml"), "w") as k8s_handlers_file: - k8s_handlers_file.write("""--- -# handlers file for k8s - -- name: Remove temporary GPG key file - file: - path: "/tmp/docker.list" - state: absent - -- name: Restart kubelet - service: - name: kubelet - state: restarted -""") - -with open(os.path.join(k8s_tasks_dir, "k8s.yml"), "w") as k8s_tasks_k8s_file: - k8s_tasks_k8s_file.write("""- name: Disable SWAP since kubernetes can't work with swap enabled - shell: | - swapoff -a - -- name: Disable SWAP in fstab since kubernetes can't work with swap enabled - replace: - path: /etc/fstab - regexp: '^([^#].*?\\sswap\\s+sw\\s+.*)$' - replace: '# \\1' - -- name: Check if ufw is installed - package_facts: - manager: "auto" - -- name: Disable ufw # just in Ubuntu - ufw: - state: disabled - when: "'ufw' in ansible_facts.packages" - -- name: Ensure kernel modules for 
containerd are enabled - lineinfile: - path: /etc/modules-load.d/containerd.conf - line: "{{ item }}" - create: yes - state: present - loop: - - overlay - - br_netfilter - -- name: Load kernel modules - command: - cmd: "modprobe {{ item }}" - loop: - - overlay - - br_netfilter - -- name: Ensure sysctl settings for Kubernetes are present - blockinfile: - path: /etc/sysctl.d/kubernetes.conf - block: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - net.ipv4.ip_forward = 1 - create: yes - marker: "# {mark} ANSIBLE MANAGED BLOCK" - owner: root - group: root - mode: '0644' - -- name: Reload sysctl settings - command: - cmd: sysctl --system - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install required packages - apt: - pkg: - - ca-certificates - - curl - - gnupg - - lsb-release - - gpg - state: present - update_cache: yes - -- name: Ensure the /etc/apt/keyrings directory exists - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' # Adjust the permissions as necessary - owner: root # Set the owner, if required - group: root - -- name: Remove existing Docker GPG key if it exists - file: - path: '{{ docker_gpg_key_path }}' - state: absent - -- name: Download Docker GPG key - shell: | - curl -fsSL {{ docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} - -- name: Determine the architecture - command: dpkg --print-architecture - register: architecture - -- name: Determine the distribution codename - command: lsb_release -cs - register: distribution_codename - -- name: Add Docker APT repository - lineinfile: - path: /etc/apt/sources.list.d/docker.list - create: yes - line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" - state: present - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install required packages (containerd) - apt: - pkg: - - containerd.io - state: present - -- name: Generate default containerd configuration - shell: - cmd: containerd config default > /etc/containerd/config.toml - -- name: Replace SystemdCgroup from false to true in containerd config - replace: - path: /etc/containerd/config.toml - regexp: 'SystemdCgroup = false' - replace: 'SystemdCgroup = true' - -- name: Restart containerd service - systemd: - name: containerd - state: restarted - daemon_reload: yes - -- name: Enable containerd service - systemd: - name: containerd - enabled: yes - -- name: Delete the existing Kubernetes APT keyring file if it exists - file: - path: '{{ kubernetes_gpg_keyring_path }}' - state: absent - -- name: Download Kubernetes GPG key - shell: | - curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' - -- name: Add Kubernetes repo - apt_repository: - repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" - state: present - filename: kubernetes.list - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install Kubernetes packages - apt: - name: "{{ item }}" - state: present - loop: - - kubeadm=1.31.2-1.1 - - kubelet=1.31.2-1.1 - - kubectl=1.31.2-1.1 - -- name: Hold Kubernetes packages - dpkg_selections: - name: "{{ item }}" - selection: hold - loop: - - kubeadm - - kubelet - - kubectl - - containerd.io - -- name: Configure node ip - lineinfile: - path: /etc/default/kubelet - line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} - create: yes - state: present - notify: Restart kubelet - -- name: Add hosts to /etc/hosts - 
lineinfile: - path: /etc/hosts - line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" - state: present - create: no - loop: "{{ groups['all'] }}" - when: hostvars[item].private_ip is defined - -- name: Add apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" - state: present - -- name: Pull Kubernetes images | If you got error check your dns and sanction - command: - cmd: kubeadm config images pull -""") - -with open(os.path.join(k8s_tasks_dir, "main.yml"), "w") as k8s_tasks_main_file: - k8s_tasks_main_file.write("""--- -- name: Install kubernetes packages - include_tasks: k8s.yml -""") - -# Create init_k8s files -init_k8s_defaults_dir = os.path.join(init_k8s_dir, "defaults") -init_k8s_files_dir = os.path.join(init_k8s_dir, "files") -init_k8s_handlers_dir = os.path.join(init_k8s_dir, "handlers") -init_k8s_tasks_dir = os.path.join(init_k8s_dir, "tasks") -init_k8s_templates_dir = os.path.join(init_k8s_dir, "templates") -init_k8s_vars_dir = os.path.join(init_k8s_dir, "vars") - -os.makedirs(init_k8s_defaults_dir, exist_ok=True) -os.makedirs(init_k8s_files_dir, exist_ok=True) -os.makedirs(init_k8s_handlers_dir, exist_ok=True) -os.makedirs(init_k8s_tasks_dir, exist_ok=True) -os.makedirs(init_k8s_templates_dir, exist_ok=True) -os.makedirs(init_k8s_vars_dir, exist_ok=True) - -with open(os.path.join(init_k8s_defaults_dir, "main.yml"), "w") as init_k8s_defaults_file: - init_k8s_defaults_file.write("") - -with open(os.path.join(init_k8s_files_dir, "sample.sh"), "w") as init_k8s_files_file: - init_k8s_files_file.write("") - -with open(os.path.join(init_k8s_handlers_dir, "main.yml"), "w") as init_k8s_handlers_file: - init_k8s_handlers_file.write("") - -with open(os.path.join(init_k8s_tasks_dir, "cni.yml"), "w") as init_k8s_tasks_cni_file: - init_k8s_tasks_cni_file.write("""- block: - - name: Check if Calico CRDs exist - command: kubectl get crd felixconfigurations.crd.projectcalico.org - register: calico_crd_check - ignore_errors: true - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_operator_url }} - retries: 3 - delay: 3 - - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_crd_url }} - retries: 3 - delay: 3 - delegate_to: "{{ groups['k8s_masters'][0] }}" - when: calico_crd_check.rc != 0 - run_once: true -""") - -with open(os.path.join(init_k8s_tasks_dir, "initk8s.yml"), "w") as init_k8s_tasks_initk8s_file: - init_k8s_tasks_initk8s_file.write("""- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Init cluster | Copy kubeadmcnf.yaml - template: - src: kubeadmcnf.yml.j2 - dest: /root/kubeadmcnf.yaml - - - name: Init cluster | Initiate cluster on node groups['kube_master'][0] - shell: kubeadm init --config=/root/kubeadmcnf.yaml - register: kubeadm_init - # Retry is because upload config sometimes fails - until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - notify: Restart kubelet - - when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: 
Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 - delegate_to: localhost - -- name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 - delegate_to: localhost - # when: use_iran == "true" - -- name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." -""") - -with open(os.path.join(init_k8s_tasks_dir, "main.yml"), "w") as init_k8s_tasks_main_file: - init_k8s_tasks_main_file.write("""--- -# tasks file for init_k8s - -- name: Initialize kubernetes cluster - include_tasks: initk8s.yml - -- name: Initialize Calico CNI - include_tasks: cni.yml -""") - -# Create join_master files -join_master_defaults_dir = os.path.join(join_master_dir, "defaults") -join_master_files_dir = os.path.join(join_master_dir, "files") -join_master_handlers_dir = os.path.join(join_master_dir, "handlers") -join_master_tasks_dir = os.path.join(join_master_dir, "tasks") -join_master_templates_dir = os.path.join(join_master_dir, "templates") -join_master_vars_dir = os.path.join(join_master_dir, "vars") - -os.makedirs(join_master_defaults_dir, exist_ok=True) -os.makedirs(join_master_files_dir, exist_ok=True) -os.makedirs(join_master_handlers_dir, exist_ok=True) -os.makedirs(join_master_tasks_dir, exist_ok=True) -os.makedirs(join_master_templates_dir, exist_ok=True) -os.makedirs(join_master_vars_dir, exist_ok=True) - -with open(os.path.join(join_master_defaults_dir, "main.yml"), "w") as join_master_defaults_file: - join_master_defaults_file.write("") - -with open(os.path.join(join_master_files_dir, "join-command"), "w") as join_master_files_file: - join_master_files_file.write("") - -with open(os.path.join(join_master_handlers_dir, "main.yml"), "w") as join_master_handlers_file: - join_master_handlers_file.write("") - -with open(os.path.join(join_master_tasks_dir, "join_master.yml"), "w") as join_master_tasks_join_master_file: - join_master_tasks_join_master_file.write("""- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - -- block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{{ join_command.stdout_lines[0] }}" - - - name: Copy join command to local file - become: false - local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" - - - name: copy kubeadmcnf.yaml - template: - src: kubeadmcnf-join.yml.j2 - dest: /root/kubeadm-config.yaml - - when: - - inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Copy the join command to server location - copy: - src: roles/join_master/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- block: - - name: get certificate key - shell: kubeadm init phase upload-certs 
--upload-certs --config=/root/kubeadm-config.yaml - register: kubeadm_cert_key - - - name: Print certificate key - debug: - msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" - - - name: register the cert key - set_fact: - control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" - - when: - - inventory_hostname in groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - run_once: false - delegate_facts: true - -- name: Join | Join control-plane to cluster - command: "sh /root/join-command.sh --control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- name: remove apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" - state: absent - -- name: Add apiserver_url to point to the masters - lineinfile: - dest: /etc/hosts - line: "{{ private_ip }} {{ apiserver_url }}" - state: present - when: - - inventory_hostname in groups['k8s_masters'] -""") - -with open(os.path.join(join_master_tasks_dir, "main.yml"), "w") as join_master_tasks_main_file: - join_master_tasks_main_file.write("""--- -# tasks file for join_master - -- name: Join master(s) node to cluster - include_tasks: join_master.yml -""") - -# Create join_worker files -join_worker_defaults_dir = os.path.join(join_worker_dir, "defaults") -join_worker_files_dir = os.path.join(join_worker_dir, "files") -join_worker_handlers_dir = os.path.join(join_worker_dir, "handlers") -join_worker_tasks_dir = os.path.join(join_worker_dir, "tasks") -join_worker_templates_dir = os.path.join(join_worker_dir, "templates") -join_worker_vars_dir = os.path.join(join_worker_dir, "vars") - -os.makedirs(join_worker_defaults_dir, exist_ok=True) -os.makedirs(join_worker_files_dir, exist_ok=True) -os.makedirs(join_worker_handlers_dir, exist_ok=True) -os.makedirs(join_worker_tasks_dir, exist_ok=True) -os.makedirs(join_worker_templates_dir, exist_ok=True) -os.makedirs(join_worker_vars_dir, exist_ok=True) - -with open(os.path.join(join_worker_defaults_dir, "main.yml"), "w") as join_worker_defaults_file: - join_worker_defaults_file.write("") - -with open(os.path.join(join_worker_files_dir, "join-command"), "w") as join_worker_files_file: - join_worker_files_file.write("") - -with open(os.path.join(join_worker_handlers_dir, "main.yml"), "w") as join_worker_handlers_file: - join_worker_handlers_file.write("") - -with open(os.path.join(join_worker_tasks_dir, "join_worker.yml"), "w") as join_worker_tasks_join_worker_file: - join_worker_tasks_join_worker_file.write("""- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - -- block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{{ 
join_command.stdout_lines[0] }}" - - - name: Copy join command to local file - become: false - local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" - - when: - - inventory_hostname not in groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Copy the join command to server location - copy: - src: roles/join_worker/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- name: Join | Join worker nodes to the cluster - command: sh /root/join-command.sh - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists -""") - -with open(os.path.join(join_worker_tasks_dir, "main.yml"), "w") as join_worker_tasks_main_file: - join_worker_tasks_main_file.write("""--- -# tasks file for join_worker - -- name: Join worker(s) node to cluster - include_tasks: join_worker.yml -""") \ No newline at end of file + hosts_file.write("[nginx_nodes]\n") + hosts_file.write("www.example.com\n") + +# Create empty host_vars directory (already created) + +# Create nginx_playbook.yml +with open(os.path.join(ansible_dir, "nginx_playbook.yml"), "w") as playbook: + playbook.write("- hosts: all\n") + playbook.write(" roles:\n") + playbook.write(" - install_nginx\n") + +# Create install_nginx/tasks/main.yml +with open(os.path.join(tasks_dir, "main.yml"), "w") as tasks_file: + tasks_file.write("---\n") + tasks_file.write("- name: Install CA certificates to ensure HTTPS connections work\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: ca-certificates\n") + tasks_file.write(" state: present\n\n") + tasks_file.write("- name: Add Nginx signing key\n") + tasks_file.write(" apt_key:\n") + tasks_file.write(" url: \"{{ nginx_repo_key_url }}\"\n") + tasks_file.write(" state: present\n\n") + tasks_file.write("- name: Add Nginx repository\n") + tasks_file.write(" apt_repository:\n") + tasks_file.write(" repo: \"deb {{ nginx_repo_url }} {{ ansible_distribution_release }} nginx\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" filename: nginx\n\n") + tasks_file.write("- name: Update apt cache\n") + tasks_file.write(" apt:\n") + tasks_file.write(" update_cache: yes\n\n") + tasks_file.write("- name: Install specific version of Nginx\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"nginx={{ nginx_version }}~{{ ansible_distribution_release }}\"\n") + tasks_file.write(" state: present\n\n") + tasks_file.write("- name: Ensure Nginx service is running and enabled\n") + tasks_file.write(" service:\n") + tasks_file.write(" name: nginx\n") + tasks_file.write(" state: started\n") + tasks_file.write(" enabled: yes\n") + +# Create install_nginx/vars/main.yml +with open(os.path.join(vars_dir, "main.yml"), "w") as vars_file: + vars_file.write("nginx_repo_key_url: \"https://nginx.org/keys/nginx_signing.key\"\n") + vars_file.write("nginx_repo_url: \"http://nginx.org/packages/mainline/ubuntu/\"\n") + vars_file.write("nginx_version: \"*\"\n") \ No newline at end of file diff --git a/app/media/MyAnsible/group_vars/all b/app/media/MyAnsible/group_vars/all deleted file mode 100644 index 03bf2832..00000000 --- a/app/media/MyAnsible/group_vars/all +++ /dev/null @@ -1,37 +0,0 @@ -# General -install_ansible_modules: "true" -disable_transparent_huge_pages: "true" - -setup_interface: "false" - -# Network Calico see here for more details 
https://github.com/projectcalico/calico/releases -calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" -calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" -pod_network_cidr: "192.168.0.0/16" - -# DNS -resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online - -# Sanction shekan -use_iran: "true" # change it to "false" if you are outside of iran - -# Docker -docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" -docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" -docker_apt_repo: "https://download.docker.com/linux/ubuntu" - -# Kubernetes -kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" -kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" -kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" -k8s_version: "1.31.2" # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases - -# CRI -cri_socket: unix:///var/run/containerd/containerd.sock - -# Ansible Connection -ansible_user: root -ansible_port: 22 -ansible_python_interpreter: "/usr/bin/python3" -domain: "devopsgpt.com" -apiserver_url: "devopsgpt.com" diff --git a/app/media/MyAnsible/group_vars/nginx_nodes b/app/media/MyAnsible/group_vars/nginx_nodes new file mode 100644 index 00000000..dd1b275a --- /dev/null +++ b/app/media/MyAnsible/group_vars/nginx_nodes @@ -0,0 +1,2 @@ +ansible_port : 23 +ansible_user : root diff --git a/app/media/MyAnsible/hosts b/app/media/MyAnsible/hosts index 79eace5b..33db3a29 100644 --- a/app/media/MyAnsible/hosts +++ b/app/media/MyAnsible/hosts @@ -1,13 +1,3 @@ -[all] -string private_ip=x.x.x.x -string private_ip=x.x.x.x - -[k8s] -string -string - -[k8s_masters] -string - -[k8s_workers] -string +[nginx_nodes] +www.example.com +dfgsg \ No newline at end of file diff --git a/app/media/MyAnsible/kubernetes_playbook.yml b/app/media/MyAnsible/kubernetes_playbook.yml deleted file mode 100644 index ea5f7985..00000000 --- a/app/media/MyAnsible/kubernetes_playbook.yml +++ /dev/null @@ -1,38 +0,0 @@ -- hosts: all - roles: - - role: preinstall - gather_facts: yes - any_errors_fatal: true - tags: [preinstall] - -- hosts: k8s - roles: - - role: k8s - gather_facts: yes - any_errors_fatal: true - tags: [k8s] - -- hosts: k8s - roles: - - role: init_k8s - gather_facts: yes - any_errors_fatal: true - tags: [init_k8s] - -- hosts: k8s_masters - roles: - - role: preinstall - - role: k8s - - role: join_master - gather_facts: yes - any_errors_fatal: true - tags: [join_master] - -- hosts: k8s_workers - roles: - - role: preinstall - - role: k8s - - role: join_worker - gather_facts: yes - any_errors_fatal: true - tags: [join_worker] diff --git a/app/media/MyAnsible/nginx_playbook.yml b/app/media/MyAnsible/nginx_playbook.yml new file mode 100644 index 00000000..475800e3 --- /dev/null +++ b/app/media/MyAnsible/nginx_playbook.yml @@ -0,0 +1,3 @@ +- hosts: all + roles: + - install_nginx diff --git a/app/media/MyAnsible/roles/init_k8s/defaults/main.yml b/app/media/MyAnsible/roles/init_k8s/defaults/main.yml deleted file mode 100644 index e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/init_k8s/files/sample.sh b/app/media/MyAnsible/roles/init_k8s/files/sample.sh deleted file mode 100644 index e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/init_k8s/handlers/main.yml b/app/media/MyAnsible/roles/init_k8s/handlers/main.yml deleted file mode 100644 index e69de29b..00000000 
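Editor's note: the generator this patch series moves toward writes the inventory, group_vars, and playbook files directly from Python instead of asking a model for them. A minimal sketch of that file-writing pattern follows; the write_group_vars helper name and the example values are assumptions for illustration, mirroring the nginx_nodes file added in this patch, not code from the repository.

import os

def write_group_vars(project_dir, group, variables):
    # Hypothetical helper: emit a group_vars file in the same layout the
    # generator uses (one "key: value" pair per line, no YAML document marker).
    group_vars_dir = os.path.join(project_dir, "group_vars")
    os.makedirs(group_vars_dir, exist_ok=True)
    with open(os.path.join(group_vars_dir, group), "w") as f:
        for key, value in variables.items():
            f.write(f"{key}: {value}\n")

# Example values taken from the group_vars/nginx_nodes file in this patch.
write_group_vars("app/media/MyAnsible", "nginx_nodes",
                 {"ansible_port": 22, "ansible_user": "root"})
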
diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml deleted file mode 100644 index 516dbff3..00000000 --- a/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml +++ /dev/null @@ -1,20 +0,0 @@ -- block: - - name: Check if Calico CRDs exist - command: kubectl get crd felixconfigurations.crd.projectcalico.org - register: calico_crd_check - ignore_errors: true - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_operator_url }} - retries: 3 - delay: 3 - - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_crd_url }} - retries: 3 - delay: 3 - delegate_to: "{{ groups['k8s_masters'][0] }}" - when: calico_crd_check.rc != 0 - run_once: true diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml deleted file mode 100644 index a1836485..00000000 --- a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml +++ /dev/null @@ -1,64 +0,0 @@ -- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Init cluster | Copy kubeadmcnf.yaml - template: - src: kubeadmcnf.yml.j2 - dest: /root/kubeadmcnf.yaml - - - name: Init cluster | Initiate cluster on node groups['kube_master'][0] - shell: kubeadm init --config=/root/kubeadmcnf.yaml - register: kubeadm_init - # Retry is because upload config sometimes fails - until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - notify: Restart kubelet - - when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 - delegate_to: localhost - -- name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 - delegate_to: localhost - # when: use_iran == "true" - -- name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." 
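Editor's note: the initk8s.yml removed above only runs kubeadm init when /var/lib/kubelet/config.yaml is absent, which is what keeps repeated playbook runs from re-initialising the control plane. A hedged sketch of how a file-writing generator in the style of this series could emit that guard is below; the emit_init_tasks name and the simplified two-task body are illustrative assumptions, not the project's actual generator.

import os

def emit_init_tasks(tasks_dir):
    # Hypothetical emitter: writes the stat guard plus the kubeadm init task,
    # mirroring (in simplified form) the logic of the deleted initk8s.yml.
    os.makedirs(tasks_dir, exist_ok=True)
    with open(os.path.join(tasks_dir, "initk8s.yml"), "w") as f:
        f.write("- name: Init cluster | Check if kubeadm has already run\n")
        f.write("  stat:\n")
        f.write("    path: /var/lib/kubelet/config.yaml\n")
        f.write("  register: kubeadm_already_run\n\n")
        f.write("- name: Init cluster | Initiate cluster\n")
        f.write("  shell: kubeadm init --config=/root/kubeadmcnf.yaml\n")
        f.write("  when: not kubeadm_already_run.stat.exists\n")

emit_init_tasks("app/media/MyAnsible/roles/init_k8s/tasks")
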
diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/main.yml b/app/media/MyAnsible/roles/init_k8s/tasks/main.yml deleted file mode 100644 index bb40ddec..00000000 --- a/app/media/MyAnsible/roles/init_k8s/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -# tasks file for init_k8s - -- name: Initialize kubernetes cluster - include_tasks: initk8s.yml - -- name: Initialize Calico CNI - include_tasks: cni.yml diff --git a/app/media/MyAnsible/roles/init_k8s/templates/kubeadmcnf.yml.j2 b/app/media/MyAnsible/roles/init_k8s/templates/kubeadmcnf.yml.j2 deleted file mode 100644 index f07c8d0c..00000000 --- a/app/media/MyAnsible/roles/init_k8s/templates/kubeadmcnf.yml.j2 +++ /dev/null @@ -1,13 +0,0 @@ -kind: InitConfiguration -apiVersion: kubeadm.k8s.io/v1beta3 -nodeRegistration: - criSocket: {{ cri_socket }} - imagePullPolicy: IfNotPresent ---- -kind: ClusterConfiguration -apiVersion: kubeadm.k8s.io/v1beta3 -kubernetesVersion: "{{ k8s_version }}" -controlPlaneEndpoint: "{{ apiserver_url }}" -certificatesDir: /etc/kubernetes/pki -networking: - podSubnet: {{ pod_network_cidr }} diff --git a/app/media/MyAnsible/roles/install_nginx/tasks/main.yml b/app/media/MyAnsible/roles/install_nginx/tasks/main.yml new file mode 100644 index 00000000..a2dd7f03 --- /dev/null +++ b/app/media/MyAnsible/roles/install_nginx/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: Install CA certificates to ensure HTTPS connections work + apt: + name: ca-certificates + state: present + +- name: Add Nginx signing key + apt_key: + url: "{ nginx_repo_key_url }" + state: present + +- name: Add Nginx repository + apt_repository: + repo: "deb {{ nginx_repo_url }} {{ ansible_distribution_release }} nginx" + state: present + filename: nginx + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install specific version of Nginx + apt: + name: "nginx={{ nginx_version }}~{{ ansible_distribution_release }}" + state: present + +- name: Ensure Nginx service is running and enabled + service: + name: nginx + state: started + enabled: yes diff --git a/app/media/MyAnsible/roles/install_nginx/vars/main.yml b/app/media/MyAnsible/roles/install_nginx/vars/main.yml new file mode 100644 index 00000000..08839ccb --- /dev/null +++ b/app/media/MyAnsible/roles/install_nginx/vars/main.yml @@ -0,0 +1,3 @@ +nginx_repo_key_url: "https://nginx.org/keys/nginx_signing.key" +nginx_repo_url: "http://nginx.org/packages/mainline/ubuntu/" +nginx_version: "*" diff --git a/app/media/MyAnsible/roles/join_master/defaults/main.yml b/app/media/MyAnsible/roles/join_master/defaults/main.yml deleted file mode 100644 index e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/join_master/files/join-command b/app/media/MyAnsible/roles/join_master/files/join-command deleted file mode 100644 index e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/join_master/handlers/main.yml b/app/media/MyAnsible/roles/join_master/handlers/main.yml deleted file mode 100644 index e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/join_master/tasks/join_master.yml b/app/media/MyAnsible/roles/join_master/tasks/join_master.yml deleted file mode 100644 index f82dbee0..00000000 --- a/app/media/MyAnsible/roles/join_master/tasks/join_master.yml +++ /dev/null @@ -1,100 +0,0 @@ -- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - -- block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: 
"{{ join_command.stdout_lines[0] }}" - - - name: Copy join command to local file - become: false - local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" - - - name: copy kubeadmcnf.yaml - template: - src: kubeadmcnf-join.yml.j2 - dest: /root/kubeadm-config.yaml - - when: - - inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Copy the join command to server location - copy: - src: roles/join_master/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- block: - - name: get certificate key - shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml - register: kubeadm_cert_key - - - name: Print certificate key - debug: - msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" - - - name: register the cert key - set_fact: - control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" - - when: - - inventory_hostname in groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - run_once: false - delegate_facts: true - -- name: Join | Join control-plane to cluster - command: "sh /root/join-command.sh --control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- name: remove apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" - state: absent - -- name: Add apiserver_url to point to the masters - lineinfile: - dest: /etc/hosts - line: "{{ private_ip }} {{ apiserver_url }}" - state: present - when: - - inventory_hostname in groups['k8s_masters'] diff --git a/app/media/MyAnsible/roles/join_master/tasks/main.yml b/app/media/MyAnsible/roles/join_master/tasks/main.yml deleted file mode 100644 index 316b5b1d..00000000 --- a/app/media/MyAnsible/roles/join_master/tasks/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -# tasks file for join_master - -- name: Join master(s) node to cluster - include_tasks: join_master.yml diff --git a/app/media/MyAnsible/roles/join_master/templates/kubeadmcnf-join.yml.j2 b/app/media/MyAnsible/roles/join_master/templates/kubeadmcnf-join.yml.j2 deleted file mode 100644 index 967a6228..00000000 --- a/app/media/MyAnsible/roles/join_master/templates/kubeadmcnf-join.yml.j2 +++ /dev/null @@ -1,12 +0,0 @@ ---- -apiVersion: kubeadm.k8s.io/v1beta3 -kind: JoinConfiguration -nodeRegistration: - criSocket: {{ cri_socket }} - ---- -kind: ClusterConfiguration -apiVersion: kubeadm.k8s.io/v1beta3 -kubernetesVersion: "{{ k8s_version }}" - - diff --git a/app/media/MyAnsible/roles/join_worker/defaults/main.yml b/app/media/MyAnsible/roles/join_worker/defaults/main.yml deleted file mode 100644 index 
e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/join_worker/files/join-command b/app/media/MyAnsible/roles/join_worker/files/join-command deleted file mode 100644 index e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/join_worker/handlers/main.yml b/app/media/MyAnsible/roles/join_worker/handlers/main.yml deleted file mode 100644 index e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml b/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml deleted file mode 100644 index b9b94947..00000000 --- a/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml +++ /dev/null @@ -1,38 +0,0 @@ -- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - -- block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{{ join_command.stdout_lines[0] }}" - - - name: Copy join command to local file - become: false - local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" - - when: - - inventory_hostname not in groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Copy the join command to server location - copy: - src: roles/join_worker/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- name: Join | Join worker nodes to the cluster - command: sh /root/join-command.sh - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists diff --git a/app/media/MyAnsible/roles/join_worker/tasks/main.yml b/app/media/MyAnsible/roles/join_worker/tasks/main.yml deleted file mode 100644 index a43175cc..00000000 --- a/app/media/MyAnsible/roles/join_worker/tasks/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -# tasks file for join_worker - -- name: Join worker(s) node to cluster - include_tasks: join_worker.yml diff --git a/app/media/MyAnsible/roles/k8s/defaults/main.yml b/app/media/MyAnsible/roles/k8s/defaults/main.yml deleted file mode 100644 index e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/k8s/files/sample.sh b/app/media/MyAnsible/roles/k8s/files/sample.sh deleted file mode 100644 index e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/k8s/handlers/main.yml b/app/media/MyAnsible/roles/k8s/handlers/main.yml deleted file mode 100644 index de036f51..00000000 --- a/app/media/MyAnsible/roles/k8s/handlers/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# handlers file for k8s - -- name: Remove temporary GPG key file - file: - path: "/tmp/docker.list" - state: absent - -- name: Restart kubelet - service: - name: kubelet - state: restarted diff --git a/app/media/MyAnsible/roles/k8s/tasks/k8s.yml b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml deleted file mode 100644 index 4620eef3..00000000 --- a/app/media/MyAnsible/roles/k8s/tasks/k8s.yml +++ /dev/null @@ -1,195 +0,0 @@ -- name: Disable SWAP since kubernetes can't work with swap enabled - shell: | - swapoff -a - -- name: Disable SWAP in fstab since kubernetes can't work with swap enabled - replace: - path: /etc/fstab - regexp: '^([^#].*?\sswap\s+sw\s+.*)$' - replace: '# \1' - -- name: Check if ufw is installed - package_facts: - manager: "auto" - -- name: Disable ufw # just in Ubuntu - ufw: - state: disabled - when: "'ufw' in ansible_facts.packages" - -- name: Ensure 
kernel modules for containerd are enabled - lineinfile: - path: /etc/modules-load.d/containerd.conf - line: "{{ item }}" - create: yes - state: present - loop: - - overlay - - br_netfilter - -- name: Load kernel modules - command: - cmd: "modprobe {{ item }}" - loop: - - overlay - - br_netfilter - -- name: Ensure sysctl settings for Kubernetes are present - blockinfile: - path: /etc/sysctl.d/kubernetes.conf - block: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - net.ipv4.ip_forward = 1 - create: yes - marker: "# {mark} ANSIBLE MANAGED BLOCK" - owner: root - group: root - mode: '0644' - -- name: Reload sysctl settings - command: - cmd: sysctl --system - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install required packages - apt: - pkg: - - ca-certificates - - curl - - gnupg - - lsb-release - - gpg - state: present - update_cache: yes - -- name: Ensure the /etc/apt/keyrings directory exists - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' # Adjust the permissions as necessary - owner: root # Set the owner, if required - group: root - -- name: Remove existing Docker GPG key if it exists - file: - path: '{{ docker_gpg_key_path }}' - state: absent - -- name: Download Docker GPG key - shell: | - curl -fsSL {{ docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} - -- name: Determine the architecture - command: dpkg --print-architecture - register: architecture - -- name: Determine the distribution codename - command: lsb_release -cs - register: distribution_codename - -- name: Add Docker APT repository - lineinfile: - path: /etc/apt/sources.list.d/docker.list - create: yes - line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" - state: present - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install required packages (containerd) - apt: - pkg: - - containerd.io - state: present - -- name: Generate default containerd configuration - shell: - cmd: containerd config default > /etc/containerd/config.toml - -- name: Replace SystemdCgroup from false to true in containerd config - replace: - path: /etc/containerd/config.toml - regexp: 'SystemdCgroup = false' - replace: 'SystemdCgroup = true' - -- name: Restart containerd service - systemd: - name: containerd - state: restarted - daemon_reload: yes - -- name: Enable containerd service - systemd: - name: containerd - enabled: yes - -- name: Delete the existing Kubernetes APT keyring file if it exists - file: - path: '{{ kubernetes_gpg_keyring_path }}' - state: absent - -- name: Download Kubernetes GPG key - shell: | - curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' - -- name: Add Kubernetes repo - apt_repository: - repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" - state: present - filename: kubernetes.list - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install Kubernetes packages - apt: - name: "{{ item }}" - state: present - loop: - - kubeadm=1.31.2-1.1 - - kubelet=1.31.2-1.1 - - kubectl=1.31.2-1.1 - -- name: Hold Kubernetes packages - dpkg_selections: - name: "{{ item }}" - selection: hold - loop: - - kubeadm - - kubelet - - kubectl - - containerd.io - -- name: Configure node ip - lineinfile: - path: /etc/default/kubelet - line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} - create: yes - state: present - notify: Restart kubelet - -- name: Add hosts 
to /etc/hosts - lineinfile: - path: /etc/hosts - line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" - state: present - create: no - loop: "{{ groups['all'] }}" - when: hostvars[item].private_ip is defined - -- name: Add apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" - state: present - -- name: Pull Kubernetes images | If you got error check your dns and sanction - command: - cmd: kubeadm config images pull diff --git a/app/media/MyAnsible/roles/k8s/tasks/main.yml b/app/media/MyAnsible/roles/k8s/tasks/main.yml deleted file mode 100644 index a0ac6054..00000000 --- a/app/media/MyAnsible/roles/k8s/tasks/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- name: Install kubernetes packages - include_tasks: k8s.yml diff --git a/app/media/MyAnsible/roles/preinstall/defaults/main.yml b/app/media/MyAnsible/roles/preinstall/defaults/main.yml deleted file mode 100644 index e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/preinstall/files/sample.sh b/app/media/MyAnsible/roles/preinstall/files/sample.sh deleted file mode 100644 index e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/preinstall/handlers/main.yml b/app/media/MyAnsible/roles/preinstall/handlers/main.yml deleted file mode 100644 index e69de29b..00000000 diff --git a/app/media/MyAnsible/roles/preinstall/tasks/basic.yml b/app/media/MyAnsible/roles/preinstall/tasks/basic.yml deleted file mode 100644 index 43fae8cd..00000000 --- a/app/media/MyAnsible/roles/preinstall/tasks/basic.yml +++ /dev/null @@ -1,82 +0,0 @@ -- name: Set timezone to UTC - timezone: - name: Etc/UTC - -- name: Set hostname - command: hostnamectl set-hostname {{ inventory_hostname }} - -- name: Remove symlink resolve.conf - file: - path: "/etc/resolv.conf" - state: absent - ignore_errors: true - when: use_iran == "true" - -- name: Configure resolv.conf - template: - src: "resolv.conf.j2" - dest: "/etc/resolv.conf" - mode: "0644" - when: use_iran == "true" - -- name: Add hostname - lineinfile: - path: /etc/hosts - regexp: '^127\.0\.0\.1' - line: "127.0.0.1 {{ inventory_hostname }} localhost" - owner: root - group: root - mode: 0644 - -- name: Install necessary tools - apt: - state: latest - update_cache: true - name: - - vim - - sudo - - wget - - curl - - telnet - - nload - - s3cmd - - cron - - ipset - - lvm2 - - python3 - - python3-setuptools - - python3-pip - - python3-apt - - intel-microcode - - htop - - tcpdump - - net-tools - - screen - - tmux - - byobu - - iftop - - bmon - - iperf - - sysstat - - ethtool - - plocate - - thin-provisioning-tools - - conntrack - - stress - - cpufrequtils - - rsync - - xz-utils - - build-essential - - apt-transport-https - - ca-certificates - - software-properties-common - - gnupg-agent - - iptables-persistent - - open-iscsi - - nfs-common - - tzdata - - tree - -- name: Fix broken packages - apt: - state: fixed diff --git a/app/media/MyAnsible/roles/preinstall/tasks/main.yml b/app/media/MyAnsible/roles/preinstall/tasks/main.yml deleted file mode 100644 index 56a88e66..00000000 --- a/app/media/MyAnsible/roles/preinstall/tasks/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- name: basic setup - include_tasks: basic.yml diff --git a/app/media/MyAnsible/roles/preinstall/templates/resolv.conf.j2 b/app/media/MyAnsible/roles/preinstall/templates/resolv.conf.j2 deleted file mode 100644 index b40011a6..00000000 --- a/app/media/MyAnsible/roles/preinstall/templates/resolv.conf.j2 +++ /dev/null @@ -1,20 
+0,0 @@ - -# {{ ansible_managed }} - -{% if resolv_search is defined and resolv_search | length > 0 %} -search {{ resolv_search|join(' ') }} -{% endif %} -{% if resolv_domain is defined and resolv_domain != "" %} -domain {{ resolv_domain }} -{% endif %} -{% for ns in resolv_nameservers %} -nameserver {{ ns }} -{% endfor %} -{% if resolv_sortlist is defined and resolv_sortlist | length > 0 %} -{% for sl in resolv_sortlist %} -sortlist {{ sl }} -{% endfor %} -{% endif %} -{% if resolv_options is defined and resolv_options | length > 0 %} -options {{ resolv_options|join(' ') }} -{% endif %} diff --git a/app/routes/ansible.py b/app/routes/ansible.py index f1db3387..7c546345 100644 --- a/app/routes/ansible.py +++ b/app/routes/ansible.py @@ -16,9 +16,7 @@ async def ansible_install_generation_nginx(request:AnsibleInstallNginx) -> Outpu return Output(output='output') generated_prompt = ansible_install_template(request,"nginx") - output = gpt_service(generated_prompt) - edit_directory_generator("ansible_generator",output) - execute_pythonfile("MyAnsible","ansible_generator") + return Output(output='output') diff --git a/app/template_generators/ansible/install/nginx.py b/app/template_generators/ansible/install/nginx.py index 38175395..aed87f43 100644 --- a/app/template_generators/ansible/install/nginx.py +++ b/app/template_generators/ansible/install/nginx.py @@ -1,3 +1,4 @@ +import os def ansible_nginx_install_ubuntu(input): nginx_hosts = input.hosts @@ -10,120 +11,92 @@ def ansible_nginx_install_ubuntu(input): nginx_version_in_task = "nginx={{ nginx_version }}~{{ ansible_distribution_release }}" - prompt = f""" - Generate a Python code to generate an Ansible project (project name is app/media/MyAnsible) - that dynamically provisions Ansible resources ensuring a modular, flexible structure. Only provide - Python code, no explanations or markdown formatting, without ```python entry. 
- The project should be organized as follows: + + + project_name = "app/media/MyAnsible" + ansible_dir = project_name + group_vars_dir = os.path.join(ansible_dir, "group_vars") + host_vars_dir = os.path.join(ansible_dir, "host_vars") + roles_dir = os.path.join(ansible_dir, "roles") + install_nginx_dir = os.path.join(roles_dir, "install_nginx") + tasks_dir = os.path.join(install_nginx_dir, "tasks") + vars_dir = os.path.join(install_nginx_dir, "vars") + defaults_dir = os.path.join(install_nginx_dir, "defaults") + files_dir = os.path.join(install_nginx_dir, "files") + handlers_dir = os.path.join(install_nginx_dir, "handlers") + templates_dir = os.path.join(install_nginx_dir, "templates") - The structure of this project must be as follows: - ``` - ├── ansible.cfg - ├── group_vars - │   |── nginx_nodes - │   - ├── hosts - ├── host_vars - ├── nginx_playbook.yml - └── roles - └── install_nginx - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - └── main.yml - ``` - - The content of ansible.cfg must be as follows: - ``` - [defaults] - host_key_checking=false - ``` - - group_vars directory includes a single file called "nginx_nodes" and the content of this file must be as follows: - ``` - ansible_port: {nginx_ansible_port} - ansible_user: {nginx_ansible_user} - ``` - - there is file called "hosts" which its content must be as follows: - ``` - {nginx_inventory} - ``` - - There is an empty directory called "host_vars" with no files included - - There is a file called "nginx_playbook.yml" which its content must be as follows: - ``` - - hosts: all - roles: - - install_nginx - ``` - - There is a directory called "roles" which a sub-directory called "install_nginx" (roles/install_nginx) - "install_nginx" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (install_nginx/tasks): This path has a file called "main.yml" which its content must be as follows: - ``` - --- - - name: Install CA certificates to ensure HTTPS connections work - apt: - name: ca-certificates - state: present + # Create project directories + os.makedirs(group_vars_dir, exist_ok=True) + os.makedirs(host_vars_dir, exist_ok=True) + os.makedirs(roles_dir, exist_ok=True) + os.makedirs(install_nginx_dir, exist_ok=True) + os.makedirs(tasks_dir, exist_ok=True) + os.makedirs(vars_dir, exist_ok=True) + os.makedirs(defaults_dir, exist_ok=True) + os.makedirs(files_dir, exist_ok=True) + os.makedirs(handlers_dir, exist_ok=True) + os.makedirs(templates_dir, exist_ok=True) - - name: Add Nginx signing key - apt_key: - url: "{nginx_repo_key_in_task}" - state: present + # Create ansible.cfg + with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as ansible_cfg: + ansible_cfg.write("[defaults]\n") + ansible_cfg.write("host_key_checking=false\n") - - name: Add Nginx repository - apt_repository: - repo: "{nginx_repo_in_task}" - state: present - filename: nginx + # Create group_vars/nginx_nodes + with open(os.path.join(group_vars_dir, "nginx_nodes"), "w") as nginx_nodes: + nginx_nodes.write(f"ansible_port : {nginx_ansible_port}\n") + nginx_nodes.write(f"ansible_user : {nginx_ansible_user}\n") - - name: Update apt cache - apt: - update_cache: yes + # Create hosts + with open(os.path.join(ansible_dir, "hosts"), "w") as hosts_file: + + hosts_file.write(f"{nginx_inventory}") + - - name: Install specific version of Nginx - apt: - name: "{nginx_version_in_task}" - state: present + # Create empty 
host_vars directory (already created) - - name: Ensure Nginx service is running and enabled - service: - name: nginx - state: started - enabled: yes - ``` - - (install_nginx/vars): This path has a file called "main.yml" which its content must be as follows: - ``` - nginx_repo_key_url: "https://nginx.org/keys/nginx_signing.key" - nginx_repo_url: "http://nginx.org/packages/mainline/ubuntu/" - nginx_version: "{nginx_version}" - ``` - - finally just give me a python code without any note that can generate a project folder with the - given schema without ```python entry. and we dont need any base directory in the python code. - the final ansible template must work very well without any error! - - the python code you give me, must have structure like that: - - import os - project_name = "app/media/MyAnsible" - foo_dir = os.path.join(project_name, "bar") - x_dir = os.path.join(modules_dir, "y") + # Create nginx_playbook.yml + with open(os.path.join(ansible_dir, "nginx_playbook.yml"), "w") as playbook: + playbook.write("- hosts: all\n") + playbook.write(" roles:\n") + playbook.write(" - install_nginx\n") - # Create project directories - os.makedirs(ansible_dir, exist_ok=True) + # Create install_nginx/tasks/main.yml + with open(os.path.join(tasks_dir, "main.yml"), "w") as tasks_file: + tasks_file.write("---\n") + tasks_file.write("- name: Install CA certificates to ensure HTTPS connections work\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: ca-certificates\n") + tasks_file.write(" state: present\n\n") + tasks_file.write("- name: Add Nginx signing key\n") + tasks_file.write(" apt_key:\n") + tasks_file.write(" url: \"{ nginx_repo_key_url }\"\n") + tasks_file.write(" state: present\n\n") + tasks_file.write("- name: Add Nginx repository\n") + tasks_file.write(" apt_repository:\n") + tasks_file.write(" repo: \"deb {{ nginx_repo_url }} {{ ansible_distribution_release }} nginx\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" filename: nginx\n\n") + tasks_file.write("- name: Update apt cache\n") + tasks_file.write(" apt:\n") + tasks_file.write(" update_cache: yes\n\n") + tasks_file.write("- name: Install specific version of Nginx\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"nginx={{ nginx_version }}~{{ ansible_distribution_release }}\"\n") + tasks_file.write(" state: present\n\n") + tasks_file.write("- name: Ensure Nginx service is running and enabled\n") + tasks_file.write(" service:\n") + tasks_file.write(" name: nginx\n") + tasks_file.write(" state: started\n") + tasks_file.write(" enabled: yes\n") - # Create main.tf - with open(os.path.join(project_name, "main.tf"), "w") as main_file: - # any thing you need - """ - return prompt - + # Create install_nginx/vars/main.yml + with open(os.path.join(vars_dir, "main.yml"), "w") as vars_file: + vars_file.write("nginx_repo_key_url: \"https://nginx.org/keys/nginx_signing.key\"\n") + vars_file.write("nginx_repo_url: \"http://nginx.org/packages/mainline/ubuntu/\"\n") + vars_file.write(f"nginx_version: \"{nginx_version}\"\n") + def ansible_nginx_install(input): From d4e235aa7ecd873944c7b12ad8e103daf952c876 Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Thu, 12 Dec 2024 12:28:22 +0330 Subject: [PATCH 20/25] fix(docker_ansible): fix docker ansible template generation --- app/directory_generators/ansible_generator.py | 119 ++++++----- ...nginx_playbook.yml => docker_playbook.yml} | 2 +- app/media/MyAnsible/group_vars/docker_nodes | 2 + app/media/MyAnsible/group_vars/nginx_nodes | 2 - app/media/MyAnsible/hosts | 5 +- 
.../roles/install_docker/tasks/main.yml | 35 +++ .../roles/install_docker/vars/main.yml | 14 ++ .../roles/install_nginx/tasks/main.yml | 31 --- .../roles/install_nginx/vars/main.yml | 3 - app/routes/ansible.py | 7 +- .../ansible/install/docker.py | 201 +++++++----------- 11 files changed, 199 insertions(+), 222 deletions(-) rename app/media/MyAnsible/{nginx_playbook.yml => docker_playbook.yml} (51%) create mode 100644 app/media/MyAnsible/group_vars/docker_nodes delete mode 100644 app/media/MyAnsible/group_vars/nginx_nodes create mode 100644 app/media/MyAnsible/roles/install_docker/tasks/main.yml create mode 100644 app/media/MyAnsible/roles/install_docker/vars/main.yml delete mode 100644 app/media/MyAnsible/roles/install_nginx/tasks/main.yml delete mode 100644 app/media/MyAnsible/roles/install_nginx/vars/main.yml diff --git a/app/directory_generators/ansible_generator.py b/app/directory_generators/ansible_generator.py index 1e5f7dd4..06b13344 100644 --- a/app/directory_generators/ansible_generator.py +++ b/app/directory_generators/ansible_generator.py @@ -1,84 +1,87 @@ import os project_name = "app/media/MyAnsible" -ansible_dir = project_name -group_vars_dir = os.path.join(ansible_dir, "group_vars") -host_vars_dir = os.path.join(ansible_dir, "host_vars") -roles_dir = os.path.join(ansible_dir, "roles") -install_nginx_dir = os.path.join(roles_dir, "install_nginx") -tasks_dir = os.path.join(install_nginx_dir, "tasks") -vars_dir = os.path.join(install_nginx_dir, "vars") -defaults_dir = os.path.join(install_nginx_dir, "defaults") -files_dir = os.path.join(install_nginx_dir, "files") -handlers_dir = os.path.join(install_nginx_dir, "handlers") -templates_dir = os.path.join(install_nginx_dir, "templates") # Create project directories -os.makedirs(group_vars_dir, exist_ok=True) -os.makedirs(host_vars_dir, exist_ok=True) -os.makedirs(roles_dir, exist_ok=True) -os.makedirs(install_nginx_dir, exist_ok=True) -os.makedirs(tasks_dir, exist_ok=True) -os.makedirs(vars_dir, exist_ok=True) -os.makedirs(defaults_dir, exist_ok=True) -os.makedirs(files_dir, exist_ok=True) -os.makedirs(handlers_dir, exist_ok=True) -os.makedirs(templates_dir, exist_ok=True) +os.makedirs(os.path.join(project_name, "group_vars"), exist_ok=True) +os.makedirs(os.path.join(project_name, "host_vars"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "defaults"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "files"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "handlers"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "tasks"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "templates"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "vars"), exist_ok=True) # Create ansible.cfg -with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as ansible_cfg: +with open(os.path.join(project_name, "ansible.cfg"), "w") as ansible_cfg: ansible_cfg.write("[defaults]\n") ansible_cfg.write("host_key_checking=false\n") -# Create group_vars/nginx_nodes -with open(os.path.join(group_vars_dir, "nginx_nodes"), "w") as nginx_nodes: - nginx_nodes.write("ansible_port: 22\n") - nginx_nodes.write("ansible_user: root\n") +# Create group_vars/docker_nodes +with open(os.path.join(project_name, "group_vars", "docker_nodes"), "w") as docker_nodes: + docker_nodes.write("ansible_port: 22\n") + docker_nodes.write("ansible_user: root\n") # Create hosts -with 
open(os.path.join(ansible_dir, "hosts"), "w") as hosts_file: - hosts_file.write("[nginx_nodes]\n") +with open(os.path.join(project_name, "hosts"), "w") as hosts_file: + hosts_file.write("[docker_nodes]\n") hosts_file.write("www.example.com\n") -# Create empty host_vars directory (already created) - -# Create nginx_playbook.yml -with open(os.path.join(ansible_dir, "nginx_playbook.yml"), "w") as playbook: +# Create docker_playbook.yml +with open(os.path.join(project_name, "docker_playbook.yml"), "w") as playbook: playbook.write("- hosts: all\n") playbook.write(" roles:\n") - playbook.write(" - install_nginx\n") + playbook.write(" - install_docker\n") -# Create install_nginx/tasks/main.yml -with open(os.path.join(tasks_dir, "main.yml"), "w") as tasks_file: +# Create install_docker/tasks/main.yml +with open(os.path.join(project_name, "roles", "install_docker", "tasks", "main.yml"), "w") as tasks_file: tasks_file.write("---\n") - tasks_file.write("- name: Install CA certificates to ensure HTTPS connections work\n") + tasks_file.write("- name: Install prerequisite packages\n") tasks_file.write(" apt:\n") - tasks_file.write(" name: ca-certificates\n") - tasks_file.write(" state: present\n\n") - tasks_file.write("- name: Add Nginx signing key\n") - tasks_file.write(" apt_key:\n") - tasks_file.write(" url: \"{{ nginx_repo_key_url }}\"\n") - tasks_file.write(" state: present\n\n") - tasks_file.write("- name: Add Nginx repository\n") - tasks_file.write(" apt_repository:\n") - tasks_file.write(" repo: \"deb {{ nginx_repo_url }} {{ ansible_distribution_release }} nginx\"\n") + tasks_file.write(" name: \"{{ item }}\"\n") tasks_file.write(" state: present\n") - tasks_file.write(" filename: nginx\n\n") - tasks_file.write("- name: Update apt cache\n") + tasks_file.write(" loop: \"{{ prerequisite_packages }}\"\n") + tasks_file.write("- name: Create directory for Docker keyrings\n") + tasks_file.write(" file:\n") + tasks_file.write(" path: /etc/apt/keyrings\n") + tasks_file.write(" state: directory\n") + tasks_file.write(" mode: '0755'\n") + tasks_file.write("- name: Download Docker's official GPG key\n") + tasks_file.write(" get_url:\n") + tasks_file.write(" url: https://download.docker.com/linux/ubuntu/gpg\n") + tasks_file.write(" dest: /etc/apt/keyrings/docker.asc\n") + tasks_file.write(" mode: '0644'\n") + tasks_file.write("- name: Add Docker repository to apt sources\n") + tasks_file.write(" copy:\n") + tasks_file.write(" content: |\n") + tasks_file.write(" deb [arch={{ ansible_architecture }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable\n") + tasks_file.write(" dest: /etc/apt/sources.list.d/docker.list\n") + tasks_file.write("- name: Update apt cache after adding Docker repo\n") tasks_file.write(" apt:\n") - tasks_file.write(" update_cache: yes\n\n") - tasks_file.write("- name: Install specific version of Nginx\n") + tasks_file.write(" update_cache: yes\n") + tasks_file.write("- name: Install Docker packages\n") tasks_file.write(" apt:\n") - tasks_file.write(" name: \"nginx={{ nginx_version }}~{{ ansible_distribution_release }}\"\n") - tasks_file.write(" state: present\n\n") - tasks_file.write("- name: Ensure Nginx service is running and enabled\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" loop: \"{{ docker_packages }}\"\n") + tasks_file.write("- name: Ensure Docker and containerd services are started and enabled\n") tasks_file.write(" service:\n") - 
tasks_file.write(" name: nginx\n") + tasks_file.write(" name: \"{{ item }}\"\n") tasks_file.write(" state: started\n") tasks_file.write(" enabled: yes\n") + tasks_file.write(" loop: \"{{ docker_services }}\"\n") -# Create install_nginx/vars/main.yml -with open(os.path.join(vars_dir, "main.yml"), "w") as vars_file: - vars_file.write("nginx_repo_key_url: \"https://nginx.org/keys/nginx_signing.key\"\n") - vars_file.write("nginx_repo_url: \"http://nginx.org/packages/mainline/ubuntu/\"\n") - vars_file.write("nginx_version: \"*\"\n") \ No newline at end of file +# Create install_docker/vars/main.yml +with open(os.path.join(project_name, "roles", "install_docker", "vars", "main.yml"), "w") as vars_file: + vars_file.write("prerequisite_packages:\n") + vars_file.write(" - ca-certificates\n") + vars_file.write(" - curl\n\n") + vars_file.write("docker_services:\n") + vars_file.write(" - docker\n") + vars_file.write(" - containerd\n\n") + vars_file.write("docker_packages:\n") + vars_file.write(" - docker-ce\n") + vars_file.write(" - docker-ce-cli\n") + vars_file.write(" - containerd.io\n") + vars_file.write(" - docker-buildx-plugin\n") + vars_file.write(" - docker-compose-plugin\n") \ No newline at end of file diff --git a/app/media/MyAnsible/nginx_playbook.yml b/app/media/MyAnsible/docker_playbook.yml similarity index 51% rename from app/media/MyAnsible/nginx_playbook.yml rename to app/media/MyAnsible/docker_playbook.yml index 475800e3..e3da6d2f 100644 --- a/app/media/MyAnsible/nginx_playbook.yml +++ b/app/media/MyAnsible/docker_playbook.yml @@ -1,3 +1,3 @@ - hosts: all roles: - - install_nginx + - install_docker diff --git a/app/media/MyAnsible/group_vars/docker_nodes b/app/media/MyAnsible/group_vars/docker_nodes new file mode 100644 index 00000000..0d1345c2 --- /dev/null +++ b/app/media/MyAnsible/group_vars/docker_nodes @@ -0,0 +1,2 @@ +ansible_port: 22 +ansible_user: root diff --git a/app/media/MyAnsible/group_vars/nginx_nodes b/app/media/MyAnsible/group_vars/nginx_nodes deleted file mode 100644 index dd1b275a..00000000 --- a/app/media/MyAnsible/group_vars/nginx_nodes +++ /dev/null @@ -1,2 +0,0 @@ -ansible_port : 23 -ansible_user : root diff --git a/app/media/MyAnsible/hosts b/app/media/MyAnsible/hosts index 33db3a29..cf97b06b 100644 --- a/app/media/MyAnsible/hosts +++ b/app/media/MyAnsible/hosts @@ -1,3 +1,4 @@ -[nginx_nodes] +[docker_nodes] www.example.com -dfgsg \ No newline at end of file +x +i diff --git a/app/media/MyAnsible/roles/install_docker/tasks/main.yml b/app/media/MyAnsible/roles/install_docker/tasks/main.yml new file mode 100644 index 00000000..87fb3c84 --- /dev/null +++ b/app/media/MyAnsible/roles/install_docker/tasks/main.yml @@ -0,0 +1,35 @@ +--- +- name: Install prerequisite packages + apt: + name: "{{ item }}" + state: present + loop: "{{ prerequisite_packages }}" +- name: Create directory for Docker keyrings + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' +- name: Download Docker's official GPG key + get_url: + url: https://download.docker.com/linux/ubuntu/gpg + dest: /etc/apt/keyrings/docker.asc + mode: '0644' +- name: Add Docker repository to apt sources + copy: + content: | + deb [arch={{ ansible_architecture }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable + dest: /etc/apt/sources.list.d/docker.list +- name: Update apt cache after adding Docker repo + apt: + update_cache: yes +- name: Install Docker packages + apt: + name: "{{ item }}" + state: present + loop: "{{ 
docker_packages }}" +- name: Ensure Docker and containerd services are started and enabled + service: + name: "{{ item }}" + state: started + enabled: yes + loop: "{{ docker_services }}" diff --git a/app/media/MyAnsible/roles/install_docker/vars/main.yml b/app/media/MyAnsible/roles/install_docker/vars/main.yml new file mode 100644 index 00000000..c0488ca6 --- /dev/null +++ b/app/media/MyAnsible/roles/install_docker/vars/main.yml @@ -0,0 +1,14 @@ +prerequisite_packages: + - ca-certificates + - curl + +docker_services: + - docker + - containerd + +docker_packages: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin diff --git a/app/media/MyAnsible/roles/install_nginx/tasks/main.yml b/app/media/MyAnsible/roles/install_nginx/tasks/main.yml deleted file mode 100644 index a2dd7f03..00000000 --- a/app/media/MyAnsible/roles/install_nginx/tasks/main.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: Install CA certificates to ensure HTTPS connections work - apt: - name: ca-certificates - state: present - -- name: Add Nginx signing key - apt_key: - url: "{ nginx_repo_key_url }" - state: present - -- name: Add Nginx repository - apt_repository: - repo: "deb {{ nginx_repo_url }} {{ ansible_distribution_release }} nginx" - state: present - filename: nginx - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install specific version of Nginx - apt: - name: "nginx={{ nginx_version }}~{{ ansible_distribution_release }}" - state: present - -- name: Ensure Nginx service is running and enabled - service: - name: nginx - state: started - enabled: yes diff --git a/app/media/MyAnsible/roles/install_nginx/vars/main.yml b/app/media/MyAnsible/roles/install_nginx/vars/main.yml deleted file mode 100644 index 08839ccb..00000000 --- a/app/media/MyAnsible/roles/install_nginx/vars/main.yml +++ /dev/null @@ -1,3 +0,0 @@ -nginx_repo_key_url: "https://nginx.org/keys/nginx_signing.key" -nginx_repo_url: "http://nginx.org/packages/mainline/ubuntu/" -nginx_version: "*" diff --git a/app/routes/ansible.py b/app/routes/ansible.py index 7c546345..49c15370 100644 --- a/app/routes/ansible.py +++ b/app/routes/ansible.py @@ -14,7 +14,7 @@ async def ansible_install_generation_nginx(request:AnsibleInstallNginx) -> Outpu if os.environ.get("TEST"): return Output(output='output') - generated_prompt = ansible_install_template(request,"nginx") + ansible_install_template(request,"nginx") return Output(output='output') @@ -25,11 +25,8 @@ async def ansible_install_generation_docker(request:AnsibleInstallDocker) -> Out if os.environ.get("TEST"): return Output(output='output') - generated_prompt = ansible_install_template(request,"docker") + ansible_install_template(request,"docker") - output = gpt_service(generated_prompt) - edit_directory_generator("ansible_generator",output) - execute_pythonfile("MyAnsible","ansible_generator") return Output(output='output') diff --git a/app/template_generators/ansible/install/docker.py b/app/template_generators/ansible/install/docker.py index 74e362df..7445b3f3 100644 --- a/app/template_generators/ansible/install/docker.py +++ b/app/template_generators/ansible/install/docker.py @@ -1,3 +1,4 @@ +import os def ansible_docker_install(input): docker_hosts = input.hosts @@ -13,131 +14,91 @@ def ansible_docker_install(input): - prompt = f""" - Generate a Python code to generate an Ansible project (project name is app/media/MyAnsible) - that dynamically provisions Ansible resources ensuring a modular, flexible structure. 
Only provide - Python code, no explanations or markdown formatting, without ```python entry. - The project should be organized as follows: + + - The structure of this project must be as follows: - ``` - ├── ansible.cfg - ├── group_vars - │   |── docker_nodes - │   - ├── hosts - ├── host_vars - ├── docker_playbook.yml - └── roles - └── install_docker - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - └── main.yml - ``` - - The content of ansible.cfg must be as follows: - ``` - [defaults] - host_key_checking=false - ``` - - group_vars directory includes a single file called "docker_nodes" and the content of this file must be as follows: - ``` - ansible_port: {docker_ansible_port} - ansible_user: {docker_ansible_user} - ``` - - there is file called "hosts" which its content must be as follows: - ``` - {docker_inventory} - ``` - - There is an empty directory called "host_vars" with no files included - - There is a file called "docker_playbook.yml" which its content must be as follows: - ``` - - hosts: all - roles: - - install_docker - ``` - - There is a directory called "roles" which a sub-directory called "install_docker" (roles/install_docker) - "install_docker" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (install_docker/tasks): This path has a file called "main.yml" which its content must be as follows: - ``` - --- - - name: Install prerequisite packages - apt: - name: "{docker_items_in_task}" - state: present - loop: "{docker_prerequisite_packages_in_task}"" - - name: Create directory for Docker keyrings - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' - - name: Download Docker's official GPG key - get_url: - url: https://download.docker.com/linux/ubuntu/gpg - dest: /etc/apt/keyrings/docker.asc - mode: '0644' - - name: Add Docker repository to apt sources - copy: - content: | - deb [arch={ansible_architecture_in_task} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {ansible_distribution_release_in_task} stable - dest: /etc/apt/sources.list.d/docker.list - - name: Update apt cache after adding Docker repo - apt: - update_cache: yes - - name: Install Docker packages - apt: - name: "{docker_items_in_task}" - state: present - loop: "{docker_packages_in_task}"" - - name: Ensure Docker and containerd services are started and enabled - service: - name: "{docker_items_in_task}" - state: started - enabled: yes - loop: "{docker_services_in_task}"" - ``` - - (install_docker/vars): This path has a file called "main.yml" which its content must be as follows: - ``` - prerequisite_packages: - - ca-certificates - - curl + project_name = "app/media/MyAnsible" - docker_services: - - docker - - containerd + # Create project directories + os.makedirs(os.path.join(project_name, "group_vars"), exist_ok=True) + os.makedirs(os.path.join(project_name, "host_vars"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "defaults"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "files"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "handlers"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "tasks"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "templates"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", 
"install_docker", "vars"), exist_ok=True) - docker_packages: - - docker-ce - - docker-ce-cli - - containerd.io - - docker-buildx-plugin - - docker-compose-plugin - ``` + # Create ansible.cfg + with open(os.path.join(project_name, "ansible.cfg"), "w") as ansible_cfg: + ansible_cfg.write("[defaults]\n") + ansible_cfg.write("host_key_checking=false\n") - finally just give me a python code without any note that can generate a project folder with the - given schema without ```python entry. and we dont need any base directory in the python code. - the final ansible template must work very well without any error! + # Create group_vars/docker_nodes + with open(os.path.join(project_name, "group_vars", "docker_nodes"), "w") as docker_nodes: + docker_nodes.write(f"ansible_port: {docker_ansible_port}\n") + docker_nodes.write(f"ansible_user: {docker_ansible_user}\n") - the python code you give me, must have structure like that: + # Create hosts + with open(os.path.join(project_name, "hosts"), "w") as hosts_file: + hosts_file.write(f"{docker_inventory}\n") + - import os - project_name = "app/media/MyAnsible" - foo_dir = os.path.join(project_name, "bar") - x_dir = os.path.join(modules_dir, "y") + # Create docker_playbook.yml + with open(os.path.join(project_name, "docker_playbook.yml"), "w") as playbook: + playbook.write("- hosts: all\n") + playbook.write(" roles:\n") + playbook.write(" - install_docker\n") - # Create project directories - os.makedirs(ansible_dir, exist_ok=True) + # Create install_docker/tasks/main.yml + with open(os.path.join(project_name, "roles", "install_docker", "tasks", "main.yml"), "w") as tasks_file: + tasks_file.write("---\n") + tasks_file.write("- name: Install prerequisite packages\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" loop: \"{{ prerequisite_packages }}\"\n") + tasks_file.write("- name: Create directory for Docker keyrings\n") + tasks_file.write(" file:\n") + tasks_file.write(" path: /etc/apt/keyrings\n") + tasks_file.write(" state: directory\n") + tasks_file.write(" mode: '0755'\n") + tasks_file.write("- name: Download Docker's official GPG key\n") + tasks_file.write(" get_url:\n") + tasks_file.write(" url: https://download.docker.com/linux/ubuntu/gpg\n") + tasks_file.write(" dest: /etc/apt/keyrings/docker.asc\n") + tasks_file.write(" mode: '0644'\n") + tasks_file.write("- name: Add Docker repository to apt sources\n") + tasks_file.write(" copy:\n") + tasks_file.write(" content: |\n") + tasks_file.write(" deb [arch={{ ansible_architecture }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable\n") + tasks_file.write(" dest: /etc/apt/sources.list.d/docker.list\n") + tasks_file.write("- name: Update apt cache after adding Docker repo\n") + tasks_file.write(" apt:\n") + tasks_file.write(" update_cache: yes\n") + tasks_file.write("- name: Install Docker packages\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" loop: \"{{ docker_packages }}\"\n") + tasks_file.write("- name: Ensure Docker and containerd services are started and enabled\n") + tasks_file.write(" service:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: started\n") + tasks_file.write(" enabled: yes\n") + tasks_file.write(" loop: \"{{ docker_services }}\"\n") - # Create main.tf - with open(os.path.join(project_name, "main.tf"), 
"w") as main_file: - # any thing you need - """ - return prompt + # Create install_docker/vars/main.yml + with open(os.path.join(project_name, "roles", "install_docker", "vars", "main.yml"), "w") as vars_file: + vars_file.write("prerequisite_packages:\n") + vars_file.write(" - ca-certificates\n") + vars_file.write(" - curl\n\n") + vars_file.write("docker_services:\n") + vars_file.write(" - docker\n") + vars_file.write(" - containerd\n\n") + vars_file.write("docker_packages:\n") + vars_file.write(" - docker-ce\n") + vars_file.write(" - docker-ce-cli\n") + vars_file.write(" - containerd.io\n") + vars_file.write(" - docker-buildx-plugin\n") + vars_file.write(" - docker-compose-plugin\n") From eb53b0f1c08dd8015e75cb40aa1928358e7db37d Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Thu, 12 Dec 2024 12:51:33 +0330 Subject: [PATCH 21/25] fix(ansible_kuber): fix ansible kuber section --- app/media/MyAnsible/docker_playbook.yml | 3 - app/media/MyAnsible/group_vars/all | 38 + app/media/MyAnsible/group_vars/docker_nodes | 2 - app/media/MyAnsible/hosts | 17 +- app/media/MyAnsible/kubernetes_playbook.yml | 39 + .../roles/init_k8s/defaults/main.yml | 0 .../MyAnsible/roles/init_k8s/files/sample.sh | 0 .../roles/init_k8s/handlers/main.yml | 0 .../MyAnsible/roles/init_k8s/tasks/cni.yml | 21 + .../roles/init_k8s/tasks/initk8s.yml | 65 + .../MyAnsible/roles/init_k8s/tasks/main.yml | 9 + .../init_k8s/templates/kubeadmcnf.yml.j2 | 13 + .../roles/install_docker/tasks/main.yml | 35 - .../roles/install_docker/vars/main.yml | 14 - .../roles/join_master/defaults/main.yml | 0 .../roles/join_master/files/join-command | 0 .../roles/join_master/handlers/main.yml | 0 .../roles/join_master/tasks/join_master.yml | 101 + .../roles/join_master/tasks/main.yml | 6 + .../templates/kubeadmcnf-join.yml.j2 | 12 + .../roles/join_worker/defaults/main.yml | 0 .../roles/join_worker/files/join-command | 0 .../roles/join_worker/handlers/main.yml | 0 .../roles/join_worker/tasks/join_worker.yml | 39 + .../roles/join_worker/tasks/main.yml | 6 + .../MyAnsible/roles/k8s/defaults/main.yml | 0 app/media/MyAnsible/roles/k8s/files/sample.sh | 0 .../MyAnsible/roles/k8s/handlers/main.yml | 13 + app/media/MyAnsible/roles/k8s/tasks/k8s.yml | 196 ++ app/media/MyAnsible/roles/k8s/tasks/main.yml | 4 + .../roles/preinstall/defaults/main.yml | 0 .../roles/preinstall/files/sample.sh | 0 .../roles/preinstall/handlers/main.yml | 0 .../roles/preinstall/tasks/basic.yml | 83 + .../MyAnsible/roles/preinstall/tasks/main.yml | 4 + .../roles/preinstall/templates/resolv.conf.j2 | 20 + app/routes/ansible.py | 6 +- .../ansible/install/kuber.py | 1626 ++++++++--------- 38 files changed, 1486 insertions(+), 886 deletions(-) delete mode 100644 app/media/MyAnsible/docker_playbook.yml create mode 100644 app/media/MyAnsible/group_vars/all delete mode 100644 app/media/MyAnsible/group_vars/docker_nodes create mode 100644 app/media/MyAnsible/kubernetes_playbook.yml create mode 100644 app/media/MyAnsible/roles/init_k8s/defaults/main.yml create mode 100644 app/media/MyAnsible/roles/init_k8s/files/sample.sh create mode 100644 app/media/MyAnsible/roles/init_k8s/handlers/main.yml create mode 100644 app/media/MyAnsible/roles/init_k8s/tasks/cni.yml create mode 100644 app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml create mode 100644 app/media/MyAnsible/roles/init_k8s/tasks/main.yml create mode 100644 app/media/MyAnsible/roles/init_k8s/templates/kubeadmcnf.yml.j2 delete mode 100644 app/media/MyAnsible/roles/install_docker/tasks/main.yml delete mode 100644 
app/media/MyAnsible/roles/install_docker/vars/main.yml create mode 100644 app/media/MyAnsible/roles/join_master/defaults/main.yml create mode 100644 app/media/MyAnsible/roles/join_master/files/join-command create mode 100644 app/media/MyAnsible/roles/join_master/handlers/main.yml create mode 100644 app/media/MyAnsible/roles/join_master/tasks/join_master.yml create mode 100644 app/media/MyAnsible/roles/join_master/tasks/main.yml create mode 100644 app/media/MyAnsible/roles/join_master/templates/kubeadmcnf-join.yml.j2 create mode 100644 app/media/MyAnsible/roles/join_worker/defaults/main.yml create mode 100644 app/media/MyAnsible/roles/join_worker/files/join-command create mode 100644 app/media/MyAnsible/roles/join_worker/handlers/main.yml create mode 100644 app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml create mode 100644 app/media/MyAnsible/roles/join_worker/tasks/main.yml create mode 100644 app/media/MyAnsible/roles/k8s/defaults/main.yml create mode 100644 app/media/MyAnsible/roles/k8s/files/sample.sh create mode 100644 app/media/MyAnsible/roles/k8s/handlers/main.yml create mode 100644 app/media/MyAnsible/roles/k8s/tasks/k8s.yml create mode 100644 app/media/MyAnsible/roles/k8s/tasks/main.yml create mode 100644 app/media/MyAnsible/roles/preinstall/defaults/main.yml create mode 100644 app/media/MyAnsible/roles/preinstall/files/sample.sh create mode 100644 app/media/MyAnsible/roles/preinstall/handlers/main.yml create mode 100644 app/media/MyAnsible/roles/preinstall/tasks/basic.yml create mode 100644 app/media/MyAnsible/roles/preinstall/tasks/main.yml create mode 100644 app/media/MyAnsible/roles/preinstall/templates/resolv.conf.j2 diff --git a/app/media/MyAnsible/docker_playbook.yml b/app/media/MyAnsible/docker_playbook.yml deleted file mode 100644 index e3da6d2f..00000000 --- a/app/media/MyAnsible/docker_playbook.yml +++ /dev/null @@ -1,3 +0,0 @@ -- hosts: all - roles: - - install_docker diff --git a/app/media/MyAnsible/group_vars/all b/app/media/MyAnsible/group_vars/all new file mode 100644 index 00000000..38a7e206 --- /dev/null +++ b/app/media/MyAnsible/group_vars/all @@ -0,0 +1,38 @@ +# General + install_ansible_modules: "true" + disable_transparent_huge_pages: "true" + + setup_interface: "false" + + # Network Calico see here for more details https://github.com/projectcalico/calico/releases + calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" + calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" + pod_network_cidr: "192.168.0.0/16" + + # DNS + resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online + + # Sanction shekan + use_iran: "true" # change it to "false" if you are outside of iran + + # Docker + docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" + docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" + docker_apt_repo: "https://download.docker.com/linux/ubuntu" + + # Kubernetes + kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" + kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" + kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" + k8s_version: 1.31 # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases + + # CRI + cri_socket: unix:///var/run/containerd/containerd.sock + + # Ansible Connection + ansible_user: root + ansible_port: 22 + ansible_python_interpreter: "/usr/bin/python3" + domain: 
"devopsgpt.com" + apiserver_url: "devopsgpt.com" + \ No newline at end of file diff --git a/app/media/MyAnsible/group_vars/docker_nodes b/app/media/MyAnsible/group_vars/docker_nodes deleted file mode 100644 index 0d1345c2..00000000 --- a/app/media/MyAnsible/group_vars/docker_nodes +++ /dev/null @@ -1,2 +0,0 @@ -ansible_port: 22 -ansible_user: root diff --git a/app/media/MyAnsible/hosts b/app/media/MyAnsible/hosts index cf97b06b..74638b9e 100644 --- a/app/media/MyAnsible/hosts +++ b/app/media/MyAnsible/hosts @@ -1,4 +1,13 @@ -[docker_nodes] -www.example.com -x -i +[all] +stringoooooooooooooooooo private_ip=x.x.x.x +stringppppppppp private_ip=x.x.x.x + +[k8s] +stringoooooooooooooooooo +stringppppppppp + +[k8s_masters] +stringoooooooooooooooooo + +[k8s_workers] +stringppppppppp \ No newline at end of file diff --git a/app/media/MyAnsible/kubernetes_playbook.yml b/app/media/MyAnsible/kubernetes_playbook.yml new file mode 100644 index 00000000..e502abc0 --- /dev/null +++ b/app/media/MyAnsible/kubernetes_playbook.yml @@ -0,0 +1,39 @@ +- hosts: all + roles: + - role: preinstall + gather_facts: yes + any_errors_fatal: true + tags: [preinstall] + + - hosts: k8s + roles: + - role: k8s + gather_facts: yes + any_errors_fatal: true + tags: [k8s] + + - hosts: k8s + roles: + - role: init_k8s + gather_facts: yes + any_errors_fatal: true + tags: [init_k8s] + + - hosts: k8s_masters + roles: + - role: preinstall + - role: k8s + - role: join_master + gather_facts: yes + any_errors_fatal: true + tags: [join_master] + + - hosts: k8s_workers + roles: + - role: preinstall + - role: k8s + - role: join_worker + gather_facts: yes + any_errors_fatal: true + tags: [join_worker] + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/defaults/main.yml b/app/media/MyAnsible/roles/init_k8s/defaults/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/init_k8s/files/sample.sh b/app/media/MyAnsible/roles/init_k8s/files/sample.sh new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/init_k8s/handlers/main.yml b/app/media/MyAnsible/roles/init_k8s/handlers/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml new file mode 100644 index 00000000..613b1329 --- /dev/null +++ b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml @@ -0,0 +1,21 @@ +- block: + - name: Check if Calico CRDs exist + command: kubectl get crd felixconfigurations.crd.projectcalico.org + register: calico_crd_check + ignore_errors: true + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - block: + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_operator_url }} + retries: 3 + delay: 3 + + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_crd_url }} + retries: 3 + delay: 3 + delegate_to: "{{ groups['k8s_masters'][0] }}" + when: calico_crd_check.rc != 0 + run_once: true + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml new file mode 100644 index 00000000..52603b08 --- /dev/null +++ b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml @@ -0,0 +1,65 @@ +- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - block: + - name: 
Init cluster | Copy kubeadmcnf.yaml + template: + src: kubeadmcnf.yml.j2 + dest: /root/kubeadmcnf.yaml + + - name: Init cluster | Initiate cluster on node groups['kube_master'][0] + shell: kubeadm init --config=/root/kubeadmcnf.yaml + register: kubeadm_init + # Retry is because upload config sometimes fails + until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr + notify: Restart kubelet + + when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - name: Sleep for 300 seconds and reboot the Master1 server + wait_for: + timeout: 300 + delegate_to: localhost + + - name: Reboot the servers + command: reboot + async: 1 + poll: 0 + # ignore_errors: yes + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - name: Sleep for 300 seconds to Master1 up and running + wait_for: + timeout: 300 + delegate_to: localhost + # when: use_iran == "true" + + - name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/main.yml b/app/media/MyAnsible/roles/init_k8s/tasks/main.yml new file mode 100644 index 00000000..d5c550c5 --- /dev/null +++ b/app/media/MyAnsible/roles/init_k8s/tasks/main.yml @@ -0,0 +1,9 @@ +--- + # tasks file for init_k8s + + - name: Initialize kubernetes cluster + include_tasks: initk8s.yml + + - name: Initialize Calico CNI + include_tasks: cni.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/templates/kubeadmcnf.yml.j2 b/app/media/MyAnsible/roles/init_k8s/templates/kubeadmcnf.yml.j2 new file mode 100644 index 00000000..f07c8d0c --- /dev/null +++ b/app/media/MyAnsible/roles/init_k8s/templates/kubeadmcnf.yml.j2 @@ -0,0 +1,13 @@ +kind: InitConfiguration +apiVersion: kubeadm.k8s.io/v1beta3 +nodeRegistration: + criSocket: {{ cri_socket }} + imagePullPolicy: IfNotPresent +--- +kind: ClusterConfiguration +apiVersion: kubeadm.k8s.io/v1beta3 +kubernetesVersion: "{{ k8s_version }}" +controlPlaneEndpoint: "{{ apiserver_url }}" +certificatesDir: /etc/kubernetes/pki +networking: + podSubnet: {{ pod_network_cidr }} diff --git a/app/media/MyAnsible/roles/install_docker/tasks/main.yml b/app/media/MyAnsible/roles/install_docker/tasks/main.yml deleted file mode 100644 index 87fb3c84..00000000 --- a/app/media/MyAnsible/roles/install_docker/tasks/main.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- name: Install prerequisite packages - apt: - name: "{{ item }}" - state: present - loop: "{{ prerequisite_packages }}" -- name: Create directory for Docker keyrings - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' -- name: Download Docker's official GPG key - get_url: - url: https://download.docker.com/linux/ubuntu/gpg - dest: /etc/apt/keyrings/docker.asc - mode: '0644' -- name: Add Docker repository to apt sources - copy: - content: | - deb [arch={{ ansible_architecture }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable - dest: 
/etc/apt/sources.list.d/docker.list -- name: Update apt cache after adding Docker repo - apt: - update_cache: yes -- name: Install Docker packages - apt: - name: "{{ item }}" - state: present - loop: "{{ docker_packages }}" -- name: Ensure Docker and containerd services are started and enabled - service: - name: "{{ item }}" - state: started - enabled: yes - loop: "{{ docker_services }}" diff --git a/app/media/MyAnsible/roles/install_docker/vars/main.yml b/app/media/MyAnsible/roles/install_docker/vars/main.yml deleted file mode 100644 index c0488ca6..00000000 --- a/app/media/MyAnsible/roles/install_docker/vars/main.yml +++ /dev/null @@ -1,14 +0,0 @@ -prerequisite_packages: - - ca-certificates - - curl - -docker_services: - - docker - - containerd - -docker_packages: - - docker-ce - - docker-ce-cli - - containerd.io - - docker-buildx-plugin - - docker-compose-plugin diff --git a/app/media/MyAnsible/roles/join_master/defaults/main.yml b/app/media/MyAnsible/roles/join_master/defaults/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/join_master/files/join-command b/app/media/MyAnsible/roles/join_master/files/join-command new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/join_master/handlers/main.yml b/app/media/MyAnsible/roles/join_master/handlers/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/join_master/tasks/join_master.yml b/app/media/MyAnsible/roles/join_master/tasks/join_master.yml new file mode 100644 index 00000000..5b4b9cc1 --- /dev/null +++ b/app/media/MyAnsible/roles/join_master/tasks/join_master.yml @@ -0,0 +1,101 @@ +- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + + - block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" + + - name: copy kubeadmcnf.yaml + template: + src: kubeadmcnf-join.yml.j2 + dest: /root/kubeadm-config.yaml + + when: + - inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - block: + - name: Copy the join command to server location + copy: + src: roles/join_master/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + + - block: + - name: get certificate key + shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml + register: kubeadm_cert_key + + - name: Print certificate key + debug: + msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + - name: register the cert key + set_fact: + control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + when: + - inventory_hostname in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + run_once: false + delegate_facts: true + + - name: Join | Join control-plane to cluster + command: "sh /root/join-command.sh --control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in 
groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + + - block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + + - name: remove apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: absent + + - name: Add apiserver_url to point to the masters + lineinfile: + dest: /etc/hosts + line: "{{ private_ip }} {{ apiserver_url }}" + state: present + when: + - inventory_hostname in groups['k8s_masters'] + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_master/tasks/main.yml b/app/media/MyAnsible/roles/join_master/tasks/main.yml new file mode 100644 index 00000000..0007d81d --- /dev/null +++ b/app/media/MyAnsible/roles/join_master/tasks/main.yml @@ -0,0 +1,6 @@ +--- + # tasks file for join_master + + - name: Join master(s) node to cluster + include_tasks: join_master.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_master/templates/kubeadmcnf-join.yml.j2 b/app/media/MyAnsible/roles/join_master/templates/kubeadmcnf-join.yml.j2 new file mode 100644 index 00000000..967a6228 --- /dev/null +++ b/app/media/MyAnsible/roles/join_master/templates/kubeadmcnf-join.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: JoinConfiguration +nodeRegistration: + criSocket: {{ cri_socket }} + +--- +kind: ClusterConfiguration +apiVersion: kubeadm.k8s.io/v1beta3 +kubernetesVersion: "{{ k8s_version }}" + + diff --git a/app/media/MyAnsible/roles/join_worker/defaults/main.yml b/app/media/MyAnsible/roles/join_worker/defaults/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/join_worker/files/join-command b/app/media/MyAnsible/roles/join_worker/files/join-command new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/join_worker/handlers/main.yml b/app/media/MyAnsible/roles/join_worker/handlers/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml b/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml new file mode 100644 index 00000000..b8f8a0c2 --- /dev/null +++ b/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml @@ -0,0 +1,39 @@ +- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + + - block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" + + when: + - inventory_hostname not in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - block: + - name: Copy the join command to server location + copy: + src: roles/join_worker/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname not in groups['k8s_masters'] + - 
not kubeadm_already_run.stat.exists + + - name: Join | Join worker nodes to the cluster + command: sh /root/join-command.sh + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_worker/tasks/main.yml b/app/media/MyAnsible/roles/join_worker/tasks/main.yml new file mode 100644 index 00000000..0c3717ec --- /dev/null +++ b/app/media/MyAnsible/roles/join_worker/tasks/main.yml @@ -0,0 +1,6 @@ +--- + # tasks file for join_worker + + - name: Join worker(s) node to cluster + include_tasks: join_worker.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/k8s/defaults/main.yml b/app/media/MyAnsible/roles/k8s/defaults/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/k8s/files/sample.sh b/app/media/MyAnsible/roles/k8s/files/sample.sh new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/k8s/handlers/main.yml b/app/media/MyAnsible/roles/k8s/handlers/main.yml new file mode 100644 index 00000000..658d4007 --- /dev/null +++ b/app/media/MyAnsible/roles/k8s/handlers/main.yml @@ -0,0 +1,13 @@ +--- + # handlers file for k8s + + - name: Remove temporary GPG key file + file: + path: "/tmp/docker.list" + state: absent + + - name: Restart kubelet + service: + name: kubelet + state: restarted + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/k8s/tasks/k8s.yml b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml new file mode 100644 index 00000000..7ff78f52 --- /dev/null +++ b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml @@ -0,0 +1,196 @@ +- name: Disable SWAP since kubernetes can't work with swap enabled + shell: | + swapoff -a + + - name: Disable SWAP in fstab since kubernetes can't work with swap enabled + replace: + path: /etc/fstab + regexp: '^([^#].*?\sswap\s+sw\s+.*)$' + replace: '# \1' + + - name: Check if ufw is installed + package_facts: + manager: "auto" + + - name: Disable ufw # just in Ubuntu + ufw: + state: disabled + when: "'ufw' in ansible_facts.packages" + + - name: Ensure kernel modules for containerd are enabled + lineinfile: + path: /etc/modules-load.d/containerd.conf + line: "{{ item }}" + create: yes + state: present + loop: + - overlay + - br_netfilter + + - name: Load kernel modules + command: + cmd: "modprobe {{ item }}" + loop: + - overlay + - br_netfilter + + - name: Ensure sysctl settings for Kubernetes are present + blockinfile: + path: /etc/sysctl.d/kubernetes.conf + block: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + create: yes + marker: "# {mark} ANSIBLE MANAGED BLOCK" + owner: root + group: root + mode: '0644' + + - name: Reload sysctl settings + command: + cmd: sysctl --system + + - name: Update apt cache + apt: + update_cache: yes + + - name: Install required packages + apt: + pkg: + - ca-certificates + - curl + - gnupg + - lsb-release + - gpg + state: present + update_cache: yes + + - name: Ensure the /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' # Adjust the permissions as necessary + owner: root # Set the owner, if required + group: root + + - name: Remove existing Docker GPG key if it exists + file: + path: '{{ docker_gpg_key_path }}' + state: absent + + - name: Download Docker GPG key + shell: | + curl -fsSL {{ docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} + + - name: Determine the architecture + command: dpkg 
--print-architecture + register: architecture + + - name: Determine the distribution codename + command: lsb_release -cs + register: distribution_codename + + - name: Add Docker APT repository + lineinfile: + path: /etc/apt/sources.list.d/docker.list + create: yes + line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" + state: present + + - name: Update apt cache + apt: + update_cache: yes + + - name: Install required packages (containerd) + apt: + pkg: + - containerd.io + state: present + + - name: Generate default containerd configuration + shell: + cmd: containerd config default > /etc/containerd/config.toml + + - name: Replace SystemdCgroup from false to true in containerd config + replace: + path: /etc/containerd/config.toml + regexp: 'SystemdCgroup = false' + replace: 'SystemdCgroup = true' + + - name: Restart containerd service + systemd: + name: containerd + state: restarted + daemon_reload: yes + + - name: Enable containerd service + systemd: + name: containerd + enabled: yes + + - name: Delete the existing Kubernetes APT keyring file if it exists + file: + path: '{{ kubernetes_gpg_keyring_path }}' + state: absent + + - name: Download Kubernetes GPG key + shell: | + curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' + + - name: Add Kubernetes repo + apt_repository: + repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" + state: present + filename: kubernetes.list + + - name: Update apt cache + apt: + update_cache: yes + + - name: Install Kubernetes packages + apt: + name: "{{ item }}" + state: present + loop: + - kubeadm=1.31.2-1.1 + - kubelet=1.31.2-1.1 + - kubectl=1.31.2-1.1 + + - name: Hold Kubernetes packages + dpkg_selections: + name: "{{ item }}" + selection: hold + loop: + - kubeadm + - kubelet + - kubectl + - containerd.io + + - name: Configure node ip + lineinfile: + path: /etc/default/kubelet + line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} + create: yes + state: present + notify: Restart kubelet + + - name: Add hosts to /etc/hosts + lineinfile: + path: /etc/hosts + line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" + state: present + create: no + loop: "{{ groups['all'] }}" + when: hostvars[item].private_ip is defined + + - name: Add apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: present + + - name: Pull Kubernetes images | If you got error check your dns and sanction + command: + cmd: kubeadm config images pull + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/k8s/tasks/main.yml b/app/media/MyAnsible/roles/k8s/tasks/main.yml new file mode 100644 index 00000000..2686e68a --- /dev/null +++ b/app/media/MyAnsible/roles/k8s/tasks/main.yml @@ -0,0 +1,4 @@ +--- + - name: Install kubernetes packages + include_tasks: k8s.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/preinstall/defaults/main.yml b/app/media/MyAnsible/roles/preinstall/defaults/main.yml new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/preinstall/files/sample.sh b/app/media/MyAnsible/roles/preinstall/files/sample.sh new file mode 100644 index 00000000..e69de29b diff --git a/app/media/MyAnsible/roles/preinstall/handlers/main.yml b/app/media/MyAnsible/roles/preinstall/handlers/main.yml new file mode 100644 index 
00000000..e69de29b diff --git a/app/media/MyAnsible/roles/preinstall/tasks/basic.yml b/app/media/MyAnsible/roles/preinstall/tasks/basic.yml new file mode 100644 index 00000000..4439a0ce --- /dev/null +++ b/app/media/MyAnsible/roles/preinstall/tasks/basic.yml @@ -0,0 +1,83 @@ +- name: Set timezone to UTC + timezone: + name: Etc/UTC + + - name: Set hostname + command: hostnamectl set-hostname {{ inventory_hostname }} + + - name: Remove symlink resolve.conf + file: + path: "/etc/resolv.conf" + state: absent + ignore_errors: true + when: use_iran == "true" + + - name: Configure resolv.conf + template: + src: "resolv.conf.j2" + dest: "/etc/resolv.conf" + mode: "0644" + when: use_iran == "true" + + - name: Add hostname + lineinfile: + path: /etc/hosts + regexp: '^127\.0\.0\.1' + line: "127.0.0.1 {{ inventory_hostname }} localhost" + owner: root + group: root + mode: 0644 + + - name: Install necessary tools + apt: + state: latest + update_cache: true + name: + - vim + - sudo + - wget + - curl + - telnet + - nload + - s3cmd + - cron + - ipset + - lvm2 + - python3 + - python3-setuptools + - python3-pip + - python3-apt + - intel-microcode + - htop + - tcpdump + - net-tools + - screen + - tmux + - byobu + - iftop + - bmon + - iperf + - sysstat + - ethtool + - plocate + - thin-provisioning-tools + - conntrack + - stress + - cpufrequtils + - rsync + - xz-utils + - build-essential + - apt-transport-https + - ca-certificates + - software-properties-common + - gnupg-agent + - iptables-persistent + - open-iscsi + - nfs-common + - tzdata + - tree + + - name: Fix broken packages + apt: + state: fixed + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/preinstall/tasks/main.yml b/app/media/MyAnsible/roles/preinstall/tasks/main.yml new file mode 100644 index 00000000..a943325c --- /dev/null +++ b/app/media/MyAnsible/roles/preinstall/tasks/main.yml @@ -0,0 +1,4 @@ +--- + - name: basic setup + include_tasks: basic.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/preinstall/templates/resolv.conf.j2 b/app/media/MyAnsible/roles/preinstall/templates/resolv.conf.j2 new file mode 100644 index 00000000..b40011a6 --- /dev/null +++ b/app/media/MyAnsible/roles/preinstall/templates/resolv.conf.j2 @@ -0,0 +1,20 @@ + +# {{ ansible_managed }} + +{% if resolv_search is defined and resolv_search | length > 0 %} +search {{ resolv_search|join(' ') }} +{% endif %} +{% if resolv_domain is defined and resolv_domain != "" %} +domain {{ resolv_domain }} +{% endif %} +{% for ns in resolv_nameservers %} +nameserver {{ ns }} +{% endfor %} +{% if resolv_sortlist is defined and resolv_sortlist | length > 0 %} +{% for sl in resolv_sortlist %} +sortlist {{ sl }} +{% endfor %} +{% endif %} +{% if resolv_options is defined and resolv_options | length > 0 %} +options {{ resolv_options|join(' ') }} +{% endif %} diff --git a/app/routes/ansible.py b/app/routes/ansible.py index 49c15370..c55b9b45 100644 --- a/app/routes/ansible.py +++ b/app/routes/ansible.py @@ -35,11 +35,7 @@ async def ansible_install_generation_kuber(request:AnsibleInstallKuber) -> Outpu if os.environ.get("TEST"): return Output(output='output') - generated_prompt = ansible_install_template(request,"kuber") - - output = gpt_service(generated_prompt) - edit_directory_generator("ansible_generator",output) - execute_pythonfile("MyAnsible","ansible_generator") + ansible_install_template(request,"kuber") add_files_to_folder(files = ['app/media/kuber_configs/resolv.conf.j2'] , folder='app/media/MyAnsible/roles/preinstall/templates/') 
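# The add_files_to_folder() helper called in the two lines around this point is defined
# elsewhere in the repo and is not shown in this patch; per this hunk it copies the static
# Jinja2 templates shipped under app/media/kuber_configs into the generated MyAnsible role
# directories after ansible_install_template() has written the project tree. A minimal
# sketch of what such a helper could look like, assuming plain shutil-based copying
# (hypothetical illustration, not the project's actual implementation):
import os
import shutil

def add_files_to_folder(files, folder):
    """Copy each source file into `folder`, creating the folder if needed."""
    os.makedirs(folder, exist_ok=True)
    for src in files:
        shutil.copy(src, os.path.join(folder, os.path.basename(src)))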
add_files_to_folder(files = ['app/media/kuber_configs/kubeadmcnf.yml.j2'] , folder='app/media/MyAnsible/roles/init_k8s/templates/') add_files_to_folder(files = ['app/media/kuber_configs/kubeadmcnf-join.yml.j2'] , folder='app/media/MyAnsible/roles/join_master/templates/') diff --git a/app/template_generators/ansible/install/kuber.py b/app/template_generators/ansible/install/kuber.py index 5c01e4d0..95b8de84 100644 --- a/app/template_generators/ansible/install/kuber.py +++ b/app/template_generators/ansible/install/kuber.py @@ -1,4 +1,4 @@ - +import os def ansible_kuber_install(input): kubernetes_ansible_port = input.ansible_port @@ -14,826 +14,806 @@ def ansible_kuber_install(input): } kubernetes_inventory = "\n\n".join(f"{section}\n" + "\n".join(entries) for section, entries in sections.items()) - inventory_hostname = "{{ inventory_hostname }}" - item_in_task = "{{ item }}" - ufw_in_task = "'ufw'" - docker_gpg_key_path_in_task = "{{ docker_gpg_key_path }}" - docker_gpg_key_url_in_task = "{{ docker_gpg_key_url }}" - architecture_stdout_in_task = "{{ architecture.stdout }}" - docker_apt_repo_in_task = "{{ docker_apt_repo }}" - distribution_codename_stdout_in_task = "{{ distribution_codename.stdout }}" - kubernetes_gpg_keyring_path_in_task = "{{ kubernetes_gpg_keyring_path }}" - kubernetes_gpg_key_url_in_task = "{{ kubernetes_gpg_key_url }}" - kubernetes_apt_repo_in_task = "{{ kubernetes_apt_repo }}" - private_ip_in_task = "{{ private_ip }}" - hostvars_private_ip_in_task = "{{ hostvars[item].private_ip }}" - domain_in_task = "{{ domain }}" - groups_all_in_task = "{{ groups['all'] }}" - hostvars_groups_k8s_masters_private_ip_in_task = "{{ hostvars[groups['k8s_masters'][0]].private_ip }}" - apiserver_url_in_task = "{{ apiserver_url }}" - groups_k8s_masters_in_task = "{{ groups['k8s_masters'][0] }}" - calico_operator_url_in_task = "{{ calico_operator_url }}" - calico_crd_url_in_task = "{{ calico_crd_url }}" - join_command_stdout_lines_in_task = "{{ join_command.stdout_lines[0] }}" - kubeadm_cert_key_stdout_lines_in_task = "{{ kubeadm_cert_key.stdout_lines[2] }}" - hostvars_k8s_masters_control_plane_certkey_in_task = "{{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }}" - cri_socket_in_task = "{{ cri_socket }}" - - - - prompt = f""" - Generate a Python code to generate an Ansible project (project name is app/media/MyAnsible) - that dynamically provisions Ansible resources ensuring a modular, flexible structure. Only provide - Python code, no explanations or markdown formatting, without ```python entry. 
- The project should be organized as follows: - - The structure of this project must be as follows: - ``` - ├── ansible.cfg - ├── group_vars - │   |── all - │   - ├── hosts - ├── host_vars - ├── kubernetes_playbook.yml - └── roles - └── preinstall - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── basic.yml - │   └── main.yml - ├── templates - │   └── resolv.conf.j2 - └── vars - | └── main.yml - k8s - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── k8s.yml - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - | └── main.yml - init_k8s - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── cni.yml - │   └── initk8s.yml - │   └── main.yml - ├── templates - │   └── kubeadmcnf.yml.j2 - └── vars - | └── main.yml - join_master - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── join_master.yml - │   └── main.yml - ├── templates - │   └── kubeadmcnf-join.yml.j2 - └── vars - | └── main.yml - join_worker - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── join_worker.yml - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - └── main.yml - ``` - - The content of ansible.cfg must be as follows: - ``` - [defaults] - host_key_checking=false - ``` - - group_vars directory includes a single file called "all" and the content of this file must be as follows: - ``` - # General - install_ansible_modules: "true" - disable_transparent_huge_pages: "true" - - setup_interface: "false" - - # Network Calico see here for more details https://github.com/projectcalico/calico/releases - calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" - calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" - pod_network_cidr: "192.168.0.0/16" - - # DNS - resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online - - # Sanction shekan - use_iran: "true" # change it to "false" if you are outside of iran - - # Docker - docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" - docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" - docker_apt_repo: "https://download.docker.com/linux/ubuntu" - - # Kubernetes - kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" - kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v{k8s_version}/deb/Release.key" - kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v{k8s_version}/deb/" - k8s_version: "{k8s_version}.2" # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases - - # CRI - cri_socket: unix:///var/run/containerd/containerd.sock - - # Ansible Connection - - ansible_user: {kubernetes_ansible_user} - ansible_port: {kubernetes_ansible_port} - ansible_python_interpreter: "/usr/bin/python3" - domain: "devopsgpt.com" - apiserver_url: "devopsgpt.com" - ``` - - there is file called "hosts" which its content must be as follows: - ``` - {kubernetes_inventory} - ``` - - There is an empty directory called "host_vars" with no files included - - There is a file called "kubernetes_playbook.yml" which its content must be as follows: - ``` - - hosts: all - roles: - - role: preinstall - gather_facts: yes - any_errors_fatal: true 
- tags: [preinstall] - - - hosts: k8s - roles: - - role: k8s - gather_facts: yes - any_errors_fatal: true - tags: [k8s] - - - hosts: k8s - roles: - - role: init_k8s - gather_facts: yes - any_errors_fatal: true - tags: [init_k8s] - - - hosts: k8s_masters - roles: - - role: preinstall - - role: k8s - - role: join_master - gather_facts: yes - any_errors_fatal: true - tags: [join_master] - - - hosts: k8s_workers - roles: - - role: preinstall - - role: k8s - - role: join_worker - gather_facts: yes - any_errors_fatal: true - tags: [join_worker] - ``` - - There is a directory called "roles" which a sub-directory called "preinstall" (roles/preinstall): - "preinstall" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (preinstall/tasks): This path has two files called "basic.yml" and "main.yml". - - 1. Create "preinstall/tasks/basic.yml" and it must be as follows:" - ``` - - name: Set timezone to UTC - timezone: - name: Etc/UTC - - - name: Set hostname - command: hostnamectl set-hostname {inventory_hostname} - - - name: Remove symlink resolve.conf - file: - path: "/etc/resolv.conf" - state: absent - ignore_errors: true - when: use_iran == "true" - - - name: Configure resolv.conf - template: - src: "resolv.conf.j2" - dest: "/etc/resolv.conf" - mode: "0644" - when: use_iran == "true" - - - name: Add hostname - lineinfile: - path: /etc/hosts - regexp: '^127\.0\.0\.1' - line: "127.0.0.1 {inventory_hostname} localhost" - owner: root - group: root - mode: 0644 - - - name: Install necessary tools - apt: - state: latest - update_cache: true - name: - - vim - - sudo - - wget - - curl - - telnet - - nload - - s3cmd - - cron - - ipset - - lvm2 - - python3 - - python3-setuptools - - python3-pip - - python3-apt - - intel-microcode - - htop - - tcpdump - - net-tools - - screen - - tmux - - byobu - - iftop - - bmon - - iperf - - sysstat - - ethtool - - plocate - - thin-provisioning-tools - - conntrack - - stress - - cpufrequtils - - rsync - - xz-utils - - build-essential - - apt-transport-https - - ca-certificates - - software-properties-common - - gnupg-agent - - iptables-persistent - - open-iscsi - - nfs-common - - tzdata - - tree - - - name: Fix broken packages - apt: - state: fixed - ``` - - 2. Create preinstall/tasks/main.yml and it must be as follows:" - ``` - --- - - name: basic setup - include_tasks: basic.yml - ``` - - There is a directory called "roles" which a sub-directory called "k8s" (roles/k8s): - "k8s" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (k8s/tasks): This path has two files called "k8s.yml" and "main.yml". - - 1. 
Create k8s/tasks/k8s.yml and it must be as follows:" - ``` - - name: Disable SWAP since kubernetes can't work with swap enabled - shell: | - swapoff -a - - - name: Disable SWAP in fstab since kubernetes can't work with swap enabled - replace: - path: /etc/fstab - regexp: '^([^#].*?\sswap\s+sw\s+.*)$' - replace: '# \\1' - - - name: Check if ufw is installed - package_facts: - manager: "auto" - - - name: Disable ufw # just in Ubuntu - ufw: - state: disabled - when: "{ufw_in_task} in ansible_facts.packages" - - - name: Ensure kernel modules for containerd are enabled - lineinfile: - path: /etc/modules-load.d/containerd.conf - line: "{item_in_task}" - create: yes - state: present - loop: - - overlay - - br_netfilter - - - name: Load kernel modules - command: - cmd: "modprobe {item_in_task}" - loop: - - overlay - - br_netfilter - - - name: Ensure sysctl settings for Kubernetes are present - blockinfile: - path: /etc/sysctl.d/kubernetes.conf - block: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - net.ipv4.ip_forward = 1 - create: yes - marker: "# {{mark}} ANSIBLE MANAGED BLOCK" - owner: root - group: root - mode: '0644' - - - name: Reload sysctl settings - command: - cmd: sysctl --system - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install required packages - apt: - pkg: - - ca-certificates - - curl - - gnupg - - lsb-release - - gpg - - state: present - update_cache: yes - - - name: Ensure the /etc/apt/keyrings directory exists - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' # Adjust the permissions as necessary - owner: root # Set the owner, if required - group: root - - - name: Remove existing Docker GPG key if it exists - file: - path: '{docker_gpg_key_path_in_task}' - state: absent - - - name: Download Docker GPG key - shell: | - curl -fsSL {docker_gpg_key_url_in_task} | gpg --dearmor -o {docker_gpg_key_path_in_task} - - - name: Determine the architecture - command: dpkg --print-architecture - register: architecture - - - name: Determine the distribution codename - command: lsb_release -cs - register: distribution_codename - - - name: Add Docker APT repository - lineinfile: - path: /etc/apt/sources.list.d/docker.list - create: yes - line: "deb [arch={architecture_stdout_in_task} signed-by={docker_gpg_key_path_in_task}] {docker_apt_repo_in_task} {distribution_codename_stdout_in_task} stable" - state: present - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install required packages (containerd) - apt: - pkg: - - containerd.io - state: present - - - name: Generate default containerd configuration - shell: - cmd: containerd config default > /etc/containerd/config.toml - - - name: Replace SystemdCgroup from false to true in containerd config - replace: - path: /etc/containerd/config.toml - regexp: 'SystemdCgroup = false' - replace: 'SystemdCgroup = true' - - - name: Restart containerd service - systemd: - name: containerd - state: restarted - daemon_reload: yes - - - name: Enable containerd service - systemd: - name: containerd - enabled: yes - - name: Delete the existing Kubernetes APT keyring file if it exists - file: - path: '{kubernetes_gpg_keyring_path_in_task}' - state: absent - - - name: Download Kubernetes GPG key - shell: | - curl -fsSL '{kubernetes_gpg_key_url_in_task}' | gpg --dearmor -o '{kubernetes_gpg_keyring_path_in_task}' - - - name: Add Kubernetes repo - apt_repository: - repo: "deb [signed-by={kubernetes_gpg_keyring_path_in_task}] {kubernetes_apt_repo_in_task} /" - state: 
present - filename: kubernetes.list - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install Kubernetes packages - apt: - name: "{item_in_task}" - state: present - loop: - - kubeadm={k8s_version}.2-1.1 - - kubelet={k8s_version}.2-1.1 - - kubectl={k8s_version}.2-1.1 - - - name: Hold Kubernetes packages - dpkg_selections: - name: "{item_in_task}" - selection: hold - loop: - - kubeadm - - kubelet - - kubectl - - containerd.io - - - name: Configure node ip - lineinfile: - path: /etc/default/kubelet - line: KUBELET_EXTRA_ARGS=--node-ip={private_ip_in_task} - create: yes - state: present - notify: Restart kubelet - - - name: Add hosts to /etc/hosts - lineinfile: - path: /etc/hosts - line: "{hostvars_private_ip_in_task} {item_in_task} {item_in_task}.{domain_in_task}" - state: present - create: no - loop: "{groups_all_in_task}" - when: hostvars[item].private_ip is defined - - - name: Add apiserver_url to point to the masters temporary" - lineinfile: - dest: /etc/hosts - line: "{hostvars_groups_k8s_masters_private_ip_in_task} {apiserver_url_in_task}" - state: present - - - name: Pull Kubernetes images | If you got error check your dns and sanction - command: - cmd: kubeadm config images pull - ``` - 2. Create k8s/tasks/main.yml and it must be as follows:" - ``` - --- - - name: Install kubernetes packages - include_tasks: k8s.yml - ``` - - (k8s/handlers): This path has a file called "main.yml". - - 3. Create k8s/handlers/main.yml and it must be as follows:" - ``` - --- - # handlers file for k8s - - - name: Remove temporary GPG key file - file: - path: "/tmp/docker.list" - state: absent - - - name: Restart kubelet - service: - name: kubelet - state: restarted - ``` - - There is a directory called "roles" which a sub-directory called "init_k8s" (roles/init_k8s): - "init_k8s" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (init_k8s/tasks): This path has three files called "cni.yml", "initk8s.yml" and "main.yml". - - 1. Create init_k8s/tasks/cni.yml and it must be as follows:" - ``` - - block: - - name: Check if Calico CRDs exist - command: kubectl get crd felixconfigurations.crd.projectcalico.org - register: calico_crd_check - ignore_errors: true - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Apply CNI plugin (Calico) - command: kubectl create -f {calico_operator_url_in_task} - retries: 3 - delay: 3 - - - name: Apply CNI plugin (Calico) - command: kubectl create -f {calico_crd_url_in_task} - retries: 3 - delay: 3 - delegate_to: "{groups_k8s_masters_in_task}" - when: calico_crd_check.rc != 0 - run_once: true - ``` - 2. 
Create init_k8s/tasks/initk8s.yml and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Init cluster | Copy kubeadmcnf.yaml - template: - src: kubeadmcnf.yml.j2 - dest: /root/kubeadmcnf.yaml - - - name: Init cluster | Initiate cluster on node groups['kube_master'][0] - shell: kubeadm init --config=/root/kubeadmcnf.yaml - register: kubeadm_init - # Retry is because upload config sometimes fails - until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - notify: Restart kubelet - - when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 - delegate_to: localhost - - - name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes - delegate_to: "{groups_k8s_masters_in_task}" - - - name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 - delegate_to: localhost - # when: use_iran == "true" - - - name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." - ``` - 3. Create init_k8s/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for init_k8s - - - name: Initialize kubernetes cluster - include_tasks: initk8s.yml - - - name: Initialize Calico CNI - include_tasks: cni.yml - ``` - - There is a directory called "roles" which a sub-directory called "join_master" (roles/join_master): - "join_master" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (join_master/tasks): This path has two files called "join_master.yml" and "main.yml". - - 1. 
Create "join_master/tasks/join_master.yml" and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{join_command_stdout_lines_in_task}" - - - name: Copy join command to local file - become: false - local_action: copy content="{join_command_stdout_lines_in_task} $@" dest="roles/join_master/files/join-command" - - - name: copy kubeadmcnf.yaml - template: - src: kubeadmcnf-join.yml.j2 - dest: /root/kubeadm-config.yaml - - when: - - inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Copy the join command to server location - copy: - src: roles/join_master/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - block: - - name: get certificate key - shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml - register: kubeadm_cert_key - - - name: Print certificate key - debug: - msg: "{kubeadm_cert_key_stdout_lines_in_task}" - - - name: register the cert key - set_fact: - control_plane_certkey: "{kubeadm_cert_key_stdout_lines_in_task}" - - when: - - inventory_hostname in groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - run_once: false - delegate_facts: true - - - name: Join | Join control-plane to cluster - command: "sh /root/join-command.sh --control-plane --certificate-key={hostvars_k8s_masters_control_plane_certkey_in_task} --cri-socket={cri_socket_in_task}" - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - name: remove apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{hostvars_groups_k8s_masters_private_ip_in_task} {apiserver_url_in_task}" - state: absent - - - - name: Add apiserver_url to point to the masters" - lineinfile: - dest: /etc/hosts - line: "{private_ip_in_task} {apiserver_url_in_task}" - state: present - when: - - inventory_hostname in groups['k8s_masters'] - ``` - 2. Create join_master/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for join_master - - - name: Join master(s) node to cluster - include_tasks: join_master.yml - - ``` - - There is a directory called "roles" which a sub-directory called "join_worker" (roles/join_worker): - "join_worker" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (join_worker/tasks): This path has two files called "join_worker.yml" and "main.yml". - - 1. 
Create "join_worker/tasks/join_worker.yml" and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{join_command_stdout_lines_in_task}" - - - name: Copy join command to local file - become: false - local_action: copy content="{join_command_stdout_lines_in_task} $@" dest="roles/join_worker/files/join-command" - - when: - - inventory_hostname not in groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Copy the join command to server location - copy: - src: roles/join_worker/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - name: Join | Join worker nodes to the cluster - command: sh /root/join-command.sh - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - ``` - 2. Create join_worker/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for join_worker - - - name: Join worker(s) node to cluster - include_tasks: join_worker.yml - ``` - finally just give me a python code without any note that can generate a project folder with the - given schema without ```python entry. and we dont need any base directory in the python code. - the final ansible template must work very well without any error! - - the python code you give me, must have structure like that: - - import os - project_name = "app/media/MyAnsible" - foo_dir = os.path.join(project_name, "bar") - x_dir = os.path.join(modules_dir, "y") - - # Create project directories - os.makedirs(ansible_dir, exist_ok=True) - - # Create main.tf - with open(os.path.join(project_name, "main.tf"), "w") as main_file: - # any thing you need - """ - return prompt + + project_name = "app/media/MyAnsible" + ansible_dir = project_name + group_vars_dir = os.path.join(ansible_dir, "group_vars") + host_vars_dir = os.path.join(ansible_dir, "host_vars") + roles_dir = os.path.join(ansible_dir, "roles") + + # Create project directories + os.makedirs(group_vars_dir, exist_ok=True) + os.makedirs(host_vars_dir, exist_ok=True) + os.makedirs(roles_dir, exist_ok=True) + + preinstall_dir = os.path.join(roles_dir, "preinstall") + k8s_dir = os.path.join(roles_dir, "k8s") + init_k8s_dir = os.path.join(roles_dir, "init_k8s") + join_master_dir = os.path.join(roles_dir, "join_master") + join_worker_dir = os.path.join(roles_dir, "join_worker") + + os.makedirs(preinstall_dir, exist_ok=True) + os.makedirs(k8s_dir, exist_ok=True) + os.makedirs(init_k8s_dir, exist_ok=True) + os.makedirs(join_master_dir, exist_ok=True) + os.makedirs(join_worker_dir, exist_ok=True) + + # Create ansible.cfg + with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as ansible_cfg_file: + ansible_cfg_file.write("[defaults]\nhost_key_checking=false\n") + + # Create group_vars/all + with open(os.path.join(group_vars_dir, "all"), "w") as group_vars_file: + group_vars_file.write(f"""# General + install_ansible_modules: "true" + disable_transparent_huge_pages: "true" + + setup_interface: "false" + + # Network Calico see here for more details https://github.com/projectcalico/calico/releases + calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" + 
calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" + pod_network_cidr: "192.168.0.0/16" + + # DNS + resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online + + # Sanction shekan + use_iran: "true" # change it to "false" if you are outside of iran + + # Docker + docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" + docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" + docker_apt_repo: "https://download.docker.com/linux/ubuntu" + + # Kubernetes + kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" + kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" + kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" + k8s_version: {k8s_version} # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases + + # CRI + cri_socket: unix:///var/run/containerd/containerd.sock + + # Ansible Connection + ansible_user: {kubernetes_ansible_user} + ansible_port: {kubernetes_ansible_port} + ansible_python_interpreter: "/usr/bin/python3" + domain: "devopsgpt.com" + apiserver_url: "devopsgpt.com" + """) + + # Create hosts + with open(os.path.join(ansible_dir, "hosts"), "w") as hosts_file: + hosts_file.write(f"""{kubernetes_inventory}""") + + # Create kubernetes_playbook.yml + with open(os.path.join(ansible_dir, "kubernetes_playbook.yml"), "w") as playbook_file: + playbook_file.write("""- hosts: all + roles: + - role: preinstall + gather_facts: yes + any_errors_fatal: true + tags: [preinstall] + + - hosts: k8s + roles: + - role: k8s + gather_facts: yes + any_errors_fatal: true + tags: [k8s] + + - hosts: k8s + roles: + - role: init_k8s + gather_facts: yes + any_errors_fatal: true + tags: [init_k8s] + + - hosts: k8s_masters + roles: + - role: preinstall + - role: k8s + - role: join_master + gather_facts: yes + any_errors_fatal: true + tags: [join_master] + + - hosts: k8s_workers + roles: + - role: preinstall + - role: k8s + - role: join_worker + gather_facts: yes + any_errors_fatal: true + tags: [join_worker] + """) + + # Create preinstall files + preinstall_defaults_dir = os.path.join(preinstall_dir, "defaults") + preinstall_files_dir = os.path.join(preinstall_dir, "files") + preinstall_handlers_dir = os.path.join(preinstall_dir, "handlers") + preinstall_tasks_dir = os.path.join(preinstall_dir, "tasks") + preinstall_templates_dir = os.path.join(preinstall_dir, "templates") + preinstall_vars_dir = os.path.join(preinstall_dir, "vars") + + os.makedirs(preinstall_defaults_dir, exist_ok=True) + os.makedirs(preinstall_files_dir, exist_ok=True) + os.makedirs(preinstall_handlers_dir, exist_ok=True) + os.makedirs(preinstall_tasks_dir, exist_ok=True) + os.makedirs(preinstall_templates_dir, exist_ok=True) + os.makedirs(preinstall_vars_dir, exist_ok=True) + + with open(os.path.join(preinstall_defaults_dir, "main.yml"), "w") as defaults_file: + defaults_file.write("") + + with open(os.path.join(preinstall_files_dir, "sample.sh"), "w") as files_file: + files_file.write("") + + with open(os.path.join(preinstall_handlers_dir, "main.yml"), "w") as handlers_file: + handlers_file.write("") + + with open(os.path.join(preinstall_tasks_dir, "basic.yml"), "w") as basic_tasks_file: + basic_tasks_file.write("""- name: Set timezone to UTC + timezone: + name: Etc/UTC + + - name: Set hostname + command: hostnamectl set-hostname {{ inventory_hostname }} + + - name: Remove symlink resolve.conf + file: + path: "/etc/resolv.conf" + state: absent + 
ignore_errors: true + when: use_iran == "true" + + - name: Configure resolv.conf + template: + src: "resolv.conf.j2" + dest: "/etc/resolv.conf" + mode: "0644" + when: use_iran == "true" + + - name: Add hostname + lineinfile: + path: /etc/hosts + regexp: '^127\\.0\\.0\\.1' + line: "127.0.0.1 {{ inventory_hostname }} localhost" + owner: root + group: root + mode: 0644 + + - name: Install necessary tools + apt: + state: latest + update_cache: true + name: + - vim + - sudo + - wget + - curl + - telnet + - nload + - s3cmd + - cron + - ipset + - lvm2 + - python3 + - python3-setuptools + - python3-pip + - python3-apt + - intel-microcode + - htop + - tcpdump + - net-tools + - screen + - tmux + - byobu + - iftop + - bmon + - iperf + - sysstat + - ethtool + - plocate + - thin-provisioning-tools + - conntrack + - stress + - cpufrequtils + - rsync + - xz-utils + - build-essential + - apt-transport-https + - ca-certificates + - software-properties-common + - gnupg-agent + - iptables-persistent + - open-iscsi + - nfs-common + - tzdata + - tree + + - name: Fix broken packages + apt: + state: fixed + """) + + with open(os.path.join(preinstall_tasks_dir, "main.yml"), "w") as tasks_main_file: + tasks_main_file.write("""--- + - name: basic setup + include_tasks: basic.yml + """) + + # Create k8s files + k8s_defaults_dir = os.path.join(k8s_dir, "defaults") + k8s_files_dir = os.path.join(k8s_dir, "files") + k8s_handlers_dir = os.path.join(k8s_dir, "handlers") + k8s_tasks_dir = os.path.join(k8s_dir, "tasks") + k8s_templates_dir = os.path.join(k8s_dir, "templates") + k8s_vars_dir = os.path.join(k8s_dir, "vars") + + os.makedirs(k8s_defaults_dir, exist_ok=True) + os.makedirs(k8s_files_dir, exist_ok=True) + os.makedirs(k8s_handlers_dir, exist_ok=True) + os.makedirs(k8s_tasks_dir, exist_ok=True) + os.makedirs(k8s_templates_dir, exist_ok=True) + os.makedirs(k8s_vars_dir, exist_ok=True) + + with open(os.path.join(k8s_defaults_dir, "main.yml"), "w") as k8s_defaults_file: + k8s_defaults_file.write("") + + with open(os.path.join(k8s_files_dir, "sample.sh"), "w") as k8s_files_file: + k8s_files_file.write("") + + with open(os.path.join(k8s_handlers_dir, "main.yml"), "w") as k8s_handlers_file: + k8s_handlers_file.write("""--- + # handlers file for k8s + + - name: Remove temporary GPG key file + file: + path: "/tmp/docker.list" + state: absent + + - name: Restart kubelet + service: + name: kubelet + state: restarted + """) + + with open(os.path.join(k8s_tasks_dir, "k8s.yml"), "w") as k8s_tasks_k8s_file: + k8s_tasks_k8s_file.write("""- name: Disable SWAP since kubernetes can't work with swap enabled + shell: | + swapoff -a + + - name: Disable SWAP in fstab since kubernetes can't work with swap enabled + replace: + path: /etc/fstab + regexp: '^([^#].*?\\sswap\\s+sw\\s+.*)$' + replace: '# \\1' + + - name: Check if ufw is installed + package_facts: + manager: "auto" + + - name: Disable ufw # just in Ubuntu + ufw: + state: disabled + when: "'ufw' in ansible_facts.packages" + + - name: Ensure kernel modules for containerd are enabled + lineinfile: + path: /etc/modules-load.d/containerd.conf + line: "{{ item }}" + create: yes + state: present + loop: + - overlay + - br_netfilter + + - name: Load kernel modules + command: + cmd: "modprobe {{ item }}" + loop: + - overlay + - br_netfilter + + - name: Ensure sysctl settings for Kubernetes are present + blockinfile: + path: /etc/sysctl.d/kubernetes.conf + block: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + create: yes + 
marker: "# {mark} ANSIBLE MANAGED BLOCK" + owner: root + group: root + mode: '0644' + + - name: Reload sysctl settings + command: + cmd: sysctl --system + + - name: Update apt cache + apt: + update_cache: yes + + - name: Install required packages + apt: + pkg: + - ca-certificates + - curl + - gnupg + - lsb-release + - gpg + state: present + update_cache: yes + + - name: Ensure the /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' # Adjust the permissions as necessary + owner: root # Set the owner, if required + group: root + + - name: Remove existing Docker GPG key if it exists + file: + path: '{{ docker_gpg_key_path }}' + state: absent + + - name: Download Docker GPG key + shell: | + curl -fsSL {{ docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} + + - name: Determine the architecture + command: dpkg --print-architecture + register: architecture + + - name: Determine the distribution codename + command: lsb_release -cs + register: distribution_codename + + - name: Add Docker APT repository + lineinfile: + path: /etc/apt/sources.list.d/docker.list + create: yes + line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" + state: present + + - name: Update apt cache + apt: + update_cache: yes + + - name: Install required packages (containerd) + apt: + pkg: + - containerd.io + state: present + + - name: Generate default containerd configuration + shell: + cmd: containerd config default > /etc/containerd/config.toml + + - name: Replace SystemdCgroup from false to true in containerd config + replace: + path: /etc/containerd/config.toml + regexp: 'SystemdCgroup = false' + replace: 'SystemdCgroup = true' + + - name: Restart containerd service + systemd: + name: containerd + state: restarted + daemon_reload: yes + + - name: Enable containerd service + systemd: + name: containerd + enabled: yes + + - name: Delete the existing Kubernetes APT keyring file if it exists + file: + path: '{{ kubernetes_gpg_keyring_path }}' + state: absent + + - name: Download Kubernetes GPG key + shell: | + curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' + + - name: Add Kubernetes repo + apt_repository: + repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" + state: present + filename: kubernetes.list + + - name: Update apt cache + apt: + update_cache: yes + + - name: Install Kubernetes packages + apt: + name: "{{ item }}" + state: present + loop: + - kubeadm=1.31.2-1.1 + - kubelet=1.31.2-1.1 + - kubectl=1.31.2-1.1 + + - name: Hold Kubernetes packages + dpkg_selections: + name: "{{ item }}" + selection: hold + loop: + - kubeadm + - kubelet + - kubectl + - containerd.io + + - name: Configure node ip + lineinfile: + path: /etc/default/kubelet + line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} + create: yes + state: present + notify: Restart kubelet + + - name: Add hosts to /etc/hosts + lineinfile: + path: /etc/hosts + line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" + state: present + create: no + loop: "{{ groups['all'] }}" + when: hostvars[item].private_ip is defined + + - name: Add apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: present + + - name: Pull Kubernetes images | If you got error check your dns and sanction + command: + cmd: 
kubeadm config images pull + """) + + with open(os.path.join(k8s_tasks_dir, "main.yml"), "w") as k8s_tasks_main_file: + k8s_tasks_main_file.write("""--- + - name: Install kubernetes packages + include_tasks: k8s.yml + """) + + # Create init_k8s files + init_k8s_defaults_dir = os.path.join(init_k8s_dir, "defaults") + init_k8s_files_dir = os.path.join(init_k8s_dir, "files") + init_k8s_handlers_dir = os.path.join(init_k8s_dir, "handlers") + init_k8s_tasks_dir = os.path.join(init_k8s_dir, "tasks") + init_k8s_templates_dir = os.path.join(init_k8s_dir, "templates") + init_k8s_vars_dir = os.path.join(init_k8s_dir, "vars") + + os.makedirs(init_k8s_defaults_dir, exist_ok=True) + os.makedirs(init_k8s_files_dir, exist_ok=True) + os.makedirs(init_k8s_handlers_dir, exist_ok=True) + os.makedirs(init_k8s_tasks_dir, exist_ok=True) + os.makedirs(init_k8s_templates_dir, exist_ok=True) + os.makedirs(init_k8s_vars_dir, exist_ok=True) + + with open(os.path.join(init_k8s_defaults_dir, "main.yml"), "w") as init_k8s_defaults_file: + init_k8s_defaults_file.write("") + + with open(os.path.join(init_k8s_files_dir, "sample.sh"), "w") as init_k8s_files_file: + init_k8s_files_file.write("") + + with open(os.path.join(init_k8s_handlers_dir, "main.yml"), "w") as init_k8s_handlers_file: + init_k8s_handlers_file.write("") + + with open(os.path.join(init_k8s_tasks_dir, "cni.yml"), "w") as init_k8s_tasks_cni_file: + init_k8s_tasks_cni_file.write("""- block: + - name: Check if Calico CRDs exist + command: kubectl get crd felixconfigurations.crd.projectcalico.org + register: calico_crd_check + ignore_errors: true + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - block: + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_operator_url }} + retries: 3 + delay: 3 + + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_crd_url }} + retries: 3 + delay: 3 + delegate_to: "{{ groups['k8s_masters'][0] }}" + when: calico_crd_check.rc != 0 + run_once: true + """) + + with open(os.path.join(init_k8s_tasks_dir, "initk8s.yml"), "w") as init_k8s_tasks_initk8s_file: + init_k8s_tasks_initk8s_file.write("""- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - block: + - name: Init cluster | Copy kubeadmcnf.yaml + template: + src: kubeadmcnf.yml.j2 + dest: /root/kubeadmcnf.yaml + + - name: Init cluster | Initiate cluster on node groups['kube_master'][0] + shell: kubeadm init --config=/root/kubeadmcnf.yaml + register: kubeadm_init + # Retry is because upload config sometimes fails + until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr + notify: Restart kubelet + + when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - name: Sleep for 300 seconds and reboot the Master1 server + wait_for: + timeout: 300 + delegate_to: localhost + + - name: Reboot the servers + command: reboot + async: 1 + 
poll: 0 + # ignore_errors: yes + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - name: Sleep for 300 seconds to Master1 up and running + wait_for: + timeout: 300 + delegate_to: localhost + # when: use_iran == "true" + + - name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." + """) + + with open(os.path.join(init_k8s_tasks_dir, "main.yml"), "w") as init_k8s_tasks_main_file: + init_k8s_tasks_main_file.write("""--- + # tasks file for init_k8s + + - name: Initialize kubernetes cluster + include_tasks: initk8s.yml + + - name: Initialize Calico CNI + include_tasks: cni.yml + """) + + # Create join_master files + join_master_defaults_dir = os.path.join(join_master_dir, "defaults") + join_master_files_dir = os.path.join(join_master_dir, "files") + join_master_handlers_dir = os.path.join(join_master_dir, "handlers") + join_master_tasks_dir = os.path.join(join_master_dir, "tasks") + join_master_templates_dir = os.path.join(join_master_dir, "templates") + join_master_vars_dir = os.path.join(join_master_dir, "vars") + + os.makedirs(join_master_defaults_dir, exist_ok=True) + os.makedirs(join_master_files_dir, exist_ok=True) + os.makedirs(join_master_handlers_dir, exist_ok=True) + os.makedirs(join_master_tasks_dir, exist_ok=True) + os.makedirs(join_master_templates_dir, exist_ok=True) + os.makedirs(join_master_vars_dir, exist_ok=True) + + with open(os.path.join(join_master_defaults_dir, "main.yml"), "w") as join_master_defaults_file: + join_master_defaults_file.write("") + + with open(os.path.join(join_master_files_dir, "join-command"), "w") as join_master_files_file: + join_master_files_file.write("") + + with open(os.path.join(join_master_handlers_dir, "main.yml"), "w") as join_master_handlers_file: + join_master_handlers_file.write("") + + with open(os.path.join(join_master_tasks_dir, "join_master.yml"), "w") as join_master_tasks_join_master_file: + join_master_tasks_join_master_file.write("""- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + + - block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" + + - name: copy kubeadmcnf.yaml + template: + src: kubeadmcnf-join.yml.j2 + dest: /root/kubeadm-config.yaml + + when: + - inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - block: + - name: Copy the join command to server location + copy: + src: roles/join_master/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + + - block: + - name: get certificate key + shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml + register: kubeadm_cert_key + + - name: Print certificate key + debug: + msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + - name: register the cert key + set_fact: + control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + when: + - inventory_hostname in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + run_once: false + delegate_facts: true + + - name: Join | Join control-plane 
to cluster + command: "sh /root/join-command.sh --control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + + - block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + + - name: remove apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: absent + + - name: Add apiserver_url to point to the masters + lineinfile: + dest: /etc/hosts + line: "{{ private_ip }} {{ apiserver_url }}" + state: present + when: + - inventory_hostname in groups['k8s_masters'] + """) + + with open(os.path.join(join_master_tasks_dir, "main.yml"), "w") as join_master_tasks_main_file: + join_master_tasks_main_file.write("""--- + # tasks file for join_master + + - name: Join master(s) node to cluster + include_tasks: join_master.yml + """) + + # Create join_worker files + join_worker_defaults_dir = os.path.join(join_worker_dir, "defaults") + join_worker_files_dir = os.path.join(join_worker_dir, "files") + join_worker_handlers_dir = os.path.join(join_worker_dir, "handlers") + join_worker_tasks_dir = os.path.join(join_worker_dir, "tasks") + join_worker_templates_dir = os.path.join(join_worker_dir, "templates") + join_worker_vars_dir = os.path.join(join_worker_dir, "vars") + + os.makedirs(join_worker_defaults_dir, exist_ok=True) + os.makedirs(join_worker_files_dir, exist_ok=True) + os.makedirs(join_worker_handlers_dir, exist_ok=True) + os.makedirs(join_worker_tasks_dir, exist_ok=True) + os.makedirs(join_worker_templates_dir, exist_ok=True) + os.makedirs(join_worker_vars_dir, exist_ok=True) + + with open(os.path.join(join_worker_defaults_dir, "main.yml"), "w") as join_worker_defaults_file: + join_worker_defaults_file.write("") + + with open(os.path.join(join_worker_files_dir, "join-command"), "w") as join_worker_files_file: + join_worker_files_file.write("") + + with open(os.path.join(join_worker_handlers_dir, "main.yml"), "w") as join_worker_handlers_file: + join_worker_handlers_file.write("") + + with open(os.path.join(join_worker_tasks_dir, "join_worker.yml"), "w") as join_worker_tasks_join_worker_file: + join_worker_tasks_join_worker_file.write("""- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + + - block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" + + when: + - inventory_hostname not in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - block: + - name: Copy the join command to server location + copy: + src: roles/join_worker/files/join-command + dest: 
/root/join-command.sh + mode: "0777" + + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + + - name: Join | Join worker nodes to the cluster + command: sh /root/join-command.sh + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + """) + + with open(os.path.join(join_worker_tasks_dir, "main.yml"), "w") as join_worker_tasks_main_file: + join_worker_tasks_main_file.write("""--- + # tasks file for join_worker + + - name: Join worker(s) node to cluster + include_tasks: join_worker.yml + """) \ No newline at end of file From fa71e9129e380591a74b3f7edfa175465c5e5993 Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Thu, 12 Dec 2024 19:02:25 +0330 Subject: [PATCH 22/25] fix(kuber playbook): fix kuber playbook --- app/media/MyAnsible/hosts | 12 +-- app/media/MyAnsible/kubernetes_playbook.yml | 68 ++++++++-------- app/routes/ansible.py | 22 +++++- .../ansible/install/kuber.py | 78 ++++++++++--------- 4 files changed, 102 insertions(+), 78 deletions(-) diff --git a/app/media/MyAnsible/hosts b/app/media/MyAnsible/hosts index 74638b9e..dec5110a 100644 --- a/app/media/MyAnsible/hosts +++ b/app/media/MyAnsible/hosts @@ -1,13 +1,13 @@ [all] -stringoooooooooooooooooo private_ip=x.x.x.x -stringppppppppp private_ip=x.x.x.x +string private_ip=x.x.x.x +string private_ip=x.x.x.x [k8s] -stringoooooooooooooooooo -stringppppppppp +string +string [k8s_masters] -stringoooooooooooooooooo +string [k8s_workers] -stringppppppppp \ No newline at end of file +string \ No newline at end of file diff --git a/app/media/MyAnsible/kubernetes_playbook.yml b/app/media/MyAnsible/kubernetes_playbook.yml index e502abc0..d674b26f 100644 --- a/app/media/MyAnsible/kubernetes_playbook.yml +++ b/app/media/MyAnsible/kubernetes_playbook.yml @@ -1,39 +1,41 @@ + - hosts: all - roles: - - role: preinstall - gather_facts: yes - any_errors_fatal: true - tags: [preinstall] + roles: + - role: preinstall + gather_facts: yes + any_errors_fatal: true + tags: [preinstall] - - hosts: k8s - roles: - - role: k8s - gather_facts: yes - any_errors_fatal: true - tags: [k8s] +- hosts: k8s + roles: + - role: k8s + gather_facts: yes + any_errors_fatal: true + tags: [k8s] - - hosts: k8s - roles: - - role: init_k8s - gather_facts: yes - any_errors_fatal: true - tags: [init_k8s] +- hosts: k8s + roles: + - role: init_k8s + gather_facts: yes + any_errors_fatal: true + tags: [init_k8s] - - hosts: k8s_masters - roles: - - role: preinstall - - role: k8s - - role: join_master - gather_facts: yes - any_errors_fatal: true - tags: [join_master] +- hosts: k8s_masters + roles: + - role: preinstall + - role: k8s + - role: join_master + gather_facts: yes + any_errors_fatal: true + tags: [join_master] - - hosts: k8s_workers - roles: - - role: preinstall - - role: k8s - - role: join_worker - gather_facts: yes - any_errors_fatal: true - tags: [join_worker] +- hosts: k8s_workers + roles: + - role: preinstall + - role: k8s + - role: join_worker + gather_facts: yes + any_errors_fatal: true + tags: [join_worker] + \ No newline at end of file diff --git a/app/routes/ansible.py b/app/routes/ansible.py index c55b9b45..367f543c 100644 --- a/app/routes/ansible.py +++ b/app/routes/ansible.py @@ -8,12 +8,20 @@ from app.template_generators.ansible.install.main import ansible_install_template import os +import shutil @app.post("/api/ansible-install/nginx/") async def ansible_install_generation_nginx(request:AnsibleInstallNginx) -> Output: + + if os.environ.get("TEST"): return 
Output(output='output') + + dir = 'app/media/MyAnsible' + if os.path.exists(dir): + shutil.rmtree(dir) + ansible_install_template(request,"nginx") @@ -22,9 +30,15 @@ async def ansible_install_generation_nginx(request:AnsibleInstallNginx) -> Outpu @app.post("/api/ansible-install/docker/") async def ansible_install_generation_docker(request:AnsibleInstallDocker) -> Output: + if os.environ.get("TEST"): return Output(output='output') + + dir = 'app/media/MyAnsible' + if os.path.exists(dir): + shutil.rmtree(dir) + ansible_install_template(request,"docker") return Output(output='output') @@ -32,9 +46,15 @@ async def ansible_install_generation_docker(request:AnsibleInstallDocker) -> Out @app.post("/api/ansible-install/kuber/") async def ansible_install_generation_kuber(request:AnsibleInstallKuber) -> Output: - + + if os.environ.get("TEST"): return Output(output='output') + + dir = 'app/media/MyAnsible' + if os.path.exists(dir): + shutil.rmtree(dir) + ansible_install_template(request,"kuber") add_files_to_folder(files = ['app/media/kuber_configs/resolv.conf.j2'] , folder='app/media/MyAnsible/roles/preinstall/templates/') add_files_to_folder(files = ['app/media/kuber_configs/kubeadmcnf.yml.j2'] , folder='app/media/MyAnsible/roles/init_k8s/templates/') diff --git a/app/template_generators/ansible/install/kuber.py b/app/template_generators/ansible/install/kuber.py index 95b8de84..309664e2 100644 --- a/app/template_generators/ansible/install/kuber.py +++ b/app/template_generators/ansible/install/kuber.py @@ -90,44 +90,46 @@ def ansible_kuber_install(input): # Create kubernetes_playbook.yml with open(os.path.join(ansible_dir, "kubernetes_playbook.yml"), "w") as playbook_file: - playbook_file.write("""- hosts: all - roles: - - role: preinstall - gather_facts: yes - any_errors_fatal: true - tags: [preinstall] - - - hosts: k8s - roles: - - role: k8s - gather_facts: yes - any_errors_fatal: true - tags: [k8s] - - - hosts: k8s - roles: - - role: init_k8s - gather_facts: yes - any_errors_fatal: true - tags: [init_k8s] - - - hosts: k8s_masters - roles: - - role: preinstall - - role: k8s - - role: join_master - gather_facts: yes - any_errors_fatal: true - tags: [join_master] - - - hosts: k8s_workers - roles: - - role: preinstall - - role: k8s - - role: join_worker - gather_facts: yes - any_errors_fatal: true - tags: [join_worker] + playbook_file.write(""" +- hosts: all + roles: + - role: preinstall + gather_facts: yes + any_errors_fatal: true + tags: [preinstall] + +- hosts: k8s + roles: + - role: k8s + gather_facts: yes + any_errors_fatal: true + tags: [k8s] + +- hosts: k8s + roles: + - role: init_k8s + gather_facts: yes + any_errors_fatal: true + tags: [init_k8s] + +- hosts: k8s_masters + roles: + - role: preinstall + - role: k8s + - role: join_master + gather_facts: yes + any_errors_fatal: true + tags: [join_master] + +- hosts: k8s_workers + roles: + - role: preinstall + - role: k8s + - role: join_worker + gather_facts: yes + any_errors_fatal: true + tags: [join_worker] + """) # Create preinstall files From 58b94e14667db7e5f9692b93811ce9e916e8ee0d Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Thu, 12 Dec 2024 19:17:49 +0330 Subject: [PATCH 23/25] fix(kuber playbook): fix kuber playbook --- .../MyAnsible/roles/init_k8s/tasks/cni.yml | 35 +- .../roles/init_k8s/tasks/initk8s.yml | 106 +- .../MyAnsible/roles/init_k8s/tasks/main.yml | 10 +- .../roles/join_master/tasks/join_master.yml | 198 ++-- .../roles/join_master/tasks/main.yml | 6 +- .../roles/join_worker/tasks/join_worker.yml | 60 +- 
.../roles/join_worker/tasks/main.yml | 6 +- .../MyAnsible/roles/k8s/handlers/main.yml | 18 +- app/media/MyAnsible/roles/k8s/tasks/k8s.yml | 388 +++--- app/media/MyAnsible/roles/k8s/tasks/main.yml | 4 +- .../roles/preinstall/tasks/basic.yml | 150 +-- .../MyAnsible/roles/preinstall/tasks/main.yml | 4 +- .../ansible/install/kuber.py | 1035 +++++++++-------- 13 files changed, 1011 insertions(+), 1009 deletions(-) diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml index 613b1329..b6902c92 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml @@ -1,21 +1,22 @@ - block: - - name: Check if Calico CRDs exist - command: kubectl get crd felixconfigurations.crd.projectcalico.org - register: calico_crd_check - ignore_errors: true - delegate_to: "{{ groups['k8s_masters'][0] }}" + - name: Check if Calico CRDs exist + command: kubectl get crd felixconfigurations.crd.projectcalico.org + register: calico_crd_check + ignore_errors: true +delegate_to: "{{ groups['k8s_masters'][0] }}" - - block: - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_operator_url }} - retries: 3 - delay: 3 +- block: + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_operator_url }} + retries: 3 + delay: 3 + + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_crd_url }} + retries: 3 + delay: 3 +delegate_to: "{{ groups['k8s_masters'][0] }}" +when: calico_crd_check.rc != 0 +run_once: true - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_crd_url }} - retries: 3 - delay: 3 - delegate_to: "{{ groups['k8s_masters'][0] }}" - when: calico_crd_check.rc != 0 - run_once: true \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml index 52603b08..669d693d 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml @@ -1,65 +1,65 @@ - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" - - block: - - name: Init cluster | Copy kubeadmcnf.yaml - template: - src: kubeadmcnf.yml.j2 - dest: /root/kubeadmcnf.yaml + - block: + - name: Init cluster | Copy kubeadmcnf.yaml + template: + src: kubeadmcnf.yml.j2 + dest: /root/kubeadmcnf.yaml - - name: Init cluster | Initiate cluster on node groups['kube_master'][0] - shell: kubeadm init --config=/root/kubeadmcnf.yaml - register: kubeadm_init - # Retry is because upload config sometimes fails - until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - notify: Restart kubelet + - name: Init cluster | Initiate cluster on node groups['kube_master'][0] + shell: kubeadm init --config=/root/kubeadmcnf.yaml + register: kubeadm_init + # Retry is because upload config sometimes fails + until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr + notify: Restart kubelet - when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists - delegate_to: "{{ groups['k8s_masters'][0] }}" + when: inventory_hostname == 
groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists + delegate_to: "{{ groups['k8s_masters'][0] }}" - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory + - block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" - - name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 - delegate_to: localhost + - name: Sleep for 300 seconds and reboot the Master1 server + wait_for: + timeout: 300 + delegate_to: localhost - - name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes - delegate_to: "{{ groups['k8s_masters'][0] }}" + - name: Reboot the servers + command: reboot + async: 1 + poll: 0 + # ignore_errors: yes + delegate_to: "{{ groups['k8s_masters'][0] }}" - - name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 - delegate_to: localhost - # when: use_iran == "true" + - name: Sleep for 300 seconds to Master1 up and running + wait_for: + timeout: 300 + delegate_to: localhost + # when: use_iran == "true" - - name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." + - name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." 
\ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/main.yml b/app/media/MyAnsible/roles/init_k8s/tasks/main.yml index d5c550c5..10fa230e 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/main.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/main.yml @@ -1,9 +1,9 @@ --- - # tasks file for init_k8s +# tasks file for init_k8s - - name: Initialize kubernetes cluster - include_tasks: initk8s.yml +- name: Initialize kubernetes cluster + include_tasks: initk8s.yml - - name: Initialize Calico CNI - include_tasks: cni.yml +- name: Initialize Calico CNI + include_tasks: cni.yml \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_master/tasks/join_master.yml b/app/media/MyAnsible/roles/join_master/tasks/join_master.yml index 5b4b9cc1..b6855cbf 100644 --- a/app/media/MyAnsible/roles/join_master/tasks/join_master.yml +++ b/app/media/MyAnsible/roles/join_master/tasks/join_master.yml @@ -1,101 +1,101 @@ - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{{ join_command.stdout_lines[0] }}" - - - name: Copy join command to local file - become: false - local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" - - - name: copy kubeadmcnf.yaml - template: - src: kubeadmcnf-join.yml.j2 - dest: /root/kubeadm-config.yaml - - when: - - inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - - - block: - - name: Copy the join command to server location - copy: - src: roles/join_master/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - block: - - name: get certificate key - shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml - register: kubeadm_cert_key - - - name: Print certificate key - debug: - msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" - - - name: register the cert key - set_fact: - control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" - - when: - - inventory_hostname in groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - run_once: false - delegate_facts: true - - - name: Join | Join control-plane to cluster - command: "sh /root/join-command.sh --control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - name: remove apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" - state: absent - - - name: 
Add apiserver_url to point to the masters - lineinfile: - dest: /etc/hosts - line: "{{ private_ip }} {{ apiserver_url }}" - state: present - when: - - inventory_hostname in groups['k8s_masters'] + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + +- block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" + + - name: copy kubeadmcnf.yaml + template: + src: kubeadmcnf-join.yml.j2 + dest: /root/kubeadm-config.yaml + + when: + - inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Copy the join command to server location + copy: + src: roles/join_master/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- block: + - name: get certificate key + shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml + register: kubeadm_cert_key + + - name: Print certificate key + debug: + msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + - name: register the cert key + set_fact: + control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + when: + - inventory_hostname in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + run_once: false + delegate_facts: true + +- name: Join | Join control-plane to cluster + command: "sh /root/join-command.sh --control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- name: remove apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: absent + +- name: Add apiserver_url to point to the masters + lineinfile: + dest: /etc/hosts + line: "{{ private_ip }} {{ apiserver_url }}" + state: present + when: + - inventory_hostname in groups['k8s_masters'] \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_master/tasks/main.yml b/app/media/MyAnsible/roles/join_master/tasks/main.yml index 0007d81d..a5bf581f 100644 --- a/app/media/MyAnsible/roles/join_master/tasks/main.yml +++ b/app/media/MyAnsible/roles/join_master/tasks/main.yml @@ -1,6 +1,6 @@ --- - # tasks file for join_master +# tasks file for join_master - - name: Join master(s) node to cluster - include_tasks: join_master.yml +- name: Join master(s) node to cluster + include_tasks: join_master.yml \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml 
b/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml index b8f8a0c2..899a0522 100644 --- a/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml +++ b/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml @@ -1,39 +1,39 @@ - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command +- block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command - - name: Print join command - debug: - msg: "{{ join_command.stdout_lines[0] }}" + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" - - name: Copy join command to local file - become: false - local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" - when: - - inventory_hostname not in groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" + when: + - inventory_hostname not in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" - - block: - - name: Copy the join command to server location - copy: - src: roles/join_worker/files/join-command - dest: /root/join-command.sh - mode: "0777" +- block: + - name: Copy the join command to server location + copy: + src: roles/join_worker/files/join-command + dest: /root/join-command.sh + mode: "0777" - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists - - name: Join | Join worker nodes to the cluster - command: sh /root/join-command.sh - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists +- name: Join | Join worker nodes to the cluster + command: sh /root/join-command.sh + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_worker/tasks/main.yml b/app/media/MyAnsible/roles/join_worker/tasks/main.yml index 0c3717ec..2cf615b3 100644 --- a/app/media/MyAnsible/roles/join_worker/tasks/main.yml +++ b/app/media/MyAnsible/roles/join_worker/tasks/main.yml @@ -1,6 +1,6 @@ --- - # tasks file for join_worker +# tasks file for join_worker - - name: Join worker(s) node to cluster - include_tasks: join_worker.yml +- name: Join worker(s) node to cluster + include_tasks: join_worker.yml \ No newline at end of file diff --git a/app/media/MyAnsible/roles/k8s/handlers/main.yml b/app/media/MyAnsible/roles/k8s/handlers/main.yml index 658d4007..989212ab 100644 --- a/app/media/MyAnsible/roles/k8s/handlers/main.yml +++ b/app/media/MyAnsible/roles/k8s/handlers/main.yml @@ -1,13 +1,13 @@ --- - # handlers file for k8s +# handlers file for k8s - - name: Remove temporary GPG key file - file: - path: "/tmp/docker.list" - state: absent +- name: Remove temporary GPG key file + file: + path: "/tmp/docker.list" + state: absent - - name: Restart kubelet - service: - name: kubelet - state: restarted +- name: Restart kubelet + service: + name: kubelet + state: restarted \ No newline at end of file diff 
--git a/app/media/MyAnsible/roles/k8s/tasks/k8s.yml b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml index 7ff78f52..d91bbf41 100644 --- a/app/media/MyAnsible/roles/k8s/tasks/k8s.yml +++ b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml @@ -1,196 +1,196 @@ - name: Disable SWAP since kubernetes can't work with swap enabled - shell: | - swapoff -a - - - name: Disable SWAP in fstab since kubernetes can't work with swap enabled - replace: - path: /etc/fstab - regexp: '^([^#].*?\sswap\s+sw\s+.*)$' - replace: '# \1' - - - name: Check if ufw is installed - package_facts: - manager: "auto" - - - name: Disable ufw # just in Ubuntu - ufw: - state: disabled - when: "'ufw' in ansible_facts.packages" - - - name: Ensure kernel modules for containerd are enabled - lineinfile: - path: /etc/modules-load.d/containerd.conf - line: "{{ item }}" - create: yes - state: present - loop: - - overlay - - br_netfilter - - - name: Load kernel modules - command: - cmd: "modprobe {{ item }}" - loop: - - overlay - - br_netfilter - - - name: Ensure sysctl settings for Kubernetes are present - blockinfile: - path: /etc/sysctl.d/kubernetes.conf - block: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - net.ipv4.ip_forward = 1 - create: yes - marker: "# {mark} ANSIBLE MANAGED BLOCK" - owner: root - group: root - mode: '0644' - - - name: Reload sysctl settings - command: - cmd: sysctl --system - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install required packages - apt: - pkg: - - ca-certificates - - curl - - gnupg - - lsb-release - - gpg - state: present - update_cache: yes - - - name: Ensure the /etc/apt/keyrings directory exists - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' # Adjust the permissions as necessary - owner: root # Set the owner, if required - group: root - - - name: Remove existing Docker GPG key if it exists - file: - path: '{{ docker_gpg_key_path }}' - state: absent - - - name: Download Docker GPG key - shell: | - curl -fsSL {{ docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} - - - name: Determine the architecture - command: dpkg --print-architecture - register: architecture - - - name: Determine the distribution codename - command: lsb_release -cs - register: distribution_codename - - - name: Add Docker APT repository - lineinfile: - path: /etc/apt/sources.list.d/docker.list - create: yes - line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" - state: present - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install required packages (containerd) - apt: - pkg: - - containerd.io - state: present - - - name: Generate default containerd configuration - shell: - cmd: containerd config default > /etc/containerd/config.toml - - - name: Replace SystemdCgroup from false to true in containerd config - replace: - path: /etc/containerd/config.toml - regexp: 'SystemdCgroup = false' - replace: 'SystemdCgroup = true' - - - name: Restart containerd service - systemd: - name: containerd - state: restarted - daemon_reload: yes - - - name: Enable containerd service - systemd: - name: containerd - enabled: yes - - - name: Delete the existing Kubernetes APT keyring file if it exists - file: - path: '{{ kubernetes_gpg_keyring_path }}' - state: absent - - - name: Download Kubernetes GPG key - shell: | - curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' - - - name: Add Kubernetes 
repo - apt_repository: - repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" - state: present - filename: kubernetes.list - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install Kubernetes packages - apt: - name: "{{ item }}" - state: present - loop: - - kubeadm=1.31.2-1.1 - - kubelet=1.31.2-1.1 - - kubectl=1.31.2-1.1 - - - name: Hold Kubernetes packages - dpkg_selections: - name: "{{ item }}" - selection: hold - loop: - - kubeadm - - kubelet - - kubectl - - containerd.io - - - name: Configure node ip - lineinfile: - path: /etc/default/kubelet - line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} - create: yes - state: present - notify: Restart kubelet - - - name: Add hosts to /etc/hosts - lineinfile: - path: /etc/hosts - line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" - state: present - create: no - loop: "{{ groups['all'] }}" - when: hostvars[item].private_ip is defined - - - name: Add apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" - state: present - - - name: Pull Kubernetes images | If you got error check your dns and sanction - command: - cmd: kubeadm config images pull + shell: | + swapoff -a + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled + replace: + path: /etc/fstab + regexp: '^([^#].*?\sswap\s+sw\s+.*)$' + replace: '# \1' + +- name: Check if ufw is installed + package_facts: + manager: "auto" + +- name: Disable ufw # just in Ubuntu + ufw: + state: disabled + when: "'ufw' in ansible_facts.packages" + +- name: Ensure kernel modules for containerd are enabled + lineinfile: + path: /etc/modules-load.d/containerd.conf + line: "{{ item }}" + create: yes + state: present + loop: + - overlay + - br_netfilter + +- name: Load kernel modules + command: + cmd: "modprobe {{ item }}" + loop: + - overlay + - br_netfilter + +- name: Ensure sysctl settings for Kubernetes are present + blockinfile: + path: /etc/sysctl.d/kubernetes.conf + block: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + create: yes + marker: "# {mark} ANSIBLE MANAGED BLOCK" + owner: root + group: root + mode: '0644' + +- name: Reload sysctl settings + command: + cmd: sysctl --system + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install required packages + apt: + pkg: + - ca-certificates + - curl + - gnupg + - lsb-release + - gpg + state: present + update_cache: yes + +- name: Ensure the /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' # Adjust the permissions as necessary + owner: root # Set the owner, if required + group: root + +- name: Remove existing Docker GPG key if it exists + file: + path: '{{ docker_gpg_key_path }}' + state: absent + +- name: Download Docker GPG key + shell: | + curl -fsSL {{ docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} + +- name: Determine the architecture + command: dpkg --print-architecture + register: architecture + +- name: Determine the distribution codename + command: lsb_release -cs + register: distribution_codename + +- name: Add Docker APT repository + lineinfile: + path: /etc/apt/sources.list.d/docker.list + create: yes + line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" + state: present + +- name: Update apt cache 
+ apt: + update_cache: yes + +- name: Install required packages (containerd) + apt: + pkg: + - containerd.io + state: present + +- name: Generate default containerd configuration + shell: + cmd: containerd config default > /etc/containerd/config.toml + +- name: Replace SystemdCgroup from false to true in containerd config + replace: + path: /etc/containerd/config.toml + regexp: 'SystemdCgroup = false' + replace: 'SystemdCgroup = true' + +- name: Restart containerd service + systemd: + name: containerd + state: restarted + daemon_reload: yes + +- name: Enable containerd service + systemd: + name: containerd + enabled: yes + +- name: Delete the existing Kubernetes APT keyring file if it exists + file: + path: '{{ kubernetes_gpg_keyring_path }}' + state: absent + +- name: Download Kubernetes GPG key + shell: | + curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' + +- name: Add Kubernetes repo + apt_repository: + repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" + state: present + filename: kubernetes.list + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install Kubernetes packages + apt: + name: "{{ item }}" + state: present + loop: + - kubeadm=1.31.2-1.1 + - kubelet=1.31.2-1.1 + - kubectl=1.31.2-1.1 + +- name: Hold Kubernetes packages + dpkg_selections: + name: "{{ item }}" + selection: hold + loop: + - kubeadm + - kubelet + - kubectl + - containerd.io + +- name: Configure node ip + lineinfile: + path: /etc/default/kubelet + line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} + create: yes + state: present + notify: Restart kubelet + +- name: Add hosts to /etc/hosts + lineinfile: + path: /etc/hosts + line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" + state: present + create: no + loop: "{{ groups['all'] }}" + when: hostvars[item].private_ip is defined + +- name: Add apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: present + +- name: Pull Kubernetes images | If you got error check your dns and sanction + command: + cmd: kubeadm config images pull \ No newline at end of file diff --git a/app/media/MyAnsible/roles/k8s/tasks/main.yml b/app/media/MyAnsible/roles/k8s/tasks/main.yml index 2686e68a..ddd388cb 100644 --- a/app/media/MyAnsible/roles/k8s/tasks/main.yml +++ b/app/media/MyAnsible/roles/k8s/tasks/main.yml @@ -1,4 +1,4 @@ --- - - name: Install kubernetes packages - include_tasks: k8s.yml +- name: Install kubernetes packages + include_tasks: k8s.yml \ No newline at end of file diff --git a/app/media/MyAnsible/roles/preinstall/tasks/basic.yml b/app/media/MyAnsible/roles/preinstall/tasks/basic.yml index 4439a0ce..46c59409 100644 --- a/app/media/MyAnsible/roles/preinstall/tasks/basic.yml +++ b/app/media/MyAnsible/roles/preinstall/tasks/basic.yml @@ -1,83 +1,83 @@ - name: Set timezone to UTC - timezone: - name: Etc/UTC + timezone: + name: Etc/UTC - - name: Set hostname - command: hostnamectl set-hostname {{ inventory_hostname }} +- name: Set hostname + command: hostnamectl set-hostname {{ inventory_hostname }} - - name: Remove symlink resolve.conf - file: - path: "/etc/resolv.conf" - state: absent - ignore_errors: true - when: use_iran == "true" +- name: Remove symlink resolve.conf + file: + path: "/etc/resolv.conf" + state: absent + ignore_errors: true + when: use_iran == "true" - - name: Configure resolv.conf - template: - src: "resolv.conf.j2" - 
dest: "/etc/resolv.conf" - mode: "0644" - when: use_iran == "true" +- name: Configure resolv.conf + template: + src: "resolv.conf.j2" + dest: "/etc/resolv.conf" + mode: "0644" + when: use_iran == "true" - - name: Add hostname - lineinfile: - path: /etc/hosts - regexp: '^127\.0\.0\.1' - line: "127.0.0.1 {{ inventory_hostname }} localhost" - owner: root - group: root - mode: 0644 +- name: Add hostname + lineinfile: + path: /etc/hosts + regexp: '^127\.0\.0\.1' + line: "127.0.0.1 {{ inventory_hostname }} localhost" + owner: root + group: root + mode: 0644 - - name: Install necessary tools - apt: - state: latest - update_cache: true - name: - - vim - - sudo - - wget - - curl - - telnet - - nload - - s3cmd - - cron - - ipset - - lvm2 - - python3 - - python3-setuptools - - python3-pip - - python3-apt - - intel-microcode - - htop - - tcpdump - - net-tools - - screen - - tmux - - byobu - - iftop - - bmon - - iperf - - sysstat - - ethtool - - plocate - - thin-provisioning-tools - - conntrack - - stress - - cpufrequtils - - rsync - - xz-utils - - build-essential - - apt-transport-https - - ca-certificates - - software-properties-common - - gnupg-agent - - iptables-persistent - - open-iscsi - - nfs-common - - tzdata - - tree +- name: Install necessary tools + apt: + state: latest + update_cache: true + name: + - vim + - sudo + - wget + - curl + - telnet + - nload + - s3cmd + - cron + - ipset + - lvm2 + - python3 + - python3-setuptools + - python3-pip + - python3-apt + - intel-microcode + - htop + - tcpdump + - net-tools + - screen + - tmux + - byobu + - iftop + - bmon + - iperf + - sysstat + - ethtool + - plocate + - thin-provisioning-tools + - conntrack + - stress + - cpufrequtils + - rsync + - xz-utils + - build-essential + - apt-transport-https + - ca-certificates + - software-properties-common + - gnupg-agent + - iptables-persistent + - open-iscsi + - nfs-common + - tzdata + - tree - - name: Fix broken packages - apt: - state: fixed +- name: Fix broken packages + apt: + state: fixed \ No newline at end of file diff --git a/app/media/MyAnsible/roles/preinstall/tasks/main.yml b/app/media/MyAnsible/roles/preinstall/tasks/main.yml index a943325c..38be7807 100644 --- a/app/media/MyAnsible/roles/preinstall/tasks/main.yml +++ b/app/media/MyAnsible/roles/preinstall/tasks/main.yml @@ -1,4 +1,4 @@ --- - - name: basic setup - include_tasks: basic.yml +- name: basic setup + include_tasks: basic.yml \ No newline at end of file diff --git a/app/template_generators/ansible/install/kuber.py b/app/template_generators/ansible/install/kuber.py index 309664e2..eb4d2467 100644 --- a/app/template_generators/ansible/install/kuber.py +++ b/app/template_generators/ansible/install/kuber.py @@ -158,93 +158,93 @@ def ansible_kuber_install(input): with open(os.path.join(preinstall_tasks_dir, "basic.yml"), "w") as basic_tasks_file: basic_tasks_file.write("""- name: Set timezone to UTC - timezone: - name: Etc/UTC - - - name: Set hostname - command: hostnamectl set-hostname {{ inventory_hostname }} - - - name: Remove symlink resolve.conf - file: - path: "/etc/resolv.conf" - state: absent - ignore_errors: true - when: use_iran == "true" - - - name: Configure resolv.conf - template: - src: "resolv.conf.j2" - dest: "/etc/resolv.conf" - mode: "0644" - when: use_iran == "true" - - - name: Add hostname - lineinfile: - path: /etc/hosts - regexp: '^127\\.0\\.0\\.1' - line: "127.0.0.1 {{ inventory_hostname }} localhost" - owner: root - group: root - mode: 0644 - - - name: Install necessary tools - apt: - state: latest - update_cache: 
true - name: - - vim - - sudo - - wget - - curl - - telnet - - nload - - s3cmd - - cron - - ipset - - lvm2 - - python3 - - python3-setuptools - - python3-pip - - python3-apt - - intel-microcode - - htop - - tcpdump - - net-tools - - screen - - tmux - - byobu - - iftop - - bmon - - iperf - - sysstat - - ethtool - - plocate - - thin-provisioning-tools - - conntrack - - stress - - cpufrequtils - - rsync - - xz-utils - - build-essential - - apt-transport-https - - ca-certificates - - software-properties-common - - gnupg-agent - - iptables-persistent - - open-iscsi - - nfs-common - - tzdata - - tree - - - name: Fix broken packages - apt: - state: fixed + timezone: + name: Etc/UTC + +- name: Set hostname + command: hostnamectl set-hostname {{ inventory_hostname }} + +- name: Remove symlink resolve.conf + file: + path: "/etc/resolv.conf" + state: absent + ignore_errors: true + when: use_iran == "true" + +- name: Configure resolv.conf + template: + src: "resolv.conf.j2" + dest: "/etc/resolv.conf" + mode: "0644" + when: use_iran == "true" + +- name: Add hostname + lineinfile: + path: /etc/hosts + regexp: '^127\\.0\\.0\\.1' + line: "127.0.0.1 {{ inventory_hostname }} localhost" + owner: root + group: root + mode: 0644 + +- name: Install necessary tools + apt: + state: latest + update_cache: true + name: + - vim + - sudo + - wget + - curl + - telnet + - nload + - s3cmd + - cron + - ipset + - lvm2 + - python3 + - python3-setuptools + - python3-pip + - python3-apt + - intel-microcode + - htop + - tcpdump + - net-tools + - screen + - tmux + - byobu + - iftop + - bmon + - iperf + - sysstat + - ethtool + - plocate + - thin-provisioning-tools + - conntrack + - stress + - cpufrequtils + - rsync + - xz-utils + - build-essential + - apt-transport-https + - ca-certificates + - software-properties-common + - gnupg-agent + - iptables-persistent + - open-iscsi + - nfs-common + - tzdata + - tree + +- name: Fix broken packages + apt: + state: fixed """) with open(os.path.join(preinstall_tasks_dir, "main.yml"), "w") as tasks_main_file: tasks_main_file.write("""--- - - name: basic setup - include_tasks: basic.yml +- name: basic setup + include_tasks: basic.yml """) # Create k8s files @@ -270,221 +270,221 @@ def ansible_kuber_install(input): with open(os.path.join(k8s_handlers_dir, "main.yml"), "w") as k8s_handlers_file: k8s_handlers_file.write("""--- - # handlers file for k8s +# handlers file for k8s - - name: Remove temporary GPG key file - file: - path: "/tmp/docker.list" - state: absent +- name: Remove temporary GPG key file + file: + path: "/tmp/docker.list" + state: absent - - name: Restart kubelet - service: - name: kubelet - state: restarted +- name: Restart kubelet + service: + name: kubelet + state: restarted """) with open(os.path.join(k8s_tasks_dir, "k8s.yml"), "w") as k8s_tasks_k8s_file: k8s_tasks_k8s_file.write("""- name: Disable SWAP since kubernetes can't work with swap enabled - shell: | - swapoff -a - - - name: Disable SWAP in fstab since kubernetes can't work with swap enabled - replace: - path: /etc/fstab - regexp: '^([^#].*?\\sswap\\s+sw\\s+.*)$' - replace: '# \\1' - - - name: Check if ufw is installed - package_facts: - manager: "auto" - - - name: Disable ufw # just in Ubuntu - ufw: - state: disabled - when: "'ufw' in ansible_facts.packages" - - - name: Ensure kernel modules for containerd are enabled - lineinfile: - path: /etc/modules-load.d/containerd.conf - line: "{{ item }}" - create: yes - state: present - loop: - - overlay - - br_netfilter - - - name: Load kernel modules - command: - cmd: 
"modprobe {{ item }}" - loop: - - overlay - - br_netfilter - - - name: Ensure sysctl settings for Kubernetes are present - blockinfile: - path: /etc/sysctl.d/kubernetes.conf - block: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - net.ipv4.ip_forward = 1 - create: yes - marker: "# {mark} ANSIBLE MANAGED BLOCK" - owner: root - group: root - mode: '0644' - - - name: Reload sysctl settings - command: - cmd: sysctl --system - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install required packages - apt: - pkg: - - ca-certificates - - curl - - gnupg - - lsb-release - - gpg - state: present - update_cache: yes - - - name: Ensure the /etc/apt/keyrings directory exists - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' # Adjust the permissions as necessary - owner: root # Set the owner, if required - group: root - - - name: Remove existing Docker GPG key if it exists - file: - path: '{{ docker_gpg_key_path }}' - state: absent - - - name: Download Docker GPG key - shell: | - curl -fsSL {{ docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} - - - name: Determine the architecture - command: dpkg --print-architecture - register: architecture - - - name: Determine the distribution codename - command: lsb_release -cs - register: distribution_codename - - - name: Add Docker APT repository - lineinfile: - path: /etc/apt/sources.list.d/docker.list - create: yes - line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" - state: present - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install required packages (containerd) - apt: - pkg: - - containerd.io - state: present - - - name: Generate default containerd configuration - shell: - cmd: containerd config default > /etc/containerd/config.toml - - - name: Replace SystemdCgroup from false to true in containerd config - replace: - path: /etc/containerd/config.toml - regexp: 'SystemdCgroup = false' - replace: 'SystemdCgroup = true' - - - name: Restart containerd service - systemd: - name: containerd - state: restarted - daemon_reload: yes - - - name: Enable containerd service - systemd: - name: containerd - enabled: yes - - - name: Delete the existing Kubernetes APT keyring file if it exists - file: - path: '{{ kubernetes_gpg_keyring_path }}' - state: absent - - - name: Download Kubernetes GPG key - shell: | - curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' - - - name: Add Kubernetes repo - apt_repository: - repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" - state: present - filename: kubernetes.list - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install Kubernetes packages - apt: - name: "{{ item }}" - state: present - loop: - - kubeadm=1.31.2-1.1 - - kubelet=1.31.2-1.1 - - kubectl=1.31.2-1.1 - - - name: Hold Kubernetes packages - dpkg_selections: - name: "{{ item }}" - selection: hold - loop: - - kubeadm - - kubelet - - kubectl - - containerd.io - - - name: Configure node ip - lineinfile: - path: /etc/default/kubelet - line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} - create: yes - state: present - notify: Restart kubelet - - - name: Add hosts to /etc/hosts - lineinfile: - path: /etc/hosts - line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" - state: present - create: no - loop: "{{ groups['all'] }}" - when: 
hostvars[item].private_ip is defined - - - name: Add apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" - state: present - - - name: Pull Kubernetes images | If you got error check your dns and sanction - command: - cmd: kubeadm config images pull + shell: | + swapoff -a + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled + replace: + path: /etc/fstab + regexp: '^([^#].*?\\sswap\\s+sw\\s+.*)$' + replace: '# \\1' + +- name: Check if ufw is installed + package_facts: + manager: "auto" + +- name: Disable ufw # just in Ubuntu + ufw: + state: disabled + when: "'ufw' in ansible_facts.packages" + +- name: Ensure kernel modules for containerd are enabled + lineinfile: + path: /etc/modules-load.d/containerd.conf + line: "{{ item }}" + create: yes + state: present + loop: + - overlay + - br_netfilter + +- name: Load kernel modules + command: + cmd: "modprobe {{ item }}" + loop: + - overlay + - br_netfilter + +- name: Ensure sysctl settings for Kubernetes are present + blockinfile: + path: /etc/sysctl.d/kubernetes.conf + block: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + create: yes + marker: "# {mark} ANSIBLE MANAGED BLOCK" + owner: root + group: root + mode: '0644' + +- name: Reload sysctl settings + command: + cmd: sysctl --system + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install required packages + apt: + pkg: + - ca-certificates + - curl + - gnupg + - lsb-release + - gpg + state: present + update_cache: yes + +- name: Ensure the /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' # Adjust the permissions as necessary + owner: root # Set the owner, if required + group: root + +- name: Remove existing Docker GPG key if it exists + file: + path: '{{ docker_gpg_key_path }}' + state: absent + +- name: Download Docker GPG key + shell: | + curl -fsSL {{ docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} + +- name: Determine the architecture + command: dpkg --print-architecture + register: architecture + +- name: Determine the distribution codename + command: lsb_release -cs + register: distribution_codename + +- name: Add Docker APT repository + lineinfile: + path: /etc/apt/sources.list.d/docker.list + create: yes + line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" + state: present + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install required packages (containerd) + apt: + pkg: + - containerd.io + state: present + +- name: Generate default containerd configuration + shell: + cmd: containerd config default > /etc/containerd/config.toml + +- name: Replace SystemdCgroup from false to true in containerd config + replace: + path: /etc/containerd/config.toml + regexp: 'SystemdCgroup = false' + replace: 'SystemdCgroup = true' + +- name: Restart containerd service + systemd: + name: containerd + state: restarted + daemon_reload: yes + +- name: Enable containerd service + systemd: + name: containerd + enabled: yes + +- name: Delete the existing Kubernetes APT keyring file if it exists + file: + path: '{{ kubernetes_gpg_keyring_path }}' + state: absent + +- name: Download Kubernetes GPG key + shell: | + curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' + +- 
name: Add Kubernetes repo + apt_repository: + repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" + state: present + filename: kubernetes.list + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install Kubernetes packages + apt: + name: "{{ item }}" + state: present + loop: + - kubeadm=1.31.2-1.1 + - kubelet=1.31.2-1.1 + - kubectl=1.31.2-1.1 + +- name: Hold Kubernetes packages + dpkg_selections: + name: "{{ item }}" + selection: hold + loop: + - kubeadm + - kubelet + - kubectl + - containerd.io + +- name: Configure node ip + lineinfile: + path: /etc/default/kubelet + line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} + create: yes + state: present + notify: Restart kubelet + +- name: Add hosts to /etc/hosts + lineinfile: + path: /etc/hosts + line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" + state: present + create: no + loop: "{{ groups['all'] }}" + when: hostvars[item].private_ip is defined + +- name: Add apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: present + +- name: Pull Kubernetes images | If you got error check your dns and sanction + command: + cmd: kubeadm config images pull """) with open(os.path.join(k8s_tasks_dir, "main.yml"), "w") as k8s_tasks_main_file: k8s_tasks_main_file.write("""--- - - name: Install kubernetes packages - include_tasks: k8s.yml +- name: Install kubernetes packages + include_tasks: k8s.yml """) # Create init_k8s files @@ -513,103 +513,104 @@ def ansible_kuber_install(input): with open(os.path.join(init_k8s_tasks_dir, "cni.yml"), "w") as init_k8s_tasks_cni_file: init_k8s_tasks_cni_file.write("""- block: - - name: Check if Calico CRDs exist - command: kubectl get crd felixconfigurations.crd.projectcalico.org - register: calico_crd_check - ignore_errors: true - delegate_to: "{{ groups['k8s_masters'][0] }}" - - - block: - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_operator_url }} - retries: 3 - delay: 3 - - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_crd_url }} - retries: 3 - delay: 3 - delegate_to: "{{ groups['k8s_masters'][0] }}" - when: calico_crd_check.rc != 0 - run_once: true + - name: Check if Calico CRDs exist + command: kubectl get crd felixconfigurations.crd.projectcalico.org + register: calico_crd_check + ignore_errors: true +delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_operator_url }} + retries: 3 + delay: 3 + + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_crd_url }} + retries: 3 + delay: 3 +delegate_to: "{{ groups['k8s_masters'][0] }}" +when: calico_crd_check.rc != 0 +run_once: true + """) with open(os.path.join(init_k8s_tasks_dir, "initk8s.yml"), "w") as init_k8s_tasks_initk8s_file: init_k8s_tasks_initk8s_file.write("""- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - - - block: - - name: Init cluster | Copy kubeadmcnf.yaml - template: - src: kubeadmcnf.yml.j2 - dest: /root/kubeadmcnf.yaml - - - name: Init cluster | Initiate cluster on node groups['kube_master'][0] - shell: kubeadm init --config=/root/kubeadmcnf.yaml - register: kubeadm_init - # Retry is because upload config sometimes fails 
- until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - notify: Restart kubelet - - when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists - delegate_to: "{{ groups['k8s_masters'][0] }}" - - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - - - name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 - delegate_to: localhost - - - name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes - delegate_to: "{{ groups['k8s_masters'][0] }}" - - - name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 - delegate_to: localhost - # when: use_iran == "true" - - - name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - block: + - name: Init cluster | Copy kubeadmcnf.yaml + template: + src: kubeadmcnf.yml.j2 + dest: /root/kubeadmcnf.yaml + + - name: Init cluster | Initiate cluster on node groups['kube_master'][0] + shell: kubeadm init --config=/root/kubeadmcnf.yaml + register: kubeadm_init + # Retry is because upload config sometimes fails + until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr + notify: Restart kubelet + + when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - name: Sleep for 300 seconds and reboot the Master1 server + wait_for: + timeout: 300 + delegate_to: localhost + + - name: Reboot the servers + command: reboot + async: 1 + poll: 0 + # ignore_errors: yes + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - name: Sleep for 300 seconds to Master1 up and running + wait_for: + timeout: 300 + delegate_to: localhost + # when: use_iran == "true" + + - name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." 
""") with open(os.path.join(init_k8s_tasks_dir, "main.yml"), "w") as init_k8s_tasks_main_file: init_k8s_tasks_main_file.write("""--- - # tasks file for init_k8s +# tasks file for init_k8s - - name: Initialize kubernetes cluster - include_tasks: initk8s.yml +- name: Initialize kubernetes cluster + include_tasks: initk8s.yml - - name: Initialize Calico CNI - include_tasks: cni.yml +- name: Initialize Calico CNI + include_tasks: cni.yml """) # Create join_master files @@ -638,113 +639,113 @@ def ansible_kuber_install(input): with open(os.path.join(join_master_tasks_dir, "join_master.yml"), "w") as join_master_tasks_join_master_file: join_master_tasks_join_master_file.write("""- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{{ join_command.stdout_lines[0] }}" - - - name: Copy join command to local file - become: false - local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" - - - name: copy kubeadmcnf.yaml - template: - src: kubeadmcnf-join.yml.j2 - dest: /root/kubeadm-config.yaml - - when: - - inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - - - block: - - name: Copy the join command to server location - copy: - src: roles/join_master/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - block: - - name: get certificate key - shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml - register: kubeadm_cert_key - - - name: Print certificate key - debug: - msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" - - - name: register the cert key - set_fact: - control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" - - when: - - inventory_hostname in groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - run_once: false - delegate_facts: true - - - name: Join | Join control-plane to cluster - command: "sh /root/join-command.sh --control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - name: remove apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" - state: absent - - - name: Add apiserver_url to point to the masters - lineinfile: - dest: /etc/hosts - line: "{{ private_ip }} {{ apiserver_url }}" - state: present - when: - - inventory_hostname in groups['k8s_masters'] + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + +- 
block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" + + - name: copy kubeadmcnf.yaml + template: + src: kubeadmcnf-join.yml.j2 + dest: /root/kubeadm-config.yaml + + when: + - inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Copy the join command to server location + copy: + src: roles/join_master/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- block: + - name: get certificate key + shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml + register: kubeadm_cert_key + + - name: Print certificate key + debug: + msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + - name: register the cert key + set_fact: + control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + when: + - inventory_hostname in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + run_once: false + delegate_facts: true + +- name: Join | Join control-plane to cluster + command: "sh /root/join-command.sh --control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- name: remove apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: absent + +- name: Add apiserver_url to point to the masters + lineinfile: + dest: /etc/hosts + line: "{{ private_ip }} {{ apiserver_url }}" + state: present + when: + - inventory_hostname in groups['k8s_masters'] """) with open(os.path.join(join_master_tasks_dir, "main.yml"), "w") as join_master_tasks_main_file: join_master_tasks_main_file.write("""--- - # tasks file for join_master +# tasks file for join_master - - name: Join master(s) node to cluster - include_tasks: join_master.yml +- name: Join master(s) node to cluster + include_tasks: join_master.yml """) # Create join_worker files @@ -773,49 +774,49 @@ def ansible_kuber_install(input): with open(os.path.join(join_worker_tasks_dir, "join_worker.yml"), "w") as join_worker_tasks_join_worker_file: join_worker_tasks_join_worker_file.write("""- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{{ 
join_command.stdout_lines[0] }}" - - - name: Copy join command to local file - become: false - local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" - - when: - - inventory_hostname not in groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - - - block: - - name: Copy the join command to server location - copy: - src: roles/join_worker/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - name: Join | Join worker nodes to the cluster - command: sh /root/join-command.sh - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + +- block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" + + when: + - inventory_hostname not in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Copy the join command to server location + copy: + src: roles/join_worker/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- name: Join | Join worker nodes to the cluster + command: sh /root/join-command.sh + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists """) with open(os.path.join(join_worker_tasks_dir, "main.yml"), "w") as join_worker_tasks_main_file: join_worker_tasks_main_file.write("""--- - # tasks file for join_worker +# tasks file for join_worker - - name: Join worker(s) node to cluster - include_tasks: join_worker.yml +- name: Join worker(s) node to cluster + include_tasks: join_worker.yml """) \ No newline at end of file From d65058f6349f4b40eeb12cb428d517ae6f7a0d87 Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Thu, 12 Dec 2024 19:55:19 +0330 Subject: [PATCH 24/25] fix(ansible kuber): fix yaml formats --- app/media/MyAnsible/group_vars/all | 72 +++--- .../MyAnsible/roles/init_k8s/tasks/cni.yml | 32 +-- .../roles/init_k8s/tasks/initk8s.yml | 100 ++++---- .../ansible/install/kuber.py | 226 +++++++++--------- 4 files changed, 215 insertions(+), 215 deletions(-) diff --git a/app/media/MyAnsible/group_vars/all b/app/media/MyAnsible/group_vars/all index 38a7e206..eb422e93 100644 --- a/app/media/MyAnsible/group_vars/all +++ b/app/media/MyAnsible/group_vars/all @@ -1,38 +1,38 @@ # General - install_ansible_modules: "true" - disable_transparent_huge_pages: "true" - - setup_interface: "false" - - # Network Calico see here for more details https://github.com/projectcalico/calico/releases - calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" - calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" - pod_network_cidr: "192.168.0.0/16" - - # DNS - resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online - - # Sanction shekan - use_iran: "true" # change it to "false" if you are outside of iran - - # Docker - docker_gpg_key_url: 
"https://download.docker.com/linux/ubuntu/gpg" - docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" - docker_apt_repo: "https://download.docker.com/linux/ubuntu" - - # Kubernetes - kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" - kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" - kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" - k8s_version: 1.31 # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases - - # CRI - cri_socket: unix:///var/run/containerd/containerd.sock - - # Ansible Connection - ansible_user: root - ansible_port: 22 - ansible_python_interpreter: "/usr/bin/python3" - domain: "devopsgpt.com" - apiserver_url: "devopsgpt.com" +install_ansible_modules: "true" +disable_transparent_huge_pages: "true" + +setup_interface: "false" + +# Network Calico see here for more details https://github.com/projectcalico/calico/releases +calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" +calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" +pod_network_cidr: "192.168.0.0/16" + +# DNS +resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online + +# Sanction shekan +use_iran: "true" # change it to "false" if you are outside of iran + +# Docker +docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" +docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" +docker_apt_repo: "https://download.docker.com/linux/ubuntu" + +# Kubernetes +kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" +kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" +kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" +k8s_version: 1.31 # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases + +# CRI +cri_socket: unix:///var/run/containerd/containerd.sock + +# Ansible Connection +ansible_user: root +ansible_port: 22 +ansible_python_interpreter: "/usr/bin/python3" +domain: "devopsgpt.com" +apiserver_url: "devopsgpt.com" \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml index b6902c92..c12926c6 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml @@ -1,22 +1,22 @@ - block: - - name: Check if Calico CRDs exist - command: kubectl get crd felixconfigurations.crd.projectcalico.org - register: calico_crd_check - ignore_errors: true -delegate_to: "{{ groups['k8s_masters'][0] }}" + - name: Check if Calico CRDs exist + command: kubectl get crd felixconfigurations.crd.projectcalico.org + register: calico_crd_check + ignore_errors: true + delegate_to: "{{ groups['k8s_masters'][0] }}" - block: - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_operator_url }} - retries: 3 - delay: 3 + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_operator_url }} + retries: 3 + delay: 3 - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_crd_url }} - retries: 3 - delay: 3 -delegate_to: "{{ groups['k8s_masters'][0] }}" -when: calico_crd_check.rc != 0 -run_once: true + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_crd_url }} + retries: 3 + delay: 3 + delegate_to: "{{ groups['k8s_masters'][0] }}" + when: calico_crd_check.rc != 0 
+ run_once: true \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml index 669d693d..6f905635 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml @@ -2,64 +2,64 @@ stat: path: "/var/lib/kubelet/config.yaml" register: kubeadm_already_run - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" - - block: - - name: Init cluster | Copy kubeadmcnf.yaml - template: - src: kubeadmcnf.yml.j2 - dest: /root/kubeadmcnf.yaml +- block: + - name: Init cluster | Copy kubeadmcnf.yaml + template: + src: kubeadmcnf.yml.j2 + dest: /root/kubeadmcnf.yaml - - name: Init cluster | Initiate cluster on node groups['kube_master'][0] - shell: kubeadm init --config=/root/kubeadmcnf.yaml - register: kubeadm_init - # Retry is because upload config sometimes fails - until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - notify: Restart kubelet + - name: Init cluster | Initiate cluster on node groups['kube_master'][0] + shell: kubeadm init --config=/root/kubeadmcnf.yaml + register: kubeadm_init + # Retry is because upload config sometimes fails + until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr + notify: Restart kubelet - when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists - delegate_to: "{{ groups['k8s_masters'][0] }}" + when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists + delegate_to: "{{ groups['k8s_masters'][0] }}" - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory +- block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" - - name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 - delegate_to: localhost + - name: Sleep for 300 seconds and reboot the Master1 server + wait_for: + timeout: 300 + delegate_to: localhost - - name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes - delegate_to: "{{ groups['k8s_masters'][0] }}" + - name: Reboot the servers + command: reboot + async: 1 + poll: 0 + # ignore_errors: yes + delegate_to: "{{ groups['k8s_masters'][0] }}" - - name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 - delegate_to: localhost - # when: use_iran == "true" + - name: Sleep for 300 seconds to Master1 up and running + wait_for: + timeout: 300 + delegate_to: localhost + # when: use_iran == "true" - - name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." 
+ - name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." \ No newline at end of file diff --git a/app/template_generators/ansible/install/kuber.py b/app/template_generators/ansible/install/kuber.py index eb4d2467..650b483b 100644 --- a/app/template_generators/ansible/install/kuber.py +++ b/app/template_generators/ansible/install/kuber.py @@ -46,42 +46,42 @@ def ansible_kuber_install(input): # Create group_vars/all with open(os.path.join(group_vars_dir, "all"), "w") as group_vars_file: group_vars_file.write(f"""# General - install_ansible_modules: "true" - disable_transparent_huge_pages: "true" - - setup_interface: "false" - - # Network Calico see here for more details https://github.com/projectcalico/calico/releases - calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" - calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" - pod_network_cidr: "192.168.0.0/16" - - # DNS - resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online - - # Sanction shekan - use_iran: "true" # change it to "false" if you are outside of iran - - # Docker - docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" - docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" - docker_apt_repo: "https://download.docker.com/linux/ubuntu" - - # Kubernetes - kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" - kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" - kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" - k8s_version: {k8s_version} # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases - - # CRI - cri_socket: unix:///var/run/containerd/containerd.sock - - # Ansible Connection - ansible_user: {kubernetes_ansible_user} - ansible_port: {kubernetes_ansible_port} - ansible_python_interpreter: "/usr/bin/python3" - domain: "devopsgpt.com" - apiserver_url: "devopsgpt.com" +install_ansible_modules: "true" +disable_transparent_huge_pages: "true" + +setup_interface: "false" + +# Network Calico see here for more details https://github.com/projectcalico/calico/releases +calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" +calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" +pod_network_cidr: "192.168.0.0/16" + +# DNS +resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online + +# Sanction shekan +use_iran: "true" # change it to "false" if you are outside of iran + +# Docker +docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" +docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" +docker_apt_repo: "https://download.docker.com/linux/ubuntu" + +# Kubernetes +kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" +kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" +kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" +k8s_version: {k8s_version} # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases + +# CRI +cri_socket: unix:///var/run/containerd/containerd.sock + +# Ansible Connection +ansible_user: {kubernetes_ansible_user} +ansible_port: {kubernetes_ansible_port} +ansible_python_interpreter: "/usr/bin/python3" +domain: "devopsgpt.com" +apiserver_url: "devopsgpt.com" """) # Create hosts @@ 
-513,25 +513,25 @@ def ansible_kuber_install(input): with open(os.path.join(init_k8s_tasks_dir, "cni.yml"), "w") as init_k8s_tasks_cni_file: init_k8s_tasks_cni_file.write("""- block: - - name: Check if Calico CRDs exist - command: kubectl get crd felixconfigurations.crd.projectcalico.org - register: calico_crd_check - ignore_errors: true -delegate_to: "{{ groups['k8s_masters'][0] }}" + - name: Check if Calico CRDs exist + command: kubectl get crd felixconfigurations.crd.projectcalico.org + register: calico_crd_check + ignore_errors: true + delegate_to: "{{ groups['k8s_masters'][0] }}" - block: - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_operator_url }} - retries: 3 - delay: 3 - - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_crd_url }} - retries: 3 - delay: 3 -delegate_to: "{{ groups['k8s_masters'][0] }}" -when: calico_crd_check.rc != 0 -run_once: true + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_operator_url }} + retries: 3 + delay: 3 + + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_crd_url }} + retries: 3 + delay: 3 + delegate_to: "{{ groups['k8s_masters'][0] }}" + when: calico_crd_check.rc != 0 + run_once: true """) @@ -540,66 +540,66 @@ def ansible_kuber_install(input): stat: path: "/var/lib/kubelet/config.yaml" register: kubeadm_already_run - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - - - block: - - name: Init cluster | Copy kubeadmcnf.yaml - template: - src: kubeadmcnf.yml.j2 - dest: /root/kubeadmcnf.yaml - - - name: Init cluster | Initiate cluster on node groups['kube_master'][0] - shell: kubeadm init --config=/root/kubeadmcnf.yaml - register: kubeadm_init - # Retry is because upload config sometimes fails - until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - notify: Restart kubelet - - when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists - delegate_to: "{{ groups['k8s_masters'][0] }}" - - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - - - name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 - delegate_to: localhost - - - name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes - delegate_to: "{{ groups['k8s_masters'][0] }}" - - - name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 - delegate_to: localhost - # when: use_iran == "true" - - - name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." 
+ when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Init cluster | Copy kubeadmcnf.yaml + template: + src: kubeadmcnf.yml.j2 + dest: /root/kubeadmcnf.yaml + + - name: Init cluster | Initiate cluster on node groups['kube_master'][0] + shell: kubeadm init --config=/root/kubeadmcnf.yaml + register: kubeadm_init + # Retry is because upload config sometimes fails + until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr + notify: Restart kubelet + + when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - name: Sleep for 300 seconds and reboot the Master1 server + wait_for: + timeout: 300 + delegate_to: localhost + + - name: Reboot the servers + command: reboot + async: 1 + poll: 0 + # ignore_errors: yes + delegate_to: "{{ groups['k8s_masters'][0] }}" + + - name: Sleep for 300 seconds to Master1 up and running + wait_for: + timeout: 300 + delegate_to: localhost + # when: use_iran == "true" + + - name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." """) with open(os.path.join(init_k8s_tasks_dir, "main.yml"), "w") as init_k8s_tasks_main_file: From f62327fbf0721dde82cc27a100f98af28ca9006d Mon Sep 17 00:00:00 2001 From: abolfazl1381 Date: Thu, 12 Dec 2024 20:13:20 +0330 Subject: [PATCH 25/25] fix(ansible kuber): fix yaml formats --- .../roles/init_k8s/tasks/initk8s.yml | 32 +++++++++---------- .../ansible/install/kuber.py | 32 +++++++++---------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml index 6f905635..3d616552 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml @@ -1,7 +1,7 @@ - name: Init cluster | Check if kubeadm has already run stat: path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run + register: kubeadm_already_run when: inventory_hostname == groups['k8s_masters'][0] delegate_to: "{{ groups['k8s_masters'][0] }}" @@ -41,25 +41,25 @@ when: inventory_hostname == groups['k8s_masters'][0] delegate_to: "{{ groups['k8s_masters'][0] }}" - - name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 +- name: Sleep for 300 seconds and reboot the Master1 server + wait_for: + timeout: 300 delegate_to: localhost - - name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes +- name: Reboot the servers + command: reboot + async: 1 + poll: 0 + # ignore_errors: yes delegate_to: "{{ groups['k8s_masters'][0] }}" - - name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 +- name: Sleep for 300 seconds to Master1 up and running + wait_for: + timeout: 300 delegate_to: localhost - # when: use_iran == "true" + # when: use_iran == "true" - - name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." 
+- name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." \ No newline at end of file diff --git a/app/template_generators/ansible/install/kuber.py b/app/template_generators/ansible/install/kuber.py index 650b483b..9bcf8362 100644 --- a/app/template_generators/ansible/install/kuber.py +++ b/app/template_generators/ansible/install/kuber.py @@ -539,7 +539,7 @@ def ansible_kuber_install(input): init_k8s_tasks_initk8s_file.write("""- name: Init cluster | Check if kubeadm has already run stat: path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run + register: kubeadm_already_run when: inventory_hostname == groups['k8s_masters'][0] delegate_to: "{{ groups['k8s_masters'][0] }}" @@ -579,27 +579,27 @@ def ansible_kuber_install(input): when: inventory_hostname == groups['k8s_masters'][0] delegate_to: "{{ groups['k8s_masters'][0] }}" - - name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 +- name: Sleep for 300 seconds and reboot the Master1 server + wait_for: + timeout: 300 delegate_to: localhost - - name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes +- name: Reboot the servers + command: reboot + async: 1 + poll: 0 + # ignore_errors: yes delegate_to: "{{ groups['k8s_masters'][0] }}" - - name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 +- name: Sleep for 300 seconds to Master1 up and running + wait_for: + timeout: 300 delegate_to: localhost - # when: use_iran == "true" + # when: use_iran == "true" - - name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." +- name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." """) with open(os.path.join(init_k8s_tasks_dir, "main.yml"), "w") as init_k8s_tasks_main_file:
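
The two follow-up patches ([PATCH 24/25] and [PATCH 25/25]) exist because the generated role YAML mixed indentation levels: task-level keys such as delegate_to, when and run_once landed at column 0 right after an indented block, so files like the pre-fix roles/init_k8s/tasks/cni.yml do not parse at all. A quick parse pass over the generated tree would catch most of that class of regression before it ships. The sketch below is illustrative only and is not part of the patch series: the yaml_check.py filename and the validate_generated_yaml helper are assumptions, and it relies on PyYAML's yaml.safe_load plus the app/media/MyAnsible layout shown in the diffs above.

# yaml_check.py -- illustrative sketch only; the filename and helper are
# assumptions, not part of this patch series.
import sys
from pathlib import Path

import yaml  # PyYAML


def validate_generated_yaml(root: str = "app/media/MyAnsible") -> int:
    """Parse every generated YAML file under `root`; return the failure count."""
    base = Path(root)
    candidates = sorted(
        list(base.rglob("*.yml"))
        + list(base.rglob("*.yaml"))
        + list((base / "group_vars").glob("*"))  # group_vars/all has no extension
    )
    failures = 0
    for path in candidates:
        try:
            # safe_load raises yaml.YAMLError on syntax problems, e.g. a
            # top-level key that follows an indented block sequence.
            yaml.safe_load(path.read_text())
        except yaml.YAMLError as exc:
            failures += 1
            print(f"Invalid YAML in {path}: {exc}", file=sys.stderr)
    return failures


if __name__ == "__main__":
    sys.exit(1 if validate_generated_yaml() else 0)

Run after ansible_kuber_install() has written the tree (for example as a CI step), this would have flagged the pre-fix cni.yml and similar mixed-indent task files, though not the group_vars/all change, which was purely stylistic and parses either way.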