diff --git a/admin-panel b/admin-panel index 5b9c0c12..bfa06012 160000 --- a/admin-panel +++ b/admin-panel @@ -1 +1 @@ -Subproject commit 5b9c0c123018e42b185681bb955c7a8b48b6b7f8 +Subproject commit bfa06012cc943bdb1a59fde5fe235be06840005d diff --git a/app/directory_generators/ansible_generator.py b/app/directory_generators/ansible_generator.py index e3965501..06b13344 100644 --- a/app/directory_generators/ansible_generator.py +++ b/app/directory_generators/ansible_generator.py @@ -1,816 +1,87 @@ import os project_name = "app/media/MyAnsible" -ansible_dir = project_name -group_vars_dir = os.path.join(ansible_dir, "group_vars") -host_vars_dir = os.path.join(ansible_dir, "host_vars") -roles_dir = os.path.join(ansible_dir, "roles") # Create project directories -os.makedirs(group_vars_dir, exist_ok=True) -os.makedirs(host_vars_dir, exist_ok=True) -os.makedirs(roles_dir, exist_ok=True) - -preinstall_dir = os.path.join(roles_dir, "preinstall") -k8s_dir = os.path.join(roles_dir, "k8s") -init_k8s_dir = os.path.join(roles_dir, "init_k8s") -join_master_dir = os.path.join(roles_dir, "join_master") -join_worker_dir = os.path.join(roles_dir, "join_worker") - -os.makedirs(preinstall_dir, exist_ok=True) -os.makedirs(k8s_dir, exist_ok=True) -os.makedirs(init_k8s_dir, exist_ok=True) -os.makedirs(join_master_dir, exist_ok=True) -os.makedirs(join_worker_dir, exist_ok=True) +os.makedirs(os.path.join(project_name, "group_vars"), exist_ok=True) +os.makedirs(os.path.join(project_name, "host_vars"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "defaults"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "files"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "handlers"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "tasks"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "templates"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "vars"), exist_ok=True) # Create ansible.cfg -with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as ansible_cfg_file: - ansible_cfg_file.write("[defaults]\nhost_key_checking=false\n") - -# Create group_vars/all -with open(os.path.join(group_vars_dir, "all"), "w") as group_vars_file: - group_vars_file.write("""# General -install_ansible_modules: "true" -disable_transparent_huge_pages: "true" - -setup_interface: "false" - -# Network Calico see here for more details https://github.com/projectcalico/calico/releases -calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" -calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" -pod_network_cidr: "192.168.0.0/16" - -# DNS -resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online - -# Sanction shekan -use_iran: "true" # change it to "false" if you are outside of iran - -# Docker -docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" -docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" -docker_apt_repo: "https://download.docker.com/linux/ubuntu" - -# Kubernetes -kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" -kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" -kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" -k8s_version: "1.31.2" # see here https://kubernetes.io/releases/patch-releases/ and 
https://github.com/kubernetes/kubernetes/releases +with open(os.path.join(project_name, "ansible.cfg"), "w") as ansible_cfg: + ansible_cfg.write("[defaults]\n") + ansible_cfg.write("host_key_checking=false\n") -# CRI -cri_socket: unix:///var/run/containerd/containerd.sock - -# Ansible Connection -ansible_user: root -ansible_port: 22 -ansible_python_interpreter: "/usr/bin/python3" -domain: "devopsgpt.com" -apiserver_url: "devopsgpt.com" -""") +# Create group_vars/docker_nodes +with open(os.path.join(project_name, "group_vars", "docker_nodes"), "w") as docker_nodes: + docker_nodes.write("ansible_port: 22\n") + docker_nodes.write("ansible_user: root\n") # Create hosts -with open(os.path.join(ansible_dir, "hosts"), "w") as hosts_file: - hosts_file.write("""[all] -string private_ip=x.x.x.x -string private_ip=x.x.x.x - -[k8s] -string -string - -[k8s_masters] -string - -[k8s_workers] -string -""") - -# Create kubernetes_playbook.yml -with open(os.path.join(ansible_dir, "kubernetes_playbook.yml"), "w") as playbook_file: - playbook_file.write("""- hosts: all - roles: - - role: preinstall - gather_facts: yes - any_errors_fatal: true - tags: [preinstall] - -- hosts: k8s - roles: - - role: k8s - gather_facts: yes - any_errors_fatal: true - tags: [k8s] - -- hosts: k8s - roles: - - role: init_k8s - gather_facts: yes - any_errors_fatal: true - tags: [init_k8s] - -- hosts: k8s_masters - roles: - - role: preinstall - - role: k8s - - role: join_master - gather_facts: yes - any_errors_fatal: true - tags: [join_master] - -- hosts: k8s_workers - roles: - - role: preinstall - - role: k8s - - role: join_worker - gather_facts: yes - any_errors_fatal: true - tags: [join_worker] -""") - -# Create preinstall files -preinstall_defaults_dir = os.path.join(preinstall_dir, "defaults") -preinstall_files_dir = os.path.join(preinstall_dir, "files") -preinstall_handlers_dir = os.path.join(preinstall_dir, "handlers") -preinstall_tasks_dir = os.path.join(preinstall_dir, "tasks") -preinstall_templates_dir = os.path.join(preinstall_dir, "templates") -preinstall_vars_dir = os.path.join(preinstall_dir, "vars") - -os.makedirs(preinstall_defaults_dir, exist_ok=True) -os.makedirs(preinstall_files_dir, exist_ok=True) -os.makedirs(preinstall_handlers_dir, exist_ok=True) -os.makedirs(preinstall_tasks_dir, exist_ok=True) -os.makedirs(preinstall_templates_dir, exist_ok=True) -os.makedirs(preinstall_vars_dir, exist_ok=True) - -with open(os.path.join(preinstall_defaults_dir, "main.yml"), "w") as defaults_file: - defaults_file.write("") - -with open(os.path.join(preinstall_files_dir, "sample.sh"), "w") as files_file: - files_file.write("") - -with open(os.path.join(preinstall_handlers_dir, "main.yml"), "w") as handlers_file: - handlers_file.write("") - -with open(os.path.join(preinstall_tasks_dir, "basic.yml"), "w") as basic_tasks_file: - basic_tasks_file.write("""- name: Set timezone to UTC - timezone: - name: Etc/UTC - -- name: Set hostname - command: hostnamectl set-hostname {{ inventory_hostname }} - -- name: Remove symlink resolve.conf - file: - path: "/etc/resolv.conf" - state: absent - ignore_errors: true - when: use_iran == "true" - -- name: Configure resolv.conf - template: - src: "resolv.conf.j2" - dest: "/etc/resolv.conf" - mode: "0644" - when: use_iran == "true" - -- name: Add hostname - lineinfile: - path: /etc/hosts - regexp: '^127\\.0\\.0\\.1' - line: "127.0.0.1 {{ inventory_hostname }} localhost" - owner: root - group: root - mode: 0644 - -- name: Install necessary tools - apt: - state: latest - update_cache: true - name: - 
- vim - - sudo - - wget - - curl - - telnet - - nload - - s3cmd - - cron - - ipset - - lvm2 - - python3 - - python3-setuptools - - python3-pip - - python3-apt - - intel-microcode - - htop - - tcpdump - - net-tools - - screen - - tmux - - byobu - - iftop - - bmon - - iperf - - sysstat - - ethtool - - plocate - - thin-provisioning-tools - - conntrack - - stress - - cpufrequtils - - rsync - - xz-utils - - build-essential - - apt-transport-https - - ca-certificates - - software-properties-common - - gnupg-agent - - iptables-persistent - - open-iscsi - - nfs-common - - tzdata - - tree - -- name: Fix broken packages - apt: - state: fixed -""") - -with open(os.path.join(preinstall_tasks_dir, "main.yml"), "w") as tasks_main_file: - tasks_main_file.write("""--- -- name: basic setup - include_tasks: basic.yml -""") - -# Create k8s files -k8s_defaults_dir = os.path.join(k8s_dir, "defaults") -k8s_files_dir = os.path.join(k8s_dir, "files") -k8s_handlers_dir = os.path.join(k8s_dir, "handlers") -k8s_tasks_dir = os.path.join(k8s_dir, "tasks") -k8s_templates_dir = os.path.join(k8s_dir, "templates") -k8s_vars_dir = os.path.join(k8s_dir, "vars") - -os.makedirs(k8s_defaults_dir, exist_ok=True) -os.makedirs(k8s_files_dir, exist_ok=True) -os.makedirs(k8s_handlers_dir, exist_ok=True) -os.makedirs(k8s_tasks_dir, exist_ok=True) -os.makedirs(k8s_templates_dir, exist_ok=True) -os.makedirs(k8s_vars_dir, exist_ok=True) - -with open(os.path.join(k8s_defaults_dir, "main.yml"), "w") as k8s_defaults_file: - k8s_defaults_file.write("") - -with open(os.path.join(k8s_files_dir, "sample.sh"), "w") as k8s_files_file: - k8s_files_file.write("") - -with open(os.path.join(k8s_handlers_dir, "main.yml"), "w") as k8s_handlers_file: - k8s_handlers_file.write("""--- -# handlers file for k8s - -- name: Remove temporary GPG key file - file: - path: "/tmp/docker.list" - state: absent - -- name: Restart kubelet - service: - name: kubelet - state: restarted -""") - -with open(os.path.join(k8s_tasks_dir, "k8s.yml"), "w") as k8s_tasks_k8s_file: - k8s_tasks_k8s_file.write("""- name: Disable SWAP since kubernetes can't work with swap enabled - shell: | - swapoff -a - -- name: Disable SWAP in fstab since kubernetes can't work with swap enabled - replace: - path: /etc/fstab - regexp: '^([^#].*?\\sswap\\s+sw\\s+.*)$' - replace: '# \\1' - -- name: Check if ufw is installed - package_facts: - manager: "auto" - -- name: Disable ufw # just in Ubuntu - ufw: - state: disabled - when: "'ufw' in ansible_facts.packages" - -- name: Ensure kernel modules for containerd are enabled - lineinfile: - path: /etc/modules-load.d/containerd.conf - line: "{{ item }}" - create: yes - state: present - loop: - - overlay - - br_netfilter - -- name: Load kernel modules - command: - cmd: "modprobe {{ item }}" - loop: - - overlay - - br_netfilter - -- name: Ensure sysctl settings for Kubernetes are present - blockinfile: - path: /etc/sysctl.d/kubernetes.conf - block: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - net.ipv4.ip_forward = 1 - create: yes - marker: "# {mark} ANSIBLE MANAGED BLOCK" - owner: root - group: root - mode: '0644' - -- name: Reload sysctl settings - command: - cmd: sysctl --system - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install required packages - apt: - pkg: - - ca-certificates - - curl - - gnupg - - lsb-release - - gpg - state: present - update_cache: yes - -- name: Ensure the /etc/apt/keyrings directory exists - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' # 
Adjust the permissions as necessary - owner: root # Set the owner, if required - group: root - -- name: Remove existing Docker GPG key if it exists - file: - path: '{{ docker_gpg_key_path }}' - state: absent - -- name: Download Docker GPG key - shell: | - curl -fsSL {{ docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} - -- name: Determine the architecture - command: dpkg --print-architecture - register: architecture - -- name: Determine the distribution codename - command: lsb_release -cs - register: distribution_codename - -- name: Add Docker APT repository - lineinfile: - path: /etc/apt/sources.list.d/docker.list - create: yes - line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" - state: present - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install required packages (containerd) - apt: - pkg: - - containerd.io - state: present - -- name: Generate default containerd configuration - shell: - cmd: containerd config default > /etc/containerd/config.toml - -- name: Replace SystemdCgroup from false to true in containerd config - replace: - path: /etc/containerd/config.toml - regexp: 'SystemdCgroup = false' - replace: 'SystemdCgroup = true' - -- name: Restart containerd service - systemd: - name: containerd - state: restarted - daemon_reload: yes - -- name: Enable containerd service - systemd: - name: containerd - enabled: yes - -- name: Delete the existing Kubernetes APT keyring file if it exists - file: - path: '{{ kubernetes_gpg_keyring_path }}' - state: absent - -- name: Download Kubernetes GPG key - shell: | - curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' - -- name: Add Kubernetes repo - apt_repository: - repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" - state: present - filename: kubernetes.list - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install Kubernetes packages - apt: - name: "{{ item }}" - state: present - loop: - - kubeadm=1.31.2-1.1 - - kubelet=1.31.2-1.1 - - kubectl=1.31.2-1.1 - -- name: Hold Kubernetes packages - dpkg_selections: - name: "{{ item }}" - selection: hold - loop: - - kubeadm - - kubelet - - kubectl - - containerd.io - -- name: Configure node ip - lineinfile: - path: /etc/default/kubelet - line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} - create: yes - state: present - notify: Restart kubelet - -- name: Add hosts to /etc/hosts - lineinfile: - path: /etc/hosts - line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" - state: present - create: no - loop: "{{ groups['all'] }}" - when: hostvars[item].private_ip is defined - -- name: Add apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" - state: present - -- name: Pull Kubernetes images | If you got error check your dns and sanction - command: - cmd: kubeadm config images pull -""") - -with open(os.path.join(k8s_tasks_dir, "main.yml"), "w") as k8s_tasks_main_file: - k8s_tasks_main_file.write("""--- -- name: Install kubernetes packages - include_tasks: k8s.yml -""") - -# Create init_k8s files -init_k8s_defaults_dir = os.path.join(init_k8s_dir, "defaults") -init_k8s_files_dir = os.path.join(init_k8s_dir, "files") -init_k8s_handlers_dir = os.path.join(init_k8s_dir, "handlers") -init_k8s_tasks_dir = os.path.join(init_k8s_dir, "tasks") 
-init_k8s_templates_dir = os.path.join(init_k8s_dir, "templates") -init_k8s_vars_dir = os.path.join(init_k8s_dir, "vars") - -os.makedirs(init_k8s_defaults_dir, exist_ok=True) -os.makedirs(init_k8s_files_dir, exist_ok=True) -os.makedirs(init_k8s_handlers_dir, exist_ok=True) -os.makedirs(init_k8s_tasks_dir, exist_ok=True) -os.makedirs(init_k8s_templates_dir, exist_ok=True) -os.makedirs(init_k8s_vars_dir, exist_ok=True) - -with open(os.path.join(init_k8s_defaults_dir, "main.yml"), "w") as init_k8s_defaults_file: - init_k8s_defaults_file.write("") - -with open(os.path.join(init_k8s_files_dir, "sample.sh"), "w") as init_k8s_files_file: - init_k8s_files_file.write("") - -with open(os.path.join(init_k8s_handlers_dir, "main.yml"), "w") as init_k8s_handlers_file: - init_k8s_handlers_file.write("") - -with open(os.path.join(init_k8s_tasks_dir, "cni.yml"), "w") as init_k8s_tasks_cni_file: - init_k8s_tasks_cni_file.write("""- block: - - name: Check if Calico CRDs exist - command: kubectl get crd felixconfigurations.crd.projectcalico.org - register: calico_crd_check - ignore_errors: true - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_operator_url }} - retries: 3 - delay: 3 - - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_crd_url }} - retries: 3 - delay: 3 - delegate_to: "{{ groups['k8s_masters'][0] }}" - when: calico_crd_check.rc != 0 - run_once: true -""") - -with open(os.path.join(init_k8s_tasks_dir, "initk8s.yml"), "w") as init_k8s_tasks_initk8s_file: - init_k8s_tasks_initk8s_file.write("""- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Init cluster | Copy kubeadmcnf.yaml - template: - src: kubeadmcnf.yml.j2 - dest: /root/kubeadmcnf.yaml - - - name: Init cluster | Initiate cluster on node groups['kube_master'][0] - shell: kubeadm init --config=/root/kubeadmcnf.yaml - register: kubeadm_init - # Retry is because upload config sometimes fails - until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - notify: Restart kubelet - - when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 - delegate_to: localhost - -- name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 - delegate_to: localhost - # when: use_iran == "true" - -- name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." 
-""") - -with open(os.path.join(init_k8s_tasks_dir, "main.yml"), "w") as init_k8s_tasks_main_file: - init_k8s_tasks_main_file.write("""--- -# tasks file for init_k8s - -- name: Initialize kubernetes cluster - include_tasks: initk8s.yml - -- name: Initialize Calico CNI - include_tasks: cni.yml -""") - -# Create join_master files -join_master_defaults_dir = os.path.join(join_master_dir, "defaults") -join_master_files_dir = os.path.join(join_master_dir, "files") -join_master_handlers_dir = os.path.join(join_master_dir, "handlers") -join_master_tasks_dir = os.path.join(join_master_dir, "tasks") -join_master_templates_dir = os.path.join(join_master_dir, "templates") -join_master_vars_dir = os.path.join(join_master_dir, "vars") - -os.makedirs(join_master_defaults_dir, exist_ok=True) -os.makedirs(join_master_files_dir, exist_ok=True) -os.makedirs(join_master_handlers_dir, exist_ok=True) -os.makedirs(join_master_tasks_dir, exist_ok=True) -os.makedirs(join_master_templates_dir, exist_ok=True) -os.makedirs(join_master_vars_dir, exist_ok=True) - -with open(os.path.join(join_master_defaults_dir, "main.yml"), "w") as join_master_defaults_file: - join_master_defaults_file.write("") - -with open(os.path.join(join_master_files_dir, "join-command"), "w") as join_master_files_file: - join_master_files_file.write("") - -with open(os.path.join(join_master_handlers_dir, "main.yml"), "w") as join_master_handlers_file: - join_master_handlers_file.write("") - -with open(os.path.join(join_master_tasks_dir, "join_master.yml"), "w") as join_master_tasks_join_master_file: - join_master_tasks_join_master_file.write("""- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - -- block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{{ join_command.stdout_lines[0] }}" - - - name: Copy join command to local file - become: false - local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" - - - name: copy kubeadmcnf.yaml - template: - src: kubeadmcnf-join.yml.j2 - dest: /root/kubeadm-config.yaml - - when: - - inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Copy the join command to server location - copy: - src: roles/join_master/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- block: - - name: get certificate key - shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml - register: kubeadm_cert_key - - - name: Print certificate key - debug: - msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" - - - name: register the cert key - set_fact: - control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" - - when: - - inventory_hostname in groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - run_once: false - delegate_facts: true - -- name: Join | Join control-plane to cluster - command: "sh /root/join-command.sh --control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- block: - - name: 
Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- name: remove apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" - state: absent - -- name: Add apiserver_url to point to the masters - lineinfile: - dest: /etc/hosts - line: "{{ private_ip }} {{ apiserver_url }}" - state: present - when: - - inventory_hostname in groups['k8s_masters'] -""") - -with open(os.path.join(join_master_tasks_dir, "main.yml"), "w") as join_master_tasks_main_file: - join_master_tasks_main_file.write("""--- -# tasks file for join_master - -- name: Join master(s) node to cluster - include_tasks: join_master.yml -""") - -# Create join_worker files -join_worker_defaults_dir = os.path.join(join_worker_dir, "defaults") -join_worker_files_dir = os.path.join(join_worker_dir, "files") -join_worker_handlers_dir = os.path.join(join_worker_dir, "handlers") -join_worker_tasks_dir = os.path.join(join_worker_dir, "tasks") -join_worker_templates_dir = os.path.join(join_worker_dir, "templates") -join_worker_vars_dir = os.path.join(join_worker_dir, "vars") - -os.makedirs(join_worker_defaults_dir, exist_ok=True) -os.makedirs(join_worker_files_dir, exist_ok=True) -os.makedirs(join_worker_handlers_dir, exist_ok=True) -os.makedirs(join_worker_tasks_dir, exist_ok=True) -os.makedirs(join_worker_templates_dir, exist_ok=True) -os.makedirs(join_worker_vars_dir, exist_ok=True) - -with open(os.path.join(join_worker_defaults_dir, "main.yml"), "w") as join_worker_defaults_file: - join_worker_defaults_file.write("") - -with open(os.path.join(join_worker_files_dir, "join-command"), "w") as join_worker_files_file: - join_worker_files_file.write("") - -with open(os.path.join(join_worker_handlers_dir, "main.yml"), "w") as join_worker_handlers_file: - join_worker_handlers_file.write("") - -with open(os.path.join(join_worker_tasks_dir, "join_worker.yml"), "w") as join_worker_tasks_join_worker_file: - join_worker_tasks_join_worker_file.write("""- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - -- block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{{ join_command.stdout_lines[0] }}" - - - name: Copy join command to local file - become: false - local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" - - when: - - inventory_hostname not in groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Copy the join command to server location - copy: - src: roles/join_worker/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- name: Join | Join worker nodes to the cluster - command: sh /root/join-command.sh - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists -""") - -with open(os.path.join(join_worker_tasks_dir, 
"main.yml"), "w") as join_worker_tasks_main_file: - join_worker_tasks_main_file.write("""--- -# tasks file for join_worker - -- name: Join worker(s) node to cluster - include_tasks: join_worker.yml -""") \ No newline at end of file +with open(os.path.join(project_name, "hosts"), "w") as hosts_file: + hosts_file.write("[docker_nodes]\n") + hosts_file.write("www.example.com\n") + +# Create docker_playbook.yml +with open(os.path.join(project_name, "docker_playbook.yml"), "w") as playbook: + playbook.write("- hosts: all\n") + playbook.write(" roles:\n") + playbook.write(" - install_docker\n") + +# Create install_docker/tasks/main.yml +with open(os.path.join(project_name, "roles", "install_docker", "tasks", "main.yml"), "w") as tasks_file: + tasks_file.write("---\n") + tasks_file.write("- name: Install prerequisite packages\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" loop: \"{{ prerequisite_packages }}\"\n") + tasks_file.write("- name: Create directory for Docker keyrings\n") + tasks_file.write(" file:\n") + tasks_file.write(" path: /etc/apt/keyrings\n") + tasks_file.write(" state: directory\n") + tasks_file.write(" mode: '0755'\n") + tasks_file.write("- name: Download Docker's official GPG key\n") + tasks_file.write(" get_url:\n") + tasks_file.write(" url: https://download.docker.com/linux/ubuntu/gpg\n") + tasks_file.write(" dest: /etc/apt/keyrings/docker.asc\n") + tasks_file.write(" mode: '0644'\n") + tasks_file.write("- name: Add Docker repository to apt sources\n") + tasks_file.write(" copy:\n") + tasks_file.write(" content: |\n") + tasks_file.write(" deb [arch={{ ansible_architecture }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable\n") + tasks_file.write(" dest: /etc/apt/sources.list.d/docker.list\n") + tasks_file.write("- name: Update apt cache after adding Docker repo\n") + tasks_file.write(" apt:\n") + tasks_file.write(" update_cache: yes\n") + tasks_file.write("- name: Install Docker packages\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" loop: \"{{ docker_packages }}\"\n") + tasks_file.write("- name: Ensure Docker and containerd services are started and enabled\n") + tasks_file.write(" service:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: started\n") + tasks_file.write(" enabled: yes\n") + tasks_file.write(" loop: \"{{ docker_services }}\"\n") + +# Create install_docker/vars/main.yml +with open(os.path.join(project_name, "roles", "install_docker", "vars", "main.yml"), "w") as vars_file: + vars_file.write("prerequisite_packages:\n") + vars_file.write(" - ca-certificates\n") + vars_file.write(" - curl\n\n") + vars_file.write("docker_services:\n") + vars_file.write(" - docker\n") + vars_file.write(" - containerd\n\n") + vars_file.write("docker_packages:\n") + vars_file.write(" - docker-ce\n") + vars_file.write(" - docker-ce-cli\n") + vars_file.write(" - containerd.io\n") + vars_file.write(" - docker-buildx-plugin\n") + vars_file.write(" - docker-compose-plugin\n") \ No newline at end of file diff --git a/app/media/MyAnsible/group_vars/all b/app/media/MyAnsible/group_vars/all index 03bf2832..eb422e93 100644 --- a/app/media/MyAnsible/group_vars/all +++ b/app/media/MyAnsible/group_vars/all @@ -24,7 +24,7 @@ docker_apt_repo: "https://download.docker.com/linux/ubuntu" kubernetes_gpg_keyring_path: 
"/etc/apt/keyrings/kubernetes-apt-keyring.gpg" kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" -k8s_version: "1.31.2" # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases +k8s_version: 1.31 # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases # CRI cri_socket: unix:///var/run/containerd/containerd.sock @@ -35,3 +35,4 @@ ansible_port: 22 ansible_python_interpreter: "/usr/bin/python3" domain: "devopsgpt.com" apiserver_url: "devopsgpt.com" + \ No newline at end of file diff --git a/app/media/MyAnsible/hosts b/app/media/MyAnsible/hosts index 79eace5b..dec5110a 100644 --- a/app/media/MyAnsible/hosts +++ b/app/media/MyAnsible/hosts @@ -10,4 +10,4 @@ string string [k8s_workers] -string +string \ No newline at end of file diff --git a/app/media/MyAnsible/kubernetes_playbook.yml b/app/media/MyAnsible/kubernetes_playbook.yml index ea5f7985..d674b26f 100644 --- a/app/media/MyAnsible/kubernetes_playbook.yml +++ b/app/media/MyAnsible/kubernetes_playbook.yml @@ -1,3 +1,4 @@ + - hosts: all roles: - role: preinstall @@ -36,3 +37,5 @@ gather_facts: yes any_errors_fatal: true tags: [join_worker] + + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml index 516dbff3..c12926c6 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml @@ -18,3 +18,5 @@ delegate_to: "{{ groups['k8s_masters'][0] }}" when: calico_crd_check.rc != 0 run_once: true + + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml index a1836485..3d616552 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml @@ -62,3 +62,4 @@ - name: Example Task After Reboot debug: msg: "Server back online and ready for tasks." 
+ \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/main.yml b/app/media/MyAnsible/roles/init_k8s/tasks/main.yml index bb40ddec..10fa230e 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/main.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/main.yml @@ -6,3 +6,4 @@ - name: Initialize Calico CNI include_tasks: cni.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_master/tasks/join_master.yml b/app/media/MyAnsible/roles/join_master/tasks/join_master.yml index f82dbee0..b6855cbf 100644 --- a/app/media/MyAnsible/roles/join_master/tasks/join_master.yml +++ b/app/media/MyAnsible/roles/join_master/tasks/join_master.yml @@ -98,3 +98,4 @@ state: present when: - inventory_hostname in groups['k8s_masters'] + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_master/tasks/main.yml b/app/media/MyAnsible/roles/join_master/tasks/main.yml index 316b5b1d..a5bf581f 100644 --- a/app/media/MyAnsible/roles/join_master/tasks/main.yml +++ b/app/media/MyAnsible/roles/join_master/tasks/main.yml @@ -3,3 +3,4 @@ - name: Join master(s) node to cluster include_tasks: join_master.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml b/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml index b9b94947..899a0522 100644 --- a/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml +++ b/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml @@ -36,3 +36,4 @@ when: - inventory_hostname not in groups['k8s_masters'] - not kubeadm_already_run.stat.exists + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_worker/tasks/main.yml b/app/media/MyAnsible/roles/join_worker/tasks/main.yml index a43175cc..2cf615b3 100644 --- a/app/media/MyAnsible/roles/join_worker/tasks/main.yml +++ b/app/media/MyAnsible/roles/join_worker/tasks/main.yml @@ -3,3 +3,4 @@ - name: Join worker(s) node to cluster include_tasks: join_worker.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/k8s/handlers/main.yml b/app/media/MyAnsible/roles/k8s/handlers/main.yml index de036f51..989212ab 100644 --- a/app/media/MyAnsible/roles/k8s/handlers/main.yml +++ b/app/media/MyAnsible/roles/k8s/handlers/main.yml @@ -10,3 +10,4 @@ service: name: kubelet state: restarted + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/k8s/tasks/k8s.yml b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml index 4620eef3..d91bbf41 100644 --- a/app/media/MyAnsible/roles/k8s/tasks/k8s.yml +++ b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml @@ -193,3 +193,4 @@ - name: Pull Kubernetes images | If you got error check your dns and sanction command: cmd: kubeadm config images pull + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/k8s/tasks/main.yml b/app/media/MyAnsible/roles/k8s/tasks/main.yml index a0ac6054..ddd388cb 100644 --- a/app/media/MyAnsible/roles/k8s/tasks/main.yml +++ b/app/media/MyAnsible/roles/k8s/tasks/main.yml @@ -1,3 +1,4 @@ --- - name: Install kubernetes packages include_tasks: k8s.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/preinstall/tasks/basic.yml b/app/media/MyAnsible/roles/preinstall/tasks/basic.yml index 43fae8cd..46c59409 100644 --- a/app/media/MyAnsible/roles/preinstall/tasks/basic.yml +++ b/app/media/MyAnsible/roles/preinstall/tasks/basic.yml @@ -80,3 +80,4 @@ - name: Fix broken packages apt: state: fixed + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/preinstall/tasks/main.yml 
b/app/media/MyAnsible/roles/preinstall/tasks/main.yml index 56a88e66..38be7807 100644 --- a/app/media/MyAnsible/roles/preinstall/tasks/main.yml +++ b/app/media/MyAnsible/roles/preinstall/tasks/main.yml @@ -1,3 +1,4 @@ --- - name: basic setup include_tasks: basic.yml + \ No newline at end of file diff --git a/app/routes/ansible.py b/app/routes/ansible.py index f1db3387..367f543c 100644 --- a/app/routes/ansible.py +++ b/app/routes/ansible.py @@ -8,43 +8,54 @@ from app.template_generators.ansible.install.main import ansible_install_template import os +import shutil @app.post("/api/ansible-install/nginx/") async def ansible_install_generation_nginx(request:AnsibleInstallNginx) -> Output: + + if os.environ.get("TEST"): return Output(output='output') - generated_prompt = ansible_install_template(request,"nginx") + + dir = 'app/media/MyAnsible' + if os.path.exists(dir): + shutil.rmtree(dir) + + ansible_install_template(request,"nginx") - output = gpt_service(generated_prompt) - edit_directory_generator("ansible_generator",output) - execute_pythonfile("MyAnsible","ansible_generator") + return Output(output='output') @app.post("/api/ansible-install/docker/") async def ansible_install_generation_docker(request:AnsibleInstallDocker) -> Output: + if os.environ.get("TEST"): return Output(output='output') - generated_prompt = ansible_install_template(request,"docker") + + dir = 'app/media/MyAnsible' + if os.path.exists(dir): + shutil.rmtree(dir) + + ansible_install_template(request,"docker") - output = gpt_service(generated_prompt) - edit_directory_generator("ansible_generator",output) - execute_pythonfile("MyAnsible","ansible_generator") return Output(output='output') @app.post("/api/ansible-install/kuber/") async def ansible_install_generation_kuber(request:AnsibleInstallKuber) -> Output: - + + if os.environ.get("TEST"): return Output(output='output') - generated_prompt = ansible_install_template(request,"kuber") - - output = gpt_service(generated_prompt) - edit_directory_generator("ansible_generator",output) - execute_pythonfile("MyAnsible","ansible_generator") + + dir = 'app/media/MyAnsible' + if os.path.exists(dir): + shutil.rmtree(dir) + + ansible_install_template(request,"kuber") add_files_to_folder(files = ['app/media/kuber_configs/resolv.conf.j2'] , folder='app/media/MyAnsible/roles/preinstall/templates/') add_files_to_folder(files = ['app/media/kuber_configs/kubeadmcnf.yml.j2'] , folder='app/media/MyAnsible/roles/init_k8s/templates/') add_files_to_folder(files = ['app/media/kuber_configs/kubeadmcnf-join.yml.j2'] , folder='app/media/MyAnsible/roles/join_master/templates/') diff --git a/app/template_generators/ansible/install/docker.py b/app/template_generators/ansible/install/docker.py index 74e362df..7445b3f3 100644 --- a/app/template_generators/ansible/install/docker.py +++ b/app/template_generators/ansible/install/docker.py @@ -1,3 +1,4 @@ +import os def ansible_docker_install(input): docker_hosts = input.hosts @@ -13,131 +14,91 @@ def ansible_docker_install(input): - prompt = f""" - Generate a Python code to generate an Ansible project (project name is app/media/MyAnsible) - that dynamically provisions Ansible resources ensuring a modular, flexible structure. Only provide - Python code, no explanations or markdown formatting, without ```python entry. 
- The project should be organized as follows: + + - The structure of this project must be as follows: - ``` - ├── ansible.cfg - ├── group_vars - │   |── docker_nodes - │   - ├── hosts - ├── host_vars - ├── docker_playbook.yml - └── roles - └── install_docker - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - └── main.yml - ``` - - The content of ansible.cfg must be as follows: - ``` - [defaults] - host_key_checking=false - ``` - - group_vars directory includes a single file called "docker_nodes" and the content of this file must be as follows: - ``` - ansible_port: {docker_ansible_port} - ansible_user: {docker_ansible_user} - ``` - - there is file called "hosts" which its content must be as follows: - ``` - {docker_inventory} - ``` - - There is an empty directory called "host_vars" with no files included - - There is a file called "docker_playbook.yml" which its content must be as follows: - ``` - - hosts: all - roles: - - install_docker - ``` - - There is a directory called "roles" which a sub-directory called "install_docker" (roles/install_docker) - "install_docker" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (install_docker/tasks): This path has a file called "main.yml" which its content must be as follows: - ``` - --- - - name: Install prerequisite packages - apt: - name: "{docker_items_in_task}" - state: present - loop: "{docker_prerequisite_packages_in_task}"" - - name: Create directory for Docker keyrings - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' - - name: Download Docker's official GPG key - get_url: - url: https://download.docker.com/linux/ubuntu/gpg - dest: /etc/apt/keyrings/docker.asc - mode: '0644' - - name: Add Docker repository to apt sources - copy: - content: | - deb [arch={ansible_architecture_in_task} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {ansible_distribution_release_in_task} stable - dest: /etc/apt/sources.list.d/docker.list - - name: Update apt cache after adding Docker repo - apt: - update_cache: yes - - name: Install Docker packages - apt: - name: "{docker_items_in_task}" - state: present - loop: "{docker_packages_in_task}"" - - name: Ensure Docker and containerd services are started and enabled - service: - name: "{docker_items_in_task}" - state: started - enabled: yes - loop: "{docker_services_in_task}"" - ``` - - (install_docker/vars): This path has a file called "main.yml" which its content must be as follows: - ``` - prerequisite_packages: - - ca-certificates - - curl + project_name = "app/media/MyAnsible" - docker_services: - - docker - - containerd + # Create project directories + os.makedirs(os.path.join(project_name, "group_vars"), exist_ok=True) + os.makedirs(os.path.join(project_name, "host_vars"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "defaults"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "files"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "handlers"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "tasks"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "templates"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "vars"), exist_ok=True) - docker_packages: - - docker-ce - - docker-ce-cli 
- - containerd.io - - docker-buildx-plugin - - docker-compose-plugin - ``` + # Create ansible.cfg + with open(os.path.join(project_name, "ansible.cfg"), "w") as ansible_cfg: + ansible_cfg.write("[defaults]\n") + ansible_cfg.write("host_key_checking=false\n") - finally just give me a python code without any note that can generate a project folder with the - given schema without ```python entry. and we dont need any base directory in the python code. - the final ansible template must work very well without any error! + # Create group_vars/docker_nodes + with open(os.path.join(project_name, "group_vars", "docker_nodes"), "w") as docker_nodes: + docker_nodes.write(f"ansible_port: {docker_ansible_port}\n") + docker_nodes.write(f"ansible_user: {docker_ansible_user}\n") - the python code you give me, must have structure like that: + # Create hosts + with open(os.path.join(project_name, "hosts"), "w") as hosts_file: + hosts_file.write(f"{docker_inventory}\n") + - import os - project_name = "app/media/MyAnsible" - foo_dir = os.path.join(project_name, "bar") - x_dir = os.path.join(modules_dir, "y") + # Create docker_playbook.yml + with open(os.path.join(project_name, "docker_playbook.yml"), "w") as playbook: + playbook.write("- hosts: all\n") + playbook.write(" roles:\n") + playbook.write(" - install_docker\n") - # Create project directories - os.makedirs(ansible_dir, exist_ok=True) + # Create install_docker/tasks/main.yml + with open(os.path.join(project_name, "roles", "install_docker", "tasks", "main.yml"), "w") as tasks_file: + tasks_file.write("---\n") + tasks_file.write("- name: Install prerequisite packages\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" loop: \"{{ prerequisite_packages }}\"\n") + tasks_file.write("- name: Create directory for Docker keyrings\n") + tasks_file.write(" file:\n") + tasks_file.write(" path: /etc/apt/keyrings\n") + tasks_file.write(" state: directory\n") + tasks_file.write(" mode: '0755'\n") + tasks_file.write("- name: Download Docker's official GPG key\n") + tasks_file.write(" get_url:\n") + tasks_file.write(" url: https://download.docker.com/linux/ubuntu/gpg\n") + tasks_file.write(" dest: /etc/apt/keyrings/docker.asc\n") + tasks_file.write(" mode: '0644'\n") + tasks_file.write("- name: Add Docker repository to apt sources\n") + tasks_file.write(" copy:\n") + tasks_file.write(" content: |\n") + tasks_file.write(" deb [arch={{ ansible_architecture }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable\n") + tasks_file.write(" dest: /etc/apt/sources.list.d/docker.list\n") + tasks_file.write("- name: Update apt cache after adding Docker repo\n") + tasks_file.write(" apt:\n") + tasks_file.write(" update_cache: yes\n") + tasks_file.write("- name: Install Docker packages\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" loop: \"{{ docker_packages }}\"\n") + tasks_file.write("- name: Ensure Docker and containerd services are started and enabled\n") + tasks_file.write(" service:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: started\n") + tasks_file.write(" enabled: yes\n") + tasks_file.write(" loop: \"{{ docker_services }}\"\n") - # Create main.tf - with open(os.path.join(project_name, "main.tf"), "w") as main_file: - # any thing you need - """ - return prompt + # Create 
install_docker/vars/main.yml + with open(os.path.join(project_name, "roles", "install_docker", "vars", "main.yml"), "w") as vars_file: + vars_file.write("prerequisite_packages:\n") + vars_file.write(" - ca-certificates\n") + vars_file.write(" - curl\n\n") + vars_file.write("docker_services:\n") + vars_file.write(" - docker\n") + vars_file.write(" - containerd\n\n") + vars_file.write("docker_packages:\n") + vars_file.write(" - docker-ce\n") + vars_file.write(" - docker-ce-cli\n") + vars_file.write(" - containerd.io\n") + vars_file.write(" - docker-buildx-plugin\n") + vars_file.write(" - docker-compose-plugin\n") diff --git a/app/template_generators/ansible/install/kuber.py b/app/template_generators/ansible/install/kuber.py index 5c01e4d0..9bcf8362 100644 --- a/app/template_generators/ansible/install/kuber.py +++ b/app/template_generators/ansible/install/kuber.py @@ -1,4 +1,4 @@ - +import os def ansible_kuber_install(input): kubernetes_ansible_port = input.ansible_port @@ -14,826 +14,809 @@ def ansible_kuber_install(input): } kubernetes_inventory = "\n\n".join(f"{section}\n" + "\n".join(entries) for section, entries in sections.items()) - inventory_hostname = "{{ inventory_hostname }}" - item_in_task = "{{ item }}" - ufw_in_task = "'ufw'" - docker_gpg_key_path_in_task = "{{ docker_gpg_key_path }}" - docker_gpg_key_url_in_task = "{{ docker_gpg_key_url }}" - architecture_stdout_in_task = "{{ architecture.stdout }}" - docker_apt_repo_in_task = "{{ docker_apt_repo }}" - distribution_codename_stdout_in_task = "{{ distribution_codename.stdout }}" - kubernetes_gpg_keyring_path_in_task = "{{ kubernetes_gpg_keyring_path }}" - kubernetes_gpg_key_url_in_task = "{{ kubernetes_gpg_key_url }}" - kubernetes_apt_repo_in_task = "{{ kubernetes_apt_repo }}" - private_ip_in_task = "{{ private_ip }}" - hostvars_private_ip_in_task = "{{ hostvars[item].private_ip }}" - domain_in_task = "{{ domain }}" - groups_all_in_task = "{{ groups['all'] }}" - hostvars_groups_k8s_masters_private_ip_in_task = "{{ hostvars[groups['k8s_masters'][0]].private_ip }}" - apiserver_url_in_task = "{{ apiserver_url }}" - groups_k8s_masters_in_task = "{{ groups['k8s_masters'][0] }}" - calico_operator_url_in_task = "{{ calico_operator_url }}" - calico_crd_url_in_task = "{{ calico_crd_url }}" - join_command_stdout_lines_in_task = "{{ join_command.stdout_lines[0] }}" - kubeadm_cert_key_stdout_lines_in_task = "{{ kubeadm_cert_key.stdout_lines[2] }}" - hostvars_k8s_masters_control_plane_certkey_in_task = "{{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }}" - cri_socket_in_task = "{{ cri_socket }}" - - - - prompt = f""" - Generate a Python code to generate an Ansible project (project name is app/media/MyAnsible) - that dynamically provisions Ansible resources ensuring a modular, flexible structure. Only provide - Python code, no explanations or markdown formatting, without ```python entry. 
- The project should be organized as follows: - - The structure of this project must be as follows: - ``` - ├── ansible.cfg - ├── group_vars - │   |── all - │   - ├── hosts - ├── host_vars - ├── kubernetes_playbook.yml - └── roles - └── preinstall - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── basic.yml - │   └── main.yml - ├── templates - │   └── resolv.conf.j2 - └── vars - | └── main.yml - k8s - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── k8s.yml - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - | └── main.yml - init_k8s - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── cni.yml - │   └── initk8s.yml - │   └── main.yml - ├── templates - │   └── kubeadmcnf.yml.j2 - └── vars - | └── main.yml - join_master - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── join_master.yml - │   └── main.yml - ├── templates - │   └── kubeadmcnf-join.yml.j2 - └── vars - | └── main.yml - join_worker - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── join_worker.yml - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - └── main.yml - ``` - - The content of ansible.cfg must be as follows: - ``` - [defaults] - host_key_checking=false - ``` - - group_vars directory includes a single file called "all" and the content of this file must be as follows: - ``` - # General - install_ansible_modules: "true" - disable_transparent_huge_pages: "true" - - setup_interface: "false" - - # Network Calico see here for more details https://github.com/projectcalico/calico/releases - calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" - calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" - pod_network_cidr: "192.168.0.0/16" - - # DNS - resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online - - # Sanction shekan - use_iran: "true" # change it to "false" if you are outside of iran - - # Docker - docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" - docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" - docker_apt_repo: "https://download.docker.com/linux/ubuntu" - - # Kubernetes - kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" - kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v{k8s_version}/deb/Release.key" - kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v{k8s_version}/deb/" - k8s_version: "{k8s_version}.2" # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases - - # CRI - cri_socket: unix:///var/run/containerd/containerd.sock - - # Ansible Connection - - ansible_user: {kubernetes_ansible_user} - ansible_port: {kubernetes_ansible_port} - ansible_python_interpreter: "/usr/bin/python3" - domain: "devopsgpt.com" - apiserver_url: "devopsgpt.com" - ``` - - there is file called "hosts" which its content must be as follows: - ``` - {kubernetes_inventory} - ``` - - There is an empty directory called "host_vars" with no files included - - There is a file called "kubernetes_playbook.yml" which its content must be as follows: - ``` - - hosts: all - roles: - - role: preinstall - gather_facts: yes - any_errors_fatal: true 
- tags: [preinstall] - - - hosts: k8s - roles: - - role: k8s - gather_facts: yes - any_errors_fatal: true - tags: [k8s] - - - hosts: k8s - roles: - - role: init_k8s - gather_facts: yes - any_errors_fatal: true - tags: [init_k8s] - - - hosts: k8s_masters - roles: - - role: preinstall - - role: k8s - - role: join_master - gather_facts: yes - any_errors_fatal: true - tags: [join_master] - - - hosts: k8s_workers - roles: - - role: preinstall - - role: k8s - - role: join_worker - gather_facts: yes - any_errors_fatal: true - tags: [join_worker] - ``` - - There is a directory called "roles" which a sub-directory called "preinstall" (roles/preinstall): - "preinstall" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (preinstall/tasks): This path has two files called "basic.yml" and "main.yml". - - 1. Create "preinstall/tasks/basic.yml" and it must be as follows:" - ``` - - name: Set timezone to UTC - timezone: - name: Etc/UTC - - - name: Set hostname - command: hostnamectl set-hostname {inventory_hostname} - - - name: Remove symlink resolve.conf - file: - path: "/etc/resolv.conf" - state: absent - ignore_errors: true - when: use_iran == "true" - - - name: Configure resolv.conf - template: - src: "resolv.conf.j2" - dest: "/etc/resolv.conf" - mode: "0644" - when: use_iran == "true" - - - name: Add hostname - lineinfile: - path: /etc/hosts - regexp: '^127\.0\.0\.1' - line: "127.0.0.1 {inventory_hostname} localhost" - owner: root - group: root - mode: 0644 - - - name: Install necessary tools - apt: - state: latest - update_cache: true - name: - - vim - - sudo - - wget - - curl - - telnet - - nload - - s3cmd - - cron - - ipset - - lvm2 - - python3 - - python3-setuptools - - python3-pip - - python3-apt - - intel-microcode - - htop - - tcpdump - - net-tools - - screen - - tmux - - byobu - - iftop - - bmon - - iperf - - sysstat - - ethtool - - plocate - - thin-provisioning-tools - - conntrack - - stress - - cpufrequtils - - rsync - - xz-utils - - build-essential - - apt-transport-https - - ca-certificates - - software-properties-common - - gnupg-agent - - iptables-persistent - - open-iscsi - - nfs-common - - tzdata - - tree - - - name: Fix broken packages - apt: - state: fixed - ``` - - 2. Create preinstall/tasks/main.yml and it must be as follows:" - ``` - --- - - name: basic setup - include_tasks: basic.yml - ``` - - There is a directory called "roles" which a sub-directory called "k8s" (roles/k8s): - "k8s" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (k8s/tasks): This path has two files called "k8s.yml" and "main.yml". - - 1. 
Create k8s/tasks/k8s.yml and it must be as follows:" - ``` - - name: Disable SWAP since kubernetes can't work with swap enabled - shell: | - swapoff -a - - - name: Disable SWAP in fstab since kubernetes can't work with swap enabled - replace: - path: /etc/fstab - regexp: '^([^#].*?\sswap\s+sw\s+.*)$' - replace: '# \\1' - - - name: Check if ufw is installed - package_facts: - manager: "auto" - - - name: Disable ufw # just in Ubuntu - ufw: - state: disabled - when: "{ufw_in_task} in ansible_facts.packages" - - - name: Ensure kernel modules for containerd are enabled - lineinfile: - path: /etc/modules-load.d/containerd.conf - line: "{item_in_task}" - create: yes - state: present - loop: - - overlay - - br_netfilter - - - name: Load kernel modules - command: - cmd: "modprobe {item_in_task}" - loop: - - overlay - - br_netfilter - - - name: Ensure sysctl settings for Kubernetes are present - blockinfile: - path: /etc/sysctl.d/kubernetes.conf - block: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - net.ipv4.ip_forward = 1 - create: yes - marker: "# {{mark}} ANSIBLE MANAGED BLOCK" - owner: root - group: root - mode: '0644' - - - name: Reload sysctl settings - command: - cmd: sysctl --system - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install required packages - apt: - pkg: - - ca-certificates - - curl - - gnupg - - lsb-release - - gpg - - state: present - update_cache: yes - - - name: Ensure the /etc/apt/keyrings directory exists - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' # Adjust the permissions as necessary - owner: root # Set the owner, if required - group: root - - - name: Remove existing Docker GPG key if it exists - file: - path: '{docker_gpg_key_path_in_task}' - state: absent - - - name: Download Docker GPG key - shell: | - curl -fsSL {docker_gpg_key_url_in_task} | gpg --dearmor -o {docker_gpg_key_path_in_task} - - - name: Determine the architecture - command: dpkg --print-architecture - register: architecture - - - name: Determine the distribution codename - command: lsb_release -cs - register: distribution_codename - - - name: Add Docker APT repository - lineinfile: - path: /etc/apt/sources.list.d/docker.list - create: yes - line: "deb [arch={architecture_stdout_in_task} signed-by={docker_gpg_key_path_in_task}] {docker_apt_repo_in_task} {distribution_codename_stdout_in_task} stable" - state: present - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install required packages (containerd) - apt: - pkg: - - containerd.io - state: present - - - name: Generate default containerd configuration - shell: - cmd: containerd config default > /etc/containerd/config.toml - - - name: Replace SystemdCgroup from false to true in containerd config - replace: - path: /etc/containerd/config.toml - regexp: 'SystemdCgroup = false' - replace: 'SystemdCgroup = true' - - - name: Restart containerd service - systemd: - name: containerd - state: restarted - daemon_reload: yes - - - name: Enable containerd service - systemd: - name: containerd - enabled: yes - - name: Delete the existing Kubernetes APT keyring file if it exists - file: - path: '{kubernetes_gpg_keyring_path_in_task}' - state: absent - - - name: Download Kubernetes GPG key - shell: | - curl -fsSL '{kubernetes_gpg_key_url_in_task}' | gpg --dearmor -o '{kubernetes_gpg_keyring_path_in_task}' - - - name: Add Kubernetes repo - apt_repository: - repo: "deb [signed-by={kubernetes_gpg_keyring_path_in_task}] {kubernetes_apt_repo_in_task} /" - state: 
present - filename: kubernetes.list - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install Kubernetes packages - apt: - name: "{item_in_task}" - state: present - loop: - - kubeadm={k8s_version}.2-1.1 - - kubelet={k8s_version}.2-1.1 - - kubectl={k8s_version}.2-1.1 - - - name: Hold Kubernetes packages - dpkg_selections: - name: "{item_in_task}" - selection: hold - loop: - - kubeadm - - kubelet - - kubectl - - containerd.io - - - name: Configure node ip - lineinfile: - path: /etc/default/kubelet - line: KUBELET_EXTRA_ARGS=--node-ip={private_ip_in_task} - create: yes - state: present - notify: Restart kubelet - - - name: Add hosts to /etc/hosts - lineinfile: - path: /etc/hosts - line: "{hostvars_private_ip_in_task} {item_in_task} {item_in_task}.{domain_in_task}" - state: present - create: no - loop: "{groups_all_in_task}" - when: hostvars[item].private_ip is defined - - - name: Add apiserver_url to point to the masters temporary" - lineinfile: - dest: /etc/hosts - line: "{hostvars_groups_k8s_masters_private_ip_in_task} {apiserver_url_in_task}" - state: present - - - name: Pull Kubernetes images | If you got error check your dns and sanction - command: - cmd: kubeadm config images pull - ``` - 2. Create k8s/tasks/main.yml and it must be as follows:" - ``` - --- - - name: Install kubernetes packages - include_tasks: k8s.yml - ``` - - (k8s/handlers): This path has a file called "main.yml". - - 3. Create k8s/handlers/main.yml and it must be as follows:" - ``` - --- - # handlers file for k8s - - - name: Remove temporary GPG key file - file: - path: "/tmp/docker.list" - state: absent - - - name: Restart kubelet - service: - name: kubelet - state: restarted - ``` - - There is a directory called "roles" which a sub-directory called "init_k8s" (roles/init_k8s): - "init_k8s" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (init_k8s/tasks): This path has three files called "cni.yml", "initk8s.yml" and "main.yml". - - 1. Create init_k8s/tasks/cni.yml and it must be as follows:" - ``` - - block: - - name: Check if Calico CRDs exist - command: kubectl get crd felixconfigurations.crd.projectcalico.org - register: calico_crd_check - ignore_errors: true - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Apply CNI plugin (Calico) - command: kubectl create -f {calico_operator_url_in_task} - retries: 3 - delay: 3 - - - name: Apply CNI plugin (Calico) - command: kubectl create -f {calico_crd_url_in_task} - retries: 3 - delay: 3 - delegate_to: "{groups_k8s_masters_in_task}" - when: calico_crd_check.rc != 0 - run_once: true - ``` - 2. 
Create init_k8s/tasks/initk8s.yml and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Init cluster | Copy kubeadmcnf.yaml - template: - src: kubeadmcnf.yml.j2 - dest: /root/kubeadmcnf.yaml - - - name: Init cluster | Initiate cluster on node groups['kube_master'][0] - shell: kubeadm init --config=/root/kubeadmcnf.yaml - register: kubeadm_init - # Retry is because upload config sometimes fails - until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - notify: Restart kubelet - - when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 - delegate_to: localhost - - - name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes - delegate_to: "{groups_k8s_masters_in_task}" - - - name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 - delegate_to: localhost - # when: use_iran == "true" - - - name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." - ``` - 3. Create init_k8s/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for init_k8s - - - name: Initialize kubernetes cluster - include_tasks: initk8s.yml - - - name: Initialize Calico CNI - include_tasks: cni.yml - ``` - - There is a directory called "roles" which a sub-directory called "join_master" (roles/join_master): - "join_master" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (join_master/tasks): This path has two files called "join_master.yml" and "main.yml". - - 1. 
Create "join_master/tasks/join_master.yml" and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{join_command_stdout_lines_in_task}" - - - name: Copy join command to local file - become: false - local_action: copy content="{join_command_stdout_lines_in_task} $@" dest="roles/join_master/files/join-command" - - - name: copy kubeadmcnf.yaml - template: - src: kubeadmcnf-join.yml.j2 - dest: /root/kubeadm-config.yaml - - when: - - inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Copy the join command to server location - copy: - src: roles/join_master/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - block: - - name: get certificate key - shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml - register: kubeadm_cert_key - - - name: Print certificate key - debug: - msg: "{kubeadm_cert_key_stdout_lines_in_task}" - - - name: register the cert key - set_fact: - control_plane_certkey: "{kubeadm_cert_key_stdout_lines_in_task}" - - when: - - inventory_hostname in groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - run_once: false - delegate_facts: true - - - name: Join | Join control-plane to cluster - command: "sh /root/join-command.sh --control-plane --certificate-key={hostvars_k8s_masters_control_plane_certkey_in_task} --cri-socket={cri_socket_in_task}" - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - name: remove apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{hostvars_groups_k8s_masters_private_ip_in_task} {apiserver_url_in_task}" - state: absent - - - - name: Add apiserver_url to point to the masters" - lineinfile: - dest: /etc/hosts - line: "{private_ip_in_task} {apiserver_url_in_task}" - state: present - when: - - inventory_hostname in groups['k8s_masters'] - ``` - 2. Create join_master/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for join_master - - - name: Join master(s) node to cluster - include_tasks: join_master.yml - - ``` - - There is a directory called "roles" which a sub-directory called "join_worker" (roles/join_worker): - "join_worker" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (join_worker/tasks): This path has two files called "join_worker.yml" and "main.yml". - - 1. 
Create "join_worker/tasks/join_worker.yml" and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{join_command_stdout_lines_in_task}" - - - name: Copy join command to local file - become: false - local_action: copy content="{join_command_stdout_lines_in_task} $@" dest="roles/join_worker/files/join-command" - - when: - - inventory_hostname not in groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Copy the join command to server location - copy: - src: roles/join_worker/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - name: Join | Join worker nodes to the cluster - command: sh /root/join-command.sh - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - ``` - 2. Create join_worker/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for join_worker - - - name: Join worker(s) node to cluster - include_tasks: join_worker.yml - ``` - finally just give me a python code without any note that can generate a project folder with the - given schema without ```python entry. and we dont need any base directory in the python code. - the final ansible template must work very well without any error! - - the python code you give me, must have structure like that: - - import os - project_name = "app/media/MyAnsible" - foo_dir = os.path.join(project_name, "bar") - x_dir = os.path.join(modules_dir, "y") - - # Create project directories - os.makedirs(ansible_dir, exist_ok=True) - - # Create main.tf - with open(os.path.join(project_name, "main.tf"), "w") as main_file: - # any thing you need - """ - return prompt + + project_name = "app/media/MyAnsible" + ansible_dir = project_name + group_vars_dir = os.path.join(ansible_dir, "group_vars") + host_vars_dir = os.path.join(ansible_dir, "host_vars") + roles_dir = os.path.join(ansible_dir, "roles") + + # Create project directories + os.makedirs(group_vars_dir, exist_ok=True) + os.makedirs(host_vars_dir, exist_ok=True) + os.makedirs(roles_dir, exist_ok=True) + + preinstall_dir = os.path.join(roles_dir, "preinstall") + k8s_dir = os.path.join(roles_dir, "k8s") + init_k8s_dir = os.path.join(roles_dir, "init_k8s") + join_master_dir = os.path.join(roles_dir, "join_master") + join_worker_dir = os.path.join(roles_dir, "join_worker") + + os.makedirs(preinstall_dir, exist_ok=True) + os.makedirs(k8s_dir, exist_ok=True) + os.makedirs(init_k8s_dir, exist_ok=True) + os.makedirs(join_master_dir, exist_ok=True) + os.makedirs(join_worker_dir, exist_ok=True) + + # Create ansible.cfg + with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as ansible_cfg_file: + ansible_cfg_file.write("[defaults]\nhost_key_checking=false\n") + + # Create group_vars/all + with open(os.path.join(group_vars_dir, "all"), "w") as group_vars_file: + group_vars_file.write(f"""# General +install_ansible_modules: "true" +disable_transparent_huge_pages: "true" + +setup_interface: "false" + +# Network Calico see here for more details https://github.com/projectcalico/calico/releases +calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" 
+calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" +pod_network_cidr: "192.168.0.0/16" + +# DNS +resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online + +# Sanction shekan +use_iran: "true" # change it to "false" if you are outside of iran + +# Docker +docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" +docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" +docker_apt_repo: "https://download.docker.com/linux/ubuntu" + +# Kubernetes +kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" +kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" +kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" +k8s_version: {k8s_version} # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases + +# CRI +cri_socket: unix:///var/run/containerd/containerd.sock + +# Ansible Connection +ansible_user: {kubernetes_ansible_user} +ansible_port: {kubernetes_ansible_port} +ansible_python_interpreter: "/usr/bin/python3" +domain: "devopsgpt.com" +apiserver_url: "devopsgpt.com" + """) + + # Create hosts + with open(os.path.join(ansible_dir, "hosts"), "w") as hosts_file: + hosts_file.write(f"""{kubernetes_inventory}""") + + # Create kubernetes_playbook.yml + with open(os.path.join(ansible_dir, "kubernetes_playbook.yml"), "w") as playbook_file: + playbook_file.write(""" +- hosts: all + roles: + - role: preinstall + gather_facts: yes + any_errors_fatal: true + tags: [preinstall] + +- hosts: k8s + roles: + - role: k8s + gather_facts: yes + any_errors_fatal: true + tags: [k8s] + +- hosts: k8s + roles: + - role: init_k8s + gather_facts: yes + any_errors_fatal: true + tags: [init_k8s] + +- hosts: k8s_masters + roles: + - role: preinstall + - role: k8s + - role: join_master + gather_facts: yes + any_errors_fatal: true + tags: [join_master] + +- hosts: k8s_workers + roles: + - role: preinstall + - role: k8s + - role: join_worker + gather_facts: yes + any_errors_fatal: true + tags: [join_worker] + + """) + + # Create preinstall files + preinstall_defaults_dir = os.path.join(preinstall_dir, "defaults") + preinstall_files_dir = os.path.join(preinstall_dir, "files") + preinstall_handlers_dir = os.path.join(preinstall_dir, "handlers") + preinstall_tasks_dir = os.path.join(preinstall_dir, "tasks") + preinstall_templates_dir = os.path.join(preinstall_dir, "templates") + preinstall_vars_dir = os.path.join(preinstall_dir, "vars") + + os.makedirs(preinstall_defaults_dir, exist_ok=True) + os.makedirs(preinstall_files_dir, exist_ok=True) + os.makedirs(preinstall_handlers_dir, exist_ok=True) + os.makedirs(preinstall_tasks_dir, exist_ok=True) + os.makedirs(preinstall_templates_dir, exist_ok=True) + os.makedirs(preinstall_vars_dir, exist_ok=True) + + with open(os.path.join(preinstall_defaults_dir, "main.yml"), "w") as defaults_file: + defaults_file.write("") + + with open(os.path.join(preinstall_files_dir, "sample.sh"), "w") as files_file: + files_file.write("") + + with open(os.path.join(preinstall_handlers_dir, "main.yml"), "w") as handlers_file: + handlers_file.write("") + + with open(os.path.join(preinstall_tasks_dir, "basic.yml"), "w") as basic_tasks_file: + basic_tasks_file.write("""- name: Set timezone to UTC + timezone: + name: Etc/UTC + +- name: Set hostname + command: hostnamectl set-hostname {{ inventory_hostname }} + +- name: Remove symlink resolve.conf + file: + path: "/etc/resolv.conf" + state: absent + ignore_errors: true + when: use_iran 
== "true" + +- name: Configure resolv.conf + template: + src: "resolv.conf.j2" + dest: "/etc/resolv.conf" + mode: "0644" + when: use_iran == "true" + +- name: Add hostname + lineinfile: + path: /etc/hosts + regexp: '^127\\.0\\.0\\.1' + line: "127.0.0.1 {{ inventory_hostname }} localhost" + owner: root + group: root + mode: 0644 + +- name: Install necessary tools + apt: + state: latest + update_cache: true + name: + - vim + - sudo + - wget + - curl + - telnet + - nload + - s3cmd + - cron + - ipset + - lvm2 + - python3 + - python3-setuptools + - python3-pip + - python3-apt + - intel-microcode + - htop + - tcpdump + - net-tools + - screen + - tmux + - byobu + - iftop + - bmon + - iperf + - sysstat + - ethtool + - plocate + - thin-provisioning-tools + - conntrack + - stress + - cpufrequtils + - rsync + - xz-utils + - build-essential + - apt-transport-https + - ca-certificates + - software-properties-common + - gnupg-agent + - iptables-persistent + - open-iscsi + - nfs-common + - tzdata + - tree + +- name: Fix broken packages + apt: + state: fixed + """) + + with open(os.path.join(preinstall_tasks_dir, "main.yml"), "w") as tasks_main_file: + tasks_main_file.write("""--- +- name: basic setup + include_tasks: basic.yml + """) + + # Create k8s files + k8s_defaults_dir = os.path.join(k8s_dir, "defaults") + k8s_files_dir = os.path.join(k8s_dir, "files") + k8s_handlers_dir = os.path.join(k8s_dir, "handlers") + k8s_tasks_dir = os.path.join(k8s_dir, "tasks") + k8s_templates_dir = os.path.join(k8s_dir, "templates") + k8s_vars_dir = os.path.join(k8s_dir, "vars") + + os.makedirs(k8s_defaults_dir, exist_ok=True) + os.makedirs(k8s_files_dir, exist_ok=True) + os.makedirs(k8s_handlers_dir, exist_ok=True) + os.makedirs(k8s_tasks_dir, exist_ok=True) + os.makedirs(k8s_templates_dir, exist_ok=True) + os.makedirs(k8s_vars_dir, exist_ok=True) + + with open(os.path.join(k8s_defaults_dir, "main.yml"), "w") as k8s_defaults_file: + k8s_defaults_file.write("") + + with open(os.path.join(k8s_files_dir, "sample.sh"), "w") as k8s_files_file: + k8s_files_file.write("") + + with open(os.path.join(k8s_handlers_dir, "main.yml"), "w") as k8s_handlers_file: + k8s_handlers_file.write("""--- +# handlers file for k8s + +- name: Remove temporary GPG key file + file: + path: "/tmp/docker.list" + state: absent + +- name: Restart kubelet + service: + name: kubelet + state: restarted + """) + + with open(os.path.join(k8s_tasks_dir, "k8s.yml"), "w") as k8s_tasks_k8s_file: + k8s_tasks_k8s_file.write("""- name: Disable SWAP since kubernetes can't work with swap enabled + shell: | + swapoff -a + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled + replace: + path: /etc/fstab + regexp: '^([^#].*?\\sswap\\s+sw\\s+.*)$' + replace: '# \\1' + +- name: Check if ufw is installed + package_facts: + manager: "auto" + +- name: Disable ufw # just in Ubuntu + ufw: + state: disabled + when: "'ufw' in ansible_facts.packages" + +- name: Ensure kernel modules for containerd are enabled + lineinfile: + path: /etc/modules-load.d/containerd.conf + line: "{{ item }}" + create: yes + state: present + loop: + - overlay + - br_netfilter + +- name: Load kernel modules + command: + cmd: "modprobe {{ item }}" + loop: + - overlay + - br_netfilter + +- name: Ensure sysctl settings for Kubernetes are present + blockinfile: + path: /etc/sysctl.d/kubernetes.conf + block: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + create: yes + marker: "# {mark} ANSIBLE MANAGED BLOCK" + owner: 
root + group: root + mode: '0644' + +- name: Reload sysctl settings + command: + cmd: sysctl --system + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install required packages + apt: + pkg: + - ca-certificates + - curl + - gnupg + - lsb-release + - gpg + state: present + update_cache: yes + +- name: Ensure the /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' # Adjust the permissions as necessary + owner: root # Set the owner, if required + group: root + +- name: Remove existing Docker GPG key if it exists + file: + path: '{{ docker_gpg_key_path }}' + state: absent + +- name: Download Docker GPG key + shell: | + curl -fsSL {{ docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} + +- name: Determine the architecture + command: dpkg --print-architecture + register: architecture + +- name: Determine the distribution codename + command: lsb_release -cs + register: distribution_codename + +- name: Add Docker APT repository + lineinfile: + path: /etc/apt/sources.list.d/docker.list + create: yes + line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" + state: present + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install required packages (containerd) + apt: + pkg: + - containerd.io + state: present + +- name: Generate default containerd configuration + shell: + cmd: containerd config default > /etc/containerd/config.toml + +- name: Replace SystemdCgroup from false to true in containerd config + replace: + path: /etc/containerd/config.toml + regexp: 'SystemdCgroup = false' + replace: 'SystemdCgroup = true' + +- name: Restart containerd service + systemd: + name: containerd + state: restarted + daemon_reload: yes + +- name: Enable containerd service + systemd: + name: containerd + enabled: yes + +- name: Delete the existing Kubernetes APT keyring file if it exists + file: + path: '{{ kubernetes_gpg_keyring_path }}' + state: absent + +- name: Download Kubernetes GPG key + shell: | + curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' + +- name: Add Kubernetes repo + apt_repository: + repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" + state: present + filename: kubernetes.list + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install Kubernetes packages + apt: + name: "{{ item }}" + state: present + loop: + - kubeadm=1.31.2-1.1 + - kubelet=1.31.2-1.1 + - kubectl=1.31.2-1.1 + +- name: Hold Kubernetes packages + dpkg_selections: + name: "{{ item }}" + selection: hold + loop: + - kubeadm + - kubelet + - kubectl + - containerd.io + +- name: Configure node ip + lineinfile: + path: /etc/default/kubelet + line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} + create: yes + state: present + notify: Restart kubelet + +- name: Add hosts to /etc/hosts + lineinfile: + path: /etc/hosts + line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" + state: present + create: no + loop: "{{ groups['all'] }}" + when: hostvars[item].private_ip is defined + +- name: Add apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: present + +- name: Pull Kubernetes images | If you got error check your dns and sanction + command: + cmd: kubeadm config images pull + """) + + with 
open(os.path.join(k8s_tasks_dir, "main.yml"), "w") as k8s_tasks_main_file: + k8s_tasks_main_file.write("""--- +- name: Install kubernetes packages + include_tasks: k8s.yml + """) + + # Create init_k8s files + init_k8s_defaults_dir = os.path.join(init_k8s_dir, "defaults") + init_k8s_files_dir = os.path.join(init_k8s_dir, "files") + init_k8s_handlers_dir = os.path.join(init_k8s_dir, "handlers") + init_k8s_tasks_dir = os.path.join(init_k8s_dir, "tasks") + init_k8s_templates_dir = os.path.join(init_k8s_dir, "templates") + init_k8s_vars_dir = os.path.join(init_k8s_dir, "vars") + + os.makedirs(init_k8s_defaults_dir, exist_ok=True) + os.makedirs(init_k8s_files_dir, exist_ok=True) + os.makedirs(init_k8s_handlers_dir, exist_ok=True) + os.makedirs(init_k8s_tasks_dir, exist_ok=True) + os.makedirs(init_k8s_templates_dir, exist_ok=True) + os.makedirs(init_k8s_vars_dir, exist_ok=True) + + with open(os.path.join(init_k8s_defaults_dir, "main.yml"), "w") as init_k8s_defaults_file: + init_k8s_defaults_file.write("") + + with open(os.path.join(init_k8s_files_dir, "sample.sh"), "w") as init_k8s_files_file: + init_k8s_files_file.write("") + + with open(os.path.join(init_k8s_handlers_dir, "main.yml"), "w") as init_k8s_handlers_file: + init_k8s_handlers_file.write("") + + with open(os.path.join(init_k8s_tasks_dir, "cni.yml"), "w") as init_k8s_tasks_cni_file: + init_k8s_tasks_cni_file.write("""- block: + - name: Check if Calico CRDs exist + command: kubectl get crd felixconfigurations.crd.projectcalico.org + register: calico_crd_check + ignore_errors: true + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_operator_url }} + retries: 3 + delay: 3 + + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_crd_url }} + retries: 3 + delay: 3 + delegate_to: "{{ groups['k8s_masters'][0] }}" + when: calico_crd_check.rc != 0 + run_once: true + + """) + + with open(os.path.join(init_k8s_tasks_dir, "initk8s.yml"), "w") as init_k8s_tasks_initk8s_file: + init_k8s_tasks_initk8s_file.write("""- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Init cluster | Copy kubeadmcnf.yaml + template: + src: kubeadmcnf.yml.j2 + dest: /root/kubeadmcnf.yaml + + - name: Init cluster | Initiate cluster on node groups['kube_master'][0] + shell: kubeadm init --config=/root/kubeadmcnf.yaml + register: kubeadm_init + # Retry is because upload config sometimes fails + until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr + notify: Restart kubelet + + when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- name: Sleep for 300 seconds and reboot the Master1 server + wait_for: + timeout: 300 + delegate_to: localhost + +- name: Reboot the servers + command: reboot + async: 1 + poll: 0 + # ignore_errors: yes + delegate_to: 
"{{ groups['k8s_masters'][0] }}" + +- name: Sleep for 300 seconds to Master1 up and running + wait_for: + timeout: 300 + delegate_to: localhost + # when: use_iran == "true" + +- name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." + """) + + with open(os.path.join(init_k8s_tasks_dir, "main.yml"), "w") as init_k8s_tasks_main_file: + init_k8s_tasks_main_file.write("""--- +# tasks file for init_k8s + +- name: Initialize kubernetes cluster + include_tasks: initk8s.yml + +- name: Initialize Calico CNI + include_tasks: cni.yml + """) + + # Create join_master files + join_master_defaults_dir = os.path.join(join_master_dir, "defaults") + join_master_files_dir = os.path.join(join_master_dir, "files") + join_master_handlers_dir = os.path.join(join_master_dir, "handlers") + join_master_tasks_dir = os.path.join(join_master_dir, "tasks") + join_master_templates_dir = os.path.join(join_master_dir, "templates") + join_master_vars_dir = os.path.join(join_master_dir, "vars") + + os.makedirs(join_master_defaults_dir, exist_ok=True) + os.makedirs(join_master_files_dir, exist_ok=True) + os.makedirs(join_master_handlers_dir, exist_ok=True) + os.makedirs(join_master_tasks_dir, exist_ok=True) + os.makedirs(join_master_templates_dir, exist_ok=True) + os.makedirs(join_master_vars_dir, exist_ok=True) + + with open(os.path.join(join_master_defaults_dir, "main.yml"), "w") as join_master_defaults_file: + join_master_defaults_file.write("") + + with open(os.path.join(join_master_files_dir, "join-command"), "w") as join_master_files_file: + join_master_files_file.write("") + + with open(os.path.join(join_master_handlers_dir, "main.yml"), "w") as join_master_handlers_file: + join_master_handlers_file.write("") + + with open(os.path.join(join_master_tasks_dir, "join_master.yml"), "w") as join_master_tasks_join_master_file: + join_master_tasks_join_master_file.write("""- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + +- block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" + + - name: copy kubeadmcnf.yaml + template: + src: kubeadmcnf-join.yml.j2 + dest: /root/kubeadm-config.yaml + + when: + - inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Copy the join command to server location + copy: + src: roles/join_master/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- block: + - name: get certificate key + shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml + register: kubeadm_cert_key + + - name: Print certificate key + debug: + msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + - name: register the cert key + set_fact: + control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + when: + - inventory_hostname in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + run_once: false + delegate_facts: true + +- name: Join | Join control-plane to cluster + command: "sh /root/join-command.sh 
--control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- name: remove apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: absent + +- name: Add apiserver_url to point to the masters + lineinfile: + dest: /etc/hosts + line: "{{ private_ip }} {{ apiserver_url }}" + state: present + when: + - inventory_hostname in groups['k8s_masters'] + """) + + with open(os.path.join(join_master_tasks_dir, "main.yml"), "w") as join_master_tasks_main_file: + join_master_tasks_main_file.write("""--- +# tasks file for join_master + +- name: Join master(s) node to cluster + include_tasks: join_master.yml + """) + + # Create join_worker files + join_worker_defaults_dir = os.path.join(join_worker_dir, "defaults") + join_worker_files_dir = os.path.join(join_worker_dir, "files") + join_worker_handlers_dir = os.path.join(join_worker_dir, "handlers") + join_worker_tasks_dir = os.path.join(join_worker_dir, "tasks") + join_worker_templates_dir = os.path.join(join_worker_dir, "templates") + join_worker_vars_dir = os.path.join(join_worker_dir, "vars") + + os.makedirs(join_worker_defaults_dir, exist_ok=True) + os.makedirs(join_worker_files_dir, exist_ok=True) + os.makedirs(join_worker_handlers_dir, exist_ok=True) + os.makedirs(join_worker_tasks_dir, exist_ok=True) + os.makedirs(join_worker_templates_dir, exist_ok=True) + os.makedirs(join_worker_vars_dir, exist_ok=True) + + with open(os.path.join(join_worker_defaults_dir, "main.yml"), "w") as join_worker_defaults_file: + join_worker_defaults_file.write("") + + with open(os.path.join(join_worker_files_dir, "join-command"), "w") as join_worker_files_file: + join_worker_files_file.write("") + + with open(os.path.join(join_worker_handlers_dir, "main.yml"), "w") as join_worker_handlers_file: + join_worker_handlers_file.write("") + + with open(os.path.join(join_worker_tasks_dir, "join_worker.yml"), "w") as join_worker_tasks_join_worker_file: + join_worker_tasks_join_worker_file.write("""- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + +- block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" + + when: + - inventory_hostname not in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Copy the join command to server location + copy: + src: roles/join_worker/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - 
inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- name: Join | Join worker nodes to the cluster + command: sh /root/join-command.sh + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + """) + + with open(os.path.join(join_worker_tasks_dir, "main.yml"), "w") as join_worker_tasks_main_file: + join_worker_tasks_main_file.write("""--- +# tasks file for join_worker + +- name: Join worker(s) node to cluster + include_tasks: join_worker.yml + """) \ No newline at end of file diff --git a/app/template_generators/ansible/install/nginx.py b/app/template_generators/ansible/install/nginx.py index 38175395..aed87f43 100644 --- a/app/template_generators/ansible/install/nginx.py +++ b/app/template_generators/ansible/install/nginx.py @@ -1,3 +1,4 @@ +import os def ansible_nginx_install_ubuntu(input): nginx_hosts = input.hosts @@ -10,120 +11,92 @@ def ansible_nginx_install_ubuntu(input): nginx_version_in_task = "nginx={{ nginx_version }}~{{ ansible_distribution_release }}" - prompt = f""" - Generate a Python code to generate an Ansible project (project name is app/media/MyAnsible) - that dynamically provisions Ansible resources ensuring a modular, flexible structure. Only provide - Python code, no explanations or markdown formatting, without ```python entry. - The project should be organized as follows: + + + project_name = "app/media/MyAnsible" + ansible_dir = project_name + group_vars_dir = os.path.join(ansible_dir, "group_vars") + host_vars_dir = os.path.join(ansible_dir, "host_vars") + roles_dir = os.path.join(ansible_dir, "roles") + install_nginx_dir = os.path.join(roles_dir, "install_nginx") + tasks_dir = os.path.join(install_nginx_dir, "tasks") + vars_dir = os.path.join(install_nginx_dir, "vars") + defaults_dir = os.path.join(install_nginx_dir, "defaults") + files_dir = os.path.join(install_nginx_dir, "files") + handlers_dir = os.path.join(install_nginx_dir, "handlers") + templates_dir = os.path.join(install_nginx_dir, "templates") - The structure of this project must be as follows: - ``` - ├── ansible.cfg - ├── group_vars - │   |── nginx_nodes - │   - ├── hosts - ├── host_vars - ├── nginx_playbook.yml - └── roles - └── install_nginx - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - └── main.yml - ``` - - The content of ansible.cfg must be as follows: - ``` - [defaults] - host_key_checking=false - ``` - - group_vars directory includes a single file called "nginx_nodes" and the content of this file must be as follows: - ``` - ansible_port: {nginx_ansible_port} - ansible_user: {nginx_ansible_user} - ``` - - there is file called "hosts" which its content must be as follows: - ``` - {nginx_inventory} - ``` - - There is an empty directory called "host_vars" with no files included - - There is a file called "nginx_playbook.yml" which its content must be as follows: - ``` - - hosts: all - roles: - - install_nginx - ``` - - There is a directory called "roles" which a sub-directory called "install_nginx" (roles/install_nginx) - "install_nginx" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (install_nginx/tasks): This path has a file called "main.yml" which its content must be as follows: - ``` - --- - - name: Install CA certificates to ensure HTTPS connections work - apt: - name: ca-certificates - state: present + # Create project directories + 
os.makedirs(group_vars_dir, exist_ok=True) + os.makedirs(host_vars_dir, exist_ok=True) + os.makedirs(roles_dir, exist_ok=True) + os.makedirs(install_nginx_dir, exist_ok=True) + os.makedirs(tasks_dir, exist_ok=True) + os.makedirs(vars_dir, exist_ok=True) + os.makedirs(defaults_dir, exist_ok=True) + os.makedirs(files_dir, exist_ok=True) + os.makedirs(handlers_dir, exist_ok=True) + os.makedirs(templates_dir, exist_ok=True) - - name: Add Nginx signing key - apt_key: - url: "{nginx_repo_key_in_task}" - state: present + # Create ansible.cfg + with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as ansible_cfg: + ansible_cfg.write("[defaults]\n") + ansible_cfg.write("host_key_checking=false\n") - - name: Add Nginx repository - apt_repository: - repo: "{nginx_repo_in_task}" - state: present - filename: nginx + # Create group_vars/nginx_nodes + with open(os.path.join(group_vars_dir, "nginx_nodes"), "w") as nginx_nodes: + nginx_nodes.write(f"ansible_port : {nginx_ansible_port}\n") + nginx_nodes.write(f"ansible_user : {nginx_ansible_user}\n") - - name: Update apt cache - apt: - update_cache: yes + # Create hosts + with open(os.path.join(ansible_dir, "hosts"), "w") as hosts_file: + + hosts_file.write(f"{nginx_inventory}") + - - name: Install specific version of Nginx - apt: - name: "{nginx_version_in_task}" - state: present + # Create empty host_vars directory (already created) - - name: Ensure Nginx service is running and enabled - service: - name: nginx - state: started - enabled: yes - ``` - - (install_nginx/vars): This path has a file called "main.yml" which its content must be as follows: - ``` - nginx_repo_key_url: "https://nginx.org/keys/nginx_signing.key" - nginx_repo_url: "http://nginx.org/packages/mainline/ubuntu/" - nginx_version: "{nginx_version}" - ``` - - finally just give me a python code without any note that can generate a project folder with the - given schema without ```python entry. and we dont need any base directory in the python code. - the final ansible template must work very well without any error! 
-
-    the python code you give me, must have structure like that:
-
-    import os
-    project_name = "app/media/MyAnsible"
-    foo_dir = os.path.join(project_name, "bar")
-    x_dir = os.path.join(modules_dir, "y")
+    # Create nginx_playbook.yml
+    with open(os.path.join(ansible_dir, "nginx_playbook.yml"), "w") as playbook:
+        playbook.write("- hosts: all\n")
+        playbook.write("  roles:\n")
+        playbook.write("    - install_nginx\n")
-    # Create project directories
-    os.makedirs(ansible_dir, exist_ok=True)
+    # Create install_nginx/tasks/main.yml
+    with open(os.path.join(tasks_dir, "main.yml"), "w") as tasks_file:
+        tasks_file.write("---\n")
+        tasks_file.write("- name: Install CA certificates to ensure HTTPS connections work\n")
+        tasks_file.write("  apt:\n")
+        tasks_file.write("    name: ca-certificates\n")
+        tasks_file.write("    state: present\n\n")
+        tasks_file.write("- name: Add Nginx signing key\n")
+        tasks_file.write("  apt_key:\n")
+        tasks_file.write("    url: \"{{ nginx_repo_key_url }}\"\n")
+        tasks_file.write("    state: present\n\n")
+        tasks_file.write("- name: Add Nginx repository\n")
+        tasks_file.write("  apt_repository:\n")
+        tasks_file.write("    repo: \"deb {{ nginx_repo_url }} {{ ansible_distribution_release }} nginx\"\n")
+        tasks_file.write("    state: present\n")
+        tasks_file.write("    filename: nginx\n\n")
+        tasks_file.write("- name: Update apt cache\n")
+        tasks_file.write("  apt:\n")
+        tasks_file.write("    update_cache: yes\n\n")
+        tasks_file.write("- name: Install specific version of Nginx\n")
+        tasks_file.write("  apt:\n")
+        tasks_file.write("    name: \"nginx={{ nginx_version }}~{{ ansible_distribution_release }}\"\n")
+        tasks_file.write("    state: present\n\n")
+        tasks_file.write("- name: Ensure Nginx service is running and enabled\n")
+        tasks_file.write("  service:\n")
+        tasks_file.write("    name: nginx\n")
+        tasks_file.write("    state: started\n")
+        tasks_file.write("    enabled: yes\n")
-    # Create main.tf
-    with open(os.path.join(project_name, "main.tf"), "w") as main_file:
-        # any thing you need
-    """
-    return prompt
-
+    # Create install_nginx/vars/main.yml
+    with open(os.path.join(vars_dir, "main.yml"), "w") as vars_file:
+        vars_file.write("nginx_repo_key_url: \"https://nginx.org/keys/nginx_signing.key\"\n")
+        vars_file.write("nginx_repo_url: \"http://nginx.org/packages/mainline/ubuntu/\"\n")
+        vars_file.write(f"nginx_version: \"{nginx_version}\"\n")
+
 def ansible_nginx_install(input):
diff --git a/crawl/content_parser.py b/crawl/content_parser.py
deleted file mode 100644
index 9e03e97c..00000000
--- a/crawl/content_parser.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import requests
-from bs4 import BeautifulSoup
-from requests.adapters import HTTPAdapter
-from requests.packages.urllib3.util.retry import Retry
-
-class WebContentParser:
-    def __init__(self, url):
-        self.url = url
-        self.headers = {
-            'User-Agent': (
-                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) '
-                'AppleWebKit/537.36 (KHTML, like Gecko) '
-                'Chrome/50.0.2661.102 Safari/537.36'
-            )
-        }
-        self.session = self._initialize_session()
-        self.main_response = None
-        self.all_page_data = []
-
-    def _initialize_session(self):
-        """Set up the session with retry strategy."""
-        retry_strategy = Retry(
-            total=5,
-            backoff_factor=8,
-        )
-        adapter = HTTPAdapter(max_retries=retry_strategy)
-        adapter.max_retries.respect_retry_after_header = False
-
-        session = requests.Session()
-        session.mount("https://", adapter)
-        session.mount("http://", adapter)
-        return session
-
-    def fetch_content(self):
-        """Fetch the main content from the URL."""
-        try:
-            self.main_response = 
self.session.get( - self.url, verify=False, timeout=30, headers=self.headers - ) - print(f'URL fetched: {self.url}') - return self.main_response - except requests.RequestException as e: - print(f"Failed to fetch the URL: {e}") - return None - - def parse_content(self): - """Parse the fetched HTML content.""" - if not self.main_response: - print("No response available to parse.") - return [] - - main_soup = BeautifulSoup(self.main_response.content, 'html.parser') - datas = main_soup.find('main', {'id': 'main'}) - if not datas: - print("No 'main' element found.") - return [] - - all_tag = datas.find_all(['h1', 'h2', 'h3', 'p', 'blockquote', 'ul']) - each_title_data = {} - - for tag in all_tag: - if tag.name in ['h1', 'h2']: - if each_title_data: - self.all_page_data.append(each_title_data) - each_title_data = {} - each_title_data['metadata'] = tag.text.strip() - - elif tag.name == 'h3': - if tag.text.strip() == 'Resources': - each_title_data[tag.text.strip()] = '' - else: - if each_title_data: - self.all_page_data.append(each_title_data) - each_title_data = {} - each_title_data['metadata'] = tag.text.strip() - - elif tag.name in ['p', 'blockquote']: - num = len(each_title_data) - key = f'content {num}' - if tag.text.strip(): - each_title_data[key] = tag.text.strip() - - elif tag.name == 'ul': - text = ' '.join( - li.text.strip() - for li in tag.find_all('li', {'class': 'mdx-lists_listItem__nkqhg'}) - ) - if 'Resources' in each_title_data: - each_title_data['Resources'] = text - else: - num = len(each_title_data) - key = f'content {num}' - if text: - each_title_data[key] = text - - if each_title_data: - self.all_page_data.append(each_title_data) - - return self.all_page_data - - def get_data(self): - """Main method to fetch and parse content.""" - self.fetch_content() - return self.parse_content() - diff --git a/crawl/crawled_data/Amazon EC2 instance types - Amazon EC2.txt b/crawl/crawled_data/Amazon EC2 instance types - Amazon EC2.txt new file mode 100644 index 00000000..861c4cda --- /dev/null +++ b/crawl/crawled_data/Amazon EC2 instance types - Amazon EC2.txt @@ -0,0 +1,79 @@ +Title: Amazon EC2 instance types - Amazon EC2 + +When you launch an EC2 instance, the instance type that you specify + determines the hardware of the host computer used for your instance. Each instance type + offers different compute, memory, and storage capabilities, and is grouped in an instance + family based on these capabilities. Select an instance type based on the requirements of the + application or software that you plan to run on your instance. +Amazon EC2 dedicates some resources of the host computer, such as CPU, memory, and instance + storage, to a particular instance. Amazon EC2 shares other resources of the host computer, such + as the network and the disk subsystem, among instances. If each instance on a host computer + tries to use as much of one of these shared resources as possible, each receives an equal + share of that resource. However, when a resource is underused, an instance can consume a + higher share of that resource while it's available. +Each instance type provides higher or lower minimum performance from a shared resource. + For example, instance types with high I/O performance have a larger allocation of shared resources. + Allocating a larger share of shared resources also reduces the variance of I/O performance. + For most applications, moderate I/O performance is more than enough. 
However, for + applications that require greater or more consistent I/O performance, consider + an instance type with higher I/O performance. +Current generation instances +Previous generation instances +Amazon EC2 instance type naming conventions +Amazon EC2 instance type specifications +Instances built on the AWS Nitro System +Amazon EC2 instance type quotas +For the best performance, we recommend that you use the following instance types + when you launch new instances. For more information, see Amazon EC2 Instance Types. +General purpose: M5 | M5a | M5ad | M5d | M5dn | M5n | M5zn | M6a | M6g | M6gd | M6i | M6id | M6idn | M6in | M7a | M7g | M7gd | M7i | M7i-flex | M8g | Mac1 | Mac2 | Mac2-m1ultra | Mac2-m2 | Mac2-m2pro | T2 | T3 | T3a | T4g +Compute optimized: C5 | C5a | C5ad | C5d | C5n | C6a | C6g | C6gd | C6gn | C6i | C6id | C6in | C7a | C7g | C7gd | C7gn | C7i | C7i-flex | C8g +Memory optimized: R5 | R5a | R5ad | R5b | R5d | R5dn | R5n | R6a | R6g | R6gd | R6i | R6idn | R6in | R6id | R7a | R7g | R7gd | R7i | R7iz | R8g | U-3tb1 | U-6tb1 | U-9tb1 | U-12tb1 | U-18tb1 | U-24tb1 | U7i-6tb | U7i-8tb | U7i-12tb | U7in-16tb | U7in-24tb | U7in-32tb | X1 | X1e | X2gd | X2idn | X2iedn | X2iezn | X8g | z1d +Storage optimized: D2 | D3 | D3en | H1 | I3 | I3en | I4g | I4i | I7ie | I8g | Im4gn | Is4gen +Accelerated computing: DL1 | DL2q | F1 | G4ad | G4dn | G5 | G5g | G6 | G6e | Gr6 | Inf1 | Inf2 | P2 | P3 | P3dn | P4d | P4de | P5 | P5e | P5en | Trn1 | Trn1n | Trn2 | Trn2u | VT1 +High-performance computing: Hpc6a | Hpc6id | Hpc7a | Hpc7g +Amazon Web Services offers previous generation instance types for users who have optimized their + applications around them and have yet to upgrade. We encourage you to use current generation + instance types to get the best performance, but we continue to support the following previous + generation instance types. For more information about which current + generation instance type would be a suitable upgrade, see + Previous Generation Instances. +General purpose: A1 | M1 | M2 | M3 | M4 | T1 +Compute optimized: C1 | C3 | C4 +Memory optimized: R3 | R4 +Storage optimized: I2 +Accelerated computing: G3 +Fixed performance instances provide fixed CPU resources. These instances can + deliver and sustain full CPU performance at any time, and for as long as a workload + needs it. If you need consistently high CPU performance for applications such as + video encoding, high volume websites, or HPC applications, we recommend that you use + fixed performance instances. +Burstable performance (T) instances provide a baseline level of CPU + performance with the ability to burst above the baseline. The baseline CPU is + designed to meet the needs of the majority of general purpose workloads, such as + large-scale micro-services, web servers, small and medium databases, data logging, + code repositories, virtual desktops, and development and test environments. +The baseline utilization and ability to burst are governed by CPU credits. Each + burstable performance instance continuously earns credits when it stays below the CPU + baseline, and continuously spends credits when it bursts above the baseline. For more + information, see Burstable + performance instances in the Amazon EC2 User Guide. +M7i-flex and C7i-flex instances offer a balance of compute, memory, and network + resources, and they provide the most cost-effective way to run a broad spectrum of + general purpose applications. 
These instances provide reliable CPU resources to + deliver a baseline CPU performance of 40 percent, which is designed to meet the + compute requirements for a majority of general purpose workloads. When more + performance is needed, these instances provide the ability to exceed the baseline + CPU performance and deliver up to 100 percent CPU performance for 95 percent of the + time over a 24-hour window. +M7i-flex and C7i-flex instances running at a high CPU utilization that is consistently + above the baseline for long periods of time might see a gradual reduction in the maximum + burst CPU throughput. For more information, see M7i-flex instances and C7i-flex instances. +For pricing information, see Amazon EC2 Pricing. + Javascript is disabled or is unavailable in your browser. +To use the Amazon Web Services Documentation, Javascript must be enabled. Please refer to your browser's Help pages for instructions. +Thanks for letting us know we're doing a good job! +If you've got a moment, please tell us what we did right so we can do more of it. + +Thanks for letting us know this page needs work. We're sorry we let you down. +If you've got a moment, please tell us how we can make the documentation better. + diff --git a/crawl/crawled_data/What is Amazon EC2? - Amazon Elastic Compute Cloud.txt b/crawl/crawled_data/What is Amazon EC2? - Amazon Elastic Compute Cloud.txt new file mode 100644 index 00000000..d0e78fd3 --- /dev/null +++ b/crawl/crawled_data/What is Amazon EC2? - Amazon Elastic Compute Cloud.txt @@ -0,0 +1,151 @@ +Title: What is Amazon EC2? - Amazon Elastic Compute Cloud + +Amazon Elastic Compute Cloud (Amazon EC2) provides on-demand, scalable computing capacity in the Amazon Web + Services (AWS) Cloud. Using Amazon EC2 reduces hardware costs so you can develop and deploy + applications faster. You can use Amazon EC2 to launch as many or as few virtual servers as you + need, configure security and networking, and manage storage. You can add capacity (scale up) + to handle compute-heavy tasks, such as monthly or yearly processes, or spikes in website + traffic. When usage decreases, you can reduce capacity (scale down) again. +An EC2 instance is a virtual server in the AWS Cloud. When you launch an EC2 instance, + the instance type that you specify determines the hardware available to your instance. + Each instance type offers a different balance of compute, memory, network, and storage + resources. For more information, see the Amazon EC2 Instance Types Guide. +Amazon EC2 provides the following high-level features: +Virtual servers. +Preconfigured templates for your instances that package the components you + need for your server (including the operating system and additional + software). +Various configurations of CPU, memory, storage, networking capacity, and + graphics hardware for your instances. +Persistent storage volumes for your data using Amazon Elastic Block Store (Amazon EBS). +Storage volumes for temporary data that is deleted when you stop, + hibernate, or terminate your instance. +Secure login information for your instances. AWS stores the public key + and you store the private key in a secure place. +A virtual firewall that allows you to specify the protocols, ports, and + source IP ranges that can reach your instances, and the destination IP + ranges to which your instances can connect. 
+Amazon EC2 supports the processing, storage, and transmission +of credit card data by a merchant or service provider, and has been +validated as being compliant with Payment Card Industry (PCI) Data Security Standard (DSS). +For more information about PCI DSS, including how to request a copy of the AWS PCI Compliance Package, +see PCI DSS Level 1. + +You can use other AWS services with the instances that you deploy using Amazon EC2. +Helps ensure you have the correct number of Amazon EC2 instances available to + handle the load for your application. +Automate backing up your Amazon EC2 instances and the Amazon EBS volumes attached to + them. +Monitor your instances and Amazon EBS volumes. +Automatically distribute incoming application traffic across multiple + instances. +Detect potentially unauthorized or malicious use of your EC2 instances. +Automate the creation, management, and deployment of customized, secure, and + up-to-date server images. +Size, configure, and deploy AWS resources for third-party applications + without having to manually identify and provision individual AWS + resources. +Perform operations at scale on EC2 instances with this secure end-to-end + management solution. +You can launch instances using another AWS compute service instead of using Amazon EC2. +Build websites or web applications using Amazon Lightsail, a cloud platform + that provides the resources that you need to deploy your project quickly, for + a low, predictable monthly price. To compare Amazon EC2 and Lightsail, see + Amazon Lightsail or Amazon EC2. +Deploy, manage, and scale containerized applications on a cluster of EC2 + instances. For more information, see Choosing an AWS container service. +Run your Kubernetes applications on AWS. For more information, see + Choosing an AWS container service. +You can create and manage your Amazon EC2 instances using the following interfaces: +A simple web interface to create and manage Amazon EC2 instances and resources. + If you've signed up for an AWS account, you can access the Amazon EC2 console + by signing into the AWS Management Console and selecting EC2 from + the console home page. +Enables you to interact with AWS services using commands in your command-line shell. It + is supported on Windows, Mac, and Linux. For more information about the + AWS CLI , see AWS Command Line Interface User Guide. You can find the Amazon EC2 commands in the AWS CLI Command Reference. +Amazon EC2 supports creating resources using AWS CloudFormation. You create a template, in JSON or YAML + format, that describes your AWS resources, and AWS CloudFormation provisions and + configures those resources for you. You can reuse your CloudFormation + templates to provision the same resources multiple times, whether in the + same Region and account or in multiple Regions and accounts. For more + information about supported resource types and properties for Amazon EC2, see + EC2 resource type + reference in the AWS CloudFormation User Guide. +If you prefer to build applications using language-specific APIs instead + of submitting a request over HTTP or HTTPS, AWS provides libraries, sample + code, tutorials, and other resources for software developers. These + libraries provide basic functions that automate tasks such as + cryptographically signing your requests, retrying requests, and handling + error responses, making it easier for you to get started. For more + information, see + Tools to Build + on AWS. 
+A set of PowerShell modules that are built on the functionality exposed by + the AWS SDK for .NET. The Tools for PowerShell enable you to script operations on your AWS + resources from the PowerShell command line. To get started, see the + AWS Tools for Windows PowerShell User Guide. You can find the cmdlets for Amazon EC2, in the AWS Tools for PowerShell Cmdlet Reference. +Amazon EC2 provides a Query API. These requests are HTTP or HTTPS requests that + use the HTTP verbs GET or POST and a Query parameter named + Action. For more information about the API actions for + Amazon EC2, see Actions in the + Amazon EC2 API Reference. +Amazon EC2 provides the following pricing options: +You can get started with Amazon EC2 for free. To explore the Free Tier options, + see AWS Free Tier. +Pay for the instances that you use by the second, with a minimum of 60 + seconds, with no long-term commitments or upfront payments. +You can reduce your Amazon EC2 costs by making a commitment to a consistent + amount of usage, in USD per hour, for a term of 1 or 3 years. +You can reduce your Amazon EC2 costs by making a commitment to a specific + instance configuration, including instance type and Region, for a term of 1 + or 3 years. +Request unused EC2 instances, which can reduce your Amazon EC2 costs + significantly. +Reduce costs by using a physical EC2 server that is fully dedicated for + your use, either On-Demand or as part of a Savings Plan. You can use your + existing server-bound software licenses and get help meeting compliance + requirements. +Reserve compute capacity for your EC2 instances in a specific Availability + Zone for any duration of time. +Removes the cost of unused minutes and seconds from your bill. +For a complete list of charges and prices for Amazon EC2 and more information about the purchase + models, see Amazon EC2 pricing. +To create estimates for your AWS use cases, use the AWS Pricing Calculator. +To estimate the cost of transforming Microsoft + workloads to a modern architecture that uses open source and + cloud-native services deployed on AWS, use the AWS + Modernization Calculator for Microsoft Workloads. +To see your bill, go to the Billing and Cost Management + Dashboard in the AWS Billing and Cost Management + console. Your bill contains links to usage reports that provide details + about your bill. To learn more about AWS account billing, see AWS Billing and Cost Management User + Guide. +If you have questions concerning AWS billing, accounts, and events, contact AWS Support. +To calculate the cost of a sample provisioned + environment, see Cloud Economics + Center. When calculating the cost of a provisioned + environment, remember to include incidental costs such as snapshot storage for EBS + volumes. +You can optimize the cost, security, and performance of your AWS environment + using AWS Trusted Advisor. +You can use AWS Cost Explorer to analyze the cost and usage of your EC2 instances. You can view + data up to the last 13 months, and forecast how much you are likely to spend for the next + 12 months. For more information, see + Analyzing your costs with + AWS Cost Explorer in the AWS Cost Management User Guide. +Amazon EC2 features +AWS re:Post +AWS Skill Builder +AWS Support +Hands-on Tutorials +Web Hosting +Windows on AWS + Javascript is disabled or is unavailable in your browser. +To use the Amazon Web Services Documentation, Javascript must be enabled. Please refer to your browser's Help pages for instructions. +Thanks for letting us know we're doing a good job! 
diff --git a/crawl/main.py b/crawl/main.py
index f86e632e..3a4621e3 100644
--- a/crawl/main.py
+++ b/crawl/main.py
@@ -1,92 +1,43 @@
-
-import argparse
-import csv
-import logging
 import requests
 from bs4 import BeautifulSoup
-from requests.adapters import HTTPAdapter
-from requests.packages.urllib3.util.retry import Retry
-from content_parser import WebContentParser
-
-
-def setup_logging():
-    logging.basicConfig(
-        level=logging.INFO,
-        format='%(asctime)s - %(levelname)s - %(message)s',
-        handlers=[logging.StreamHandler()]
-    )
-
-
-def setup_http_session():
-    retry_strategy = Retry(
-        total=5,
-        backoff_factor=8,
-    )
-    adapter = HTTPAdapter(max_retries=retry_strategy)
-    adapter.max_retries.respect_retry_after_header = False
-    session = requests.Session()
-    session.mount("https://", adapter)
-    session.mount("http://", adapter)
-    return session
-
-
-def process_urls(file_path, save_result):
-    http = setup_http_session()
-    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
-
-    with open(file_path, 'r') as file:
-        csv_reader = csv.reader(file)
-        for row in csv_reader:
-            if row: # Check if the row is not empty
-                main_url = row[0]
-                try:
-                    main_response = http.get(main_url, verify=False, timeout=30, headers=headers)
-                    logging.info(f'Fetched URL: {main_url}')
-                except requests.RequestException as e:
-                    logging.error(f"Failed to fetch URL {main_url}: {e}")
-                    continue
-
-                main_soup = BeautifulSoup(main_response.content, 'html.parser')
-                products = main_soup.find('div', {'class': 'marketing-content_root__DE3hU'}).find_all('div', {'class': 'card-grid-block_root__yDdm_'})
-                logging.info(f'Found {len(products)} products on page: {main_url}')
-                all_data = []
-                for product in products:
-                    # Get org title
-                    title = product.find('h2').text
-                    sub_content_link=[]
-                    all_sub_title = product.find_all('li')
-                    for res in all_sub_title:
-                        sub_part_content = {}
-                        sub_part_content['main_title'] = title
-                        sub_title = res.find('span', {'class': 'card-title_text__F97Wj'}).get_text()
-                        sub_part_content['sub_title'] = sub_title
-                        sub_title_link = 'https://developer.hashicorp.com' + res.find('a').attrs['href']
-                        sub_part_content['sub_title_link'] = sub_title_link
-
-                        parser = WebContentParser(sub_title_link)
-                        data = parser.get_data()
-                        sub_part_content['all_data_info'] = data
-
-                        logging.info(f'Parsed content for sub-title: {sub_title}')
-                        sub_content_link.append(sub_part_content)
-                    all_data.append(sub_content_link)
-                if save_result:
-                    # Logic to save sub_part_content goes here (e.g., writing to a file or database)
-                    logging.info(f'Saving result for: {all_data}')
-                else:
-                    print(all_data)
-
-
-def main():
-    setup_logging()
-
-    parser = argparse.ArgumentParser(description='Process URLs from a CSV file.')
-    parser.add_argument('--csv_path', type=str, default='./urls.csv', help='Path to the CSV file containing URLs')
-    parser.add_argument('--save_result', type=bool, default=False, help='Flag to indicate if the results should be saved')
-    args = parser.parse_args()
-
-    process_urls(args.csv_path, args.save_result)
-
-
-if __name__ == '__main__':
-    main()
+import os
+
+# List of URLs to crawl
+urls = [
+    "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts.html",
+    "https://docs.aws.amazon.com/ec2/latest/instancetypes/instance-types.html#current-gen-instances"
+]
+
+# Directory to save the files
+save_dir = "crawled_data"
+os.makedirs(save_dir, exist_ok=True)
+
+def fetch_and_save(url):
+    try:
+        response = requests.get(url)
+        response.raise_for_status()  # Check if the request was successful
+
+        # Parse the HTML content
+        soup = BeautifulSoup(response.text, 'html.parser')
+
+        # For demonstration, we are fetching the page title and all paragraphs
+        title = soup.title.string if soup.title else "no_title"
+        paragraphs = soup.find_all('p')
+
+        # Prepare the file name
+        file_name = os.path.join(save_dir, f"{title}.txt")
+
+        # Write the content to the file
+        with open(file_name, 'w', encoding='utf-8') as file:
+            file.write(f"Title: {title}\n\n")
+            for para in paragraphs:
+                file.write(para.get_text() + "\n")
+
+        print(f"Saved content from {url} to {file_name}")
+
+    except requests.RequestException as e:
+        print(f"Failed to fetch {url}: {e}")
+
+# Fetch and save data from each URL
+for url in urls:
+    fetch_and_save(url)
diff --git a/crawl/readme.md b/crawl/readme.md
deleted file mode 100644
index 93e44d57..00000000
--- a/crawl/readme.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Documentation for Web Content Scraper
-
-## Overview
-This script is designed to scrape data from a list of URLs provided in a CSV file. It fetches the content, extracts specific product information, and logs the operations performed. Optionally, the extracted content can also be saved. The script utilizes various libraries such as `requests`, `BeautifulSoup`, and `argparse` to ensure efficient and robust operation.
-
-## Prerequisites
-Make sure the following Python packages are installed:
-- `requests`
-- `beautifulsoup4`
-- `urllib3`
-
-To install the dependencies, run the following command:
-```sh
-pip install requests beautifulsoup4
-```
-## How to Use
-Arguments
-The script accepts command-line arguments that allow customization of behavior:
---csv_path: The path to the CSV file containing URLs to scrape. The default value is ./urls.csv.
---save_result: A boolean flag indicating whether to save the scraped results. The default value is False.
-## Running the Script
-You can run the script by using the following command:
-
-```sh
-Copy code
-python main.py --csv_path --save_result
-```
-For example:
-```sh
-Copy code
-python main.py --csv_path ./urls.csv --save_result True
-```
-## CSV File Format
-The CSV file should contain a list of URLs, with each URL on a new line. Here is an example:
-```
-https://example.com/page1
-https://example.com/page2
-```
-
diff --git a/crawl/urls.csv b/crawl/urls.csv
deleted file mode 100644
index 46e1afd1..00000000
--- a/crawl/urls.csv
+++ /dev/null
@@ -1 +0,0 @@
-https://developer.hashicorp.com/terraform/docs
\ No newline at end of file