diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml index b01389d..25edb57 100644 --- a/.github/workflows/ansible-lint.yml +++ b/.github/workflows/ansible-lint.yml @@ -8,7 +8,7 @@ jobs: name: Ansible Lint runs-on: ubuntu-latest container: - image: ghcr.io/ansible/creator-ee:v0.20.0 + image: ghcr.io/ansible/creator-ee:v0.21.0 steps: - uses: actions/checkout@v3 with: diff --git a/Taskfile.yml b/Taskfile.yml index 40bb6ee..f968a6a 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -21,12 +21,12 @@ tasks: ansible:adhoc: desc: | Run Ansible adhoc command. - Example: task ansible:adhoc -- -m ping + Example: task ansible:adhoc -- proxmox -m ping dir: "{{.USER_WORKING_DIR}}" cmds: - task: op:vault - task: venv:source - - ansible all {{.CLI_ARGS}} + - ansible {{.CLI_ARGS}} <<: *ansible-preconditions ansible:play: @@ -61,7 +61,7 @@ tasks: dir: "{{.USER_WORKING_DIR}}" cmds: - task: venv:source - - ansible-galaxy install --force -r requirements.yml + - ansible-galaxy install {{ .CLI_ARGS }} -r requirements.yml preconditions: - sh: command -v ansible-galaxy msg: ansible-galaxy is not installed. Have you ran `task venv`? diff --git a/ansible/README.md b/ansible/README.md index d3835c7..1fa4462 100644 --- a/ansible/README.md +++ b/ansible/README.md @@ -7,5 +7,7 @@ The Ansible playbooks in this repository are used to configure my homelab server - [minecraft-playbook.yml](playbooks/minecraft-playbook.yml): Deploy Minecraft server. - [proxmox-create-vm-template.yml](playbooks/proxmox-create-vm-template.yml): Create VM templates. - [proxmox-download-iso-playbook.yml](playbooks/proxmox-download-iso-playbook.yml): Download ISOs to Proxmox. +- [proxmox-storage-playbook.yml](playbooks/proxmox-storage-playbook.yml): Provision Proxmox LVM Storage. +- [proxmox-external-vote.yml](playbooks/proxmox-external-vote.yml): Cluster External Vote Support. - [pihole-playbook.yml](playbooks/pihole-playbook.yml): Deploy Pi-hole on Raspberry Pi 3. 
- [tailscale-playbook.yml](playbooks/tailscale-playbook.yml): Install or update Tailscale. diff --git a/ansible/files/proxmox_tailscale_cert.sh b/ansible/files/proxmox_tailscale_cert.sh new file mode 100644 index 0000000..12ef6ce --- /dev/null +++ b/ansible/files/proxmox_tailscale_cert.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Used on Proxmox node(s) to generate a certificate for the node's Tailscale FQDN +# This allows for the Proxmox Web GUI to be accessed via the Tailscale FQDN +# Requires jq and tailscale to be installed +# Checks every 60 days if the certificate needs to be renewed + +# File where the last run date is stored +LAST_RUN_FILE="${HOME}/proxmox_tailscale_cert.last_run" + +# Read the last run date from the file +if [[ -f "${LAST_RUN_FILE}" ]]; then + last_run=$(cat "${LAST_RUN_FILE}") +else + last_run=$(date -d "60 days ago" +%F) +fi + +# Calculate the next run date (60 days after the last run) +next_run=$(date -d "${last_run} + 60 days" +%F) +today=$(date +%F) + +# Run the task if today is on or after the next run date (ISO dates compare lexicographically) +if [[ "${today}" == "${next_run}" || "${today}" > "${next_run}" ]]; then + # Snippet below taken from: https://tailscale.com/kb/1133/proxmox#enable-https-access-to-the-proxmox-web-ui + NAME="$(tailscale status --json | jq '.Self.DNSName | .[:-1]' -r)" + tailscale cert "${NAME}" + pvenode cert set "${NAME}.crt" "${NAME}.key" --force --restart + # Update the last run date + echo "${today}" > "${LAST_RUN_FILE}" +fi diff --git a/ansible/group_vars/control.yml b/ansible/group_vars/control.yml new file mode 100644 index 0000000..31acde7 --- /dev/null +++ b/ansible/group_vars/control.yml @@ -0,0 +1,22 @@ +--- +# control group_vars +k3s_control_node: true +k3s_server: + node-ip: "{{ ansible_host }}" + tls-san: + - "{{ kube_vip_address }}" + disable-cloud-controller: true + write-kubeconfig-mode: "644" + disable: + - traefik + - servicelb +k3s_server_manifests_urls: + # kube-vip rbac - https://kube-vip.io/docs/usage/k3s/#step-2-upload-kube-vip-rbac-manifest + - url: 
https://raw.githubusercontent.com/kube-vip/kube-vip/main/docs/manifests/rbac.yaml + filename: kube-vip-rbac.yaml + # kube-vip cloud controller - https://kube-vip.io/docs/usage/cloud-provider/ + - url: https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml + filename: kube-vip-cloud-controller.yaml +k3s_server_manifests_templates: + - kube-vip-daemonset.yml.j2 + - kubevip-configmap.yml.j2 diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/docker.yml similarity index 96% rename from ansible/group_vars/all.yml rename to ansible/group_vars/docker.yml index 49734cc..ba067be 100644 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/docker.yml @@ -1,5 +1,5 @@ --- -# all group_vars +# docker group_vars # geerlingguy.pip vars # https://github.com/geerlingguy/ansible-role-pip#role-variables pip_install_packages: diff --git a/ansible/group_vars/workers.yml b/ansible/group_vars/workers.yml new file mode 100644 index 0000000..cf046c1 --- /dev/null +++ b/ansible/group_vars/workers.yml @@ -0,0 +1,6 @@ +--- +# workers group_vars +k3s_control_node: false +k3s_agent: + node-ip: "{{ ansible_host }}" +longhorn_disk: /dev/sdb diff --git a/ansible/inventory.yml b/ansible/inventory.yml index 217497d..0d914d0 100644 --- a/ansible/inventory.yml +++ b/ansible/inventory.yml @@ -8,12 +8,37 @@ all: - pihole proxmox01.net.dbren.uk: ansible_user: root + proxmox02.net.dbren.uk: + ansible_user: root minecraft.net.dbren.uk: + control01.net.dbren.uk: + worker01.net.dbren.uk: + worker02.net.dbren.uk: + worker03.net.dbren.uk: children: + docker: + hosts: + pihole.net.dbren.uk: + minecraft.net.dbren.uk: tailscale: hosts: pihole.net.dbren.uk: + # Overrides group_vars/tailscale.yml + tailscale_args: --accept-dns=false --ssh minecraft.net.dbren.uk: + proxmox01.net.dbren.uk: + proxmox02.net.dbren.uk: proxmox: hosts: proxmox01.net.dbren.uk: + proxmox02.net.dbren.uk: + k3s: + children: + control: + hosts: + control01.net.dbren.uk: + 
workers: + hosts: + worker01.net.dbren.uk: + worker02.net.dbren.uk: + worker03.net.dbren.uk: diff --git a/ansible/playbooks/k3s-playbook.yml b/ansible/playbooks/k3s-playbook.yml new file mode 100644 index 0000000..ce6602d --- /dev/null +++ b/ansible/playbooks/k3s-playbook.yml @@ -0,0 +1,83 @@ +--- +- name: Build K3s cluster + hosts: k3s + vars_files: + - ../vars/k3s.yml + pre_tasks: + - name: Longhorn tasks + when: not k3s_control_node + tags: + - longhorn + block: + - name: Install open-iscsi and nfs-common for Longhorn + ansible.builtin.apt: + name: + - open-iscsi + - nfs-common + state: present + become: true + + - name: Create /mnt/longhorn directory + ansible.builtin.file: + path: /mnt/longhorn + state: directory + mode: u=rwx,g=rx,o=rx + become: true + + - name: "Ensure disk partition exists on {{ longhorn_disk }}" + community.general.parted: + device: "{{ longhorn_disk }}" + number: 1 + label: gpt + part_start: 0% + part_end: 100% + part_type: primary + fs_type: ext4 + state: present + become: true + + - name: "Ensure ext4 filesystem exists on {{ longhorn_disk }}1" # noqa name[template] + community.general.filesystem: + fstype: ext4 + dev: "{{ longhorn_disk }}1" + become: true + + - name: "Get UUID for {{ longhorn_disk }}1" # noqa name[template] + ansible.builtin.command: + cmd: "blkid {{ longhorn_disk }}1 -s UUID -o value" + register: longhorn_block_device_part_uuid + changed_when: false + become: true + + - name: "Mount /mnt/longhorn on {{ longhorn_block_device_part_uuid.stdout }}" + ansible.posix.mount: + path: /mnt/longhorn + src: "UUID={{ longhorn_block_device_part_uuid.stdout }}" + fstype: ext4 + state: mounted + become: true + roles: + - role: xanmanning.k3s + post_tasks: + - name: Copy kubeconfig to local machine + when: k3s_control_node + block: + - name: Ensure ~/.kube directory exists + ansible.builtin.file: + path: ~/.kube + state: directory + mode: u=rwx,g=,o= + delegate_to: localhost + + - name: Copy kubeconfig from control node to local 
machine + ansible.builtin.fetch: + src: /etc/rancher/k3s/k3s.yaml + dest: ~/.kube/config + flat: true + + - name: Replace localhost with control node IP + ansible.builtin.replace: + path: ~/.kube/config + regexp: '127\.0\.0\.1' + replace: "{{ kube_vip_address }}" + delegate_to: localhost diff --git a/ansible/playbooks/proxmox-create-vm-template.yml b/ansible/playbooks/proxmox-create-vm-template.yml index 8a5793a..ca42232 100644 --- a/ansible/playbooks/proxmox-create-vm-template.yml +++ b/ansible/playbooks/proxmox-create-vm-template.yml @@ -1,6 +1,6 @@ --- - name: Create Proxmox VM Templates - hosts: proxmox + hosts: proxmox01.net.dbren.uk become: true gather_facts: false vars: @@ -109,7 +109,7 @@ - name: Attach disk to Proxmox VM ansible.builtin.command: - cmd: "qm set {{ (proxmox_next_vm_id | int) + index }} --scsihw virtio-scsi-pci --scsi0 {{ proxmox_storage_name }}:vm-{{ (proxmox_next_vm_id | int) + index }}-disk-0,discard=on" + cmd: "qm set {{ (proxmox_next_vm_id | int) + index }} --scsihw virtio-scsi-single --scsi0 {{ proxmox_storage_name }}:vm-{{ (proxmox_next_vm_id | int) + index }}-disk-0,discard=on,iothread=1" loop: "{{ images }}" loop_control: index_var: index diff --git a/ansible/playbooks/proxmox-download-iso-playbook.yml b/ansible/playbooks/proxmox-download-iso-playbook.yml index 353188f..c49d562 100644 --- a/ansible/playbooks/proxmox-download-iso-playbook.yml +++ b/ansible/playbooks/proxmox-download-iso-playbook.yml @@ -1,6 +1,6 @@ --- - name: Download ISOs to Proxmox - hosts: proxmox + hosts: proxmox01.net.dbren.uk become: true gather_facts: false vars: diff --git a/ansible/playbooks/proxmox-external-vote.yml b/ansible/playbooks/proxmox-external-vote.yml new file mode 100644 index 0000000..1ddbe95 --- /dev/null +++ b/ansible/playbooks/proxmox-external-vote.yml @@ -0,0 +1,28 @@ +--- +# https://pve.proxmox.com/wiki/Cluster_Manager#_corosync_external_vote_support +# https://www.apalrd.net/posts/2022/cluster_qdevice/ +- name: Proxmox Nodes - Cluster 
External Vote Support + hosts: proxmox + become: true + tasks: + - name: Install corosync-qdevice + ansible.builtin.apt: + name: + - corosync-qdevice + state: present + +- name: Raspberry Pi - Cluster External Vote Support + hosts: pihole.net.dbren.uk + become: true + tasks: + - name: Install corosync-qnetd + ansible.builtin.apt: + name: + - corosync-qnetd + state: present + + - name: Next steps + ansible.builtin.debug: + msg: | + Run the following command on the Proxmox primary node: + pvecm qdevice setup diff --git a/ansible/playbooks/proxmox-storage-playbook.yml b/ansible/playbooks/proxmox-storage-playbook.yml new file mode 100644 index 0000000..f5aa443 --- /dev/null +++ b/ansible/playbooks/proxmox-storage-playbook.yml @@ -0,0 +1,60 @@ +--- +- name: Provision Proxmox LVM Storage + hosts: proxmox + become: true + vars: + ssds: + # Crucial SSD + - device: /dev/sdb + partition_name: pv-ssd-crucial + vg_name: vg-ssd-crucial + lv_name: lv-ssd-crucial + # Samsung SSD + - device: /dev/sdc + partition_name: pv-ssd-samsung + vg_name: vg-ssd-samsung + lv_name: lv-ssd-samsung + tasks: + - name: Create | LVM Physical Volume Partition + loop: "{{ ssds }}" + community.general.parted: + device: "{{ item.device }}" + name: "{{ item.partition_name }}" + label: gpt + number: 1 + part_start: 0% + part_end: 100% + flags: + - lvm + state: present + + - name: Create | LVM Volume Group + loop: "{{ ssds }}" + community.general.lvg: + vg: "{{ item.vg_name }}" + pvs: "{{ item.device }}1" + state: present + + - name: Create | LVM Logical Volume + loop: "{{ ssds }}" + when: item.lv_name not in ansible_lvm.lvs + community.general.lvol: + vg: "{{ item.vg_name }}" + thinpool: "{{ item.lv_name }}" + size: 100%FREE + state: present + + - name: Proxmox | Configure LVM Logical Volumes + loop: "{{ ssds }}" + loop_control: + index_var: index + ansible.builtin.blockinfile: + path: /etc/pve/storage.cfg + backup: true + marker: "\n# {mark} ANSIBLE MANAGED BLOCK {{ index }}" + block: | + lvmthin: {{ 
item.lv_name }} + thinpool {{ item.lv_name }} + vgname {{ item.vg_name }} + content rootdir,images + state: present diff --git a/ansible/playbooks/tailscale-playbook.yml b/ansible/playbooks/tailscale-playbook.yml index 287ed6d..464c96d 100644 --- a/ansible/playbooks/tailscale-playbook.yml +++ b/ansible/playbooks/tailscale-playbook.yml @@ -3,3 +3,30 @@ hosts: tailscale roles: - role: artis3n.tailscale + post_tasks: + # https://tailscale.com/kb/1133/proxmox + - name: Proxmox | Enable HTTPS Access for Proxmox Web UI + when: inventory_hostname in groups['proxmox'] + tags: + - proxmox + block: + - name: Proxmox | Install jq + ansible.builtin.apt: + name: jq + state: present + + - name: Proxmox | Copy Tailscale script to Proxmox node + ansible.builtin.copy: + src: ../files/proxmox_tailscale_cert.sh + dest: /usr/local/bin/proxmox_tailscale_cert.sh + owner: root + group: root + mode: "0755" + + - name: Proxmox | Create cronjob to generate HTTPS certificate for Proxmox Tailscale FQDN + ansible.builtin.cron: + name: "Generate HTTPS Certificate for Proxmox Tailscale FQDN" + minute: "0" + hour: "0" + job: /usr/local/bin/proxmox_tailscale_cert.sh + state: present diff --git a/ansible/requirements.yml b/ansible/requirements.yml index a745aba..8f16e06 100644 --- a/ansible/requirements.yml +++ b/ansible/requirements.yml @@ -2,6 +2,8 @@ collections: - name: community.docker version: 3.4.8 + - name: community.general + version: 8.1.0 roles: - name: dbrennand.caddy_docker version: 3.0.2 @@ -14,3 +16,5 @@ roles: - name: geerlingguy.security src: https://github.com/dbrennand/ansible-role-security version: refactor/ssh-regexp + - name: xanmanning.k3s + version: v3.4.3 diff --git a/ansible/templates/kube-vip-daemonset.yml.j2 b/ansible/templates/kube-vip-daemonset.yml.j2 new file mode 100644 index 0000000..cada1e4 --- /dev/null +++ b/ansible/templates/kube-vip-daemonset.yml.j2 @@ -0,0 +1,90 @@ +--- +# https://kube-vip.io/docs/installation/daemonset/#generating-a-manifest +apiVersion: 
apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: kube-vip-ds + app.kubernetes.io/version: v0.6.4 + name: kube-vip-ds + namespace: kube-system +spec: + selector: + matchLabels: + app.kubernetes.io/name: kube-vip-ds + template: + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: kube-vip-ds + app.kubernetes.io/version: v0.6.4 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_interface + value: eth0 + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: svc_leasename + value: plndr-svcs-lock + - name: vip_leaderelection + value: "true" + - name: vip_leasename + value: plndr-cp-lock + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: address + value: "{{ kube_vip_address }}" + - name: prometheus_server + value: :2112 + image: ghcr.io/kube-vip/kube-vip:v0.6.4 + imagePullPolicy: Always + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + hostNetwork: true + serviceAccountName: kube-vip + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + updateStrategy: {} +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 diff --git a/ansible/templates/kubevip-configmap.yml.j2 b/ansible/templates/kubevip-configmap.yml.j2 new file mode 100644 index 0000000..8a366d0 --- /dev/null +++ 
b/ansible/templates/kubevip-configmap.yml.j2 @@ -0,0 +1,9 @@ +--- +# https://kube-vip.io/docs/usage/cloud-provider/ +apiVersion: v1 +data: + range-global: "{{ kube_vip_cloud_controller_range_global }}" +kind: ConfigMap +metadata: + name: kubevip + namespace: kube-system diff --git a/ansible/vars/k3s.yml b/ansible/vars/k3s.yml new file mode 100644 index 0000000..ddee31b --- /dev/null +++ b/ansible/vars/k3s.yml @@ -0,0 +1,14 @@ +--- +# vars file for k3s-playbook.yml +# The VIP address for kube-vip to use for the control plane +kube_vip_address: "192.168.0.6" +# The IP address range for the kube-vip-cloud-controller to hand out for services of type `LoadBalancer` +# https://kube-vip.io/docs/usage/cloud-provider/ +kube_vip_cloud_controller_range_global: "192.168.0.21-192.168.0.49" + +# xanmanning.k3s role vars +# https://github.com/PyratLabs/ansible-role-k3s#role-variables +k3s_release_version: v1.29.0+k3s1 +k3s_install_hard_links: true +k3s_become: true +k3s_registration_address: "{{ kube_vip_address }}" diff --git a/ansible/vars/pihole.yml b/ansible/vars/pihole.yml index 5bfa515..24ef158 100644 --- a/ansible/vars/pihole.yml +++ b/ansible/vars/pihole.yml @@ -13,7 +13,7 @@ pihole_domain: net.dbren.uk pihole_local_dns: - domain: "pihole.{{ pihole_domain }}" ip: "192.168.0.2" - - domain: "proxmoxbkp01.{{ pihole_domain }}" + - domain: "proxmox02.{{ pihole_domain }}" ip: "192.168.0.3" - domain: "proxmox01.{{ pihole_domain }}" ip: "192.168.0.4" @@ -60,9 +60,17 @@ pihole_query_logging: "true" # Pi-hole admin dashboard theme pihole_web_theme: default-dark -# artis3n.tailscale role vars -# https://github.com/artis3n/ansible-role-tailscale -tailscale_args: --accept-dns=false --ssh +# geerlingguy.security vars - Overrides group_vars/docker.yml +# https://github.com/geerlingguy/ansible-role-security#role-variables +security_ssh_password_authentication: "yes" +security_ssh_permit_root_login: "yes" +security_ssh_allowed_users: + - root + - "{{ ansible_user }}" 
+security_sudoers_passwordless: + - "{{ ansible_user }}" + +# artis3n.tailscale role vars - group_vars/tailscale.yml is overridden in inventory.yml # dbrennand.caddy_docker role vars # https://github.com/dbrennand/ansible-role-caddy-docker diff --git a/docs/ansible/minecraft.md b/docs/ansible/minecraft.md index fbfe15c..80583b2 100644 --- a/docs/ansible/minecraft.md +++ b/docs/ansible/minecraft.md @@ -1,4 +1,4 @@ -# Ansible - Minecraft +# Minecraft The Ansible [minecraft-playbook.yml](https://github.com/dbrennand/home-ops/blob/dev/ansible/playbooks/minecraft-playbook.yml) is used to deploy a Minecraft server on Ubuntu Server 22.04 LTS in my Homelab. @@ -51,7 +51,7 @@ The variables for this playbook are located in [ansible/vars/minecraft.yml](http 3. Verify Ansible can connect to the server: ```bash - task ansible:adhoc -- -m ping + task ansible:adhoc -- minecraft.net.dbren.uk -m ping ``` 4. Run the playbook: diff --git a/docs/ansible/pihole.md b/docs/ansible/pihole.md index ad9ae7f..93766ef 100644 --- a/docs/ansible/pihole.md +++ b/docs/ansible/pihole.md @@ -1,4 +1,4 @@ -# Ansible - Pi-hole on Raspberry Pi 3 +# Pi-hole The Ansible [pihole-playbook.yml](https://github.com/dbrennand/home-ops/blob/dev/ansible/playbooks/pihole-playbook.yml) is used to deploy and configure Pi-hole on my Raspberry Pi 3 in my Homelab. @@ -77,7 +77,7 @@ The variables for this playbook are located in [ansible/vars/pihole.yml](https:/ 3. Verify Ansible can connect to the server: ```bash - task ansible:adhoc -- -m ping + task ansible:adhoc -- pihole.net.dbren.uk -m ping ``` 4. 
Run the playbook: diff --git a/docs/ansible/proxmox.md b/docs/ansible/proxmox.md index 894aada..460db34 100644 --- a/docs/ansible/proxmox.md +++ b/docs/ansible/proxmox.md @@ -1,9 +1,11 @@ -# Ansible - Proxmox VE +# Proxmox VE The following playbooks are used to configure Proxmox VE in my Homelab: - [proxmox-create-vm-template.yml](https://github.com/dbrennand/home-ops/blob/dev/ansible/playbooks/proxmox-create-vm-template.yml): Create VM templates. - [proxmox-download-iso-playbook.yml](https://github.com/dbrennand/home-ops/blob/dev/ansible/playbooks/proxmox-download-iso-playbook.yml): Download ISOs to Proxmox. +- [proxmox-storage-playbook.yml](https://github.com/dbrennand/home-ops/blob/dev/ansible/playbooks/proxmox-storage-playbook.yml): Provision Proxmox LVM Storage. +- [proxmox-external-vote.yml](https://github.com/dbrennand/home-ops/blob/dev/ansible/playbooks/proxmox-external-vote.yml): Proxmox Nodes - Cluster External Vote Support. See [dbrennand | home-ops - Proxmox](https://homeops.danielbrennand.com/infrastructure/Proxmox/) for further details. @@ -21,15 +23,17 @@ See [dbrennand | home-ops - Proxmox](https://homeops.danielbrennand.com/infrastr task venv ``` -3. Verify Ansible can connect to the Proxmox VE server: +3. Verify Ansible can connect to the Proxmox VE servers: ```bash - task ansible:adhoc -- -m ping + task ansible:adhoc -- proxmox -m ping ``` 4. 
Run the playbook(s): ```bash + task ansible:play -- playbooks/proxmox-storage-playbook.yml + task ansible:play -- playbooks/proxmox-external-vote.yml task ansible:play -- playbooks/proxmox-download-iso-playbook.yml task ansible:play -- playbooks/proxmox-create-vm-template.yml ``` diff --git a/docs/infrastructure/Proxmox/index.md b/docs/infrastructure/Proxmox/index.md index df4ee45..7118400 100644 --- a/docs/infrastructure/Proxmox/index.md +++ b/docs/infrastructure/Proxmox/index.md @@ -6,10 +6,12 @@ ## Proxmox VE Specs +### Node 1 (Primary) + [Minisforum Venus Series UN1265](https://store.minisforum.uk/collections/intel/products/un1265) | Component | Details | -| :----------------- | :-------------------------------------------------------------------------------- | +| ------------------ | --------------------------------------------------------------------------------- | | CPU | Intel® Core™ i7-12650H Processor, 10 Cores/16 Threads (24M Cache, up to 4.70 GHz) | | Memory | 64GB DDR4 3200MHz SODIMM (2x32GB) | | Storage (Internal) | Samsung NVMe 970 EVO Plus 1TB | @@ -17,8 +19,23 @@ | Storage (External) | Samsung SSD 870 QVO 1TB | | Storage (External) | 64GB USB | + +### Node 2 (Secondary) + +[Intel NUC6CAYB](https://www.intel.com/content/dam/support/us/en/documents/boardsandkits/NUC6CAYB_TechProdSpec.pdf) + +| Component | Details | +| ------------------ | ----------------------------- | +| CPU | Intel Celeron J3455 @ 1.50Ghz | +| Memory | 8GB | +| Storage (Internal) | 240GB SSD | + ## Installation +!!! note + + The following instructions are for Node 1 (Primary). Modify the *install disk*, *FQDN* and *IP Address* values for Node 2 (Secondary). + 1. Power on the server and enter the BIOS. 2. Go to `Advanced` > `System Devices Configuration` and set `VT-d` and `SR-IOV` to `Enabled`. @@ -47,46 +64,18 @@ Below are the post installation steps for configuring the Proxmox VE server. 
Copy SSH public key to the Proxmox VE server's `authorized_keys` file: ```bash -ssh-copy-id -i root@proxmox01.net.dbren.uk +ssh-copy-id root@proxmox01.net.dbren.uk ``` ### Storage -!!! info - - I chose not to automate this process because it only has to be done one time and takes only a few minutes to complete. - 1. Extend the Proxmox `data` logical volume to use the remaining space in the volume group: ```bash lvextend -l +100%FREE /dev/pve/data ``` -2. Login to the Proxmox web interface and navigate to `Datacenter` > `proxmox01` > `Disks`: - - - Click `/dev/sda` and select `Initialize Disk with GPT` - - Click `/dev/sdb` and select `Initialize Disk with GPT` - - Click `/dev/sdc` and select `Initialize Disk with GPT` - -3. Navigate to `Disks` > `LVM-Thin` > `Create: Thinpool`, enter the following details and click **Create**: - - | Setting | Value | - | ------- | ----------- | - | Disk | `/dev/sda` | - | Name | ssd-crucial | - - | Setting | Value | - | ------- | ----------- | - | Disk | `/dev/sdb` | - | Name | ssd-samsung | - -4. Navigate to `Disks` > `Directory` > `Create: Directory`, enter the following details and click **Create**: - - | Setting | Value | - | ---------- | ---------- | - | Disk | `/dev/sdc` | - | Filesystem | `xfs` | - | Name | ISOs | +2. Use the [proxmox-storage-playbook.yml](https://github.com/dbrennand/home-ops/blob/dev/ansible/playbooks/proxmox-storage-playbook.yml) to configure the Proxmox storage on Node 1 (Primary). 
### Scripts diff --git a/terraform/kubernetes/variables.tf b/terraform/kubernetes/variables.tf index 495f9de..d4759d0 100644 --- a/terraform/kubernetes/variables.tf +++ b/terraform/kubernetes/variables.tf @@ -45,13 +45,13 @@ variable "virtual_environment_template_vm_id" { variable "virtual_environment_os_disk_datastore_id" { description = "ID of the Proxmox VE datastore used for the OS disk" type = string - default = "ssd-samsung" + default = "lv-ssd-samsung" } variable "virtual_environment_data_disk_datastore_id" { description = "ID of the Proxmox VE datastore used for the data disk" type = string - default = "ssd-crucial" + default = "lv-ssd-crucial" } variable "nodes_gateway" {