From 591793582fed464f83be485bcee007f8fd801a36 Mon Sep 17 00:00:00 2001
From: mg
Date: Sat, 13 Feb 2021 16:18:29 +0100
Subject: [PATCH] =?UTF-8?q?=C3=9Cberarbeitung:=20zfs-Rollen?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 group_vars/proxmox.yml | 3 +
 host_vars/pve2-test.grote.lan.yml | 6 +-
 host_vars/pve2.grote.lan.yml | 7 +-
 host_vars/pve3-test.grote.lan.yml | 9 +-
 host_vars/pve3.grote.lan.yml | 9 +-
 host_vars/pve4-test.grote.lan.yml | 9 +-
 host_vars/pve4.grote.lan.yml | 8 +-
 inventory | 2 -
 playbooks/service/proxmox.yml | 15 ++-
 .../tasks/main.yml | 8 ++
 .../templates/zfs-free.sh | 0
 roles/mgrote.fileserver_smb/README.md | 4 +-
 roles/mgrote.zfs_arc_mem/README.md | 11 +++
 roles/mgrote.zfs_arc_mem/defaults/main.yml | 2 +
 roles/mgrote.zfs_arc_mem/meta/main.yml | 3 +
 roles/mgrote.zfs_arc_mem/tasks/main.yml | 7 ++
 roles/mgrote.zfs_health/README.md | 12 +++
 roles/mgrote.zfs_health/defaults/main.yml | 4 +
 roles/mgrote.zfs_health/meta/main.yml | 4 +
 roles/mgrote.zfs_health/tasks/main.yml | 16 +++
 .../templates/zfs-health.sh | 2 +-
 roles/mgrote.zfs_packages/README.md | 10 ++
 roles/mgrote.zfs_packages/tasks/main.yml | 9 ++
 roles/mgrote.zfs_scrub/README.md | 12 +++
 roles/mgrote.zfs_scrub/defaults/main.yml | 2 +
 roles/mgrote.zfs_scrub/meta/main.yml | 3 +
 roles/mgrote.zfs_scrub/tasks/main.yml | 18 ++++
 roles/mgrote.zfs_tools_cron/README.md | 36 -------
 roles/mgrote.zfs_tools_cron/defaults/main.yml | 19 ----
 roles/mgrote.zfs_tools_cron/handlers/main.yml | 10 --
 roles/mgrote.zfs_tools_cron/meta/main.yml | 14 ---
 roles/mgrote.zfs_tools_cron/tasks/main.yml | 97 -------------------
 .../templates/sudoers_zfs | 18 ----
 roles/mgrote.zfs_trim/README.md | 12 +++
 roles/mgrote.zfs_trim/defaults/main.yml | 2 +
 roles/mgrote.zfs_trim/meta/main.yml | 3 +
 roles/mgrote.zfs_trim/tasks/main.yml | 14 +++
 roles/mgrote.zfs_zed/README.md | 11 +++
 roles/mgrote.zfs_zed/defaults/main.yml | 2 +
 roles/mgrote.zfs_zed/handlers/main.yml | 2 +
 roles/mgrote.zfs_zed/meta/main.yml | 4 +
 roles/mgrote.zfs_zed/tasks/main.yml | 9 ++
 .../templates/zed.rc | 0
 43 files changed, 222 insertions(+), 226 deletions(-)
 rename roles/{mgrote.zfs_tools_cron => mgrote.apt_install_packages}/templates/zfs-free.sh (100%)
 create mode 100644 roles/mgrote.zfs_arc_mem/README.md
 create mode 100644 roles/mgrote.zfs_arc_mem/defaults/main.yml
 create mode 100644 roles/mgrote.zfs_arc_mem/meta/main.yml
 create mode 100644 roles/mgrote.zfs_arc_mem/tasks/main.yml
 create mode 100644 roles/mgrote.zfs_health/README.md
 create mode 100644 roles/mgrote.zfs_health/defaults/main.yml
 create mode 100644 roles/mgrote.zfs_health/meta/main.yml
 create mode 100644 roles/mgrote.zfs_health/tasks/main.yml
 rename roles/{mgrote.zfs_tools_cron => mgrote.zfs_health}/templates/zfs-health.sh (97%)
 create mode 100644 roles/mgrote.zfs_packages/README.md
 create mode 100644 roles/mgrote.zfs_packages/tasks/main.yml
 create mode 100644 roles/mgrote.zfs_scrub/README.md
 create mode 100644 roles/mgrote.zfs_scrub/defaults/main.yml
 create mode 100644 roles/mgrote.zfs_scrub/meta/main.yml
 create mode 100644 roles/mgrote.zfs_scrub/tasks/main.yml
 delete mode 100644 roles/mgrote.zfs_tools_cron/README.md
 delete mode 100644 roles/mgrote.zfs_tools_cron/defaults/main.yml
 delete mode 100644 roles/mgrote.zfs_tools_cron/handlers/main.yml
 delete mode 100644 roles/mgrote.zfs_tools_cron/meta/main.yml
 delete mode 100644 roles/mgrote.zfs_tools_cron/tasks/main.yml
 delete mode 100644 roles/mgrote.zfs_tools_cron/templates/sudoers_zfs
 create mode 100644
roles/mgrote.zfs_trim/README.md create mode 100644 roles/mgrote.zfs_trim/defaults/main.yml create mode 100644 roles/mgrote.zfs_trim/meta/main.yml create mode 100644 roles/mgrote.zfs_trim/tasks/main.yml create mode 100644 roles/mgrote.zfs_zed/README.md create mode 100644 roles/mgrote.zfs_zed/defaults/main.yml create mode 100644 roles/mgrote.zfs_zed/handlers/main.yml create mode 100644 roles/mgrote.zfs_zed/meta/main.yml create mode 100644 roles/mgrote.zfs_zed/tasks/main.yml rename roles/{mgrote.zfs_tools_cron => mgrote.zfs_zed}/templates/zed.rc (100%) diff --git a/group_vars/proxmox.yml b/group_vars/proxmox.yml index 956aa850..0fd006d6 100644 --- a/group_vars/proxmox.yml +++ b/group_vars/proxmox.yml @@ -17,6 +17,9 @@ tmux_conf_destination: "/root/.tmux.conf" tmux_bashrc_destination: "/root/.bashrc" tmux_standardsession_name: "default" + ### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_* + zfs_extra_max_usage_health: "80" + # Ansible Variablen diff --git a/host_vars/pve2-test.grote.lan.yml b/host_vars/pve2-test.grote.lan.yml index af51d08c..0c73fe30 100644 --- a/host_vars/pve2-test.grote.lan.yml +++ b/host_vars/pve2-test.grote.lan.yml @@ -39,9 +39,9 @@ ### mgrote.apcupsd apcupsd_nis_master: true apcupsd_nis_master_hostname: pve2-test.grote.lan - ### mgrote.zfs_tools_cron - zfs_arc_max: "12884901888" - zfs_pools: + ### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_* + zfs_extra_arc_max_size: "2147483648" # 2GB in Bytes + zfs_extra_zfs_pools: - name: "ssd_vm_mirror" type: "ssd" cron_minute_zfs_trim: "5" diff --git a/host_vars/pve2.grote.lan.yml b/host_vars/pve2.grote.lan.yml index 6b53acb7..a35f089d 100644 --- a/host_vars/pve2.grote.lan.yml +++ b/host_vars/pve2.grote.lan.yml @@ -38,9 +38,10 @@ ### mgrote.apcupsd apcupsd_nis_master: true apcupsd_nis_master_hostname: pve2.grote.lan - ### mgrote.zfs_tools_cron - zfs_arc_max: "12884901888" - zfs_pools: + ### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_* + zfs_extra_arc_max_size: "17179869184" # 16GB in Bytes + zfs_extra_max_usage_health: "90" + zfs_extra_zfs_pools: - name: "ssd_vm_mirror" type: "ssd" cron_minute_zfs_trim: "5" diff --git a/host_vars/pve3-test.grote.lan.yml b/host_vars/pve3-test.grote.lan.yml index 6f574714..7cc5e581 100644 --- a/host_vars/pve3-test.grote.lan.yml +++ b/host_vars/pve3-test.grote.lan.yml @@ -1,6 +1,6 @@ --- ### mgrote.zfs_manage_datasets - # rppol wird von pve bei installation erstellt + # rpool wird von pve bei installation erstellt wenn die installation mit zfs durchgeführt wird zfs_datasets: - dataset: rpool/vm/dir state: present @@ -25,9 +25,10 @@ xattr: sa dnodesize: auto atime: on - snapdir: hidden ### mgrote.zfs_tools_cron - zfs_arc_max: "12884901888" - zfs_pools: + snapdir: hidden + ### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_* + zfs_extra_arc_max_size: "2147483648" # 2GB in Bytes + zfs_extra_zfs_pools: - name: "rpool" type: "ssd" cron_minute_zfs_trim: "5" diff --git a/host_vars/pve3.grote.lan.yml b/host_vars/pve3.grote.lan.yml index 6f574714..3b817516 100644 --- a/host_vars/pve3.grote.lan.yml +++ b/host_vars/pve3.grote.lan.yml @@ -1,6 +1,6 @@ --- ### mgrote.zfs_manage_datasets - # rppol wird von pve bei installation erstellt + # rpool wird von pve bei installation erstellt wenn die installation mit zfs durchgeführt wird zfs_datasets: - 
dataset: rpool/vm/dir state: present @@ -25,9 +25,10 @@ xattr: sa dnodesize: auto atime: on - snapdir: hidden ### mgrote.zfs_tools_cron - zfs_arc_max: "12884901888" - zfs_pools: + snapdir: hidden + ### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_* + zfs_extra_arc_max_size: "6442450944" # 6GB in Bytes + zfs_extra_zfs_pools: - name: "rpool" type: "ssd" cron_minute_zfs_trim: "5" diff --git a/host_vars/pve4-test.grote.lan.yml b/host_vars/pve4-test.grote.lan.yml index 6f574714..7cc5e581 100644 --- a/host_vars/pve4-test.grote.lan.yml +++ b/host_vars/pve4-test.grote.lan.yml @@ -1,6 +1,6 @@ --- ### mgrote.zfs_manage_datasets - # rppol wird von pve bei installation erstellt + # rpool wird von pve bei installation erstellt wenn die installation mit zfs durchgeführt wird zfs_datasets: - dataset: rpool/vm/dir state: present @@ -25,9 +25,10 @@ xattr: sa dnodesize: auto atime: on - snapdir: hidden ### mgrote.zfs_tools_cron - zfs_arc_max: "12884901888" - zfs_pools: + snapdir: hidden + ### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_* + zfs_extra_arc_max_size: "2147483648" # 2GB in Bytes + zfs_extra_zfs_pools: - name: "rpool" type: "ssd" cron_minute_zfs_trim: "5" diff --git a/host_vars/pve4.grote.lan.yml b/host_vars/pve4.grote.lan.yml index be9115aa..3b817516 100644 --- a/host_vars/pve4.grote.lan.yml +++ b/host_vars/pve4.grote.lan.yml @@ -1,6 +1,6 @@ --- ### mgrote.zfs_manage_datasets - # rppol wird von pve bei installation erstellt + # rpool wird von pve bei installation erstellt wenn die installation mit zfs durchgeführt wird zfs_datasets: - dataset: rpool/vm/dir state: present @@ -26,9 +26,9 @@ dnodesize: auto atime: on snapdir: hidden - ### mgrote.zfs_tools_cron - zfs_arc_max: "12884901888" - zfs_pools: + ### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_* + zfs_extra_arc_max_size: "6442450944" # 6GB in Bytes + zfs_extra_zfs_pools: - name: "rpool" type: "ssd" cron_minute_zfs_trim: "5" diff --git a/inventory b/inventory index 5a3b51b8..cf4b9112 100644 --- a/inventory +++ b/inventory @@ -57,7 +57,6 @@ all: hosts: gitlab-runner-test.grote.lan: gitlab-runner.grote.lan: - gitlab-runner2.grote.lan: production: hosts: @@ -71,7 +70,6 @@ all: pve2.grote.lan: gitlab.grote.lan: gitlab-runner.grote.lan: - gitlab-runner2.grote.lan: pve3.grote.lan: pve4.grote.lan: test: diff --git a/playbooks/service/proxmox.yml b/playbooks/service/proxmox.yml index 7b40867c..fb0f6c71 100644 --- a/playbooks/service/proxmox.yml +++ b/playbooks/service/proxmox.yml @@ -1,15 +1,20 @@ --- - hosts: proxmox roles: - - { role: mgrote.zfs_manage_datasets, tags: "datasets" } - { role: mgrote.apt_manage_sources, tags: "apt_sources" } - - { role: mgrote.apcupsd, tags: "apcupsd" } - - { role: mgrote.smart, tags: "smart" } - - { role: mgrote.zfs_tools_cron, tags: "zfs_tools" } - - { role: mgrote.postfix, tags: "postfix" } + - { role: mgrote.zfs_packages, tags: "zfs_packages" } + - { role: mgrote.zfs_arc_mem, tags: "zfs_arc_mem" } + - { role: mgrote.zfs_manage_datasets, tags: "datasets" } + - { role: mgrote.zfs_scrub, tags: "zfs_scrub" } + - { role: mgrote.zfs_trim, tags: "zfs_trim" } + - { role: mgrote.zfs_zed, tags: "zfs_zed" } + - { role: mgrote.zfs_health, tags: "zfs_health" } - { role: mgrote.zfs_sanoid, tags: "sanoid", when: "'pve2.grote.lan' in inventory_hostname" } + - { role: mgrote.apcupsd, tags: "apcupsd" } + - { role: mgrote.smart, 
tags: "smart" } + - { role: mgrote.postfix, tags: "postfix" } - { role: mgrote.ecc-rasdaemon, tags: "ecc", when: "'pve2.grote.lan' in inventory_hostname" } diff --git a/roles/mgrote.apt_install_packages/tasks/main.yml b/roles/mgrote.apt_install_packages/tasks/main.yml index de52cd18..f2a133e1 100644 --- a/roles/mgrote.apt_install_packages/tasks/main.yml +++ b/roles/mgrote.apt_install_packages/tasks/main.yml @@ -46,3 +46,11 @@ dest: "/usr/local/bin/systemtemps.sh" mode: a+x when: ansible_virtualization_role != 'guest' + + - name: copy zfs-free.sh + become: yes + ansible.builtin.template: + mode: 0555 + src: zfs-free.sh + dest: /usr/local/bin/zfs-free.sh + when: "'proxmox' in group_names" diff --git a/roles/mgrote.zfs_tools_cron/templates/zfs-free.sh b/roles/mgrote.apt_install_packages/templates/zfs-free.sh similarity index 100% rename from roles/mgrote.zfs_tools_cron/templates/zfs-free.sh rename to roles/mgrote.apt_install_packages/templates/zfs-free.sh diff --git a/roles/mgrote.fileserver_smb/README.md b/roles/mgrote.fileserver_smb/README.md index 6b831fa7..cdfc548a 100644 --- a/roles/mgrote.fileserver_smb/README.md +++ b/roles/mgrote.fileserver_smb/README.md @@ -35,7 +35,7 @@ Es sind keine "defaults" gesetzt! ## Alternatives Dictionary Format: ```bash - zfs_pools: + zfs_extra_zfs_pools: - name: "ssd_vm_mirror" type: "ssd" cron_minute_zfs_trim: "5" @@ -48,6 +48,6 @@ Es sind keine "defaults" gesetzt! ``` ist das gleiche wie: ```bash - zfs_pools: + zfs_extra_zfs_pools: - { name: "ssd_vm_mirror", type: "ssd", cron_minute_zfs_trim: "5", cron_hour_zfs_trim: "22", cron_month_zfs_trim: "4,8,12", cron_day_zfs_trim: "2", cron_weekday_zfs_scrub: "6", cron_minutes_zfs_scrub: "0", cron_hour_zfs_scrub: "23"} ``` diff --git a/roles/mgrote.zfs_arc_mem/README.md b/roles/mgrote.zfs_arc_mem/README.md new file mode 100644 index 00000000..63e623b7 --- /dev/null +++ b/roles/mgrote.zfs_arc_mem/README.md @@ -0,0 +1,11 @@ +## mgrote.zfs_arc_mem + +### Beschreibung +Setzt die maximale ZFS ARC-Groesse. + +### Funktioniert auf +- [x] ProxMox 6.1 + +### Variablen + Defaults +- see [defaults](./defaults/main.yml) +- Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_* diff --git a/roles/mgrote.zfs_arc_mem/defaults/main.yml b/roles/mgrote.zfs_arc_mem/defaults/main.yml new file mode 100644 index 00000000..a6e42f5d --- /dev/null +++ b/roles/mgrote.zfs_arc_mem/defaults/main.yml @@ -0,0 +1,2 @@ +--- +zfs_extra_arc_max_size: "8589934592" #in Bytes diff --git a/roles/mgrote.zfs_arc_mem/meta/main.yml b/roles/mgrote.zfs_arc_mem/meta/main.yml new file mode 100644 index 00000000..35df54ac --- /dev/null +++ b/roles/mgrote.zfs_arc_mem/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: mgrote.zfs_packages diff --git a/roles/mgrote.zfs_arc_mem/tasks/main.yml b/roles/mgrote.zfs_arc_mem/tasks/main.yml new file mode 100644 index 00000000..bc9bde41 --- /dev/null +++ b/roles/mgrote.zfs_arc_mem/tasks/main.yml @@ -0,0 +1,7 @@ +--- + - name: Limit ZFS Memory Usage + become: yes + ansible.builtin.lineinfile: + path: /etc/modprobe.d/zfs.conf + line: options zfs zfs_arc_max={{ zfs_extra_arc_max_size }} + create: yes diff --git a/roles/mgrote.zfs_health/README.md b/roles/mgrote.zfs_health/README.md new file mode 100644 index 00000000..418c0747 --- /dev/null +++ b/roles/mgrote.zfs_health/README.md @@ -0,0 +1,12 @@ +## mgrote.zfs_health + +### Beschreibung +Richtet "zfs_health.sh", ein ZFS-Checkscript das auch Mails versendet bei Fehlern. 
+ + +### Funktioniert auf +- [x] ProxMox 6.1 + +### Variablen + Defaults +- see [defaults](./defaults/main.yml) +- Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_* diff --git a/roles/mgrote.zfs_health/defaults/main.yml b/roles/mgrote.zfs_health/defaults/main.yml new file mode 100644 index 00000000..a153e8c3 --- /dev/null +++ b/roles/mgrote.zfs_health/defaults/main.yml @@ -0,0 +1,4 @@ +--- +zfs_extra_cron_minutes_zfs_health: "0,15,30,45" +zfs_extra_cron_hours_zfs_health: "*" +zfs_extra_max_usage_health: "80" diff --git a/roles/mgrote.zfs_health/meta/main.yml b/roles/mgrote.zfs_health/meta/main.yml new file mode 100644 index 00000000..873271d1 --- /dev/null +++ b/roles/mgrote.zfs_health/meta/main.yml @@ -0,0 +1,4 @@ +--- + dependencies: + - role: mgrote.postfix + - role: mgrote.zfs_packages diff --git a/roles/mgrote.zfs_health/tasks/main.yml b/roles/mgrote.zfs_health/tasks/main.yml new file mode 100644 index 00000000..54f41053 --- /dev/null +++ b/roles/mgrote.zfs_health/tasks/main.yml @@ -0,0 +1,16 @@ +--- + - name: copy "zfs-health.sh" + become: yes + ansible.builtin.template: + src: zfs-health.sh + dest: /usr/local/bin/zfs-health.sh + mode: "+x" + + - name: add cronjob "zfs-health.sh" + become: yes + ansible.builtin.cron: + name: zfs-health + state: present + job: "/usr/local/bin/zfs-health.sh" + minute: "{{ zfs_extra_cron_minutes_zfs_health }}" + hour: "{{ zfs_extra_cron_hours_zfs_health }}" diff --git a/roles/mgrote.zfs_tools_cron/templates/zfs-health.sh b/roles/mgrote.zfs_health/templates/zfs-health.sh similarity index 97% rename from roles/mgrote.zfs_tools_cron/templates/zfs-health.sh rename to roles/mgrote.zfs_health/templates/zfs-health.sh index 442139a8..40ccff0a 100644 --- a/roles/mgrote.zfs_tools_cron/templates/zfs-health.sh +++ b/roles/mgrote.zfs_health/templates/zfs-health.sh @@ -19,7 +19,7 @@ fi # SSD then 80% is reasonable. If you have a 60TB raid-z2 array then you can # probably set the warning closer to 95%. -maxCapacity={{ zfs_tool_max_cap }} +maxCapacity={{ zfs_extra_max_usage_health }} if [ ${problems} -eq 0 ]; then capacity=$(/sbin/zpool list -H -o capacity) diff --git a/roles/mgrote.zfs_packages/README.md b/roles/mgrote.zfs_packages/README.md new file mode 100644 index 00000000..8cc742eb --- /dev/null +++ b/roles/mgrote.zfs_packages/README.md @@ -0,0 +1,10 @@ +## mgrote.zfs_packages + +### Beschreibung +Installiert zfs-packages. + +### Funktioniert auf +- [x] ProxMox 6.1 + +### Variablen + Defaults +- keine diff --git a/roles/mgrote.zfs_packages/tasks/main.yml b/roles/mgrote.zfs_packages/tasks/main.yml new file mode 100644 index 00000000..cd1f585b --- /dev/null +++ b/roles/mgrote.zfs_packages/tasks/main.yml @@ -0,0 +1,9 @@ +--- + - name: install zfs-tools-packages + become: yes + ansible.builtin.package: + name: + - zfsutils + - zfs-initramfs + - zfs-zed + state: present diff --git a/roles/mgrote.zfs_scrub/README.md b/roles/mgrote.zfs_scrub/README.md new file mode 100644 index 00000000..c29189b8 --- /dev/null +++ b/roles/mgrote.zfs_scrub/README.md @@ -0,0 +1,12 @@ +## mgrote.zfs_scrub + +### Beschreibung +Richtet regelmaessige Scrubs(jeden Sonntag) ein. 
+ + +### Funktioniert auf +- [x] ProxMox 6.1 + +### Variablen + Defaults +- see [defaults](./defaults/main.yml) +- Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_* diff --git a/roles/mgrote.zfs_scrub/defaults/main.yml b/roles/mgrote.zfs_scrub/defaults/main.yml new file mode 100644 index 00000000..31569546 --- /dev/null +++ b/roles/mgrote.zfs_scrub/defaults/main.yml @@ -0,0 +1,2 @@ +--- +zfs_extra_path_zpool: "/usr/sbin/zpool" diff --git a/roles/mgrote.zfs_scrub/meta/main.yml b/roles/mgrote.zfs_scrub/meta/main.yml new file mode 100644 index 00000000..35df54ac --- /dev/null +++ b/roles/mgrote.zfs_scrub/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: mgrote.zfs_packages diff --git a/roles/mgrote.zfs_scrub/tasks/main.yml b/roles/mgrote.zfs_scrub/tasks/main.yml new file mode 100644 index 00000000..f60ded4c --- /dev/null +++ b/roles/mgrote.zfs_scrub/tasks/main.yml @@ -0,0 +1,18 @@ +--- + - name: remove system scrub job #https://forum.proxmox.com/threads/script-sequential-zfs-scrub-for-cron.25124/ + become: true + ansible.builtin.lineinfile: + path: /etc/cron.d/zfsutils-linux + state: absent + line: '24 0 8-14 * * root [ $(date +\%w) -eq 0 ] && [ -x /usr/lib/zfs-linux/scrub ] && /usr/lib/zfs-linux/scrub' + + - name: add cronjob "zfs-scrub" + become: yes + ansible.builtin.cron: + name: zfs-scrub - "{{ item.name }}" + state: present + job: "{{ zfs_extra_path_zpool }} scrub {{ item.name }}" + weekday: "{{ item.cron_weekday_zfs_scrub }}" + minute: "{{ item.cron_minutes_zfs_scrub }}" + hour: "{{ item.cron_hour_zfs_scrub }}" + with_items: "{{ zfs_extra_zfs_pools }}" diff --git a/roles/mgrote.zfs_tools_cron/README.md b/roles/mgrote.zfs_tools_cron/README.md deleted file mode 100644 index f2cbb2d3..00000000 --- a/roles/mgrote.zfs_tools_cron/README.md +++ /dev/null @@ -1,36 +0,0 @@ -## mgrote.zfs_tools_cron - -### Beschreibung -Aktiviert die Mail Funktion von ZED (ZFS Event Daemon). -Setzt die maximale ARC-Groesse. -Benoetigt "mgrote.postfix". -Richtet regelmaessige Scrubs(jeden Sonntag) und Trim(alle 4 Monate) ein. -Richtet "zfs_health.sh", ein ZFS-Checkscript das auch Mails versendet bei Fehlern. -Deaktiviert das mitinstallierte scrub-Script in /etc/cron.d/zfsutils-linux. - -### Funktioniert auf -- [x] ProxMox 6.1 - -### Variablen + Defaults -##### Wer soll die Mails bekommen -empfaenger_mail: michael.grote@posteo.de -zfs_pools: zfs_vm_mirror -##### Maximale Groesse ARC in Bytes -Beim aendern wird die Zeile einmal mit dem alten Wert und dem neuen Wert in die Zeile eingefuegt! -zfs_arc_max: "8589934592" -Die aenderung der maximalen ARC-Size wird erst nach einem Neustart uebernommen. -##### Ausfuehrung des ZFS_health-Scripts in Minuten -cron_minutes_zfs_health: "15" -cron_hours_zfs_health: "6,18" -##### Poolauflistung + Parameter -- u.A. fuer Cron fuer Trim und Scrub -Alle 4 Monate am 2. 
des Monats um 23:12 -``- { name: "zfs_single_hdd", type: "ssd", cron_minute_zfs_trim: "12", cron_hour_zfs_trim: "23", cron_month_zfs_trim: "4,8,12", cron_day_zfs_trim: "3", cron_weekday_zfs_scrub: "0", cron_minutes_zfs_scrub: "0", cron_hour_zfs_scrub: "5"}`` -Erst Poolname -Type: ssd/hdd; wenn der Wert SSD ist für ein Cronjobh angelegt der zfs-trim ausführt -Trim: Minute, Stunde, Monat, Tag des Monats -Scrub: Wochentag, Minute, Stunde -##### Pfad zu zpool-binary -pfad_zu_zpool: "/usr/sbin/zpool" -##### Füllstand für Warnmail -zfs_tool_max_cap: "80" diff --git a/roles/mgrote.zfs_tools_cron/defaults/main.yml b/roles/mgrote.zfs_tools_cron/defaults/main.yml deleted file mode 100644 index d5d4775e..00000000 --- a/roles/mgrote.zfs_tools_cron/defaults/main.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -zfs_arc_max: "8589934592" - -cron_minutes_zfs_health: "0,15,30,45" -cron_hours_zfs_health: "*" - -cron_weekday_zfs_scrub: "0" -cron_minutes_zfs_scrub: "5" -cron_hour_zfs_scrub: "4" - -cron_weekday_zfs_trim: "*" -cron_minute_zfs_trim: "12" -cron_hour_zfs_trim: "23" -cron_month_zfs_trim: "4,8,12" -cron_day_zfs_trim: "2" - -pfad_zu_zpool: "/usr/sbin/zpool" - -zfs_tool_max_cap: "80" diff --git a/roles/mgrote.zfs_tools_cron/handlers/main.yml b/roles/mgrote.zfs_tools_cron/handlers/main.yml deleted file mode 100644 index 6d876e02..00000000 --- a/roles/mgrote.zfs_tools_cron/handlers/main.yml +++ /dev/null @@ -1,10 +0,0 @@ - - - name: telegraf_starten_aktivieren - become: yes - systemd: - name: telegraf - enabled: yes - state: restarted - - - name: testmail - shell: echo "zed ist eingerichtet" | mail -s "{{ ansible_hostname }} - zed" {{ empfaenger_mail }} diff --git a/roles/mgrote.zfs_tools_cron/meta/main.yml b/roles/mgrote.zfs_tools_cron/meta/main.yml deleted file mode 100644 index c56a97fa..00000000 --- a/roles/mgrote.zfs_tools_cron/meta/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -dependencies: - - role: mgrote.postfix -galaxy_info: - author: mgrote - description: installs zfs-tools - min_ansible_version: 2.0 - license: GPLv3 - platforms: - - name: Ubuntu - versions: - - all - galaxy_tags: - - system diff --git a/roles/mgrote.zfs_tools_cron/tasks/main.yml b/roles/mgrote.zfs_tools_cron/tasks/main.yml deleted file mode 100644 index da5716da..00000000 --- a/roles/mgrote.zfs_tools_cron/tasks/main.yml +++ /dev/null @@ -1,97 +0,0 @@ - - name: zfs-tools installieren - become: yes - ansible.builtin.package: - name: - - zfsutils - - zfs-initramfs - - zfs-zed - state: present - - - name: Limit ZFS Memory Usage - become: yes - ansible.builtin.lineinfile: - path: /etc/modprobe.d/zfs.conf - line: options zfs zfs_arc_max={{ zfs_arc_max }} - create: yes - - - name: remove system scrub job #https://forum.proxmox.com/threads/script-sequential-zfs-scrub-for-cron.25124/ - become: true - ansible.builtin.lineinfile: - path: /etc/cron.d/zfsutils-linux - state: absent - line: '24 0 8-14 * * root [ $(date +\%w) -eq 0 ] && [ -x /usr/lib/zfs-linux/scrub ] && /usr/lib/zfs-linux/scrub' - - - name: erstelle Ordner "root-zfs" - become: yes - ansible.builtin.file: - path: /root/zfs - state: directory - - - name: kopiere "zfs-health.sh" - become: yes - ansible.builtin.template: - src: zfs-health.sh - dest: /usr/local/bin/zfs-health.sh - mode: "+x" - - - name: lege cronjob fuer "zfs-health.sh" an - become: yes - ansible.builtin.cron: - name: zfs-health - state: present - job: "/usr/local/bin/zfs-health.sh" - minute: "{{ cron_minutes_zfs_health }}" - hour: "{{ cron_hours_zfs_health }}" - - - name: lege cronjob fuer "zfs-scrub" an - become: 
yes - ansible.builtin.cron: - name: zfs-scrub - "{{ item.name }}" - state: present - job: "{{ pfad_zu_zpool }} scrub {{ item.name }}" - weekday: "{{ item.cron_weekday_zfs_scrub }}" - minute: "{{ item.cron_minutes_zfs_scrub }}" - hour: "{{ item.cron_hour_zfs_scrub }}" - with_items: "{{ zfs_pools }}" - - - name: lege cronjob fuer "zfs-trim" an - become: yes - ansible.builtin.cron: - name: zfs-trim - "{{ item.name }}" - state: present - job: "{{ pfad_zu_zpool }} trim {{ item.name }}" - minute: "{{ item.cron_minute_zfs_trim }}" - hour: "{{ item.cron_hour_zfs_trim }}" - month: "{{ item.cron_month_zfs_trim }}" - day: "{{ item.cron_day_zfs_trim }}" -# disabled: yes - when: item.type == 'ssd' - with_items: "{{ zfs_pools }}" - - - name: kopiere zed.rc - become: yes - ansible.builtin.template: - owner: root - mode: 0600 - src: zed.rc - dest: /etc/zfs/zed.d/zed.rc - notify: testmail - - - name: copy zfs-free.sh - become: yes - ansible.builtin.template: - mode: 0555 - src: zfs-free.sh - dest: /usr/local/bin/zfs-free.sh - - - name: Erlaube "non-root" Usern Read-Only ZFS Commands - become: yes - ansible.builtin.template: - src: sudoers_zfs - dest: /etc/sudoers.d/zfs - owner: root - group: root - force: yes - backup: yes - mode: 0440 - validate: /usr/sbin/visudo -cf %s diff --git a/roles/mgrote.zfs_tools_cron/templates/sudoers_zfs b/roles/mgrote.zfs_tools_cron/templates/sudoers_zfs deleted file mode 100644 index 0acd6387..00000000 --- a/roles/mgrote.zfs_tools_cron/templates/sudoers_zfs +++ /dev/null @@ -1,18 +0,0 @@ -# Allow read-only ZoL commands to be called through sudo -# without a password. Remove the first '#' column to enable. -# -# CAUTION: Any syntax error introduced here will break sudo. -# -# Cmnd alias specification -Cmnd_Alias C_ZFS = \ - /sbin/zfs "", /sbin/zfs help *, \ - /sbin/zfs get, /sbin/zfs get *, \ - /sbin/zfs list, /sbin/zfs list *, \ - /sbin/zpool "", /sbin/zpool help *, \ - /sbin/zpool iostat, /sbin/zpool iostat *, \ - /sbin/zpool list, /sbin/zpool list *, \ - /sbin/zpool status, /sbin/zpool status *, \ - /sbin/zpool upgrade, /sbin/zpool upgrade -v -# -# allow any user to use basic read-only ZFS commands -ALL ALL = (root) NOPASSWD: C_ZFS diff --git a/roles/mgrote.zfs_trim/README.md b/roles/mgrote.zfs_trim/README.md new file mode 100644 index 00000000..281813d5 --- /dev/null +++ b/roles/mgrote.zfs_trim/README.md @@ -0,0 +1,12 @@ +## mgrote.zfs_trim + +### Beschreibung +Richtet regelmaessigen Trim(alle 4 Monate) ein. 
+ + +### Funktioniert auf +- [x] ProxMox 6.1 + +### Variablen + Defaults +- see [defaults](./defaults/main.yml) +- Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_* diff --git a/roles/mgrote.zfs_trim/defaults/main.yml b/roles/mgrote.zfs_trim/defaults/main.yml new file mode 100644 index 00000000..31569546 --- /dev/null +++ b/roles/mgrote.zfs_trim/defaults/main.yml @@ -0,0 +1,2 @@ +--- +zfs_extra_path_zpool: "/usr/sbin/zpool" diff --git a/roles/mgrote.zfs_trim/meta/main.yml b/roles/mgrote.zfs_trim/meta/main.yml new file mode 100644 index 00000000..35df54ac --- /dev/null +++ b/roles/mgrote.zfs_trim/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: mgrote.zfs_packages diff --git a/roles/mgrote.zfs_trim/tasks/main.yml b/roles/mgrote.zfs_trim/tasks/main.yml new file mode 100644 index 00000000..c6ed622a --- /dev/null +++ b/roles/mgrote.zfs_trim/tasks/main.yml @@ -0,0 +1,14 @@ +--- + - name: add cronjob "zfs-trim" + become: yes + ansible.builtin.cron: + name: zfs-trim - "{{ item.name }}" + state: present + job: "{{ zfs_extra_path_zpool }} trim {{ item.name }}" + minute: "{{ item.cron_minute_zfs_trim }}" + hour: "{{ item.cron_hour_zfs_trim }}" + month: "{{ item.cron_month_zfs_trim }}" + day: "{{ item.cron_day_zfs_trim }}" +# disabled: yes + when: item.type == 'ssd' + with_items: "{{ zfs_extra_zfs_pools }}" diff --git a/roles/mgrote.zfs_zed/README.md b/roles/mgrote.zfs_zed/README.md new file mode 100644 index 00000000..90ca357a --- /dev/null +++ b/roles/mgrote.zfs_zed/README.md @@ -0,0 +1,11 @@ +## mgrote.zfs_zed + +### Beschreibung +Aktiviert die Mail Funktion von ZED (ZFS Event Daemon). + +### Funktioniert auf +- [x] ProxMox 6.1 + +### Variablen + Defaults +- see [defaults](./defaults/main.yml) +- Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_* diff --git a/roles/mgrote.zfs_zed/defaults/main.yml b/roles/mgrote.zfs_zed/defaults/main.yml new file mode 100644 index 00000000..1c5b8377 --- /dev/null +++ b/roles/mgrote.zfs_zed/defaults/main.yml @@ -0,0 +1,2 @@ +--- + empfaenger_mail: michael.grote@posteo.de diff --git a/roles/mgrote.zfs_zed/handlers/main.yml b/roles/mgrote.zfs_zed/handlers/main.yml new file mode 100644 index 00000000..778800fb --- /dev/null +++ b/roles/mgrote.zfs_zed/handlers/main.yml @@ -0,0 +1,2 @@ + - name: testmail + shell: echo "zed ist eingerichtet" | mail -s "{{ ansible_hostname }} - zed" {{ empfaenger_mail }} diff --git a/roles/mgrote.zfs_zed/meta/main.yml b/roles/mgrote.zfs_zed/meta/main.yml new file mode 100644 index 00000000..c8caf934 --- /dev/null +++ b/roles/mgrote.zfs_zed/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - role: mgrote.postfix + - role: mgrote.zfs_packages diff --git a/roles/mgrote.zfs_zed/tasks/main.yml b/roles/mgrote.zfs_zed/tasks/main.yml new file mode 100644 index 00000000..bb8039f9 --- /dev/null +++ b/roles/mgrote.zfs_zed/tasks/main.yml @@ -0,0 +1,9 @@ +--- + - name: kopiere zed.rc + become: yes + ansible.builtin.template: + owner: root + mode: 0600 + src: zed.rc + dest: /etc/zfs/zed.d/zed.rc + notify: testmail diff --git a/roles/mgrote.zfs_tools_cron/templates/zed.rc b/roles/mgrote.zfs_zed/templates/zed.rc similarity index 100% rename from roles/mgrote.zfs_tools_cron/templates/zed.rc rename to roles/mgrote.zfs_zed/templates/zed.rc
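
Note on the reworked variables: all of the split roles (mgrote.zfs_arc_mem, mgrote.zfs_health, mgrote.zfs_scrub, mgrote.zfs_trim, mgrote.zfs_zed) read the consolidated `zfs_extra_*` keys. A minimal host_vars sketch — the pool name and schedule values are illustrative placeholders, not taken from any host file in this patch:

```yaml
# Hypothetical host_vars example for the reworked roles; values are placeholders.
zfs_extra_arc_max_size: "8589934592"        # mgrote.zfs_arc_mem -> /etc/modprobe.d/zfs.conf (bytes)
zfs_extra_max_usage_health: "80"            # mgrote.zfs_health -> warning threshold in zfs-health.sh
zfs_extra_cron_minutes_zfs_health: "0,30"   # mgrote.zfs_health -> cron schedule
zfs_extra_cron_hours_zfs_health: "*"
zfs_extra_path_zpool: "/usr/sbin/zpool"     # mgrote.zfs_scrub / mgrote.zfs_trim
zfs_extra_zfs_pools:                        # one entry per pool; trim jobs are only created for type "ssd"
  - name: "example_pool"
    type: "ssd"
    cron_minute_zfs_trim: "5"
    cron_hour_zfs_trim: "22"
    cron_month_zfs_trim: "4,8,12"
    cron_day_zfs_trim: "2"
    cron_weekday_zfs_scrub: "6"
    cron_minutes_zfs_scrub: "0"
    cron_hour_zfs_scrub: "23"
```

Because `zfs_extra_max_usage_health: "80"` is also set in group_vars/proxmox.yml, a host only needs to define it when it should deviate from that default, as host_vars/pve2.grote.lan.yml does with "90".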
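The mgrote.zfs_arc_mem role only writes `options zfs zfs_arc_max=...` to /etc/modprobe.d/zfs.conf; as the removed mgrote.zfs_tools_cron README pointed out, that limit is only picked up after a reboot. A possible follow-up — a sketch assuming ZFS on Linux hosts with root on ZFS, not part of this patch — would apply the value to the running module and rebuild the initramfs so the option is already set at early boot:

```yaml
# Hypothetical extra tasks for roles/mgrote.zfs_arc_mem/tasks/main.yml; not in this patch.
- name: apply zfs_arc_max to the running zfs module
  become: yes
  ansible.builtin.shell: >
    echo {{ zfs_extra_arc_max_size }} > /sys/module/zfs/parameters/zfs_arc_max
  changed_when: false

- name: rebuild initramfs so the modprobe option is active at early boot
  become: yes
  ansible.builtin.command: update-initramfs -u
  when: ansible_os_family == "Debian"
```

Writing to /sys/module/zfs/parameters/zfs_arc_max shrinks the ARC without a reboot; the initramfs rebuild matters on Proxmox installations that boot from a ZFS rpool, because the zfs module is loaded before the root filesystem is mounted.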