Rework: zfs roles
parent aee96bf4d1 · commit 591793582f
43 changed files with 222 additions and 226 deletions
@@ -17,6 +17,9 @@
tmux_conf_destination: "/root/.tmux.conf"
tmux_bashrc_destination: "/root/.bashrc"
tmux_standardsession_name: "default"
### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
zfs_extra_max_usage_health: "80"

# Ansible Variablen
@@ -39,9 +39,9 @@
### mgrote.apcupsd
apcupsd_nis_master: true
apcupsd_nis_master_hostname: pve2-test.grote.lan
### mgrote.zfs_tools_cron
zfs_arc_max: "12884901888"
zfs_pools:
### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
zfs_extra_arc_max_size: "2147483648" # 2GB in Bytes
zfs_extra_zfs_pools:
  - name: "ssd_vm_mirror"
    type: "ssd"
    cron_minute_zfs_trim: "5"
@@ -38,9 +38,10 @@
### mgrote.apcupsd
apcupsd_nis_master: true
apcupsd_nis_master_hostname: pve2.grote.lan
### mgrote.zfs_tools_cron
zfs_arc_max: "12884901888"
zfs_pools:
### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
zfs_extra_arc_max_size: "17179869184" # 16GB in Bytes
zfs_extra_max_usage_health: "90"
zfs_extra_zfs_pools:
  - name: "ssd_vm_mirror"
    type: "ssd"
    cron_minute_zfs_trim: "5"
@@ -1,6 +1,6 @@
---
### mgrote.zfs_manage_datasets
# rppol wird von pve bei installation erstellt
# rpool wird von pve bei installation erstellt wenn die installation mit zfs durchgeführt wird
zfs_datasets:
  - dataset: rpool/vm/dir
    state: present
@@ -25,9 +25,10 @@
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
### mgrote.zfs_tools_cron
zfs_arc_max: "12884901888"
zfs_pools:
    snapdir: hidden
### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
zfs_extra_arc_max_size: "2147483648" # 2GB in Bytes
zfs_extra_zfs_pools:
  - name: "rpool"
    type: "ssd"
    cron_minute_zfs_trim: "5"
@@ -1,6 +1,6 @@
---
### mgrote.zfs_manage_datasets
# rppol wird von pve bei installation erstellt
# rpool wird von pve bei installation erstellt wenn die installation mit zfs durchgeführt wird
zfs_datasets:
  - dataset: rpool/vm/dir
    state: present
@@ -25,9 +25,10 @@
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
### mgrote.zfs_tools_cron
zfs_arc_max: "12884901888"
zfs_pools:
    snapdir: hidden
### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
zfs_extra_arc_max_size: "6442450944" # 6GB in Bytes
zfs_extra_zfs_pools:
  - name: "rpool"
    type: "ssd"
    cron_minute_zfs_trim: "5"
@@ -1,6 +1,6 @@
---
### mgrote.zfs_manage_datasets
# rppol wird von pve bei installation erstellt
# rpool wird von pve bei installation erstellt wenn die installation mit zfs durchgeführt wird
zfs_datasets:
  - dataset: rpool/vm/dir
    state: present
@@ -25,9 +25,10 @@
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
### mgrote.zfs_tools_cron
zfs_arc_max: "12884901888"
zfs_pools:
    snapdir: hidden
### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
zfs_extra_arc_max_size: "2147483648" # 2GB in Bytes
zfs_extra_zfs_pools:
  - name: "rpool"
    type: "ssd"
    cron_minute_zfs_trim: "5"
@@ -1,6 +1,6 @@
---
### mgrote.zfs_manage_datasets
# rppol wird von pve bei installation erstellt
# rpool wird von pve bei installation erstellt wenn die installation mit zfs durchgeführt wird
zfs_datasets:
  - dataset: rpool/vm/dir
    state: present
@@ -26,9 +26,9 @@
    dnodesize: auto
    atime: on
    snapdir: hidden
### mgrote.zfs_tools_cron
zfs_arc_max: "12884901888"
zfs_pools:
### mgrote.zfs_extra # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
zfs_extra_arc_max_size: "6442450944" # 6GB in Bytes
zfs_extra_zfs_pools:
  - name: "rpool"
    type: "ssd"
    cron_minute_zfs_trim: "5"
@@ -57,7 +57,6 @@ all:
      hosts:
        gitlab-runner-test.grote.lan:
        gitlab-runner.grote.lan:
        gitlab-runner2.grote.lan:

    production:
      hosts:
@@ -71,7 +70,6 @@ all:
        pve2.grote.lan:
        gitlab.grote.lan:
        gitlab-runner.grote.lan:
        gitlab-runner2.grote.lan:
        pve3.grote.lan:
        pve4.grote.lan:
    test:
@@ -1,15 +1,20 @@
---
- hosts: proxmox
  roles:
    - { role: mgrote.zfs_manage_datasets, tags: "datasets" }
    - { role: mgrote.apt_manage_sources, tags: "apt_sources" }
    - { role: mgrote.apcupsd, tags: "apcupsd" }
    - { role: mgrote.smart, tags: "smart" }
    - { role: mgrote.zfs_tools_cron, tags: "zfs_tools" }
    - { role: mgrote.postfix, tags: "postfix" }
    - { role: mgrote.zfs_packages, tags: "zfs_packages" }
    - { role: mgrote.zfs_arc_mem, tags: "zfs_arc_mem" }
    - { role: mgrote.zfs_manage_datasets, tags: "datasets" }
    - { role: mgrote.zfs_scrub, tags: "zfs_scrub" }
    - { role: mgrote.zfs_trim, tags: "zfs_trim" }
    - { role: mgrote.zfs_zed, tags: "zfs_zed" }
    - { role: mgrote.zfs_health, tags: "zfs_health" }
    - { role: mgrote.zfs_sanoid,
        tags: "sanoid",
        when: "'pve2.grote.lan' in inventory_hostname" }
    - { role: mgrote.apcupsd, tags: "apcupsd" }
    - { role: mgrote.smart, tags: "smart" }
    - { role: mgrote.postfix, tags: "postfix" }
    - { role: mgrote.ecc-rasdaemon,
        tags: "ecc",
        when: "'pve2.grote.lan' in inventory_hostname" }
@@ -46,3 +46,11 @@
    dest: "/usr/local/bin/systemtemps.sh"
    mode: a+x
  when: ansible_virtualization_role != 'guest'

- name: copy zfs-free.sh
  become: yes
  ansible.builtin.template:
    mode: 0555
    src: zfs-free.sh
    dest: /usr/local/bin/zfs-free.sh
  when: "'proxmox' in group_names"
@@ -35,7 +35,7 @@ Es sind keine "defaults" gesetzt!

## Alternatives Dictionary Format:
```bash
zfs_pools:
zfs_extra_zfs_pools:
  - name: "ssd_vm_mirror"
    type: "ssd"
    cron_minute_zfs_trim: "5"
@@ -48,6 +48,6 @@ Es sind keine "defaults" gesetzt!
```
ist das gleiche wie:
```bash
zfs_pools:
zfs_extra_zfs_pools:
  - { name: "ssd_vm_mirror", type: "ssd", cron_minute_zfs_trim: "5", cron_hour_zfs_trim: "22", cron_month_zfs_trim: "4,8,12", cron_day_zfs_trim: "2", cron_weekday_zfs_scrub: "6", cron_minutes_zfs_scrub: "0", cron_hour_zfs_scrub: "23"}
```
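Spelled out in block form, the one-line mapping above is equivalent to the following; this expansion is assembled purely from the keys already shown in the mapping and is not itself a line of this commit:

```yaml
zfs_extra_zfs_pools:
  - name: "ssd_vm_mirror"
    type: "ssd"
    cron_minute_zfs_trim: "5"
    cron_hour_zfs_trim: "22"
    cron_month_zfs_trim: "4,8,12"
    cron_day_zfs_trim: "2"
    cron_weekday_zfs_scrub: "6"
    cron_minutes_zfs_scrub: "0"
    cron_hour_zfs_scrub: "23"
```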
roles/mgrote.zfs_arc_mem/README.md (new file, 11 lines)
@@ -0,0 +1,11 @@
## mgrote.zfs_arc_mem

### Beschreibung
Setzt die maximale ZFS ARC-Groesse.

### Funktioniert auf
- [x] ProxMox 6.1

### Variablen + Defaults
- see [defaults](./defaults/main.yml)
- Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
roles/mgrote.zfs_arc_mem/defaults/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
zfs_extra_arc_max_size: "8589934592" #in Bytes
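The ARC limit is given in bytes, which is easy to misread. As a sanity check, every value in this commit is a whole GiB multiple of 1024³ (editor's arithmetic, not part of the diff):

```yaml
# GiB-to-bytes arithmetic behind the values in this commit:
#    2 GiB =  2 * 1024^3 =  2147483648
#    6 GiB =  6 * 1024^3 =  6442450944
#    8 GiB =  8 * 1024^3 =  8589934592   (this role default)
#   12 GiB = 12 * 1024^3 = 12884901888   (old zfs_arc_max)
#   16 GiB = 16 * 1024^3 = 17179869184   (pve2)
zfs_extra_arc_max_size: "8589934592"
```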
roles/mgrote.zfs_arc_mem/meta/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
---
dependencies:
  - role: mgrote.zfs_packages
roles/mgrote.zfs_arc_mem/tasks/main.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
---
- name: Limit ZFS Memory Usage
  become: yes
  ansible.builtin.lineinfile:
    path: /etc/modprobe.d/zfs.conf
    line: options zfs zfs_arc_max={{ zfs_extra_arc_max_size }}
    create: yes
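The old mgrote.zfs_tools_cron README (deleted further down) warns that this lineinfile task appends a second `options zfs ...` line when the value changes, because no match pattern is given. A hedged variant with an idempotent match, offered as a suggestion rather than as part of the commit:

```yaml
- name: Limit ZFS Memory Usage
  become: yes
  ansible.builtin.lineinfile:
    path: /etc/modprobe.d/zfs.conf
    # replace any existing zfs_arc_max line instead of appending a duplicate
    regexp: '^options zfs zfs_arc_max='
    line: "options zfs zfs_arc_max={{ zfs_extra_arc_max_size }}"
    create: yes
```

Either way, as the old README notes, the new ARC limit only takes effect after a reboot.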
roles/mgrote.zfs_health/README.md (new file, 12 lines)
@@ -0,0 +1,12 @@
## mgrote.zfs_health

### Beschreibung
Richtet "zfs_health.sh", ein ZFS-Checkscript das auch Mails versendet bei Fehlern.


### Funktioniert auf
- [x] ProxMox 6.1

### Variablen + Defaults
- see [defaults](./defaults/main.yml)
- Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
roles/mgrote.zfs_health/defaults/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
---
zfs_extra_cron_minutes_zfs_health: "0,15,30,45"
zfs_extra_cron_hours_zfs_health: "*"
zfs_extra_max_usage_health: "80"
roles/mgrote.zfs_health/meta/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
---
dependencies:
  - role: mgrote.postfix
  - role: mgrote.zfs_packages
roles/mgrote.zfs_health/tasks/main.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
---
- name: copy "zfs-health.sh"
  become: yes
  ansible.builtin.template:
    src: zfs-health.sh
    dest: /usr/local/bin/zfs-health.sh
    mode: "+x"

- name: add cronjob "zfs-health.sh"
  become: yes
  ansible.builtin.cron:
    name: zfs-health
    state: present
    job: "/usr/local/bin/zfs-health.sh"
    minute: "{{ zfs_extra_cron_minutes_zfs_health }}"
    hour: "{{ zfs_extra_cron_hours_zfs_health }}"
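With the role defaults ("0,15,30,45" and "*"), ansible.builtin.cron writes the entry `0,15,30,45 * * * * /usr/local/bin/zfs-health.sh` into root's crontab under the marker `#Ansible: zfs-health`. A hypothetical group_vars override for an hourly schedule, shown only to illustrate the two variables:

```yaml
# Hypothetical override: run zfs-health.sh once per hour, on the hour.
zfs_extra_cron_minutes_zfs_health: "0"
zfs_extra_cron_hours_zfs_health: "*"
# resulting crontab entry:
#   0 * * * * /usr/local/bin/zfs-health.sh
```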
@@ -19,7 +19,7 @@ fi
# SSD then 80% is reasonable. If you have a 60TB raid-z2 array then you can
# probably set the warning closer to 95%.

maxCapacity={{ zfs_tool_max_cap }}
maxCapacity={{ zfs_extra_max_usage_health }}

if [ ${problems} -eq 0 ]; then
  capacity=$(/sbin/zpool list -H -o capacity)
roles/mgrote.zfs_packages/README.md (new file, 10 lines)
@@ -0,0 +1,10 @@
## mgrote.zfs_packages

### Beschreibung
Installiert zfs-packages.

### Funktioniert auf
- [x] ProxMox 6.1

### Variablen + Defaults
- keine
roles/mgrote.zfs_packages/tasks/main.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
- name: install zfs-tools-packages
  become: yes
  ansible.builtin.package:
    name:
      - zfsutils
      - zfs-initramfs
      - zfs-zed
    state: present
roles/mgrote.zfs_scrub/README.md (new file, 12 lines)
@@ -0,0 +1,12 @@
## mgrote.zfs_scrub

### Beschreibung
Richtet regelmaessige Scrubs(jeden Sonntag) ein.


### Funktioniert auf
- [x] ProxMox 6.1

### Variablen + Defaults
- see [defaults](./defaults/main.yml)
- Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
roles/mgrote.zfs_scrub/defaults/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
zfs_extra_path_zpool: "/usr/sbin/zpool"
roles/mgrote.zfs_scrub/meta/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
---
dependencies:
  - role: mgrote.zfs_packages
roles/mgrote.zfs_scrub/tasks/main.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
---
- name: remove system scrub job # https://forum.proxmox.com/threads/script-sequential-zfs-scrub-for-cron.25124/
  become: true
  ansible.builtin.lineinfile:
    path: /etc/cron.d/zfsutils-linux
    state: absent
    line: '24 0 8-14 * * root [ $(date +\%w) -eq 0 ] && [ -x /usr/lib/zfs-linux/scrub ] && /usr/lib/zfs-linux/scrub'

- name: add cronjob "zfs-scrub"
  become: yes
  ansible.builtin.cron:
    name: zfs-scrub - "{{ item.name }}"
    state: present
    job: "{{ zfs_extra_path_zpool }} scrub {{ item.name }}"
    weekday: "{{ item.cron_weekday_zfs_scrub }}"
    minute: "{{ item.cron_minutes_zfs_scrub }}"
    hour: "{{ item.cron_hour_zfs_scrub }}"
  with_items: "{{ zfs_extra_zfs_pools }}"
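The scrub cronjob is generated once per entry in zfs_extra_zfs_pools. A sketch of one entry with the keys this task reads, using the scrub values from the README example above (pool name taken from the group_vars hunks; the combination is illustrative):

```yaml
zfs_extra_zfs_pools:
  - name: "rpool"
    type: "ssd"
    cron_weekday_zfs_scrub: "6"
    cron_minutes_zfs_scrub: "0"
    cron_hour_zfs_scrub: "23"
# ansible.builtin.cron renders roughly:
#   0 23 * * 6 /usr/sbin/zpool scrub rpool
```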
@@ -1,36 +0,0 @@
## mgrote.zfs_tools_cron

### Beschreibung
Aktiviert die Mail Funktion von ZED (ZFS Event Daemon).
Setzt die maximale ARC-Groesse.
Benoetigt "mgrote.postfix".
Richtet regelmaessige Scrubs(jeden Sonntag) und Trim(alle 4 Monate) ein.
Richtet "zfs_health.sh", ein ZFS-Checkscript das auch Mails versendet bei Fehlern.
Deaktiviert das mitinstallierte scrub-Script in /etc/cron.d/zfsutils-linux.

### Funktioniert auf
- [x] ProxMox 6.1

### Variablen + Defaults
##### Wer soll die Mails bekommen
empfaenger_mail: michael.grote@posteo.de
zfs_pools: zfs_vm_mirror
##### Maximale Groesse ARC in Bytes
Beim aendern wird die Zeile einmal mit dem alten Wert und dem neuen Wert in die Zeile eingefuegt!
zfs_arc_max: "8589934592"
Die aenderung der maximalen ARC-Size wird erst nach einem Neustart uebernommen.
##### Ausfuehrung des ZFS_health-Scripts in Minuten
cron_minutes_zfs_health: "15"
cron_hours_zfs_health: "6,18"
##### Poolauflistung + Parameter
- u.A. fuer Cron fuer Trim und Scrub
Alle 4 Monate am 2. des Monats um 23:12
``- { name: "zfs_single_hdd", type: "ssd", cron_minute_zfs_trim: "12", cron_hour_zfs_trim: "23", cron_month_zfs_trim: "4,8,12", cron_day_zfs_trim: "3", cron_weekday_zfs_scrub: "0", cron_minutes_zfs_scrub: "0", cron_hour_zfs_scrub: "5"}``
Erst Poolname
Type: ssd/hdd; wenn der Wert SSD ist für ein Cronjobh angelegt der zfs-trim ausführt
Trim: Minute, Stunde, Monat, Tag des Monats
Scrub: Wochentag, Minute, Stunde
##### Pfad zu zpool-binary
pfad_zu_zpool: "/usr/sbin/zpool"
##### Füllstand für Warnmail
zfs_tool_max_cap: "80"
@@ -1,19 +0,0 @@
---
zfs_arc_max: "8589934592"

cron_minutes_zfs_health: "0,15,30,45"
cron_hours_zfs_health: "*"

cron_weekday_zfs_scrub: "0"
cron_minutes_zfs_scrub: "5"
cron_hour_zfs_scrub: "4"

cron_weekday_zfs_trim: "*"
cron_minute_zfs_trim: "12"
cron_hour_zfs_trim: "23"
cron_month_zfs_trim: "4,8,12"
cron_day_zfs_trim: "2"

pfad_zu_zpool: "/usr/sbin/zpool"

zfs_tool_max_cap: "80"
@@ -1,10 +0,0 @@

- name: telegraf_starten_aktivieren
  become: yes
  systemd:
    name: telegraf
    enabled: yes
    state: restarted

- name: testmail
  shell: echo "zed ist eingerichtet" | mail -s "{{ ansible_hostname }} - zed" {{ empfaenger_mail }}
@@ -1,14 +0,0 @@
---
dependencies:
  - role: mgrote.postfix
galaxy_info:
  author: mgrote
  description: installs zfs-tools
  min_ansible_version: 2.0
  license: GPLv3
  platforms:
    - name: Ubuntu
      versions:
        - all
  galaxy_tags:
    - system
@@ -1,97 +0,0 @@
- name: zfs-tools installieren
  become: yes
  ansible.builtin.package:
    name:
      - zfsutils
      - zfs-initramfs
      - zfs-zed
    state: present

- name: Limit ZFS Memory Usage
  become: yes
  ansible.builtin.lineinfile:
    path: /etc/modprobe.d/zfs.conf
    line: options zfs zfs_arc_max={{ zfs_arc_max }}
    create: yes

- name: remove system scrub job # https://forum.proxmox.com/threads/script-sequential-zfs-scrub-for-cron.25124/
  become: true
  ansible.builtin.lineinfile:
    path: /etc/cron.d/zfsutils-linux
    state: absent
    line: '24 0 8-14 * * root [ $(date +\%w) -eq 0 ] && [ -x /usr/lib/zfs-linux/scrub ] && /usr/lib/zfs-linux/scrub'

- name: erstelle Ordner "root-zfs"
  become: yes
  ansible.builtin.file:
    path: /root/zfs
    state: directory

- name: kopiere "zfs-health.sh"
  become: yes
  ansible.builtin.template:
    src: zfs-health.sh
    dest: /usr/local/bin/zfs-health.sh
    mode: "+x"

- name: lege cronjob fuer "zfs-health.sh" an
  become: yes
  ansible.builtin.cron:
    name: zfs-health
    state: present
    job: "/usr/local/bin/zfs-health.sh"
    minute: "{{ cron_minutes_zfs_health }}"
    hour: "{{ cron_hours_zfs_health }}"

- name: lege cronjob fuer "zfs-scrub" an
  become: yes
  ansible.builtin.cron:
    name: zfs-scrub - "{{ item.name }}"
    state: present
    job: "{{ pfad_zu_zpool }} scrub {{ item.name }}"
    weekday: "{{ item.cron_weekday_zfs_scrub }}"
    minute: "{{ item.cron_minutes_zfs_scrub }}"
    hour: "{{ item.cron_hour_zfs_scrub }}"
  with_items: "{{ zfs_pools }}"

- name: lege cronjob fuer "zfs-trim" an
  become: yes
  ansible.builtin.cron:
    name: zfs-trim - "{{ item.name }}"
    state: present
    job: "{{ pfad_zu_zpool }} trim {{ item.name }}"
    minute: "{{ item.cron_minute_zfs_trim }}"
    hour: "{{ item.cron_hour_zfs_trim }}"
    month: "{{ item.cron_month_zfs_trim }}"
    day: "{{ item.cron_day_zfs_trim }}"
#    disabled: yes
  when: item.type == 'ssd'
  with_items: "{{ zfs_pools }}"

- name: kopiere zed.rc
  become: yes
  ansible.builtin.template:
    owner: root
    mode: 0600
    src: zed.rc
    dest: /etc/zfs/zed.d/zed.rc
  notify: testmail

- name: copy zfs-free.sh
  become: yes
  ansible.builtin.template:
    mode: 0555
    src: zfs-free.sh
    dest: /usr/local/bin/zfs-free.sh

- name: Erlaube "non-root" Usern Read-Only ZFS Commands
  become: yes
  ansible.builtin.template:
    src: sudoers_zfs
    dest: /etc/sudoers.d/zfs
    owner: root
    group: root
    force: yes
    backup: yes
    mode: 0440
    validate: /usr/sbin/visudo -cf %s
@@ -1,18 +0,0 @@
# Allow read-only ZoL commands to be called through sudo
# without a password. Remove the first '#' column to enable.
#
# CAUTION: Any syntax error introduced here will break sudo.
#
# Cmnd alias specification
Cmnd_Alias C_ZFS = \
  /sbin/zfs "", /sbin/zfs help *, \
  /sbin/zfs get, /sbin/zfs get *, \
  /sbin/zfs list, /sbin/zfs list *, \
  /sbin/zpool "", /sbin/zpool help *, \
  /sbin/zpool iostat, /sbin/zpool iostat *, \
  /sbin/zpool list, /sbin/zpool list *, \
  /sbin/zpool status, /sbin/zpool status *, \
  /sbin/zpool upgrade, /sbin/zpool upgrade -v
#
# allow any user to use basic read-only ZFS commands
ALL ALL = (root) NOPASSWD: C_ZFS
roles/mgrote.zfs_trim/README.md (new file, 12 lines)
@@ -0,0 +1,12 @@
## mgrote.zfs_trim

### Beschreibung
Richtet regelmaessigen Trim(alle 4 Monate) ein.


### Funktioniert auf
- [x] ProxMox 6.1

### Variablen + Defaults
- see [defaults](./defaults/main.yml)
- Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
roles/mgrote.zfs_trim/defaults/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
zfs_extra_path_zpool: "/usr/sbin/zpool"
roles/mgrote.zfs_trim/meta/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
---
dependencies:
  - role: mgrote.zfs_packages
roles/mgrote.zfs_trim/tasks/main.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
---
- name: add cronjob "zfs-trim"
  become: yes
  ansible.builtin.cron:
    name: zfs-trim - "{{ item.name }}"
    state: present
    job: "{{ zfs_extra_path_zpool }} trim {{ item.name }}"
    minute: "{{ item.cron_minute_zfs_trim }}"
    hour: "{{ item.cron_hour_zfs_trim }}"
    month: "{{ item.cron_month_zfs_trim }}"
    day: "{{ item.cron_day_zfs_trim }}"
#    disabled: yes
  when: item.type == 'ssd'
  with_items: "{{ zfs_extra_zfs_pools }}"
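Note the `when: item.type == 'ssd'`: only pools marked "ssd" get a trim job; hdd pools are skipped. With the trim values from the README's flow-mapping example, the generated entry would look roughly like this (illustrative, not captured output):

```yaml
zfs_extra_zfs_pools:
  - name: "ssd_vm_mirror"
    type: "ssd"               # entries with any other type are skipped
    cron_minute_zfs_trim: "5"
    cron_hour_zfs_trim: "22"
    cron_month_zfs_trim: "4,8,12"
    cron_day_zfs_trim: "2"
# resulting crontab entry:
#   5 22 2 4,8,12 * /usr/sbin/zpool trim ssd_vm_mirror
```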
roles/mgrote.zfs_zed/README.md (new file, 11 lines)
@@ -0,0 +1,11 @@
## mgrote.zfs_zed

### Beschreibung
Aktiviert die Mail Funktion von ZED (ZFS Event Daemon).

### Funktioniert auf
- [x] ProxMox 6.1

### Variablen + Defaults
- see [defaults](./defaults/main.yml)
- Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
roles/mgrote.zfs_zed/defaults/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
empfaenger_mail: michael.grote@posteo.de
roles/mgrote.zfs_zed/handlers/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
- name: testmail
  shell: echo "zed ist eingerichtet" | mail -s "{{ ansible_hostname }} - zed" {{ empfaenger_mail }}
roles/mgrote.zfs_zed/meta/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
---
dependencies:
  - role: mgrote.postfix
  - role: mgrote.zfs_packages
roles/mgrote.zfs_zed/tasks/main.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
- name: kopiere zed.rc
  become: yes
  ansible.builtin.template:
    owner: root
    mode: 0600
    src: zed.rc
    dest: /etc/zfs/zed.d/zed.rc
  notify: testmail