Rework: zfs roles

Michael Grote 2021-02-13 16:18:29 +01:00
parent aee96bf4d1
commit 591793582f
43 changed files with 222 additions and 226 deletions

View file

@@ -17,6 +17,9 @@
 tmux_conf_destination: "/root/.tmux.conf"
 tmux_bashrc_destination: "/root/.bashrc"
 tmux_standardsession_name: "default"
+### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
+zfs_extra_max_usage_health: "80"
 # Ansible variables

View file

@@ -39,9 +39,9 @@
 ### mgrote.apcupsd
 apcupsd_nis_master: true
 apcupsd_nis_master_hostname: pve2-test.grote.lan
-### mgrote.zfs_tools_cron
-zfs_arc_max: "12884901888"
-zfs_pools:
+### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
+zfs_extra_arc_max_size: "2147483648" # 2 GiB in bytes
+zfs_extra_zfs_pools:
 - name: "ssd_vm_mirror"
   type: "ssd"
   cron_minute_zfs_trim: "5"
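The renamed variable expects a plain byte count. As a quick sanity check of the GiB-to-bytes conversions used in these host files (a hypothetical shell one-liner, not part of the commit):

```bash
# GiB -> bytes, matching the commented values in the host vars
echo $((2 * 1024**3))   # 2147483648
echo $((6 * 1024**3))   # 6442450944
echo $((16 * 1024**3))  # 17179869184
```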

View file

@@ -38,9 +38,10 @@
 ### mgrote.apcupsd
 apcupsd_nis_master: true
 apcupsd_nis_master_hostname: pve2.grote.lan
-### mgrote.zfs_tools_cron
-zfs_arc_max: "12884901888"
-zfs_pools:
+### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
+zfs_extra_arc_max_size: "17179869184" # 16 GiB in bytes
+zfs_extra_max_usage_health: "90"
+zfs_extra_zfs_pools:
 - name: "ssd_vm_mirror"
   type: "ssd"
   cron_minute_zfs_trim: "5"

View file

@@ -1,6 +1,6 @@
 ---
 ### mgrote.zfs_manage_datasets
-# rppol is created by pve during the installation
+# rpool is created by pve during the installation if the installation is done with zfs
 zfs_datasets:
   - dataset: rpool/vm/dir
     state: present
@@ -25,9 +25,10 @@
     xattr: sa
     dnodesize: auto
     atime: on
     snapdir: hidden
-### mgrote.zfs_tools_cron
-zfs_arc_max: "12884901888"
-zfs_pools:
+### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
+zfs_extra_arc_max_size: "2147483648" # 2 GiB in bytes
+zfs_extra_zfs_pools:
 - name: "rpool"
   type: "ssd"
   cron_minute_zfs_trim: "5"

View file

@@ -1,6 +1,6 @@
 ---
 ### mgrote.zfs_manage_datasets
-# rppol is created by pve during the installation
+# rpool is created by pve during the installation if the installation is done with zfs
 zfs_datasets:
   - dataset: rpool/vm/dir
     state: present
@@ -25,9 +25,10 @@
     xattr: sa
     dnodesize: auto
     atime: on
     snapdir: hidden
-### mgrote.zfs_tools_cron
-zfs_arc_max: "12884901888"
-zfs_pools:
+### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
+zfs_extra_arc_max_size: "6442450944" # 6 GiB in bytes
+zfs_extra_zfs_pools:
 - name: "rpool"
   type: "ssd"
   cron_minute_zfs_trim: "5"

View file

@@ -1,6 +1,6 @@
 ---
 ### mgrote.zfs_manage_datasets
-# rppol is created by pve during the installation
+# rpool is created by pve during the installation if the installation is done with zfs
 zfs_datasets:
   - dataset: rpool/vm/dir
     state: present
@@ -25,9 +25,10 @@
     xattr: sa
     dnodesize: auto
     atime: on
     snapdir: hidden
-### mgrote.zfs_tools_cron
-zfs_arc_max: "12884901888"
-zfs_pools:
+### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
+zfs_extra_arc_max_size: "2147483648" # 2 GiB in bytes
+zfs_extra_zfs_pools:
 - name: "rpool"
   type: "ssd"
   cron_minute_zfs_trim: "5"

View file

@@ -1,6 +1,6 @@
 ---
 ### mgrote.zfs_manage_datasets
-# rppol is created by pve during the installation
+# rpool is created by pve during the installation if the installation is done with zfs
 zfs_datasets:
   - dataset: rpool/vm/dir
     state: present
@@ -26,9 +26,9 @@
     dnodesize: auto
     atime: on
     snapdir: hidden
-### mgrote.zfs_tools_cron
-zfs_arc_max: "12884901888"
-zfs_pools:
+### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
+zfs_extra_arc_max_size: "6442450944" # 6 GiB in bytes
+zfs_extra_zfs_pools:
 - name: "rpool"
   type: "ssd"
   cron_minute_zfs_trim: "5"

View file

@@ -57,7 +57,6 @@ all:
     hosts:
       gitlab-runner-test.grote.lan:
       gitlab-runner.grote.lan:
-      gitlab-runner2.grote.lan:
   production:
     hosts:
@@ -71,7 +70,6 @@ all:
       pve2.grote.lan:
       gitlab.grote.lan:
       gitlab-runner.grote.lan:
-      gitlab-runner2.grote.lan:
       pve3.grote.lan:
       pve4.grote.lan:
   test:

View file

@@ -1,15 +1,20 @@
 ---
 - hosts: proxmox
   roles:
-    - { role: mgrote.zfs_manage_datasets, tags: "datasets" }
     - { role: mgrote.apt_manage_sources, tags: "apt_sources" }
-    - { role: mgrote.apcupsd, tags: "apcupsd" }
-    - { role: mgrote.smart, tags: "smart" }
-    - { role: mgrote.zfs_tools_cron, tags: "zfs_tools" }
-    - { role: mgrote.postfix, tags: "postfix" }
+    - { role: mgrote.zfs_packages, tags: "zfs_packages" }
+    - { role: mgrote.zfs_arc_mem, tags: "zfs_arc_mem" }
+    - { role: mgrote.zfs_manage_datasets, tags: "datasets" }
+    - { role: mgrote.zfs_scrub, tags: "zfs_scrub" }
+    - { role: mgrote.zfs_trim, tags: "zfs_trim" }
+    - { role: mgrote.zfs_zed, tags: "zfs_zed" }
+    - { role: mgrote.zfs_health, tags: "zfs_health" }
     - { role: mgrote.zfs_sanoid,
         tags: "sanoid",
         when: "'pve2.grote.lan' in inventory_hostname" }
+    - { role: mgrote.apcupsd, tags: "apcupsd" }
+    - { role: mgrote.smart, tags: "smart" }
+    - { role: mgrote.postfix, tags: "postfix" }
     - { role: mgrote.ecc-rasdaemon,
         tags: "ecc",
         when: "'pve2.grote.lan' in inventory_hostname" }

View file

@@ -46,3 +46,11 @@
     dest: "/usr/local/bin/systemtemps.sh"
     mode: a+x
   when: ansible_virtualization_role != 'guest'
+- name: copy zfs-free.sh
+  become: yes
+  ansible.builtin.template:
+    mode: 0555
+    src: zfs-free.sh
+    dest: /usr/local/bin/zfs-free.sh
+  when: "'proxmox' in group_names"

View file

@@ -35,7 +35,7 @@ No "defaults" are set!
 ## Alternative dictionary format:
 ```bash
-zfs_pools:
+zfs_extra_zfs_pools:
   - name: "ssd_vm_mirror"
     type: "ssd"
     cron_minute_zfs_trim: "5"
@@ -48,6 +48,6 @@
 ```
 is the same as:
 ```bash
-zfs_pools:
+zfs_extra_zfs_pools:
 - { name: "ssd_vm_mirror", type: "ssd", cron_minute_zfs_trim: "5", cron_hour_zfs_trim: "22", cron_month_zfs_trim: "4,8,12", cron_day_zfs_trim: "2", cron_weekday_zfs_scrub: "6", cron_minutes_zfs_scrub: "0", cron_hour_zfs_scrub: "23"}
 ```

View file

@@ -0,0 +1,11 @@
## mgrote.zfs_arc_mem
### Description
Sets the maximum ZFS ARC size.
### Works on
- [x] ProxMox 6.1
### Variables + Defaults
- see [defaults](./defaults/main.yml)
- variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
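For orientation, not part of the commit: with the role default of 8589934592 bytes (8 GiB, see the defaults file below), the line the role manages in /etc/modprobe.d/zfs.conf would presumably read:

```bash
# /etc/modprobe.d/zfs.conf (as rendered by the role)
options zfs zfs_arc_max=8589934592
```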

View file

@@ -0,0 +1,2 @@
---
zfs_extra_arc_max_size: "8589934592" # 8 GiB, in bytes

View file

@@ -0,0 +1,3 @@
---
dependencies:
  - role: mgrote.zfs_packages

View file

@@ -0,0 +1,7 @@
---
- name: Limit ZFS Memory Usage
  become: yes
  ansible.builtin.lineinfile:
    path: /etc/modprobe.d/zfs.conf
    line: options zfs zfs_arc_max={{ zfs_extra_arc_max_size }}
    create: yes
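lineinfile only edits the config file; it does not change the limit of the running module. A hedged way to verify on the host (the sysfs path is standard OpenZFS; the reboot requirement is stated in the old role's README):

```bash
# show the currently active ARC limit (0 = ZFS-internal default)
cat /sys/module/zfs/parameters/zfs_arc_max
# the new value only takes effect after a reboot; on root-on-ZFS systems
# the initramfs may also need a refresh first: update-initramfs -u
```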

View file

@@ -0,0 +1,12 @@
## mgrote.zfs_health
### Description
Sets up "zfs_health.sh", a ZFS check script that also sends mail on errors.
### Works on
- [x] ProxMox 6.1
### Variables + Defaults
- see [defaults](./defaults/main.yml)
- variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*

View file

@@ -0,0 +1,4 @@
---
zfs_extra_cron_minutes_zfs_health: "0,15,30,45"
zfs_extra_cron_hours_zfs_health: "*"
zfs_extra_max_usage_health: "80"

View file

@@ -0,0 +1,4 @@
---
dependencies:
  - role: mgrote.postfix
  - role: mgrote.zfs_packages

View file

@@ -0,0 +1,16 @@
---
- name: copy "zfs-health.sh"
  become: yes
  ansible.builtin.template:
    src: zfs-health.sh
    dest: /usr/local/bin/zfs-health.sh
    mode: "+x"
- name: add cronjob "zfs-health.sh"
  become: yes
  ansible.builtin.cron:
    name: zfs-health
    state: present
    job: "/usr/local/bin/zfs-health.sh"
    minute: "{{ zfs_extra_cron_minutes_zfs_health }}"
    hour: "{{ zfs_extra_cron_hours_zfs_health }}"

View file

@@ -19,7 +19,7 @@ fi
 # SSD then 80% is reasonable. If you have a 60TB raid-z2 array then you can
 # probably set the warning closer to 95%.
-maxCapacity={{ zfs_tool_max_cap }}
+maxCapacity={{ zfs_extra_max_usage_health }}
 if [ ${problems} -eq 0 ]; then
   capacity=$(/sbin/zpool list -H -o capacity)
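The hunk only renames the template variable; the surrounding capacity check in zfs-health.sh presumably looks something like this (a reconstruction under that assumption, not part of the diff):

```bash
# compare each pool's fill level against maxCapacity and flag a problem
if [ ${problems} -eq 0 ]; then
  capacity=$(/sbin/zpool list -H -o capacity)
  for line in ${capacity//%/}; do
    if [ $line -ge $maxCapacity ]; then
      problems=1
    fi
  done
fi
```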

View file

@@ -0,0 +1,10 @@
## mgrote.zfs_packages
### Description
Installs the zfs packages.
### Works on
- [x] ProxMox 6.1
### Variables + Defaults
- none

View file

@@ -0,0 +1,9 @@
---
- name: install zfs-tools-packages
  become: yes
  ansible.builtin.package:
    name:
      - zfsutils
      - zfs-initramfs
      - zfs-zed
    state: present
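A quick way to confirm the packages landed and the event daemon is available (plain shell on the host, not part of the role):

```bash
zfs version               # userland / kernel module versions
systemctl status zfs-zed  # the daemon that mgrote.zfs_zed builds on
```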

View file

@@ -0,0 +1,12 @@
## mgrote.zfs_scrub
### Description
Sets up regular scrubs (every Sunday).
### Works on
- [x] ProxMox 6.1
### Variables + Defaults
- see [defaults](./defaults/main.yml)
- variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*

View file

@@ -0,0 +1,2 @@
---
zfs_extra_path_zpool: "/usr/sbin/zpool"

View file

@@ -0,0 +1,3 @@
---
dependencies:
  - role: mgrote.zfs_packages

View file

@@ -0,0 +1,18 @@
---
- name: remove system scrub job # https://forum.proxmox.com/threads/script-sequential-zfs-scrub-for-cron.25124/
  become: true
  ansible.builtin.lineinfile:
    path: /etc/cron.d/zfsutils-linux
    state: absent
    line: '24 0 8-14 * * root [ $(date +\%w) -eq 0 ] && [ -x /usr/lib/zfs-linux/scrub ] && /usr/lib/zfs-linux/scrub'
- name: add cronjob "zfs-scrub"
  become: yes
  ansible.builtin.cron:
    name: zfs-scrub - "{{ item.name }}"
    state: present
    job: "{{ zfs_extra_path_zpool }} scrub {{ item.name }}"
    weekday: "{{ item.cron_weekday_zfs_scrub }}"
    minute: "{{ item.cron_minutes_zfs_scrub }}"
    hour: "{{ item.cron_hour_zfs_scrub }}"
  with_items: "{{ zfs_extra_zfs_pools }}"
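Taking the ssd_vm_mirror values from the host vars above (weekday "6", minute "0", hour "23"), the generated entry would be roughly:

```bash
#Ansible: zfs-scrub - "ssd_vm_mirror"
0 23 * * 6 /usr/sbin/zpool scrub ssd_vm_mirror
```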

View file

@@ -1,36 +0,0 @@
## mgrote.zfs_tools_cron
### Description
Enables the mail function of ZED (ZFS Event Daemon).
Sets the maximum ARC size.
Requires "mgrote.postfix".
Sets up regular scrubs (every Sunday) and trim (every 4 months).
Sets up "zfs_health.sh", a ZFS check script that also sends mail on errors.
Disables the bundled scrub script in /etc/cron.d/zfsutils-linux.
### Works on
- [x] ProxMox 6.1
### Variables + Defaults
##### Who should receive the mails
empfaenger_mail: michael.grote@posteo.de
zfs_pools: zfs_vm_mirror
##### Maximum ARC size in bytes
When the value is changed, the line ends up in the file once with the old value and once with the new value!
zfs_arc_max: "8589934592"
A change of the maximum ARC size only takes effect after a reboot.
##### Schedule of the zfs_health script in minutes
cron_minutes_zfs_health: "15"
cron_hours_zfs_health: "6,18"
##### Pool list + parameters
- among other things for the trim and scrub cron jobs
Every 4 months on the 2nd of the month at 23:12
``- { name: "zfs_single_hdd", type: "ssd", cron_minute_zfs_trim: "12", cron_hour_zfs_trim: "23", cron_month_zfs_trim: "4,8,12", cron_day_zfs_trim: "3", cron_weekday_zfs_scrub: "0", cron_minutes_zfs_scrub: "0", cron_hour_zfs_scrub: "5"}``
First the pool name
Type: ssd/hdd; if the value is ssd, a cron job is created that runs zfs-trim
Trim: minute, hour, month, day of month
Scrub: weekday, minute, hour
##### Path to the zpool binary
pfad_zu_zpool: "/usr/sbin/zpool"
##### Fill level for the warning mail
zfs_tool_max_cap: "80"

View file

@@ -1,19 +0,0 @@
---
zfs_arc_max: "8589934592"
cron_minutes_zfs_health: "0,15,30,45"
cron_hours_zfs_health: "*"
cron_weekday_zfs_scrub: "0"
cron_minutes_zfs_scrub: "5"
cron_hour_zfs_scrub: "4"
cron_weekday_zfs_trim: "*"
cron_minute_zfs_trim: "12"
cron_hour_zfs_trim: "23"
cron_month_zfs_trim: "4,8,12"
cron_day_zfs_trim: "2"
pfad_zu_zpool: "/usr/sbin/zpool"
zfs_tool_max_cap: "80"

View file

@@ -1,10 +0,0 @@
- name: telegraf_starten_aktivieren
  become: yes
  systemd:
    name: telegraf
    enabled: yes
    state: restarted
- name: testmail
  shell: echo "zed ist eingerichtet" | mail -s "{{ ansible_hostname }} - zed" {{ empfaenger_mail }}

View file

@@ -1,14 +0,0 @@
---
dependencies:
  - role: mgrote.postfix
galaxy_info:
  author: mgrote
  description: installs zfs-tools
  min_ansible_version: 2.0
  license: GPLv3
  platforms:
    - name: Ubuntu
      versions:
        - all
  galaxy_tags:
    - system

View file

@@ -1,97 +0,0 @@
- name: install zfs-tools
  become: yes
  ansible.builtin.package:
    name:
      - zfsutils
      - zfs-initramfs
      - zfs-zed
    state: present
- name: Limit ZFS Memory Usage
  become: yes
  ansible.builtin.lineinfile:
    path: /etc/modprobe.d/zfs.conf
    line: options zfs zfs_arc_max={{ zfs_arc_max }}
    create: yes
- name: remove system scrub job # https://forum.proxmox.com/threads/script-sequential-zfs-scrub-for-cron.25124/
  become: true
  ansible.builtin.lineinfile:
    path: /etc/cron.d/zfsutils-linux
    state: absent
    line: '24 0 8-14 * * root [ $(date +\%w) -eq 0 ] && [ -x /usr/lib/zfs-linux/scrub ] && /usr/lib/zfs-linux/scrub'
- name: create directory "root-zfs"
  become: yes
  ansible.builtin.file:
    path: /root/zfs
    state: directory
- name: copy "zfs-health.sh"
  become: yes
  ansible.builtin.template:
    src: zfs-health.sh
    dest: /usr/local/bin/zfs-health.sh
    mode: "+x"
- name: add cronjob for "zfs-health.sh"
  become: yes
  ansible.builtin.cron:
    name: zfs-health
    state: present
    job: "/usr/local/bin/zfs-health.sh"
    minute: "{{ cron_minutes_zfs_health }}"
    hour: "{{ cron_hours_zfs_health }}"
- name: add cronjob for "zfs-scrub"
  become: yes
  ansible.builtin.cron:
    name: zfs-scrub - "{{ item.name }}"
    state: present
    job: "{{ pfad_zu_zpool }} scrub {{ item.name }}"
    weekday: "{{ item.cron_weekday_zfs_scrub }}"
    minute: "{{ item.cron_minutes_zfs_scrub }}"
    hour: "{{ item.cron_hour_zfs_scrub }}"
  with_items: "{{ zfs_pools }}"
- name: add cronjob for "zfs-trim"
  become: yes
  ansible.builtin.cron:
    name: zfs-trim - "{{ item.name }}"
    state: present
    job: "{{ pfad_zu_zpool }} trim {{ item.name }}"
    minute: "{{ item.cron_minute_zfs_trim }}"
    hour: "{{ item.cron_hour_zfs_trim }}"
    month: "{{ item.cron_month_zfs_trim }}"
    day: "{{ item.cron_day_zfs_trim }}"
    # disabled: yes
  when: item.type == 'ssd'
  with_items: "{{ zfs_pools }}"
- name: copy zed.rc
  become: yes
  ansible.builtin.template:
    owner: root
    mode: 0600
    src: zed.rc
    dest: /etc/zfs/zed.d/zed.rc
  notify: testmail
- name: copy zfs-free.sh
  become: yes
  ansible.builtin.template:
    mode: 0555
    src: zfs-free.sh
    dest: /usr/local/bin/zfs-free.sh
- name: allow "non-root" users read-only ZFS commands
  become: yes
  ansible.builtin.template:
    src: sudoers_zfs
    dest: /etc/sudoers.d/zfs
    owner: root
    group: root
    force: yes
    backup: yes
    mode: 0440
    validate: /usr/sbin/visudo -cf %s

View file

@@ -1,18 +0,0 @@
# Allow read-only ZoL commands to be called through sudo
# without a password. Remove the first '#' column to enable.
#
# CAUTION: Any syntax error introduced here will break sudo.
#
# Cmnd alias specification
Cmnd_Alias C_ZFS = \
/sbin/zfs "", /sbin/zfs help *, \
/sbin/zfs get, /sbin/zfs get *, \
/sbin/zfs list, /sbin/zfs list *, \
/sbin/zpool "", /sbin/zpool help *, \
/sbin/zpool iostat, /sbin/zpool iostat *, \
/sbin/zpool list, /sbin/zpool list *, \
/sbin/zpool status, /sbin/zpool status *, \
/sbin/zpool upgrade, /sbin/zpool upgrade -v
#
# allow any user to use basic read-only ZFS commands
ALL ALL = (root) NOPASSWD: C_ZFS
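While this drop-in existed, the Cmnd_Alias plus the NOPASSWD rule let any unprivileged user run the listed read-only commands, e.g.:

```bash
# worked without a password prompt for every user
sudo /sbin/zpool status
sudo /sbin/zfs list
```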

View file

@@ -0,0 +1,12 @@
## mgrote.zfs_trim
### Description
Sets up a regular trim (every 4 months).
### Works on
- [x] ProxMox 6.1
### Variables + Defaults
- see [defaults](./defaults/main.yml)
- variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*

View file

@@ -0,0 +1,2 @@
---
zfs_extra_path_zpool: "/usr/sbin/zpool"

View file

@@ -0,0 +1,3 @@
---
dependencies:
  - role: mgrote.zfs_packages

View file

@@ -0,0 +1,14 @@
---
- name: add cronjob "zfs-trim"
  become: yes
  ansible.builtin.cron:
    name: zfs-trim - "{{ item.name }}"
    state: present
    job: "{{ zfs_extra_path_zpool }} trim {{ item.name }}"
    minute: "{{ item.cron_minute_zfs_trim }}"
    hour: "{{ item.cron_hour_zfs_trim }}"
    month: "{{ item.cron_month_zfs_trim }}"
    day: "{{ item.cron_day_zfs_trim }}"
    # disabled: yes
  when: item.type == 'ssd'
  with_items: "{{ zfs_extra_zfs_pools }}"
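With the ssd_vm_mirror trim values from the host vars (minute "5", hour "22", day "2", months "4,8,12"), the generated entry should look roughly like:

```bash
#Ansible: zfs-trim - "ssd_vm_mirror"
5 22 2 4,8,12 * /usr/sbin/zpool trim ssd_vm_mirror
```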

View file

@@ -0,0 +1,11 @@
## mgrote.zfs_zed
### Description
Enables the mail function of ZED (ZFS Event Daemon).
### Works on
- [x] ProxMox 6.1
### Variables + Defaults
- see [defaults](./defaults/main.yml)
- variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*

View file

@@ -0,0 +1,2 @@
---
empfaenger_mail: michael.grote@posteo.de

View file

@@ -0,0 +1,2 @@
- name: testmail
  shell: echo "zed ist eingerichtet" | mail -s "{{ ansible_hostname }} - zed" {{ empfaenger_mail }}

View file

@@ -0,0 +1,4 @@
---
dependencies:
  - role: mgrote.postfix
  - role: mgrote.zfs_packages

View file

@@ -0,0 +1,9 @@
---
- name: copy zed.rc
  become: yes
  ansible.builtin.template:
    owner: root
    mode: 0600
    src: zed.rc
    dest: /etc/zfs/zed.d/zed.rc
  notify: testmail
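The zed.rc template itself is not shown in the diff; assuming it fills in empfaenger_mail via standard zed.rc options, the mail-related excerpt would presumably look like:

```bash
# /etc/zfs/zed.d/zed.rc (assumed excerpt, rendered by the template)
ZED_EMAIL_ADDR="{{ empfaenger_mail }}"
ZED_NOTIFY_VERBOSE=1
```

The notify: testmail hook then fires the handler above once the file changes, so a test mail confirms the postfix/ZED chain end to end.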