# homeserver/host_vars/pve2-test2.grote.lan.yml
---
### mgrote.zfs_manage_datasets
# rpool
# zpool create -f -o ashift=12 rpool mirror /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi2
# HDD_DATA_RAIDZ
# zpool create -f -o ashift=12 hdd_data_raidz raidz /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi4 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi5
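# (optional sanity check after pool creation: `zpool status` shows the vdev layout,
#  `zpool get ashift rpool hdd_data_raidz` confirms the ashift=12 set above)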
zfs_datasets: # data pools themselves are not managed here (an equivalent manual command is sketched after the list)
  - dataset: rpool/vm
    state: present
    compression: lz4
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
  - dataset: rpool/vm/dir
    state: present
    compression: lz4
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
  - dataset: rpool/vm/zvol
    state: present
    compression: lz4
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
  - dataset: rpool/vm/qcow
    state: present
    compression: lz4
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
  - dataset: hdd_data_raidz/snapshot_test
    state: present
    compression: lz4
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
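# For reference, creating one of these datasets by hand with the same properties would
# look roughly like the sketch below (the mgrote.zfs_manage_datasets role is assumed to
# apply these properties itself):
# zfs create -o compression=lz4 -o sync=disabled -o xattr=sa -o dnodesize=auto \
#   -o atime=on -o snapdir=hidden rpool/vm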
### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
zfs_extra_arc_max_size: "2147483648" # 2GB in Bytes
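# 2147483648 bytes = 2 * 1024^3, i.e. a 2 GiB cap for the ZFS ARC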
zfs_extra_zfs_pools: # schedule format is explained after the list
  - name: "rpool"
    systemd_timer_schedule: "*-01,04,07,10-01 23:00"
- name: "hdd_vm_mirror"
systemd_timer_schedule: "*-01,04,07,10-01 23:00"
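# systemd OnCalendar syntax: "*-01,04,07,10-01 23:00" fires at 23:00 on the 1st of
# January, April, July and October, i.e. quarterly; it can be checked with
# `systemd-analyze calendar "*-01,04,07,10-01 23:00"`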
### mgrote.cv4pve-autosnap
cv4pve_api_user: root@pam!cv4pve-autosnap
cv4pve_api_token: "{{ lookup('keepass', 'cv4pve_api_token_pve2-test2', 'password') }}"
cv4pve_vmid: all,-127,-112,-100,-116,-105
cv4pve_keep_snapshots: 5
cv4pve_dl_link: "https://github.com/Corsinvest/cv4pve-autosnap/releases/download/v1.10.0/cv4pve-autosnap-linux-x64.zip"
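# These variables presumably feed a cv4pve-autosnap invocation roughly like the sketch
# below (host address and snapshot label are illustrative, not taken from this file);
# "all" minus the listed VMIDs excludes those guests from snapshotting:
# cv4pve-autosnap --host=pve2-test2.grote.lan \
#   --api-token='root@pam!cv4pve-autosnap=<token-secret>' \
#   --vmid='all,-127,-112,-100,-116,-105' snap --label=daily --keep=5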
### mgrote.zfs_sanoid
sanoid_datasets: # corresponding sanoid.conf stanza is sketched after the list
  - path: 'hdd_data_raidz/snapshot_test'
    template: '14tage'
    recursive: 'yes'
    snapshots: true
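# The entry above roughly corresponds to a sanoid.conf stanza like the sketch below;
# the '14tage' template itself (the retention counts) is assumed to be defined
# elsewhere, e.g. in group_vars:
# [hdd_data_raidz/snapshot_test]
#   use_template = 14tage
#   recursive = yes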