homeserver/host_vars/pve2-test2.grote.lan.yml

---
### mgrote.zfs_manage_datasets
# rpool
# zpool create -f -o ashift=12 rpool mirror /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi2
# HDD_DATA_RAIDZ
# zpool create -f -o ashift=12 hdd_data_raidz raidz /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi4 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi5
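# To verify the resulting layout after creation (plain OpenZFS commands,
# independent of this role):
#   zpool status rpool hdd_data_raidz
#   zpool get ashift rpool hdd_data_raidz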
zfs_datasets: # data pools are not managed here (see the example command after this list)
  - dataset: rpool/vm
    state: present
    compression: lz4
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
  - dataset: rpool/vm/dir
    state: present
    compression: lz4
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
  - dataset: rpool/vm/zvol
    state: present
    compression: lz4
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
  - dataset: rpool/vm/qcow
    state: present
    compression: lz4
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
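# For reference: the first entry above corresponds roughly to this one-off
# command (illustration only; the role, not this command, applies the
# properties):
#   zfs create -o compression=lz4 -o sync=disabled -o xattr=sa \
#     -o dnodesize=auto -o atime=on -o snapdir=hidden rpool/vm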
### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
zfs_extra_arc_max_size: "2147483648" # 2 GiB in bytes
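# Assumption: on Linux the ARC limit is enforced through the zfs_arc_max
# kernel module parameter, so the role presumably ends up writing something
# like "options zfs zfs_arc_max=2147483648" to /etc/modprobe.d/zfs.conf.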
zfs_extra_zfs_pools: # scrub schedule per pool (see the example cron line after this list)
  - name: "rpool"
    cron_minutes_zfs_scrub: "5"
    cron_hour_zfs_scrub: "23"
    cron_day_of_month_zfs_scrub: "14"
    cron_day_of_week_zfs_scrub: "*"
    cron_month_zfs_scrub: "*/2"
  - name: "hdd_vm_mirror"
    cron_minutes_zfs_scrub: "10"
    cron_hour_zfs_scrub: "23"
    cron_day_of_month_zfs_scrub: "14"
    cron_day_of_week_zfs_scrub: "*"
    cron_month_zfs_scrub: "*/2"
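# Illustration (assuming the role renders these values into a cron.d file):
# the rpool entry above would become roughly
#   5 23 14 */2 * root /usr/sbin/zpool scrub rpool
# i.e. a scrub at 23:05 on the 14th of every second month.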