---
# rpool is unencrypted, as it is the boot medium;
# the storage location for the VMs is encrypted:
# zfs create -o encryption=aes-256-gcm -o keyformat=passphrase rpool/vm
# unlock after boot with: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l

## hdd_data
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data mirror /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi2

# mgrote.zfs_manage_datasets
### mgrote_zfs_extra
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are consolidated under zfs_extra_*
zfs_datasets:
  # data pools are not managed here
  # rpool - system datasets
  - dataset: rpool
    state: present
    compression: zstd
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on # noqa yaml[truthy]
    snapdir: hidden
    reservation: 1G
    refreservation: 1G
    acltype: posix
  - dataset: rpool/ROOT
    state: present
    refreservation: 1G
  - dataset: rpool/ROOT/pve-1
    state: present
    refreservation: 1G
    acltype: posix # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
  # rpool - VMs
  - dataset: rpool/vm
    state: present
  - dataset: rpool/vm/zvol
    state: present
  - dataset: rpool/vm/lxc
    state: present
  # hdd_data
  - dataset: hdd_data
    state: present
    compression: zstd
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on # noqa yaml[truthy]
    snapdir: hidden
    reservation: 1G
  - dataset: hdd_data/papa_backup
    state: present
  - dataset: hdd_data/pve_backup
    state: present
    recordsize: 1M
  - dataset: hdd_data/videos
    state: present
    recordsize: 1M
  - dataset: hdd_data/music
    state: present
    recordsize: 1M
  - dataset: hdd_data/tmp
    state: present
  - dataset: hdd_data/archiv
    state: present
  - dataset: hdd_data/bilder
    state: present
    recordsize: 1M
  - dataset: hdd_data/scans
    state: present
  - dataset: hdd_data/restic
    state: present
  - dataset: hdd_data/backup
    state: present
  - dataset: hdd_data/buecher
    state: present
  - dataset: hdd_data/programme
    state: present
  - dataset: hdd_data/vm
    state: present

zfs_extra_arc_max_size: "1073741824" # 1 GiB (1024^3) in bytes
zfs_extra_zfs_pools:
  - name: "rpool"
    systemd_timer_schedule: "*-01,04,07,10-01 23:00" # on the first day of every quarter
  - name: "hdd_data"
    systemd_timer_schedule: "*-01,04,07,10-01 23:00"

### mgrote_zfs_sanoid
sanoid_datasets:
  - path: 'hdd_data/videos'
    template: '3tage'
    recursive: 'yes'
    snapshots: true
  - path: 'hdd_data/music'
    template: '14tage'
    recursive: 'yes'
    snapshots: true
  - path: 'hdd_data/papa_backup'
    template: '14tage'
    recursive: 'yes'
    snapshots: true
  - path: 'hdd_data/tmp'
    template: '3tage'
    recursive: 'yes'
    snapshots: true
  - path: 'hdd_data/pve_backup'
    template: '3tage'
    recursive: 'yes'
    snapshots: true
  - path: 'hdd_data/archiv'
    template: '14tage'
    recursive: 'yes'
    snapshots: true
  - path: hdd_data/bilder
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: '14tage'
  - path: hdd_data/scans
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: '3tage'
  - path: hdd_data/backup
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: '31tage'
  - path: hdd_data/restic
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: '3tage'
  - path: hdd_data/programme
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: '14tage'
  - path: hdd_data/buecher
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: '14tage'
  - path: hdd_data/vm
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: 'pve3tage'
  - path: rpool
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: 'pve3tage'
  - path: rpool/ROOT
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: 'pve3tage'
  - path: rpool/ROOT/pve-1
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: '3tage'
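# The '3tage'/'14tage'/'31tage'/'pve3tage' retention templates referenced above
# are defined in sanoid.conf by the sanoid role, not in this file. As a rough
# sketch (retention values are assumptions, not taken from this repo), a
# '14tage' template in sanoid.conf could look like:
#   [template_14tage]
#   frequently = 0
#   hourly = 0
#   daily = 14
#   monthly = 0
#   yearly = 0
#   autosnap = yes
#   autoprune = yes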
### mgrote_proxmox_bind_mounts
# applied on the host as mount-point entries for CT 100; see the pct sketch at the end of this file
pve_bind_mounts:
  - vmid: 100
    mp_nr: 0
    mp_path_host: /hdd_data/videos
    mp_path_guest: /shares_videos
  - vmid: 100
    mp_nr: 2
    mp_path_host: /hdd_data/pve_backup
    mp_path_guest: /shares_pve_backup
  - vmid: 100
    mp_nr: 3
    mp_path_host: /hdd_data/papa_backup
    mp_path_guest: /shares_papa_backup
  - vmid: 100
    mp_nr: 4
    mp_path_host: /hdd_data/music
    mp_path_guest: /shares_music
  - vmid: 100
    mp_nr: 5
    mp_path_host: /hdd_data/tmp
    mp_path_guest: /shares_tmp
  - vmid: 100
    mp_nr: 6
    mp_path_host: /hdd_data/archiv
    mp_path_guest: /shares_archiv
  - vmid: 100
    mp_nr: 7
    mp_path_host: /hdd_data/bilder
    mp_path_guest: /shares_bilder
  - vmid: 100
    mp_nr: 9
    mp_path_host: /hdd_data/scans
    mp_path_guest: /shares_scans
  - vmid: 100
    mp_nr: 10
    mp_path_host: /hdd_data/restic
    mp_path_guest: /shares_restic
  - vmid: 100
    mp_nr: 12
    mp_path_host: /hdd_data/backup
    mp_path_guest: /shares_backup
  - vmid: 100
    mp_nr: 14
    mp_path_host: /hdd_data/buecher
    mp_path_guest: /shares_buecher
  - vmid: 100
    mp_nr: 15
    mp_path_host: /hdd_data/programme
    mp_path_guest: /shares_programme
  - vmid: 100
    mp_nr: 16
    mp_path_host: /hdd_data/vm
    mp_path_guest: /shares_vm

# mgrote.pbs_pve_integration
pve_pbs_datastore:
  - name: pbs
    server: 192.168.2.18
    datastore: zfs_backup
    username: user_pve5-test@pbs
    password: "{{ lookup('keepass', 'pbs_pve_user-test', 'password') }}"
    fingerprint: "38:53:f6:1e:99:99:76:78:c4:00:dd:90:1a:89:47:56:97:4e:f3:62:01:d2:2c:76:ba:f8:55:be:f8:05:d1:7a"
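# Each pve_bind_mounts entry above maps a host path into CT 100 as a Proxmox
# mount point (mpN). Applying the first entry by hand would look roughly like
# this (a sketch; the role generates the container config itself):
#   pct set 100 -mp0 /hdd_data/videos,mp=/shares_videos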
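# For reference, the PBS storage defined above corresponds roughly to this
# manual pvesm call (a sketch; <password> stands in for the keepass lookup):
#   pvesm add pbs pbs \
#     --server 192.168.2.18 \
#     --datastore zfs_backup \
#     --username user_pve5-test@pbs \
#     --password <password> \
#     --fingerprint "38:53:f6:1e:99:99:76:78:c4:00:dd:90:1a:89:47:56:97:4e:f3:62:01:d2:2c:76:ba:f8:55:be:f8:05:d1:7a"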