diff --git a/host_vars/fileserver3.mgrote.net.yml b/host_vars/fileserver3.mgrote.net.yml
index 03460db7..420ee758 100644
--- a/host_vars/fileserver3.mgrote.net.yml
+++ b/host_vars/fileserver3.mgrote.net.yml
@@ -80,10 +80,6 @@ smb_shares:
     path: '/shares_archiv'
     users_ro: ''
     users_rw: 'michaelgrote win10'
-  - name: 'hm'
-    path: '/shares_hm'
-    users_ro: ''
-    users_rw: 'michaelgrote win10'
   - name: 'musik'
     path: '/shares_music'
     users_ro: 'navidrome kodi '
diff --git a/host_vars/pbs-test.mgrote.net.yml b/host_vars/pbs-test.mgrote.net.yml
index fcdea01c..9da6c3c7 100644
--- a/host_vars/pbs-test.mgrote.net.yml
+++ b/host_vars/pbs-test.mgrote.net.yml
@@ -80,62 +80,62 @@ sanoid_syncoid_datasets_sync:
   - source_host: pve5.mgrote.net
     destination_mount_check: backup
     destination_dataset: backup/pve5/pve_backup
-    source_dataset: hdd_data_raidz/pve_backup
+    source_dataset: hdd_data/pve_backup

   - source_host: pve5.mgrote.net
     destination_mount_check: backup
     destination_dataset: backup/pve5/videos
-    source_dataset: hdd_data_raidz/videos
+    source_dataset: hdd_data/videos

   - source_host: pve5.mgrote.net
     destination_mount_check: backup
     destination_dataset: backup/pve5/music
-    source_dataset: hdd_data_raidz/music
+    source_dataset: hdd_data/music

   - source_host: pve5.mgrote.net
     destination_mount_check: backup
     destination_dataset: backup/pve5/tmp
-    source_dataset: hdd_data_raidz/tmp
+    source_dataset: hdd_data/tmp

   - source_host: pve5.mgrote.net
     destination_mount_check: backup
     destination_dataset: backup/pve5/archiv
-    source_dataset: hdd_data_raidz/archiv
+    source_dataset: hdd_data/archiv

   - source_host: pve5.mgrote.net
     destination_mount_check: backup
     destination_dataset: backup/pve5/bilder
-    source_dataset: hdd_data_raidz/bilder
+    source_dataset: hdd_data/bilder

   - source_host: pve5.mgrote.net
     destination_mount_check: backup
     destination_dataset: backup/pve5/scans
-    source_dataset: hdd_data_raidz/scans
+    source_dataset: hdd_data/scans

   - source_host: pve5.mgrote.net
     destination_mount_check: backup
     destination_dataset: backup/pve5/restic
-    source_dataset: hdd_data_raidz/restic
+    source_dataset: hdd_data/restic

   - source_host: pve5.mgrote.net
     destination_mount_check: backup
     destination_dataset: backup/pve5/backup
-    source_dataset: hdd_data_raidz/backup
+    source_dataset: hdd_data/backup

   - source_host: pve5.mgrote.net
     destination_mount_check: backup
     destination_dataset: backup/pve5/buecher
-    source_dataset: hdd_data_raidz/buecher
+    source_dataset: hdd_data/buecher

   - source_host: pve5.mgrote.net
     destination_mount_check: backup
     destination_dataset: backup/pve5/programme
-    source_dataset: hdd_data_raidz/programme
+    source_dataset: hdd_data/programme

   - source_host: pve5.mgrote.net
     destination_mount_check: backup
     destination_dataset: backup/pve5/vm
-    source_dataset: hdd_data_raidz/vm
+    source_dataset: hdd_data/vm

 # sanoid
 sanoid_datasets:
diff --git a/host_vars/pbs.mgrote.net.yml b/host_vars/pbs.mgrote.net.yml
index 31fc5106..6588a8c3 100644
--- a/host_vars/pbs.mgrote.net.yml
+++ b/host_vars/pbs.mgrote.net.yml
@@ -75,62 +75,62 @@ sanoid_syncoid_datasets_sync:
   - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
     destination_mount_check: backup
     destination_dataset: backup/pve5/pve_backup
-    source_dataset: hdd_data_raidz/pve_backup
+    source_dataset: hdd_data/pve_backup

   - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
     destination_mount_check: backup
     destination_dataset: backup/pve5/videos
-    source_dataset: hdd_data_raidz/videos
+    source_dataset: hdd_data/videos

   - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
     destination_mount_check: backup
     destination_dataset: backup/pve5/music
-    source_dataset: hdd_data_raidz/music
+    source_dataset: hdd_data/music

   - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
     destination_mount_check: backup
     destination_dataset: backup/pve5/tmp
-    source_dataset: hdd_data_raidz/tmp
+    source_dataset: hdd_data/tmp

   - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
     destination_mount_check: backup
     destination_dataset: backup/pve5/archiv
-    source_dataset: hdd_data_raidz/archiv
+    source_dataset: hdd_data/archiv

   - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
     destination_mount_check: backup
     destination_dataset: backup/pve5/bilder
-    source_dataset: hdd_data_raidz/bilder
+    source_dataset: hdd_data/bilder

   - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
     destination_mount_check: backup
     destination_dataset: backup/pve5/scans
-    source_dataset: hdd_data_raidz/scans
+    source_dataset: hdd_data/scans

   - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
     destination_mount_check: backup
     destination_dataset: backup/pve5/restic
-    source_dataset: hdd_data_raidz/restic
+    source_dataset: hdd_data/restic

   - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
     destination_mount_check: backup
     destination_dataset: backup/pve5/backup
-    source_dataset: hdd_data_raidz/backup
+    source_dataset: hdd_data/backup

   - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
     destination_mount_check: backup
     destination_dataset: backup/pve5/buecher
-    source_dataset: hdd_data_raidz/buecher
+    source_dataset: hdd_data/buecher

   - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
     destination_mount_check: backup
     destination_dataset: backup/pve5/programme
-    source_dataset: hdd_data_raidz/programme
+    source_dataset: hdd_data/programme

   - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
     destination_mount_check: backup
     destination_dataset: backup/pve5/vm
-    source_dataset: hdd_data_raidz/vm
+    source_dataset: hdd_data/vm

 # sanoid
 sanoid_datasets:
diff --git a/host_vars/pve5-test.mgrote.net.yml b/host_vars/pve5-test.mgrote.net.yml
index da173aa8..59b510a7 100644
--- a/host_vars/pve5-test.mgrote.net.yml
+++ b/host_vars/pve5-test.mgrote.net.yml
@@ -3,8 +3,8 @@
 # der Speicherort fur die VMs ist verschlüsselt
 # zfs create -o encryption=aes-256-gcm -o keyformat=passphrase rpool/vm
 # entschlüsseln nach Boot mit: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l
-## HDD_DATA_RAIDZ
-### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data_raidz mirror /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi2
+## hdd_data
+### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data mirror /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi2

 # mgrote.zfs_manage_datasets
 ### mgrote_zfs_extra
@@ -35,8 +35,8 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
     state: present
   - dataset: rpool/vm/lxc
     state: present
-  # hdd_data_raidz
-  - dataset: hdd_data_raidz
+  # hdd_data
+  - dataset: hdd_data
     state: present
     compression: zstd
     sync: disabled
@@ -45,95 +45,95 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
     atime: on # noqa yaml[truthy]
     snapdir: hidden
     reservation: 1G
-  - dataset: hdd_data_raidz/papa_backup
+  - dataset: hdd_data/papa_backup
     state: present
-  - dataset: hdd_data_raidz/pve_backup
+  - dataset: hdd_data/pve_backup
     state: present
     recordsize: 1M
-  - dataset: hdd_data_raidz/videos
+  - dataset: hdd_data/videos
     state: present
     recordsize: 1M
-  - dataset: hdd_data_raidz/music
+  - dataset: hdd_data/music
     state: present
     recordsize: 1M
-  - dataset: hdd_data_raidz/tmp
+  - dataset: hdd_data/tmp
     state: present
-  - dataset: hdd_data_raidz/archiv
+  - dataset: hdd_data/archiv
     state: present
-  - dataset: hdd_data_raidz/bilder
+  - dataset: hdd_data/bilder
     state: present
     recordsize: 1M
-  - dataset: hdd_data_raidz/scans
+  - dataset: hdd_data/scans
     state: present
-  - dataset: hdd_data_raidz/restic
+  - dataset: hdd_data/restic
     state: present
-  - dataset: hdd_data_raidz/backup
+  - dataset: hdd_data/backup
     state: present
-  - dataset: hdd_data_raidz/buecher
+  - dataset: hdd_data/buecher
     state: present
-  - dataset: hdd_data_raidz/programme
+  - dataset: hdd_data/programme
     state: present
-  - dataset: hdd_data_raidz/vm
+  - dataset: hdd_data/vm
     state: present

 zfs_extra_arc_max_size: "1073741824" # 1GB in Bytes
 zfs_extra_zfs_pools:
   - name: "rpool"
     systemd_timer_schedule: "*-01,04,07,10-01 23:00" # jeden ersten eines jeden Quartals
-  - name: "hdd_data_raidz"
+  - name: "hdd_data"
     systemd_timer_schedule: "*-01,04,07,10-01 23:00"

 ### mgrote_zfs_sanoid
 sanoid_datasets:
-  - path: 'hdd_data_raidz/videos'
+  - path: 'hdd_data/videos'
     template: '3tage'
     recursive: 'yes'
     snapshots: true
-  - path: 'hdd_data_raidz/music'
+  - path: 'hdd_data/music'
     template: '14tage'
     recursive: 'yes'
     snapshots: true
-  - path: 'hdd_data_raidz/papa_backup'
+  - path: 'hdd_data/papa_backup'
     template: '14tage'
     recursive: 'yes'
     snapshots: true
-  - path: 'hdd_data_raidz/tmp'
+  - path: 'hdd_data/tmp'
     template: '3tage'
     recursive: 'yes'
     snapshots: true
-  - path: 'hdd_data_raidz/pve_backup'
+  - path: 'hdd_data/pve_backup'
     template: '3tage'
     recursive: 'yes'
     snapshots: true
-  - path: 'hdd_data_raidz/archiv'
+  - path: 'hdd_data/archiv'
     template: '14tage'
     recursive: 'yes'
     snapshots: true
-  - path: hdd_data_raidz/bilder
+  - path: hdd_data/bilder
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '14tage'
-  - path: hdd_data_raidz/scans
+  - path: hdd_data/scans
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '3tage'
-  - path: hdd_data_raidz/backup
+  - path: hdd_data/backup
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '31tage'
-  - path: hdd_data_raidz/restic
+  - path: hdd_data/restic
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '3tage'
-  - path: hdd_data_raidz/programme
+  - path: hdd_data/programme
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '14tage'
-  - path: hdd_data_raidz/buecher
+  - path: hdd_data/buecher
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '14tage'
-  - path: hdd_data_raidz/vm
+  - path: hdd_data/vm
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: 'pve3tage'
@@ -161,55 +161,55 @@ cv4pve_dl_link: "https://github.com/Corsinvest/cv4pve-autosnap/releases/download
 pve_bind_mounts:
   - vmid: 100
     mp_nr: 0
-    mp_path_host: /hdd_data_raidz/videos
+    mp_path_host: /hdd_data/videos
     mp_path_guest: /shares_videos
   - vmid: 100
     mp_nr: 2
-    mp_path_host: /hdd_data_raidz/pve_backup
+    mp_path_host: /hdd_data/pve_backup
     mp_path_guest: /shares_pve_backup
   - vmid: 100
     mp_nr: 3
-    mp_path_host: /hdd_data_raidz/papa_backup
+    mp_path_host: /hdd_data/papa_backup
     mp_path_guest: /shares_papa_backup
   - vmid: 100
     mp_nr: 4
-    mp_path_host: /hdd_data_raidz/music
+    mp_path_host: /hdd_data/music
     mp_path_guest: /shares_music
   - vmid: 100
     mp_nr: 5
-    mp_path_host: /hdd_data_raidz/tmp
+    mp_path_host: /hdd_data/tmp
     mp_path_guest: /shares_tmp
   - vmid: 100
     mp_nr: 6
-    mp_path_host: /hdd_data_raidz/archiv
+    mp_path_host: /hdd_data/archiv
     mp_path_guest: /shares_archiv
   - vmid: 100
     mp_nr: 7
-    mp_path_host: /hdd_data_raidz/bilder
+    mp_path_host: /hdd_data/bilder
     mp_path_guest: /shares_bilder
   - vmid: 100
     mp_nr: 9
-    mp_path_host: /hdd_data_raidz/scans
+    mp_path_host: /hdd_data/scans
     mp_path_guest: /shares_scans
   - vmid: 100
     mp_nr: 10
-    mp_path_host: /hdd_data_raidz/restic
+    mp_path_host: /hdd_data/restic
     mp_path_guest: /shares_restic
   - vmid: 100
     mp_nr: 12
-    mp_path_host: /hdd_data_raidz/backup
+    mp_path_host: /hdd_data/backup
     mp_path_guest: /shares_backup
   - vmid: 100
     mp_nr: 14
-    mp_path_host: /hdd_data_raidz/buecher
+    mp_path_host: /hdd_data/buecher
     mp_path_guest: /shares_buecher
   - vmid: 100
     mp_nr: 15
-    mp_path_host: /hdd_data_raidz/programme
+    mp_path_host: /hdd_data/programme
     mp_path_guest: /shares_programme
   - vmid: 100
     mp_nr: 16
-    mp_path_host: /hdd_data_raidz/vm
+    mp_path_host: /hdd_data/vm
     mp_path_guest: /shares_vm

 # mgrote.pbs_pve_integration
diff --git a/host_vars/pve5.mgrote.net.yml b/host_vars/pve5.mgrote.net.yml
index 5f28bc1b..15ec18df 100644
--- a/host_vars/pve5.mgrote.net.yml
+++ b/host_vars/pve5.mgrote.net.yml
@@ -3,8 +3,8 @@
 # der Speicherort fur die VMs ist verschlüsselt
 # zfs create -o encryption=aes-256-gcm -o keyformat=passphrase rpool/vm
 # entschlüsseln nach Boot mit: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l
-## HDD_DATA_RAIDZ
-### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data_raidz mirror /dev/disk/by-id/ata-TOSHIBA_MG09ACA18TE_Z1B0A27KFJDH /dev/disk/by-id/ata-TOSHIBA_MG09ACA18TE_Z1B0A28LFJDH
+## hdd_data
+### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data mirror /dev/disk/by-id/ata-TOSHIBA_MG09ACA18TE_Z1B0A27KFJDH /dev/disk/by-id/ata-ST18000NM003D-3DL103_ZVTBSAYS

 # mgrote.zfs_manage_datasets
 ### mgrote_zfs_extra
@@ -39,8 +39,8 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
     state: present
   - dataset: rpool/data
     state: present
-  # hdd_data_raidz
-  - dataset: hdd_data_raidz
+  # hdd_data
+  - dataset: hdd_data
     state: present
     compression: zstd
     sync: disabled
@@ -49,42 +49,42 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
     atime: on # noqa yaml[truthy]
     snapdir: hidden
     reservation: 1G
-  - dataset: hdd_data_raidz/papa_backup
+  - dataset: hdd_data/papa_backup
     state: present
-  - dataset: hdd_data_raidz/pve_backup
+  - dataset: hdd_data/pve_backup
     state: present
     recordsize: 1M
-  - dataset: hdd_data_raidz/videos
+  - dataset: hdd_data/videos
     state: present
     recordsize: 1M
-  - dataset: hdd_data_raidz/music
+  - dataset: hdd_data/music
     state: present
     recordsize: 1M
-  - dataset: hdd_data_raidz/tmp
+  - dataset: hdd_data/tmp
     state: present
-  - dataset: hdd_data_raidz/archiv
+  - dataset: hdd_data/archiv
     state: present
-  - dataset: hdd_data_raidz/bilder
+  - dataset: hdd_data/bilder
     state: present
     recordsize: 1M
-  - dataset: hdd_data_raidz/scans
+  - dataset: hdd_data/scans
     state: present
-  - dataset: hdd_data_raidz/restic
+  - dataset: hdd_data/restic
     state: present
-  - dataset: hdd_data_raidz/backup
+  - dataset: hdd_data/backup
     state: present
-  - dataset: hdd_data_raidz/buecher
+  - dataset: hdd_data/buecher
     state: present
-  - dataset: hdd_data_raidz/programme
+  - dataset: hdd_data/programme
     state: present
-  - dataset: hdd_data_raidz/vm
+  - dataset: hdd_data/vm
     state: present

 zfs_extra_arc_max_size: "8589934592" # 8GB in Bytes
 zfs_extra_zfs_pools:
   - name: "rpool"
     systemd_timer_schedule: "*-01,04,07,10-01 23:00" # jeden ersten eines jeden Quartals
-  - name: "hdd_data_raidz"
+  - name: "hdd_data"
     systemd_timer_schedule: "*-01,04,07,10-01 23:00"

 ### mgrote_zfs_sanoid
@@ -93,56 +93,56 @@ sanoid_snaps_enable: true
 sanoid_syncoid_source_host: true
 sanoid_syncoid_ssh_pubkey: "{{ lookup('keepass', 'sanoid_syncoid_public_key', 'notes') }}"
 sanoid_datasets:
-  ### hdd_data_raidz
-  - path: 'hdd_data_raidz/videos'
+  ### hdd_data
+  - path: 'hdd_data/videos'
     template: '3tage'
     recursive: 'yes'
     snapshots: true
-  - path: 'hdd_data_raidz/music'
+  - path: 'hdd_data/music'
     template: '14tage'
     recursive: 'yes'
     snapshots: true
-  - path: 'hdd_data_raidz/papa_backup'
+  - path: 'hdd_data/papa_backup'
     template: '14tage'
     recursive: 'yes'
     snapshots: true
-  - path: 'hdd_data_raidz/tmp'
+  - path: 'hdd_data/tmp'
     template: '3tage'
     recursive: 'yes'
     snapshots: true
-  - path: 'hdd_data_raidz/pve_backup'
+  - path: 'hdd_data/pve_backup'
     template: '3tage'
     recursive: 'yes'
     snapshots: true
-  - path: 'hdd_data_raidz/archiv'
+  - path: 'hdd_data/archiv'
     template: '14tage'
     recursive: 'yes'
     snapshots: true
-  - path: hdd_data_raidz/bilder
+  - path: hdd_data/bilder
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '14tage'
-  - path: hdd_data_raidz/scans
+  - path: hdd_data/scans
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '3tage'
-  - path: hdd_data_raidz/backup
+  - path: hdd_data/backup
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '31tage'
-  - path: hdd_data_raidz/restic
+  - path: hdd_data/restic
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '3tage'
-  - path: hdd_data_raidz/programme
+  - path: hdd_data/programme
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '14tage'
-  - path: hdd_data_raidz/buecher
+  - path: hdd_data/buecher
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '14tage'
-  - path: hdd_data_raidz/vm
+  - path: hdd_data/vm
     recursive: 'no' # noqa yaml[truthy]
     snapshots: true
     template: '3tage'
@@ -172,55 +172,55 @@ pve_bind_mounts:
   ### fileserver3
   - vmid: 115
     mp_nr: 0
-    mp_path_host: /hdd_data_raidz/videos
+    mp_path_host: /hdd_data/videos
     mp_path_guest: /shares_videos
   - vmid: 115
     mp_nr: 2
-    mp_path_host: /hdd_data_raidz/pve_backup
+    mp_path_host: /hdd_data/pve_backup
     mp_path_guest: /shares_pve_backup
   - vmid: 115
     mp_nr: 3
-    mp_path_host: /hdd_data_raidz/papa_backup
+    mp_path_host: /hdd_data/papa_backup
     mp_path_guest: /shares_papa_backup
   - vmid: 115
     mp_nr: 4
-    mp_path_host: /hdd_data_raidz/music
+    mp_path_host: /hdd_data/music
     mp_path_guest: /shares_music
   - vmid: 115
     mp_nr: 5
-    mp_path_host: /hdd_data_raidz/tmp
+    mp_path_host: /hdd_data/tmp
     mp_path_guest: /shares_tmp
   - vmid: 115
     mp_nr: 6
-    mp_path_host: /hdd_data_raidz/archiv
+    mp_path_host: /hdd_data/archiv
     mp_path_guest: /shares_archiv
   - vmid: 115
     mp_nr: 7
-    mp_path_host: /hdd_data_raidz/bilder
+    mp_path_host: /hdd_data/bilder
     mp_path_guest: /shares_bilder
   - vmid: 115
     mp_nr: 9
-    mp_path_host: /hdd_data_raidz/scans
+    mp_path_host: /hdd_data/scans
     mp_path_guest: /shares_scans
   - vmid: 115
     mp_nr: 10
-    mp_path_host: /hdd_data_raidz/restic
+    mp_path_host: /hdd_data/restic
     mp_path_guest: /shares_restic
   - vmid: 115
     mp_nr: 12
-    mp_path_host: /hdd_data_raidz/backup
+    mp_path_host: /hdd_data/backup
     mp_path_guest: /shares_backup
   - vmid: 115
     mp_nr: 14
-    mp_path_host: /hdd_data_raidz/buecher
+    mp_path_host: /hdd_data/buecher
     mp_path_guest: /shares_buecher
   - vmid: 115
     mp_nr: 15
-    mp_path_host: /hdd_data_raidz/programme
+    mp_path_host: /hdd_data/programme
     mp_path_guest: /shares_programme
   - vmid: 115
     mp_nr: 16
-    mp_path_host: /hdd_data_raidz/vm
+    mp_path_host: /hdd_data/vm
     mp_path_guest: /shares_vm

 # mgrote.pbs_pve_integration
diff --git a/roles/mgrote_proxmox_bind_mounts/tasks/bm.yml b/roles/mgrote_proxmox_bind_mounts/tasks/bm.yml
index 777c14de..9ecee9dd 100644
--- a/roles/mgrote_proxmox_bind_mounts/tasks/bm.yml
+++ b/roles/mgrote_proxmox_bind_mounts/tasks/bm.yml
@@ -18,6 +18,10 @@
   become: true
   ansible.builtin.command: "pct set {{ item.vmid }} -mp{{ item.mp_nr }} {{ item.mp_path_host }},mp={{ item.mp_path_guest }}"
   register: restart
+  changed_when:
+    - restart.rc == 25
+  failed_when:
+    - restart.rc != 25
   notify: restart lxc
 # füge bind-mount hinzu falls er fehlt, also rc ungleich 0
 # pro bind-mount
@@ -29,7 +33,7 @@
 # schreibe vm id in die Liste "reboot"
 - name: set reboot list # noqa no-handler var-naming[pattern]
   ansible.builtin.set_fact:
-    ansible.builtin.reboot:
+    reboot:
       - "{{ item.vmid }}"
   when: restart.changed
diff --git a/roles/mgrote_zfs_sanoid/README.md b/roles/mgrote_zfs_sanoid/README.md
index 70b9f6cd..58fa7317 100644
--- a/roles/mgrote_zfs_sanoid/README.md
+++ b/roles/mgrote_zfs_sanoid/README.md
@@ -35,7 +35,7 @@ Es gibt 3 Funktionen:
   ---
   sanoid_snaps_enable: true
   sanoid_datasets:
-    - path: 'hdd_data_raidz/videos'
+    - path: 'hdd_data/videos'
       template: '31tage'
       recursive: 'yes'
       snapshots: true
@@ -76,8 +76,8 @@ Es gibt 3 Funktionen:
   sanoid_syncoid_datasets_sync:
     - source_host: host1.lan
       source_dataset: hdd_data_mirror
-      destination_mount_check: hdd_data_raidz/encrypted # Wenn dieses Dataset nicht gemountet ist(z.B. durch Verschlüsselung, dann bricht syncoid ab)
-      destination_dataset: hdd_data_raidz/encrypted/syncoid/zfs1
+      destination_mount_check: hdd_data/encrypted # Wenn dieses Dataset nicht gemountet ist(z.B. durch Verschlüsselung, dann bricht syncoid ab)
+      destination_dataset: hdd_data/encrypted/syncoid/zfs1
       skip_parent: false
   sanoid_syncoid_ssh_privkey: "{{ lookup('keepass', 'sanoid_syncoid_private_key', 'notes') }}"
   sanoid_syncoid_destination_host: true
diff --git a/roles/mgrote_zfs_sanoid/defaults/main.yml b/roles/mgrote_zfs_sanoid/defaults/main.yml
index 1cf026c5..99ddd921 100644
--- a/roles/mgrote_zfs_sanoid/defaults/main.yml
+++ b/roles/mgrote_zfs_sanoid/defaults/main.yml
@@ -8,11 +8,11 @@ sanoid_deb_url: http://docker10.mgrote.net:3344/sanoid_3.0.0.deb

 # ### "Default" Datasets
 # sanoid_datasets:              # dictionary
-#   - path: 'hdd_data_raidz/data'   # path to dataset; without leading /
+#   - path: 'hdd_data/data'   # path to dataset; without leading /
 #     template: 'fiveminutes' # name
 #     recursive: 'no'        # recursive snapshotting
 #     snapshots: true        # (de)activate; can be used to disable snapshotting of subdatasets if recursive is set
-#   - path: 'hdd_data_raidz/test'
+#   - path: 'hdd_data/test'
 #     snapshots: false  # deaktiviert sanoid für das dataset
 #
 # ### Templates
@@ -47,6 +47,6 @@ sanoid_user_group: sanoid
 ### mgrote_sanoid
 #sanoid_syncoid_datasets_sync:
 #  - source_host: pve5.mgrote.net
-#    source_dataset: hdd_data_raidz/tmp
-#    destination_mount_check: hdd_data_raidz/tmp # zielpool
+#    source_dataset: hdd_data/tmp
+#    destination_mount_check: hdd_data/tmp # zielpool
 #    destination_dataset: backup/pve5/tmp
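
Note: this patch only rewrites references from hdd_data_raidz to hdd_data; it assumes the ZFS pool itself has already been renamed on the affected hosts. A minimal sketch of that on-host rename (an assumption, not part of this patch — ZFS renames a pool by exporting it and re-importing it under a new name; the datasets and their data are untouched):

    # export the pool under its old name ...
    sudo zpool export hdd_data_raidz
    # ... and re-import it under the new name
    sudo zpool import -d /dev/disk/by-id/ hdd_data_raidz hdd_data
    # reload the encryption keys and remount, same as after a reboot
    sudo zfs mount -a -l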