rename zfs-pool: hdd_data_raidz --> hdd_data (#622)

Reviewed-on: #622
This commit is contained in:
Michael Grote 2023-11-29 22:24:57 +01:00
parent cde39970eb
commit 32d3b7fde4
8 changed files with 125 additions and 125 deletions

View File

@ -80,10 +80,6 @@ smb_shares:
path: '/shares_archiv' path: '/shares_archiv'
users_ro: '' users_ro: ''
users_rw: 'michaelgrote win10' users_rw: 'michaelgrote win10'
- name: 'hm'
path: '/shares_hm'
users_ro: ''
users_rw: 'michaelgrote win10'
- name: 'musik' - name: 'musik'
path: '/shares_music' path: '/shares_music'
users_ro: 'navidrome kodi ' users_ro: 'navidrome kodi '

View File

@ -80,62 +80,62 @@ sanoid_syncoid_datasets_sync:
- source_host: pve5.mgrote.net - source_host: pve5.mgrote.net
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/pve_backup destination_dataset: backup/pve5/pve_backup
source_dataset: hdd_data_raidz/pve_backup source_dataset: hdd_data/pve_backup
- source_host: pve5.mgrote.net - source_host: pve5.mgrote.net
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/videos destination_dataset: backup/pve5/videos
source_dataset: hdd_data_raidz/videos source_dataset: hdd_data/videos
- source_host: pve5.mgrote.net - source_host: pve5.mgrote.net
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/music destination_dataset: backup/pve5/music
source_dataset: hdd_data_raidz/music source_dataset: hdd_data/music
- source_host: pve5.mgrote.net - source_host: pve5.mgrote.net
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/tmp destination_dataset: backup/pve5/tmp
source_dataset: hdd_data_raidz/tmp source_dataset: hdd_data/tmp
- source_host: pve5.mgrote.net - source_host: pve5.mgrote.net
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/archiv destination_dataset: backup/pve5/archiv
source_dataset: hdd_data_raidz/archiv source_dataset: hdd_data/archiv
- source_host: pve5.mgrote.net - source_host: pve5.mgrote.net
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/bilder destination_dataset: backup/pve5/bilder
source_dataset: hdd_data_raidz/bilder source_dataset: hdd_data/bilder
- source_host: pve5.mgrote.net - source_host: pve5.mgrote.net
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/scans destination_dataset: backup/pve5/scans
source_dataset: hdd_data_raidz/scans source_dataset: hdd_data/scans
- source_host: pve5.mgrote.net - source_host: pve5.mgrote.net
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/restic destination_dataset: backup/pve5/restic
source_dataset: hdd_data_raidz/restic source_dataset: hdd_data/restic
- source_host: pve5.mgrote.net - source_host: pve5.mgrote.net
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/backup destination_dataset: backup/pve5/backup
source_dataset: hdd_data_raidz/backup source_dataset: hdd_data/backup
- source_host: pve5.mgrote.net - source_host: pve5.mgrote.net
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/buecher destination_dataset: backup/pve5/buecher
source_dataset: hdd_data_raidz/buecher source_dataset: hdd_data/buecher
- source_host: pve5.mgrote.net - source_host: pve5.mgrote.net
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/programme destination_dataset: backup/pve5/programme
source_dataset: hdd_data_raidz/programme source_dataset: hdd_data/programme
- source_host: pve5.mgrote.net - source_host: pve5.mgrote.net
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/vm destination_dataset: backup/pve5/vm
source_dataset: hdd_data_raidz/vm source_dataset: hdd_data/vm
# sanoid # sanoid
sanoid_datasets: sanoid_datasets:

View File

@ -75,62 +75,62 @@ sanoid_syncoid_datasets_sync:
- source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/pve_backup destination_dataset: backup/pve5/pve_backup
source_dataset: hdd_data_raidz/pve_backup source_dataset: hdd_data/pve_backup
- source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/videos destination_dataset: backup/pve5/videos
source_dataset: hdd_data_raidz/videos source_dataset: hdd_data/videos
- source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/music destination_dataset: backup/pve5/music
source_dataset: hdd_data_raidz/music source_dataset: hdd_data/music
- source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/tmp destination_dataset: backup/pve5/tmp
source_dataset: hdd_data_raidz/tmp source_dataset: hdd_data/tmp
- source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/archiv destination_dataset: backup/pve5/archiv
source_dataset: hdd_data_raidz/archiv source_dataset: hdd_data/archiv
- source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/bilder destination_dataset: backup/pve5/bilder
source_dataset: hdd_data_raidz/bilder source_dataset: hdd_data/bilder
- source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/scans destination_dataset: backup/pve5/scans
source_dataset: hdd_data_raidz/scans source_dataset: hdd_data/scans
- source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/restic destination_dataset: backup/pve5/restic
source_dataset: hdd_data_raidz/restic source_dataset: hdd_data/restic
- source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/backup destination_dataset: backup/pve5/backup
source_dataset: hdd_data_raidz/backup source_dataset: hdd_data/backup
- source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/buecher destination_dataset: backup/pve5/buecher
source_dataset: hdd_data_raidz/buecher source_dataset: hdd_data/buecher
- source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/programme destination_dataset: backup/pve5/programme
source_dataset: hdd_data_raidz/programme source_dataset: hdd_data/programme
- source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann - source_host: 192.168.2.16 # pve5, weil pbs den fqdn nicht auflösen kann
destination_mount_check: backup destination_mount_check: backup
destination_dataset: backup/pve5/vm destination_dataset: backup/pve5/vm
source_dataset: hdd_data_raidz/vm source_dataset: hdd_data/vm
# sanoid # sanoid
sanoid_datasets: sanoid_datasets:

View File

@ -3,8 +3,8 @@
# der Speicherort für die VMs ist verschlüsselt # der Speicherort für die VMs ist verschlüsselt
# zfs create -o encryption=aes-256-gcm -o keyformat=passphrase rpool/vm # zfs create -o encryption=aes-256-gcm -o keyformat=passphrase rpool/vm
# entschlüsseln nach Boot mit: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l # entschlüsseln nach Boot mit: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l
## HDD_DATA_RAIDZ ## hdd_data
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data_raidz mirror /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi2 ### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data mirror /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi2
# mgrote.zfs_manage_datasets # mgrote.zfs_manage_datasets
### mgrote_zfs_extra ### mgrote_zfs_extra
@ -35,8 +35,8 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
state: present state: present
- dataset: rpool/vm/lxc - dataset: rpool/vm/lxc
state: present state: present
# hdd_data_raidz # hdd_data
- dataset: hdd_data_raidz - dataset: hdd_data
state: present state: present
compression: zstd compression: zstd
sync: disabled sync: disabled
@ -45,95 +45,95 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
atime: on # noqa yaml[truthy] atime: on # noqa yaml[truthy]
snapdir: hidden snapdir: hidden
reservation: 1G reservation: 1G
- dataset: hdd_data_raidz/papa_backup - dataset: hdd_data/papa_backup
state: present state: present
- dataset: hdd_data_raidz/pve_backup - dataset: hdd_data/pve_backup
state: present state: present
recordsize: 1M recordsize: 1M
- dataset: hdd_data_raidz/videos - dataset: hdd_data/videos
state: present state: present
recordsize: 1M recordsize: 1M
- dataset: hdd_data_raidz/music - dataset: hdd_data/music
state: present state: present
recordsize: 1M recordsize: 1M
- dataset: hdd_data_raidz/tmp - dataset: hdd_data/tmp
state: present state: present
- dataset: hdd_data_raidz/archiv - dataset: hdd_data/archiv
state: present state: present
- dataset: hdd_data_raidz/bilder - dataset: hdd_data/bilder
state: present state: present
recordsize: 1M recordsize: 1M
- dataset: hdd_data_raidz/scans - dataset: hdd_data/scans
state: present state: present
- dataset: hdd_data_raidz/restic - dataset: hdd_data/restic
state: present state: present
- dataset: hdd_data_raidz/backup - dataset: hdd_data/backup
state: present state: present
- dataset: hdd_data_raidz/buecher - dataset: hdd_data/buecher
state: present state: present
- dataset: hdd_data_raidz/programme - dataset: hdd_data/programme
state: present state: present
- dataset: hdd_data_raidz/vm - dataset: hdd_data/vm
state: present state: present
zfs_extra_arc_max_size: "1073741824" # 1GB in Bytes zfs_extra_arc_max_size: "1073741824" # 1GB in Bytes
zfs_extra_zfs_pools: zfs_extra_zfs_pools:
- name: "rpool" - name: "rpool"
systemd_timer_schedule: "*-01,04,07,10-01 23:00" # jeden ersten eines jeden Quartals systemd_timer_schedule: "*-01,04,07,10-01 23:00" # jeden ersten eines jeden Quartals
- name: "hdd_data_raidz" - name: "hdd_data"
systemd_timer_schedule: "*-01,04,07,10-01 23:00" systemd_timer_schedule: "*-01,04,07,10-01 23:00"
### mgrote_zfs_sanoid ### mgrote_zfs_sanoid
sanoid_datasets: sanoid_datasets:
- path: 'hdd_data_raidz/videos' - path: 'hdd_data/videos'
template: '3tage' template: '3tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
- path: 'hdd_data_raidz/music' - path: 'hdd_data/music'
template: '14tage' template: '14tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
- path: 'hdd_data_raidz/papa_backup' - path: 'hdd_data/papa_backup'
template: '14tage' template: '14tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
- path: 'hdd_data_raidz/tmp' - path: 'hdd_data/tmp'
template: '3tage' template: '3tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
- path: 'hdd_data_raidz/pve_backup' - path: 'hdd_data/pve_backup'
template: '3tage' template: '3tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
- path: 'hdd_data_raidz/archiv' - path: 'hdd_data/archiv'
template: '14tage' template: '14tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
- path: hdd_data_raidz/bilder - path: hdd_data/bilder
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '14tage' template: '14tage'
- path: hdd_data_raidz/scans - path: hdd_data/scans
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '3tage' template: '3tage'
- path: hdd_data_raidz/backup - path: hdd_data/backup
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '31tage' template: '31tage'
- path: hdd_data_raidz/restic - path: hdd_data/restic
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '3tage' template: '3tage'
- path: hdd_data_raidz/programme - path: hdd_data/programme
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '14tage' template: '14tage'
- path: hdd_data_raidz/buecher - path: hdd_data/buecher
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '14tage' template: '14tage'
- path: hdd_data_raidz/vm - path: hdd_data/vm
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: 'pve3tage' template: 'pve3tage'
@ -161,55 +161,55 @@ cv4pve_dl_link: "https://github.com/Corsinvest/cv4pve-autosnap/releases/download
pve_bind_mounts: pve_bind_mounts:
- vmid: 100 - vmid: 100
mp_nr: 0 mp_nr: 0
mp_path_host: /hdd_data_raidz/videos mp_path_host: /hdd_data/videos
mp_path_guest: /shares_videos mp_path_guest: /shares_videos
- vmid: 100 - vmid: 100
mp_nr: 2 mp_nr: 2
mp_path_host: /hdd_data_raidz/pve_backup mp_path_host: /hdd_data/pve_backup
mp_path_guest: /shares_pve_backup mp_path_guest: /shares_pve_backup
- vmid: 100 - vmid: 100
mp_nr: 3 mp_nr: 3
mp_path_host: /hdd_data_raidz/papa_backup mp_path_host: /hdd_data/papa_backup
mp_path_guest: /shares_papa_backup mp_path_guest: /shares_papa_backup
- vmid: 100 - vmid: 100
mp_nr: 4 mp_nr: 4
mp_path_host: /hdd_data_raidz/music mp_path_host: /hdd_data/music
mp_path_guest: /shares_music mp_path_guest: /shares_music
- vmid: 100 - vmid: 100
mp_nr: 5 mp_nr: 5
mp_path_host: /hdd_data_raidz/tmp mp_path_host: /hdd_data/tmp
mp_path_guest: /shares_tmp mp_path_guest: /shares_tmp
- vmid: 100 - vmid: 100
mp_nr: 6 mp_nr: 6
mp_path_host: /hdd_data_raidz/archiv mp_path_host: /hdd_data/archiv
mp_path_guest: /shares_archiv mp_path_guest: /shares_archiv
- vmid: 100 - vmid: 100
mp_nr: 7 mp_nr: 7
mp_path_host: /hdd_data_raidz/bilder mp_path_host: /hdd_data/bilder
mp_path_guest: /shares_bilder mp_path_guest: /shares_bilder
- vmid: 100 - vmid: 100
mp_nr: 9 mp_nr: 9
mp_path_host: /hdd_data_raidz/scans mp_path_host: /hdd_data/scans
mp_path_guest: /shares_scans mp_path_guest: /shares_scans
- vmid: 100 - vmid: 100
mp_nr: 10 mp_nr: 10
mp_path_host: /hdd_data_raidz/restic mp_path_host: /hdd_data/restic
mp_path_guest: /shares_restic mp_path_guest: /shares_restic
- vmid: 100 - vmid: 100
mp_nr: 12 mp_nr: 12
mp_path_host: /hdd_data_raidz/backup mp_path_host: /hdd_data/backup
mp_path_guest: /shares_backup mp_path_guest: /shares_backup
- vmid: 100 - vmid: 100
mp_nr: 14 mp_nr: 14
mp_path_host: /hdd_data_raidz/buecher mp_path_host: /hdd_data/buecher
mp_path_guest: /shares_buecher mp_path_guest: /shares_buecher
- vmid: 100 - vmid: 100
mp_nr: 15 mp_nr: 15
mp_path_host: /hdd_data_raidz/programme mp_path_host: /hdd_data/programme
mp_path_guest: /shares_programme mp_path_guest: /shares_programme
- vmid: 100 - vmid: 100
mp_nr: 16 mp_nr: 16
mp_path_host: /hdd_data_raidz/vm mp_path_host: /hdd_data/vm
mp_path_guest: /shares_vm mp_path_guest: /shares_vm
# mgrote.pbs_pve_integration # mgrote.pbs_pve_integration

View File

@ -3,8 +3,8 @@
# der Speicherort für die VMs ist verschlüsselt # der Speicherort für die VMs ist verschlüsselt
# zfs create -o encryption=aes-256-gcm -o keyformat=passphrase rpool/vm # zfs create -o encryption=aes-256-gcm -o keyformat=passphrase rpool/vm
# entschlüsseln nach Boot mit: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l # entschlüsseln nach Boot mit: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l
## HDD_DATA_RAIDZ ## hdd_data
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data_raidz mirror /dev/disk/by-id/ata-TOSHIBA_MG09ACA18TE_Z1B0A27KFJDH /dev/disk/by-id/ata-TOSHIBA_MG09ACA18TE_Z1B0A28LFJDH ### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data mirror /dev/disk/by-id/ata-TOSHIBA_MG09ACA18TE_Z1B0A27KFJDH /dev/disk/by-id/ata-ST18000NM003D-3DL103_ZVTBSAYS
# mgrote.zfs_manage_datasets # mgrote.zfs_manage_datasets
### mgrote_zfs_extra ### mgrote_zfs_extra
@ -39,8 +39,8 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
state: present state: present
- dataset: rpool/data - dataset: rpool/data
state: present state: present
# hdd_data_raidz # hdd_data
- dataset: hdd_data_raidz - dataset: hdd_data
state: present state: present
compression: zstd compression: zstd
sync: disabled sync: disabled
@ -49,42 +49,42 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
atime: on # noqa yaml[truthy] atime: on # noqa yaml[truthy]
snapdir: hidden snapdir: hidden
reservation: 1G reservation: 1G
- dataset: hdd_data_raidz/papa_backup - dataset: hdd_data/papa_backup
state: present state: present
- dataset: hdd_data_raidz/pve_backup - dataset: hdd_data/pve_backup
state: present state: present
recordsize: 1M recordsize: 1M
- dataset: hdd_data_raidz/videos - dataset: hdd_data/videos
state: present state: present
recordsize: 1M recordsize: 1M
- dataset: hdd_data_raidz/music - dataset: hdd_data/music
state: present state: present
recordsize: 1M recordsize: 1M
- dataset: hdd_data_raidz/tmp - dataset: hdd_data/tmp
state: present state: present
- dataset: hdd_data_raidz/archiv - dataset: hdd_data/archiv
state: present state: present
- dataset: hdd_data_raidz/bilder - dataset: hdd_data/bilder
state: present state: present
recordsize: 1M recordsize: 1M
- dataset: hdd_data_raidz/scans - dataset: hdd_data/scans
state: present state: present
- dataset: hdd_data_raidz/restic - dataset: hdd_data/restic
state: present state: present
- dataset: hdd_data_raidz/backup - dataset: hdd_data/backup
state: present state: present
- dataset: hdd_data_raidz/buecher - dataset: hdd_data/buecher
state: present state: present
- dataset: hdd_data_raidz/programme - dataset: hdd_data/programme
state: present state: present
- dataset: hdd_data_raidz/vm - dataset: hdd_data/vm
state: present state: present
zfs_extra_arc_max_size: "8589934592" # 8GB in Bytes zfs_extra_arc_max_size: "8589934592" # 8GB in Bytes
zfs_extra_zfs_pools: zfs_extra_zfs_pools:
- name: "rpool" - name: "rpool"
systemd_timer_schedule: "*-01,04,07,10-01 23:00" # jeden ersten eines jeden Quartals systemd_timer_schedule: "*-01,04,07,10-01 23:00" # jeden ersten eines jeden Quartals
- name: "hdd_data_raidz" - name: "hdd_data"
systemd_timer_schedule: "*-01,04,07,10-01 23:00" systemd_timer_schedule: "*-01,04,07,10-01 23:00"
### mgrote_zfs_sanoid ### mgrote_zfs_sanoid
@ -93,56 +93,56 @@ sanoid_snaps_enable: true
sanoid_syncoid_source_host: true sanoid_syncoid_source_host: true
sanoid_syncoid_ssh_pubkey: "{{ lookup('keepass', 'sanoid_syncoid_public_key', 'notes') }}" sanoid_syncoid_ssh_pubkey: "{{ lookup('keepass', 'sanoid_syncoid_public_key', 'notes') }}"
sanoid_datasets: sanoid_datasets:
### hdd_data_raidz ### hdd_data
- path: 'hdd_data_raidz/videos' - path: 'hdd_data/videos'
template: '3tage' template: '3tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
- path: 'hdd_data_raidz/music' - path: 'hdd_data/music'
template: '14tage' template: '14tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
- path: 'hdd_data_raidz/papa_backup' - path: 'hdd_data/papa_backup'
template: '14tage' template: '14tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
- path: 'hdd_data_raidz/tmp' - path: 'hdd_data/tmp'
template: '3tage' template: '3tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
- path: 'hdd_data_raidz/pve_backup' - path: 'hdd_data/pve_backup'
template: '3tage' template: '3tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
- path: 'hdd_data_raidz/archiv' - path: 'hdd_data/archiv'
template: '14tage' template: '14tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
- path: hdd_data_raidz/bilder - path: hdd_data/bilder
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '14tage' template: '14tage'
- path: hdd_data_raidz/scans - path: hdd_data/scans
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '3tage' template: '3tage'
- path: hdd_data_raidz/backup - path: hdd_data/backup
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '31tage' template: '31tage'
- path: hdd_data_raidz/restic - path: hdd_data/restic
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '3tage' template: '3tage'
- path: hdd_data_raidz/programme - path: hdd_data/programme
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '14tage' template: '14tage'
- path: hdd_data_raidz/buecher - path: hdd_data/buecher
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '14tage' template: '14tage'
- path: hdd_data_raidz/vm - path: hdd_data/vm
recursive: 'no' # noqa yaml[truthy] recursive: 'no' # noqa yaml[truthy]
snapshots: true snapshots: true
template: '3tage' template: '3tage'
@ -172,55 +172,55 @@ pve_bind_mounts:
### fileserver3 ### fileserver3
- vmid: 115 - vmid: 115
mp_nr: 0 mp_nr: 0
mp_path_host: /hdd_data_raidz/videos mp_path_host: /hdd_data/videos
mp_path_guest: /shares_videos mp_path_guest: /shares_videos
- vmid: 115 - vmid: 115
mp_nr: 2 mp_nr: 2
mp_path_host: /hdd_data_raidz/pve_backup mp_path_host: /hdd_data/pve_backup
mp_path_guest: /shares_pve_backup mp_path_guest: /shares_pve_backup
- vmid: 115 - vmid: 115
mp_nr: 3 mp_nr: 3
mp_path_host: /hdd_data_raidz/papa_backup mp_path_host: /hdd_data/papa_backup
mp_path_guest: /shares_papa_backup mp_path_guest: /shares_papa_backup
- vmid: 115 - vmid: 115
mp_nr: 4 mp_nr: 4
mp_path_host: /hdd_data_raidz/music mp_path_host: /hdd_data/music
mp_path_guest: /shares_music mp_path_guest: /shares_music
- vmid: 115 - vmid: 115
mp_nr: 5 mp_nr: 5
mp_path_host: /hdd_data_raidz/tmp mp_path_host: /hdd_data/tmp
mp_path_guest: /shares_tmp mp_path_guest: /shares_tmp
- vmid: 115 - vmid: 115
mp_nr: 6 mp_nr: 6
mp_path_host: /hdd_data_raidz/archiv mp_path_host: /hdd_data/archiv
mp_path_guest: /shares_archiv mp_path_guest: /shares_archiv
- vmid: 115 - vmid: 115
mp_nr: 7 mp_nr: 7
mp_path_host: /hdd_data_raidz/bilder mp_path_host: /hdd_data/bilder
mp_path_guest: /shares_bilder mp_path_guest: /shares_bilder
- vmid: 115 - vmid: 115
mp_nr: 9 mp_nr: 9
mp_path_host: /hdd_data_raidz/scans mp_path_host: /hdd_data/scans
mp_path_guest: /shares_scans mp_path_guest: /shares_scans
- vmid: 115 - vmid: 115
mp_nr: 10 mp_nr: 10
mp_path_host: /hdd_data_raidz/restic mp_path_host: /hdd_data/restic
mp_path_guest: /shares_restic mp_path_guest: /shares_restic
- vmid: 115 - vmid: 115
mp_nr: 12 mp_nr: 12
mp_path_host: /hdd_data_raidz/backup mp_path_host: /hdd_data/backup
mp_path_guest: /shares_backup mp_path_guest: /shares_backup
- vmid: 115 - vmid: 115
mp_nr: 14 mp_nr: 14
mp_path_host: /hdd_data_raidz/buecher mp_path_host: /hdd_data/buecher
mp_path_guest: /shares_buecher mp_path_guest: /shares_buecher
- vmid: 115 - vmid: 115
mp_nr: 15 mp_nr: 15
mp_path_host: /hdd_data_raidz/programme mp_path_host: /hdd_data/programme
mp_path_guest: /shares_programme mp_path_guest: /shares_programme
- vmid: 115 - vmid: 115
mp_nr: 16 mp_nr: 16
mp_path_host: /hdd_data_raidz/vm mp_path_host: /hdd_data/vm
mp_path_guest: /shares_vm mp_path_guest: /shares_vm
# mgrote.pbs_pve_integration # mgrote.pbs_pve_integration

View File

@ -18,6 +18,10 @@
become: true become: true
ansible.builtin.command: "pct set {{ item.vmid }} -mp{{ item.mp_nr }} {{ item.mp_path_host }},mp={{ item.mp_path_guest }}" ansible.builtin.command: "pct set {{ item.vmid }} -mp{{ item.mp_nr }} {{ item.mp_path_host }},mp={{ item.mp_path_guest }}"
register: restart register: restart
changed_when:
- restart.rc == 25
failed_when:
- restart.rc != 25
notify: restart lxc notify: restart lxc
# füge bind-mount hinzu falls er fehlt, also rc ungleich 0 # füge bind-mount hinzu falls er fehlt, also rc ungleich 0
# pro bind-mount # pro bind-mount
@ -29,7 +33,7 @@
# schreibe vm id in die Liste "reboot" # schreibe vm id in die Liste "reboot"
- name: set reboot list # noqa no-handler var-naming[pattern] - name: set reboot list # noqa no-handler var-naming[pattern]
ansible.builtin.set_fact: ansible.builtin.set_fact:
ansible.builtin.reboot: reboot:
- "{{ item.vmid }}" - "{{ item.vmid }}"
when: restart.changed when: restart.changed

View File

@ -35,7 +35,7 @@ Es gibt 3 Funktionen:
--- ---
sanoid_snaps_enable: true sanoid_snaps_enable: true
sanoid_datasets: sanoid_datasets:
- path: 'hdd_data_raidz/videos' - path: 'hdd_data/videos'
template: '31tage' template: '31tage'
recursive: 'yes' recursive: 'yes'
snapshots: true snapshots: true
@ -76,8 +76,8 @@ Es gibt 3 Funktionen:
sanoid_syncoid_datasets_sync: sanoid_syncoid_datasets_sync:
- source_host: host1.lan - source_host: host1.lan
source_dataset: hdd_data_mirror source_dataset: hdd_data_mirror
destination_mount_check: hdd_data_raidz/encrypted # Wenn dieses Dataset nicht gemountet ist (z.B. durch Verschlüsselung), dann bricht syncoid ab destination_mount_check: hdd_data/encrypted # Wenn dieses Dataset nicht gemountet ist (z.B. durch Verschlüsselung), dann bricht syncoid ab
destination_dataset: hdd_data_raidz/encrypted/syncoid/zfs1 destination_dataset: hdd_data/encrypted/syncoid/zfs1
skip_parent: false skip_parent: false
sanoid_syncoid_ssh_privkey: "{{ lookup('keepass', 'sanoid_syncoid_private_key', 'notes') }}" sanoid_syncoid_ssh_privkey: "{{ lookup('keepass', 'sanoid_syncoid_private_key', 'notes') }}"
sanoid_syncoid_destination_host: true sanoid_syncoid_destination_host: true

View File

@ -8,11 +8,11 @@ sanoid_deb_url: http://docker10.mgrote.net:3344/sanoid_3.0.0.deb
# ### "Default" Datasets # ### "Default" Datasets
# sanoid_datasets: # dictionary # sanoid_datasets: # dictionary
# - path: 'hdd_data_raidz/data' # path to dataset; without leading / # - path: 'hdd_data/data' # path to dataset; without leading /
# template: 'fiveminutes' # name # template: 'fiveminutes' # name
# recursive: 'no' # recursive snapshotting # recursive: 'no' # recursive snapshotting
# snapshots: true # (de)activate; can be used to disable snapshotting of subdatasets if recursive is set # snapshots: true # (de)activate; can be used to disable snapshotting of subdatasets if recursive is set
# - path: 'hdd_data_raidz/test' # - path: 'hdd_data/test'
# snapshots: false # deaktiviert sanoid für das dataset # snapshots: false # deaktiviert sanoid für das dataset
# #
# ### Templates # ### Templates
@ -47,6 +47,6 @@ sanoid_user_group: sanoid
### mgrote_sanoid ### mgrote_sanoid
#sanoid_syncoid_datasets_sync: #sanoid_syncoid_datasets_sync:
# - source_host: pve5.mgrote.net # - source_host: pve5.mgrote.net
# source_dataset: hdd_data_raidz/tmp # source_dataset: hdd_data/tmp
# destination_mount_check: hdd_data_raidz/tmp # zielpool # destination_mount_check: hdd_data/tmp # zielpool
# destination_dataset: backup/pve5/tmp # destination_dataset: backup/pve5/tmp