# homeserver/host_vars/pve5.mgrote.net.yml
# noqa yaml[truthy]
---
# rpool is unencrypted because it is the boot medium
# the storage location for the VMs is encrypted
# zfs create -o encryption=aes-256-gcm -o keyformat=passphrase rpool/vm
# unlock after boot with: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l
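# Quick status check after unlocking (not part of the role, just for reference;
# keystatus/mounted are standard ZFS properties):
## zfs get -r keystatus,mounted rpool/vm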
## hdd_data
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data mirror /dev/disk/by-id/ata-TOSHIBA_MG09ACA18TE_Z1B0A27KFJDH /dev/disk/by-id/ata-ST18000NM003D-3DL103_ZVTBSAYS
## hdd_data "new"
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data /dev/disk/by-id/ata-ST18000NM003D-3DL103_ZVTBSAYS
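### Sketch of the manual unlock for hdd_data after a reboot (assumption: same
### import/load-key flow as for rpool/vm above, since keylocation=prompt):
### sudo zpool import -d /dev/disk/by-id/ hdd_data
### sudo zfs load-key -r hdd_data && sudo zfs mount -a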
# mgrote.zfs_manage_datasets
### mgrote_zfs_extra
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under the zfs_extra_* prefix
zfs_datasets:  # data pools themselves are not managed here
  # rpool - system datasets
  - dataset: rpool
    state: present
    compression: zstd
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on  # noqa yaml[truthy]
    snapdir: hidden
    reservation: 1G
    refreservation: 10G
    acltype: posix
  - dataset: rpool/ROOT
    state: present
    refreservation: 10G
  - dataset: rpool/ROOT/pve-1
    state: present
    refreservation: 10G
    acltype: posix  # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
  # rpool - Data
  - dataset: rpool/data
    state: present
  # rpool - VMs
  - dataset: rpool/vm
    state: present
  - dataset: rpool/vm/zvol
    state: present
  - dataset: rpool/vm/lxc
    state: present
  # hdd_data
  - dataset: hdd_data
    state: present
    compression: zstd
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on  # noqa yaml[truthy]
    snapdir: hidden
    reservation: 1G
    acltype: posix  # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
  - dataset: hdd_data/papa_backup
    state: present
  - dataset: hdd_data/pve_backup
    state: present
    recordsize: 1M
  - dataset: hdd_data/videos
    state: present
    recordsize: 1M
  - dataset: hdd_data/music
    state: present
    recordsize: 1M
  - dataset: hdd_data/tmp
    state: present
  - dataset: hdd_data/archiv
    state: present
  - dataset: hdd_data/bilder
    state: present
    recordsize: 1M
  - dataset: hdd_data/scans
    state: present
  - dataset: hdd_data/restic
    state: present
  - dataset: hdd_data/backup
    state: present
  - dataset: hdd_data/buecher
    state: present
  - dataset: hdd_data/programme
    state: present
  - dataset: hdd_data/vm
    state: present
zfs_extra_arc_max_size: "8589934592"  # 8 GiB in bytes (8 * 1024^3)
zfs_extra_zfs_pools:
  - name: "rpool"
    systemd_timer_schedule: "*-01,04,07,10-01 23:00"  # first day of each quarter at 23:00
  - name: "hdd_data"
    systemd_timer_schedule: "*-01,04,07,10-01 23:00"
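# Not part of the role, just a quick way to sanity-check the OnCalendar expression
# above directly on the host:
## systemd-analyze calendar "*-01,04,07,10-01 23:00"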
### mgrote_zfs_sanoid
sanoid_snaps_enable: true
sanoid_datasets:
  ### hdd_data
  - path: 'hdd_data/videos'
    template: '14tage'
    recursive: 'yes'
    snapshots: true
  - path: 'hdd_data/music'
    template: '14tage'
    recursive: 'yes'
    snapshots: true
  - path: 'hdd_data/papa_backup'
    template: '14tage'
    recursive: 'yes'
    snapshots: true
  - path: 'hdd_data/tmp'
    template: '14tage'
    recursive: 'yes'
    snapshots: true
  - path: 'hdd_data/pve_backup'
    template: '14tage'
    recursive: 'yes'
    snapshots: true
  - path: 'hdd_data/archiv'
    template: '14tage'
    recursive: 'yes'
    snapshots: true
  - path: hdd_data/bilder
    recursive: 'no'  # noqa yaml[truthy]
    snapshots: true
    template: '14tage'
  - path: hdd_data/scans
    recursive: 'no'  # noqa yaml[truthy]
    snapshots: true
    template: '14tage'
  - path: hdd_data/backup
    recursive: 'no'  # noqa yaml[truthy]
    snapshots: true
    template: '31tage'
  - path: hdd_data/restic
    recursive: 'no'  # noqa yaml[truthy]
    snapshots: true
    template: '14tage'
  - path: hdd_data/programme
    recursive: 'no'  # noqa yaml[truthy]
    snapshots: true
    template: '14tage'
  - path: hdd_data/buecher
    recursive: 'no'  # noqa yaml[truthy]
    snapshots: true
    template: '14tage'
  ### rpool
  - path: rpool
    recursive: 'no'  # noqa yaml[truthy]
    snapshots: true
    template: 'pve14tage'
  - path: rpool/ROOT
    recursive: 'no'  # noqa yaml[truthy]
    snapshots: true
    template: 'pve14tage'
  - path: rpool/ROOT/pve-1
    recursive: 'no'  # noqa yaml[truthy]
    snapshots: true
    template: 'pve14tage'
### mgrote_proxmox_bind_mounts
pve_bind_mounts:
  ### fileserver3
  - vmid: 107
    mp_nr: 0
    mp_path_host: /hdd_data/videos
    mp_path_guest: /shares_videos
  - vmid: 107
    mp_nr: 2
    mp_path_host: /hdd_data/pve_backup
    mp_path_guest: /shares_pve_backup
  - vmid: 107
    mp_nr: 3
    mp_path_host: /hdd_data/papa_backup
    mp_path_guest: /shares_papa_backup
  - vmid: 107
    mp_nr: 4
    mp_path_host: /hdd_data/music
    mp_path_guest: /shares_music
  - vmid: 107
    mp_nr: 5
    mp_path_host: /hdd_data/tmp
    mp_path_guest: /shares_tmp
  - vmid: 107
    mp_nr: 6
    mp_path_host: /hdd_data/archiv
    mp_path_guest: /shares_archiv
  - vmid: 107
    mp_nr: 7
    mp_path_host: /hdd_data/bilder
    mp_path_guest: /shares_bilder
  - vmid: 107
    mp_nr: 9
    mp_path_host: /hdd_data/scans
    mp_path_guest: /shares_scans
  - vmid: 107
    mp_nr: 10
    mp_path_host: /hdd_data/restic
    mp_path_guest: /shares_restic
  - vmid: 107
    mp_nr: 12
    mp_path_host: /hdd_data/backup
    mp_path_guest: /shares_backup
  - vmid: 107
    mp_nr: 14
    mp_path_host: /hdd_data/buecher
    mp_path_guest: /shares_buecher
  - vmid: 107
    mp_nr: 15
    mp_path_host: /hdd_data/programme
    mp_path_guest: /shares_programme
  - vmid: 107
    mp_nr: 16
    mp_path_host: /hdd_data/vm
    mp_path_guest: /shares_vm
# mgrote.pbs_pve_integration
pve_pbs_datastore:
  - name: pbs
    server: pbs.mgrote.net
    datastore: zfs_backup
    username: user_pve5@pbs
    password: "{{ lookup('viczem.keepass.keepass', 'pbs_pve_user', 'password') }}"
    fingerprint: "7F:AC:54:75:1C:33:55:84:1E:1E:3A:15:5A:5E:AF:79:33:C9:D4:E1:C0:A0:1C:0D:9E:6A:EA:82:F9:27:57:79"
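# The fingerprint above can be read out on the PBS host itself (assumes shell access
# to pbs.mgrote.net; this is not something the role does):
## proxmox-backup-manager cert info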
### mgrote_sync
rsync_host_role: source
rsync_mirror_dirs:  # NO trailing "/" on src or dest; an illustrative rsync call is sketched at the end of this file
  - src: /hdd_data/tmp
    dest: "{{ rsync_mirror_user }}@pbs.mgrote.net:/backup/pve5/tmp"
  - src: /hdd_data/archiv
    dest: "{{ rsync_mirror_user }}@pbs.mgrote.net:/backup/pve5/archiv"
  - src: /hdd_data/backup
    dest: "{{ rsync_mirror_user }}@pbs.mgrote.net:/backup/pve5/backup"
  - src: /hdd_data/bilder
    dest: "{{ rsync_mirror_user }}@pbs.mgrote.net:/backup/pve5/bilder"
  - src: /hdd_data/buecher
    dest: "{{ rsync_mirror_user }}@pbs.mgrote.net:/backup/pve5/buecher"
  - src: /hdd_data/music
    dest: "{{ rsync_mirror_user }}@pbs.mgrote.net:/backup/pve5/music"
  - src: /hdd_data/programme
    dest: "{{ rsync_mirror_user }}@pbs.mgrote.net:/backup/pve5/programme"
  - src: /hdd_data/pve_backup
    dest: "{{ rsync_mirror_user }}@pbs.mgrote.net:/backup/pve5/pve_backup"
  - src: /hdd_data/restic
    dest: "{{ rsync_mirror_user }}@pbs.mgrote.net:/backup/pve5/restic"
  - src: /hdd_data/scans
    dest: "{{ rsync_mirror_user }}@pbs.mgrote.net:/backup/pve5/scans"
  - src: /hdd_data/videos
    dest: "{{ rsync_mirror_user }}@pbs.mgrote.net:/backup/pve5/videos"
# hdd_data/vm is deliberately not included, it is too large
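# Illustration only (assumption, not the exact command the mgrote_sync role runs):
# mirroring one entry above corresponds roughly to
## rsync -a --delete /hdd_data/tmp <rsync_mirror_user>@pbs.mgrote.net:/backup/pve5/tmp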