homeserver/host_vars/pbs-test.mgrote.net.yml
Michael Grote 28f22968da
Some checks failed
ci/woodpecker/push/gitleaks Pipeline was successful
ci/woodpecker/push/ansible-lint Pipeline was successful
ci/woodpecker/push/ansible-playbook Pipeline failed
ci: deploy config on merge or push (#127)
Reviewed-on: #127
Co-authored-by: Michael Grote <michael.grote@posteo.de>
Co-committed-by: Michael Grote <michael.grote@posteo.de>

ci: testing deployment (#128)

Reviewed-on: #128
Co-authored-by: Michael Grote <michael.grote@posteo.de>
Co-committed-by: Michael Grote <michael.grote@posteo.de>

ci: test

ci: enable deployment

ci: set ssh-key for deployment

ci: debug

ci: deactivate ansible-lint temporarily

ci: deactivate ansible-galaxy temporarily

ci: debug ssh-key shell redirect

ci: base64

ci: debug

ci: debug

ci: fix output

Revert "ci: deactivate ansible-lint temporarily"

This reverts commit 6729342f26.

ci: fix vault-pass secret

pbs_integration: enable no_log

ci: debug ansible-vault

ci: debug

ci: ansible-vault + move to viczem.keepass (#130)

Reviewed-on: #130
Co-authored-by: Michael Grote <michael.grote@posteo.de>
Co-committed-by: Michael Grote <michael.grote@posteo.de>

ff

plugin umbenennung

ff
2024-07-09 22:27:57 +02:00

167 lines
4.9 KiB
YAML

---
# Host variables for the Proxmox Backup Server test instance (pbs-test.mgrote.net).
# pbs_* — consumed by the PBS role: datastore layout, prune schedule, ACLs, users.
pbs_datastores:
  - name: zfs_backup
    path: /backup/pbs_data  # lives on the encrypted "backup" zpool (see zfs_datasets below)
    gc_schedule: "sat 19:00"  # weekly garbage collection, after the prune job
pbs_prune_jobs:
  - name: standard
    schedule: "sat 18:15"  # runs shortly before gc_schedule so gc can reclaim pruned chunks
    store: zfs_backup
    keep_last: 3
    keep_hourly: 24
    keep_daily: 7
    keep_weekly: 2
pbs_permissions:
  - user: user_pve5-test@pbs
    datastore: zfs_backup
    role: DatastoreBackup  # least-privilege PBS role for clients pushing backups
pbs_users:
  # Passwords are pulled from the KeePass vault via the viczem.keepass lookup,
  # so no secrets are stored in this file.
  - name: user_pve5
    password: "{{ lookup('viczem.keepass.keepass', 'pbs_pve_user', 'password') }}"
    realm: pbs
  - name: user_pve5-test
    password: "{{ lookup('viczem.keepass.keepass', 'pbs_pve_user-test', 'password') }}"
    realm: pbs
# rpool is left unencrypted because it is the boot medium.
# Unlock and mount the encrypted pool after boot with:
#   sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l
## backup
### Pool was created with:
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase backup /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1
# mgrote.zfs_manage_datasets
### mgrote_zfs_extra
# Variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
zfs_datasets: # data pools are not managed here
  # rpool - system datasets
  - dataset: rpool
    state: present
    compression: zstd
    sync: disabled
    xattr: sa
    dnodesize: auto
    # NOTE(review): unquoted `on` parses as YAML boolean true, not the string "on" —
    # the noqa marker suggests this is deliberate; confirm the role renders it as zfs expects.
    atime: on # noqa yaml[truthy]
    snapdir: hidden
    reservation: 1G
    refreservation: 1G
    acltype: posix
  - dataset: rpool/ROOT
    state: present
    refreservation: 1G
  - dataset: rpool/ROOT/pbs-1
    state: present
    refreservation: 1G
    acltype: posix # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
  # backup pool
  - dataset: backup/pbs_data
    state: present
    quota: 10GB  # NOTE(review): other sizes in this file use the bare "G" suffix; zfs accepts both forms
  - dataset: backup/pve5
    state: present
    canmount: off # noqa yaml[truthy]
# Variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
zfs_extra_arc_max_size: "1073741824" # 1 GB in bytes; kept small on this test VM
zfs_extra_zfs_pools:
  # systemd calendar expressions: scrub on the first day of each quarter at 23:00
  - name: "rpool"
    systemd_timer_schedule: "*-01,04,07,10-01 23:00"
  - name: "backup"
    systemd_timer_schedule: "*-01,04,07,10-01 23:00"
### mgrote_zfs_sanoid
sanoid_snaps_enable: true  # take local sanoid snapshots on this host
## syncoid
# NOTE(review): semantics of this flag are defined by the sanoid role — confirm
# whether `false` here is intended given that this host receives the syncs below.
sanoid_syncoid_destination_host: false
# Private SSH key for syncoid pulls, stored in the KeePass entry's notes field.
sanoid_syncoid_ssh_privkey: "{{ lookup('viczem.keepass.keepass', 'sanoid_syncoid_private_key', 'notes') }}"
sanoid_syncoid_timer: '*-*-* *:00:00' # hourly, on the hour (systemd calendar syntax)
sanoid_syncoid_bwlimit: 30m # cap replication at 30 MB/s
# Replication jobs: pull each hdd_data/* dataset from pve5.mgrote.net into
# backup/pve5/* on this host. destination_mount_check names the pool that must
# be imported/mounted before a sync may run (guards against writing into an
# empty mountpoint while "backup" is still locked).
sanoid_syncoid_datasets_sync:
  - source_host: pve5.mgrote.net
    destination_mount_check: backup
    destination_dataset: backup/pve5/pve_backup
    source_dataset: hdd_data/pve_backup
  - source_host: pve5.mgrote.net
    destination_mount_check: backup
    destination_dataset: backup/pve5/videos
    source_dataset: hdd_data/videos
  - source_host: pve5.mgrote.net
    destination_mount_check: backup
    destination_dataset: backup/pve5/music
    source_dataset: hdd_data/music
  - source_host: pve5.mgrote.net
    destination_mount_check: backup
    destination_dataset: backup/pve5/tmp
    source_dataset: hdd_data/tmp
  - source_host: pve5.mgrote.net
    destination_mount_check: backup
    destination_dataset: backup/pve5/archiv
    source_dataset: hdd_data/archiv
  - source_host: pve5.mgrote.net
    destination_mount_check: backup
    destination_dataset: backup/pve5/bilder
    source_dataset: hdd_data/bilder
  - source_host: pve5.mgrote.net
    destination_mount_check: backup
    destination_dataset: backup/pve5/scans
    source_dataset: hdd_data/scans
  - source_host: pve5.mgrote.net
    destination_mount_check: backup
    destination_dataset: backup/pve5/restic
    source_dataset: hdd_data/restic
  - source_host: pve5.mgrote.net
    destination_mount_check: backup
    destination_dataset: backup/pve5/backup
    source_dataset: hdd_data/backup
  - source_host: pve5.mgrote.net
    destination_mount_check: backup
    destination_dataset: backup/pve5/buecher
    source_dataset: hdd_data/buecher
  - source_host: pve5.mgrote.net
    destination_mount_check: backup
    destination_dataset: backup/pve5/programme
    source_dataset: hdd_data/programme
  - source_host: pve5.mgrote.net
    destination_mount_check: backup
    destination_dataset: backup/pve5/vm
    source_dataset: hdd_data/vm
# sanoid — per-dataset snapshot schedules. Template names ('pve3tage' = 3 days,
# '14tage' = 14 days) are retention policies defined by the sanoid role.
# recursive is the string 'no' on purpose (sanoid.conf expects yes/no text),
# hence the noqa truthy markers.
sanoid_datasets:
  ### rpool
  - path: rpool
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: 'pve3tage'
  - path: rpool/ROOT
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: 'pve3tage'
  - path: rpool/ROOT/pbs-1
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: 'pve3tage'
  ### backup
  - path: backup
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: '14tage'
  - path: backup/pbs_data
    recursive: 'no' # noqa yaml[truthy]
    snapshots: true
    template: '14tage'