New backup server (syncoid) (#543)

Co-authored-by: Michael Grote <michael.grote@posteo.de>
Reviewed-on: #543
Michael Grote 2023-07-05 11:47:57 +02:00
parent ce813a881b
commit 0bb877a9a9
32 changed files with 663 additions and 121 deletions

View File

@@ -128,3 +128,7 @@ services:
logo: "assets/icons/pve.png"
url: "https://192.168.2.239:8007"
target: "_blank"
- name: "Proxmox Backup Server - Test"
logo: "assets/icons/pve.png"
url: "https://192.168.2.18:8007"
target: "_blank"

View File

@@ -23,7 +23,7 @@ services:
environment:
# FLASK_DEBUG: 1 # for debugging
# FLASK_APP: app # for debugging
MAX_CONTENT_LENGTH: 50
MAX_CONTENT_LENGTH: 500
UPLOAD_DIRECTORY: /uploads
AUTH_TOKEN: {{ lookup('keepass', 'httpd-api-server-token', 'password') }}
ENABLE_WEBSERVER: false

View File

@@ -62,6 +62,7 @@
- name: squid_traffic # proxmox
- name: solarman_api_inverter
- name: deye
- name: timesync
munin_node_plugins:
- name: timesync
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
@@ -84,6 +85,9 @@
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: chrony
src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
### mgrote.dotfiles
dotfiles_repo_url: https://git.mgrote.net/mg/dotfiles
dotfiles_repo_path: /home/mg/dotfiles
@@ -233,6 +237,8 @@
autosnap: 'yes'
autoprune: 'yes'
### mgrote.zfs_sanoid
sanoid_deb_url: http://docker10.grote.lan:3344/sanoid_2.1.0.deb
# Ansible variables
### User

View File

@@ -57,8 +57,6 @@
munin_node_bind_port: "4949"
munin_node_allowed_cidrs: [192.168.2.0/24]
munin_node_plugins:
- name: timesync
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
- name: systemd_status
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
@@ -94,6 +92,8 @@
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_volumesize
src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/docker/docker_volumesize
- name: chrony
src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
### mgrote.apt_manage_packages
apt_packages_extra:
- bc # for munin plugins

View File

@@ -24,8 +24,8 @@
from_ip: 192.168.2.144/24
### mgrote.munin-node
munin_node_plugins:
- name: timesync
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
- name: chrony
src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem

View File

@@ -24,8 +24,8 @@
from_ip: 192.168.2.144/24
### mgrote.munin-node
munin_node_plugins:
- name: timesync
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
- name: chrony
src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem

View File

@@ -104,8 +104,8 @@
### mgrote.munin-node
munin_node_plugins:
- name: timesync
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
- name: chrony
src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem

View File

@@ -30,8 +30,8 @@
### mgrote.munin-node
munin_node_plugins:
- name: timesync
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
- name: chrony
src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
@@ -39,11 +39,6 @@
config: |
[systemd_mem]
env.all_services true
- name: lvm_
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/disk/lvm_
config: |
[lvm_*]
user root
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |

View File

@@ -26,7 +26,7 @@
public_ssh_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJcBwOjanQV6sFWaTetqpl20SVe3aRzGjKbsp7hKkDCE mg@irantu
allow_sudo: true
allow_passwordless_sudo: true
### mgrote.apt_manage_packages
apt_packages_extra:
- ifupdown2
@@ -36,8 +36,8 @@
### mgrote.munin-node
munin_node_plugins:
- name: timesync
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
- name: chrony
src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
@@ -110,6 +110,7 @@
- name: apcupsd_pwr
- name: timesync
- name: http_response
- name: timesync
### mgrote.apt_manage_sources
manage_sources_apt_proxy: ""

View File

@@ -97,8 +97,8 @@
### mgrote.munin-node
munin_node_allowed_cidrs: [0.0.0.0/0] # because the munin server connects from a different subnet
munin_node_plugins:
- name: timesync
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
- name: chrony
src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem

View File

@@ -0,0 +1,168 @@
---
# pbs_*
pbs_datastores:
- name: zfs_backup
path: /backup/pbs_data
gc_schedule: "sat 19:00"
pbs_prune_jobs:
- name: standard
schedule: "sat 18:15"
store: zfs_backup
keep_last: 3
keep_hourly: 24
keep_daily: 7
keep_weekly: 2
pbs_permissions:
- user: user_pve5-test@pbs
datastore: zfs_backup
role: DatastoreBackup
pbs_users:
- name: user_pve5
password: "{{ lookup('keepass', 'pbs_pve_user', 'password') }}"
realm: pbs
- name: user_pve5-test
password: "{{ lookup('keepass', 'pbs_pve_user-test', 'password') }}"
realm: pbs
# rpool is the unencrypted boot medium
# unlock after boot with: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l
## backup
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase backup /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1
# mgrote.zfs_manage_datasets
### mgrote.zfs_extra
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
zfs_datasets: # data pools are not managed here
# rpool - system datasets
- dataset: rpool
state: present
compression: zstd
sync: disabled
xattr: sa
dnodesize: auto
atime: on
snapdir: hidden
reservation: 1G
refreservation: 1G
- dataset: rpool/ROOT
state: present
refreservation: 1G
- dataset: rpool/ROOT/pbs-1
state: present
refreservation: 1G
# backup-pool
- dataset: backup/pbs_data
state: present
quota: 10GB
- dataset: backup/pve5
state: present
canmount: off
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
zfs_extra_arc_max_size: "1073741824" # 1GB in Bytes
zfs_extra_zfs_pools:
- name: "rpool"
systemd_timer_schedule: "*-01,04,07,10-01 23:00" # first day of each quarter
- name: "backup"
systemd_timer_schedule: "*-01,04,07,10-01 23:00"
### mgrote.zfs_sanoid
sanoid_snaps_enable: true
## syncoid
sanoid_syncoid_destination_host: false
sanoid_syncoid_ssh_privkey: "{{ lookup('keepass', 'sanoid_syncoid_private_key', 'notes') }}"
sanoid_syncoid_timer: '*-*-* *:00:00' # every hour
sanoid_syncoid_bwlimit: 30m # 30MB/s
sanoid_syncoid_datasets_sync:
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/pve_backup
source_dataset: hdd_data_raidz/pve_backup
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/videos
source_dataset: hdd_data_raidz/videos
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/music
source_dataset: hdd_data_raidz/music
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/tmp
source_dataset: hdd_data_raidz/tmp
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/archiv
source_dataset: hdd_data_raidz/archiv
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/bilder
source_dataset: hdd_data_raidz/bilder
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/hm
source_dataset: hdd_data_raidz/hm
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/scans
source_dataset: hdd_data_raidz/scans
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/restic
source_dataset: hdd_data_raidz/restic
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/backup
source_dataset: hdd_data_raidz/backup
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/buecher
source_dataset: hdd_data_raidz/buecher
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/programme
source_dataset: hdd_data_raidz/programme
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/vm
source_dataset: hdd_data_raidz/vm
# sanoid
sanoid_datasets:
### rpool
- path: rpool
recursive: 'no'
snapshots: true
template: 'pve3tage'
- path: rpool/ROOT
recursive: 'no'
snapshots: true
template: 'pve3tage'
- path: rpool/ROOT/pbs-1
recursive: 'no'
snapshots: true
template: 'pve3tage'
### backup
- path: backup
recursive: 'no'
snapshots: true
template: '14tage'
- path: backup/pbs_data
recursive: 'no'
snapshots: true
template: '14tage'
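
The datastore, prune-job, user and permission definitions above are applied on the Proxmox Backup Server itself; a quick way to confirm the result on pbs-test is PBS's own CLI (an illustrative check, not part of this commit):

```shell
# run on the PBS host after the playbook has been applied
proxmox-backup-manager datastore list
proxmox-backup-manager user list
proxmox-backup-manager acl list
```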

View File

@@ -10,9 +10,9 @@
schedule: "sat 18:15"
store: zfs_backup
keep_last: 3
keep_hourly: 24
keep_hourly: 12
keep_daily: 7
keep_weekly: 2
keep_weekly: 1
pbs_permissions:
- user: user_pve5@pbs
@@ -21,9 +21,8 @@
pbs_users:
- name: user_pve5
password: hallowelt
password: "{{ lookup('keepass', 'pbs_pve_user', 'password') }}"
realm: pbs
# rpool is the unencrypted boot medium
# unlock after boot with: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l
@@ -55,7 +54,9 @@
- dataset: backup/pbs_data
state: present
quota: 1TB
- dataset: backup/pve5
state: present
canmount: off
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
zfs_extra_arc_max_size: "4294967296" # 4GB in Bytes
zfs_extra_zfs_pools:
@@ -65,6 +66,80 @@
systemd_timer_schedule: "*-01,04,07,10-01 23:00"
### mgrote.zfs_sanoid
sanoid_snaps_enable: true
## syncoid
sanoid_syncoid_destination_host: true
sanoid_syncoid_ssh_privkey: "{{ lookup('keepass', 'sanoid_syncoid_private_key', 'notes') }}"
sanoid_syncoid_timer: '*-*-* *:00:00' # every hour
sanoid_syncoid_bwlimit: 50M # 50 MB/s
sanoid_syncoid_datasets_sync:
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/pve_backup
source_dataset: hdd_data_raidz/pve_backup
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/videos
source_dataset: hdd_data_raidz/videos
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/music
source_dataset: hdd_data_raidz/music
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/tmp
source_dataset: hdd_data_raidz/tmp
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/archiv
source_dataset: hdd_data_raidz/archiv
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/bilder
source_dataset: hdd_data_raidz/bilder
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/hm
source_dataset: hdd_data_raidz/hm
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/scans
source_dataset: hdd_data_raidz/scans
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/restic
source_dataset: hdd_data_raidz/restic
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/backup
source_dataset: hdd_data_raidz/backup
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/buecher
source_dataset: hdd_data_raidz/buecher
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/programme
source_dataset: hdd_data_raidz/programme
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/vm
source_dataset: hdd_data_raidz/vm
# sanoid
sanoid_datasets:
### rpool
- path: rpool
@@ -80,11 +155,7 @@
snapshots: true
template: 'pve3tage'
### backup
- path: backup
recursive: 'no'
snapshots: true
template: '14tage'
- path: backup/pbs_data
recursive: 'no'
snapshots: true
template: '14tage'
template: '3tage'

View File

@@ -221,3 +221,12 @@
mp_nr: 16
mp_path_host: /hdd_data_raidz/vm
mp_path_guest: /shares_vm
# mgrote.pbs_pve_integration
pve_pbs_datastore:
- name: pbs
server: 192.168.2.18
datastore: zfs_backup
username: user_pve5-test@pbs
password: "{{ lookup('keepass', 'pbs_pve_user-test', 'password') }}"
fingerprint: "7F:AC:54:75:1C:33:55:84:1E:1E:3A:15:5A:5E:AF:79:33:C9:D4:E1:C0:A0:1C:0D:9E:6A:EA:82:F9:27:57:79"

View File

@@ -90,6 +90,10 @@
systemd_timer_schedule: "*-01,04,07,10-01 23:00"
### mgrote.zfs_sanoid
sanoid_snaps_enable: true
## enable sending snaps
sanoid_syncoid_source_host: true
sanoid_syncoid_ssh_pubkey: "{{ lookup('keepass', 'sanoid_syncoid_public_key', 'notes') }}"
sanoid_datasets:
### hdd_data_raidz
- path: 'hdd_data_raidz/videos'
@@ -228,3 +232,12 @@
mp_nr: 16
mp_path_host: /hdd_data_raidz/vm
mp_path_guest: /shares_vm
# mgrote.pbs_pve_integration
pve_pbs_datastore:
- name: pbs
server: 192.168.2.239
datastore: zfs_backup
username: user_pve5@pbs
password: "{{ lookup('keepass', 'pbs_pve_user', 'password') }}"
fingerprint: "7F:AC:54:75:1C:33:55:84:1E:1E:3A:15:5A:5E:AF:79:33:C9:D4:E1:C0:A0:1C:0D:9E:6A:EA:82:F9:27:57:79"

View File

@@ -27,7 +27,7 @@ all:
pbs:
hosts:
pbs.grote.lan:
#pbs-test.grote.lan:
pbs-test.grote.lan:
physical:
hosts:
pve5.grote.lan:
@@ -50,4 +50,4 @@ all:
hosts:
vm-test-2204.grote.lan:
pve5-test.grote.lan:
#pbs-test.grote.lan:
pbs-test.grote.lan:

Binary file not shown.

View File

@@ -1,4 +1,4 @@
---
- hosts: all,!pbs
- hosts: all
roles:
- { role: mgrote.systemd-timesyncd, tags: "ntp"}
- { role: mgrote.ntp_chrony_client, tags: "ntp"}

View File

@@ -4,4 +4,5 @@
systemd:
name: chrony
enabled: yes
masked: false
state: restarted

View File

@@ -1,12 +1,17 @@
---
- name: mask systemd-timesyncd service
become: yes
systemd:
- name: Check systemd-timesyncd status
become: true
shell: systemctl is-active --quiet systemd-timesyncd && echo "Active" || echo "Inactive"
register: timesyncd_status
changed_when: false
- name: Disable systemd-timesyncd
become: true
ansible.builtin.systemd:
name: systemd-timesyncd
state: stopped
masked: yes
when:
- not "'proxmox' in group_names"
state: stopped
when: timesyncd_status.stdout == "Active"
- name: gather package facts
become: yes

View File

@@ -1,9 +1,11 @@
---
# datastores to create
pbs_datastores:
- name: zfs_backup
path: /backup/pbs_data
gc_schedule: "sat 19:00"
# prune jobs to create
pbs_prune_jobs:
- name: standard
schedule: "sat 18:15"
@@ -13,6 +15,8 @@ pbs_prune_jobs:
keep_daily: 7
keep_weekly: 2
# user permissions per datastore
# for user creation see mgrote.pbs_users
pbs_permissions:
- user: user_pve5@pbs
datastore: zfs_backup

View File

@@ -1,4 +1,5 @@
---
# add this datastore to pve
pve_pbs_datastore:
- name: pbs
server: 192.168.2.239

View File

@@ -1,4 +1,5 @@
---
# pbs users to create
pbs_users:
- name: user_pve5
password: hallowelt

View File

@@ -1,10 +1,85 @@
## mgrote.zfs_sanoid
### Description
Installs and configures sanoid.
Installs and configures ``sanoid`` + ``syncoid``.
There are three functions:
1. Create and prune snapshots
2. Send snapshots
3. Receive snapshots
### Tested on
- [x] ProxMox 7*
- ProxMox 7.*
- Ubuntu 20.04
### Variables + defaults
- see [defaults](./defaults/main.yml)
### Example playbook
```yaml
---
- hosts: host1,host2
roles:
- { role: mgrote.zfs_sanoid, tags: "sanoid" }
```
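
A hypothetical invocation of such a playbook; the inventory path and playbook name are placeholders, only the `sanoid` tag comes from the example above:

```shell
ansible-playbook -i inventory.yml site.yml --tags sanoid --limit host1,host2
```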
### Example - creating snapshots
#### Variables
```yaml
---
sanoid_snaps_enable: true
sanoid_datasets:
- path: 'hdd_data_raidz/videos'
template: '31tage'
recursive: 'yes'
snapshots: true
sanoid_templates:
- name: '31tage'
keep_hourly: '24' # keep (hourly)
keep_daily: '31' # keep (daily)
keep_monthly: '3' # keep (monthly)
keep_yearly: '0' # keep (yearly)
frequently: '16' # keep (frequent snapshots)
frequent_period: '15' # interval (every 15 minutes)
autosnap: 'yes' # create snapshots automatically
autoprune: 'yes'
```
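
From these variables the role renders /etc/sanoid/sanoid.conf and the sanoid timer; once sanoid has run, the resulting snapshots can be inspected with plain ZFS tooling (a minimal check using the dataset from the example):

```shell
zfs list -t snapshot -o name,creation -r hdd_data_raidz/videos | tail -n 5
```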
### Example - sending and receiving snapshots
- Host 1 = Source
- Host 2 = Destination
#### Variables - Host 1
```yaml
sanoid_syncoid_source_host: true
sanoid_syncoid_ssh_pubkey: |
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC3U37DGPRPDLlgxZcM0Zj/x6RVZxs7hcWBYfPywujH4+mjbpzJckr2tx3QLfxsCCjQVb4LNSEB0xsOvzDjfDsaPuG4wzqFVyZOtjI4iWg/it4ARndun33r+xSlWc5JKHH9GRK8SBOd4lXv5ylENdhWQ7z5ZF/FtCysb1JHTTYlobgXfTZ4NswJj6BBk669l13uL6zSXq6x6vm1GWiFIcIYqwM5WGSGHFoD2RNn0TJKI9A3AULPloMzWeHG3fJhoVfNY6ZB0kqpTHGoAmJUURkBFki1cJkzx3tyto4VpTzZmUyYg+qqIWbv7Me3YVJCln8JYD10uDb2oPRx6G3C9DlnzRmAVVbqCHzwvOY0H5TLTW7AXCHHgSdHaRym4oTUY9dDS/XFU3rHgexerBbi3sy1Tm0/dEU3cZFm4YOJXY/l4TeTRlhg2VbctsWE1BN1CZcoJRR+qNdJzM7Vl70Y6RGU92Y1rzSpooYVuyCFDrEIp0hAHidb5rs4paCvoxtVqak+LK8dcq0IbWxcxomEimeRG4+Opd3vo+U6subp5jqkOY0uYkFVJXaMHkP5ZIxlCFgif2A3YAPhz9IczRJaaNY3pbVgU7ybOBp+S8KRK8Ysk6OP5ApOTQVTlRhYeNqo7mpuW6139VRY5luekSCy3ehHCI9/MObhu2juF1Nz0HMeMQ== mg@irantu
```
#### Variables - Host 2
```yaml
sanoid_syncoid_timer: '*:*'
sanoid_syncoid_bwlimit: 30m
sanoid_syncoid_datasets_sync:
- source_host: host1.lan
source_dataset: hdd_data_mirror
destination_mount_check: hdd_data_raidz/encrypted # if this dataset is not mounted (e.g. because it is encrypted), syncoid aborts
destination_dataset: hdd_data_raidz/encrypted/syncoid/zfs1
skip_parent: false
sanoid_syncoid_ssh_privkey: "{{ lookup('keepass', 'sanoid_syncoid_private_key', 'notes') }}"
sanoid_syncoid_destination_host: true
```
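
Per synced dataset, the syncoid.service generated on the destination host boils down to a mount check followed by a pull-style syncoid run. A sketch based on templates/syncoid.service.j2 from this commit and the example values above (`sanoid` is the role's default user):

```shell
# abort if the target dataset is not mounted (e.g. still encrypted)
zfs get -H -o value mounted hdd_data_raidz/encrypted | grep -qx yes
# pull the snapshots from host1
syncoid --sshoption=StrictHostKeyChecking=no --delete-target-snapshots --use-hold \
  --preserve-recordsize --sshkey /etc/sanoid/.ssh/id_sanoid --source-bwlimit 30m \
  sanoid@host1.lan:hdd_data_mirror hdd_data_raidz/encrypted/syncoid/zfs1
```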

View File

@@ -1,6 +1,10 @@
---
### when should the script be run
### when should sanoid be run (every 5 minutes)
sanoid_timer: '*-*-* *:00/5'
### when should syncoid be run
sanoid_syncoid_timer: '*-*-* *:00:00'
### where to download the package
sanoid_deb_url: http://docker10.grote.lan:3344/sanoid_2.1.0.deb
# ### "Default" Datasets
# sanoid_datasets: # dictionary
@@ -8,10 +12,6 @@
# template: 'fiveminutes' # name
# recursive: 'no' # recursive snapshotting
# snapshots: true # (de)activate; can be used to disable snapshotting of subdatasets if recursive is set
# - path: 'hdd_data_raidz/videos'
# template: 'hourly'
# recursive: 'yes'
# snapshots: true
# - path: 'hdd_data_raidz/test'
# snapshots: false # disables sanoid for this dataset
#
@@ -26,22 +26,27 @@
# frequent_period: '5' # interval (every 5 minutes)
# autosnap: 'yes' # create snapshots automatically
# autoprune: 'yes'
# - name: 'hourly'
# keep_hourly: '24'
# keep_daily: '31'
# keep_monthly: '6'
# keep_yearly: '1'
# frequently: '0'
# frequent_period: '0'
# autosnap: 'yes'
# autoprune: 'yes'
# - name: 'daily'
# keep_hourly: '0'
# keep_daily: '31'
# keep_monthly: '6'
# keep_yearly: '1'
# frequently: '0'
# frequent_period: '0'
# autosnap: 'yes'
# autoprune: 'yes'
#
### user and group for sanoid
sanoid_user: sanoid
sanoid_user_group: sanoid
### enable/disable features
## enable snapshotting
# sanoid_snaps_enable: true
## enable sending snaps
# sanoid_syncoid_source_host: true
## enable receiving snaps
# sanoid_syncoid_destination_host: true
# syncoid
#sanoid_syncoid_ssh_privkey: "{{ lookup('keepass', 'sanoid_syncoid_private_key', 'notes') }}"
#sanoid_syncoid_ssh_pubkey: "{{ lookup('keepass', 'sanoid_syncoid_public_key', 'notes') }}"
### mgrote.sanoid
#sanoid_syncoid_datasets_sync:
# - source_host: pve5.grote.lan
# source_dataset: hdd_data_raidz/tmp
# destination_mount_check: hdd_data_raidz/tmp # target pool
# destination_dataset: backup/pve5/tmp

View File

@@ -0,0 +1,77 @@
---
- name: template ssh private key
become: true
ansible.builtin.copy:
content: "{{ sanoid_syncoid_ssh_privkey }}"
dest: "/etc/sanoid/.ssh/id_sanoid"
owner: "{{ sanoid_user }}"
group: "{{ sanoid_user_group }}"
mode: 0400
no_log: true
when:
- sanoid_syncoid_destination_host
- name: add user to sudoers
become: true
ansible.builtin.blockinfile:
path: /etc/sudoers
state: present
block: |
{{ sanoid_user }} ALL=(ALL) NOPASSWD:ALL
validate: '/usr/sbin/visudo -cf %s'
backup: yes
marker_begin: sanoid-sudoers BEGIN
marker_end: sanoid-sudoers END
when:
- sanoid_syncoid_destination_host
- name: template syncoid.service
become: yes
ansible.builtin.template:
src: "syncoid.service.j2"
dest: /etc/systemd/system/syncoid.service
owner: root
group: root
mode: 0644
notify:
- systemctl daemon-reload
when:
- sanoid_syncoid_destination_host
- name: template syncoid_mail.service
become: yes
ansible.builtin.template:
src: "syncoid_mail.service.j2"
dest: /etc/systemd/system/syncoid_mail.service
owner: root
group: root
mode: 0644
notify:
- systemctl daemon-reload
when:
- sanoid_syncoid_destination_host
- name: template syncoid.timer
become: yes
ansible.builtin.template:
src: "syncoid.timer.j2"
dest: "/etc/systemd/system/syncoid.timer"
owner: root
group: root
mode: 0644
notify:
- systemctl daemon-reload
when:
- sanoid_syncoid_destination_host
- name: enable syncoid.timer
become: yes
ansible.builtin.systemd:
name: "syncoid.timer"
enabled: yes
masked: no
state: started
notify:
- systemctl daemon-reload
when:
- sanoid_syncoid_destination_host
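
After these tasks have run on a destination host, the timer and the last transfer can be checked with systemd (a hypothetical smoke test, not part of the role):

```shell
systemctl list-timers syncoid.timer   # next/last activation
systemctl start syncoid.service       # trigger a run manually
journalctl -u syncoid.service -n 50   # inspect the syncoid output
```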

View File

@@ -1,66 +1,49 @@
---
- name: install packages
- name: include user tasks
include_tasks: user.yml
- name: install packages from repo
become: true
ansible.builtin.apt:
name:
- sanoid
- mbuffer
- lzop
state: present
- name: Create Sanoid Directory
- name: install packages from self-build
become: true
ansible.builtin.apt:
deb: "{{ sanoid_deb_url }}"
state: present
- name: create sanoid directories
become: true
ansible.builtin.file:
path: "/etc/sanoid"
path: "{{ item }}"
state: directory
owner: root
group: root
recurse: true
owner: "{{ sanoid_user }}"
group: "{{ sanoid_user_group }}"
mode: 0700
with_items:
- "/etc/sanoid"
- "/etc/sanoid/.ssh"
- name: Generate Sanoid Configuration
become: true
ansible.builtin.template:
src: sanoid.conf.j2
dest: "/etc/sanoid/sanoid.conf"
owner: root
group: root
mode: 0644
when: sanoid_datasets is defined and sanoid_templates is defined
- name: include snaps tasks
include_tasks: snaps.yml
when:
- sanoid_datasets is defined
- sanoid_templates is defined
- sanoid_snaps_enable is defined
- sanoid_snaps_enable
- name: template sanoid_mail.service
become: yes
ansible.builtin.template:
src: "sanoid_mail.service.j2"
dest: /etc/systemd/system/sanoid_mail.service
owner: root
group: root
mode: 0644
notify:
- systemctl daemon-reload
- name: include source-host tasks
include_tasks: source.yml
when:
- sanoid_syncoid_source_host is defined and sanoid_syncoid_source_host is true
- sanoid_syncoid_ssh_pubkey is defined
- name: add sanoid_mail.service to sanoid.service
become: true
ansible.builtin.blockinfile:
create: yes
mode: 0644
owner: root
group: root
path: /lib/systemd/system/sanoid.service.d/override.conf
block: |
[Unit]
OnFailure = sanoid_mail.service
notify:
- systemctl daemon-reload
- name: set timer
become: true
ansible.builtin.blockinfile:
create: yes
mode: 0644
owner: root
group: root
path: /lib/systemd/system/sanoid.timer.d/override.conf
block: |
[Timer]
OnCalendar = {{ sanoid_timer }}
when: sanoid_timer is defined
notify:
- systemctl daemon-reload
- name: include destination-host tasks
include_tasks: destination.yml
when:
- sanoid_syncoid_destination_host is defined and sanoid_syncoid_destination_host is true
- sanoid_syncoid_ssh_privkey is defined
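
Because the role installs both the distro package and the self-built 2.1.0 .deb from sanoid_deb_url, it can be worth confirming which version actually ended up on the host (illustrative check):

```shell
dpkg-query -W -f='${Package} ${Version}\n' sanoid
apt-cache policy sanoid
```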

View File

@@ -0,0 +1,49 @@
---
- name: Generate Sanoid Configuration
become: true
ansible.builtin.template:
src: sanoid.conf.j2
dest: "/etc/sanoid/sanoid.conf"
owner: "{{ sanoid_user }}"
group: "{{ sanoid_user_group }}"
mode: 0400
- name: template sanoid_mail.service
become: yes
ansible.builtin.template:
src: "sanoid_mail.service.j2"
dest: /etc/systemd/system/sanoid_mail.service
owner: root
group: root
mode: 0644
notify:
- systemctl daemon-reload
- name: add sanoid_mail.service to sanoid.service
become: true
ansible.builtin.blockinfile:
create: yes
mode: 0644
owner: root
group: root
path: /lib/systemd/system/sanoid.service.d/override.conf
block: |
[Unit]
OnFailure = sanoid_mail.service
notify:
- systemctl daemon-reload
- name: set timer
become: true
ansible.builtin.blockinfile:
create: yes
mode: 0644
owner: root
group: root
path: /lib/systemd/system/sanoid.timer.d/override.conf
block: |
[Timer]
OnCalendar = {{ sanoid_timer }}
when: sanoid_timer is defined
notify:
- systemctl daemon-reload

View File

@@ -0,0 +1,23 @@
---
- name: template ssh public key
become: true
ansible.posix.authorized_key:
user: "{{ sanoid_user }}"
key: "{{ sanoid_syncoid_ssh_pubkey }}"
state: present
when:
- sanoid_syncoid_source_host
- name: add user to sudoers
become: true
ansible.builtin.blockinfile:
path: /etc/sudoers
state: present
block: |
{{ sanoid_user }} ALL=(ALL) NOPASSWD:ALL
validate: '/usr/sbin/visudo -cf %s'
backup: yes
marker_begin: sanoid-sudoers BEGIN
marker_end: sanoid-sudoers END
when:
- sanoid_syncoid_source_host
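
On the source host this only installs the public key and the sudo rule; connectivity can then be verified manually from the destination host (a hypothetical test, host name taken from the group_vars in this commit):

```shell
ssh -i /etc/sanoid/.ssh/id_sanoid sanoid@pve5.grote.lan 'zfs list -o name | head'
```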

View File

@@ -0,0 +1,19 @@
---
- name: ensure group exists
become: true
ansible.builtin.group:
name: "{{ sanoid_user_group }}"
state: present
when:
- sanoid_user_group is defined
- sanoid_user is defined
- name: ensure user exists
become: true
ansible.builtin.user:
name: "{{ sanoid_user }}"
group: "{{ sanoid_user_group }}"
create_home: yes
when:
- sanoid_user_group is defined
- sanoid_user is defined

View File

@@ -0,0 +1,15 @@
{{ file_header | default () }}
[Unit]
Description=Send zfs snapshots with sanoid/syncoid.
OnFailure=syncoid_mail.service
[Service]
Type=oneshot
# check that the destination dataset is mounted (e.g. not still locked/encrypted); a non-zero exit code aborts the unit
{% for item in sanoid_syncoid_datasets_sync %}
ExecStart=/bin/sh -c '/usr/sbin/zfs get -H -o value mounted {{ item.destination_mount_check }} | grep -qx yes'
# syncoid
ExecStart=/usr/bin/syncoid --sshoption=StrictHostKeyChecking=no --delete-target-snapshots --use-hold --preserve-recordsize --sshkey "/etc/sanoid/.ssh/id_sanoid" --source-bwlimit {{ sanoid_syncoid_bwlimit }} {{ sanoid_user }}@{{ item.source_host }}:{{ item.source_dataset }} {{ item.destination_dataset }}
{% endfor %}

View File

@@ -0,0 +1,9 @@
{{ file_header | default () }}
[Unit]
Description=Timer for syncoid.
[Timer]
OnCalendar={{ sanoid_syncoid_timer }}
[Install]
WantedBy=timers.target multi-user.target zfs.target
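
The OnCalendar value comes from sanoid_syncoid_timer ('*-*-* *:00:00' by default, i.e. hourly); systemd can validate such expressions before the unit is rolled out:

```shell
systemd-analyze calendar '*-*-* *:00:00'
systemd-analyze calendar '*-*-* *:00/5'   # the sanoid_timer default, every 5 minutes
```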

View File

@@ -0,0 +1,8 @@
{{ file_header | default () }}
[Unit]
Description=Send a mail in case of an error in syncoid.service.
[Service]
Type=oneshot
ExecStart=/bin/bash -c '/bin/systemctl status syncoid.service | mail -s "[ERROR] syncoid - %H" {{ empfaenger_mail }}'