neuer Backup-Server (syncoid) (#543)
Co-authored-by: Michael Grote <michael.grote@posteo.de>
Reviewed-on: #543
parent ce813a881b
commit 0bb877a9a9
32 changed files with 663 additions and 121 deletions
@@ -128,3 +128,7 @@ services:
  logo: "assets/icons/pve.png"
  url: "https://192.168.2.239:8007"
  target: "_blank"
+ - name: "Proxmox Backup Server - Test"
+ logo: "assets/icons/pve.png"
+ url: "https://192.168.2.18:8007"
+ target: "_blank"
@@ -23,7 +23,7 @@ services:
  environment:
  # FLASK_DEBUG: 1 # for debugging
  # FLASK_APP: app # for debugging
- MAX_CONTENT_LENGTH: 50
+ MAX_CONTENT_LENGTH: 500
  UPLOAD_DIRECTORY: /uploads
  AUTH_TOKEN: {{ lookup('keepass', 'httpd-api-server-token', 'password') }}
  ENABLE_WEBSERVER: false
@@ -62,6 +62,7 @@
  - name: squid_traffic # proxmox
  - name: solarman_api_inverter
  - name: deye
+ - name: timesync
  munin_node_plugins:
  - name: timesync
  src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
@@ -84,6 +85,9 @@
  env.client /usr/bin/fail2ban-client
  env.config_dir /etc/fail2ban
  user root
+ - name: chrony
+ src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
+
  ### mgrote.dotfiles
  dotfiles_repo_url: https://git.mgrote.net/mg/dotfiles
  dotfiles_repo_path: /home/mg/dotfiles
@@ -233,6 +237,8 @@
  autosnap: 'yes'
  autoprune: 'yes'

+ ### mgrote.zfs_sanoid
+ sanoid_deb_url: http://docker10.grote.lan:3344/sanoid_2.1.0.deb

  # Ansible Variablen
  ### User
@@ -57,8 +57,6 @@
  munin_node_bind_port: "4949"
  munin_node_allowed_cidrs: [192.168.2.0/24]
  munin_node_plugins:
- - name: timesync
- src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
  - name: systemd_status
  src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
  - name: systemd_mem
@@ -94,6 +92,8 @@
  src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/docker/docker_
  - name: docker_volumesize
  src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/docker/docker_volumesize
+ - name: chrony
+ src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
  ### mgrote.apt_manage_packages
  apt_packages_extra:
  - bc # für munin plugins
@@ -24,8 +24,8 @@
  from_ip: 192.168.2.144/24
  ### mgrote.munin-node
  munin_node_plugins:
- - name: timesync
- src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
+ - name: chrony
+ src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
  - name: systemd_status
  src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
  - name: systemd_mem
@@ -24,8 +24,8 @@
  from_ip: 192.168.2.144/24
  ### mgrote.munin-node
  munin_node_plugins:
- - name: timesync
- src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
+ - name: chrony
+ src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
  - name: systemd_status
  src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
  - name: systemd_mem
@@ -104,8 +104,8 @@

  ### mgrote.munin-node
  munin_node_plugins:
- - name: timesync
- src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
+ - name: chrony
+ src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
  - name: systemd_status
  src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
  - name: systemd_mem
@@ -30,8 +30,8 @@

  ### mgrote.munin-node
  munin_node_plugins:
- - name: timesync
- src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
+ - name: chrony
+ src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
  - name: systemd_status
  src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
  - name: systemd_mem
@@ -39,11 +39,6 @@
  config: |
  [systemd_mem]
  env.all_services true
- - name: lvm_
- src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/disk/lvm_
- config: |
- [lvm_*]
- user root
  - name: fail2ban
  src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
  config: |
@@ -26,7 +26,7 @@
  public_ssh_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJcBwOjanQV6sFWaTetqpl20SVe3aRzGjKbsp7hKkDCE mg@irantu
  allow_sudo: true
  allow_passwordless_sudo: true

  ### mgrote.apt_manage_packages
  apt_packages_extra:
  - ifupdown2
@@ -36,8 +36,8 @@

  ### mgrote.munin-node
  munin_node_plugins:
- - name: timesync
- src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
+ - name: chrony
+ src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
  - name: systemd_status
  src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
  - name: systemd_mem
@@ -110,6 +110,7 @@
  - name: apcupsd_pwr
  - name: timesync
  - name: http_response
+ - name: timesync

  ### mgrote.apt_manage_sources
  manage_sources_apt_proxy: ""
@@ -97,8 +97,8 @@
  ### mgrote.munin-node
  munin_node_allowed_cidrs: [0.0.0.0/0] # weil der munin-server aus einem anderen subnet zugreift
  munin_node_plugins:
- - name: timesync
- src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/timesync_status
+ - name: chrony
+ src: https://git.mgrote.net/Mirror/munin-contrib/raw/branch/master/plugins/chrony/chrony
  - name: systemd_status
  src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/systemd/systemd_status
  - name: systemd_mem
host_vars/pbs-test.grote.lan.yml (new file, 168 lines)
@@ -0,0 +1,168 @@
---
# pbs_*
pbs_datastores:
  - name: zfs_backup
    path: /backup/pbs_data
    gc_schedule: "sat 19:00"

pbs_prune_jobs:
  - name: standard
    schedule: "sat 18:15"
    store: zfs_backup
    keep_last: 3
    keep_hourly: 24
    keep_daily: 7
    keep_weekly: 2

pbs_permissions:
  - user: user_pve5-test@pbs
    datastore: zfs_backup
    role: DatastoreBackup

pbs_users:
  - name: user_pve5
    password: "{{ lookup('keepass', 'pbs_pve_user', 'password') }}"
    realm: pbs
  - name: user_pve5-test
    password: "{{ lookup('keepass', 'pbs_pve_user-test', 'password') }}"
    realm: pbs

# rpool ist unverschlüsselt als Boot-Medium
# entschlüsseln nach Boot mit: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l

## backup
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase backup /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1

# mgrote.zfs_manage_datasets
### mgrote.zfs_extra
# Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
zfs_datasets: # DatenPools werden hier nicht verwaltet
  # rpool - System-Datasets
  - dataset: rpool
    state: present
    compression: zstd
    sync: disabled
    xattr: sa
    dnodesize: auto
    atime: on
    snapdir: hidden
    reservation: 1G
    refreservation: 1G
  - dataset: rpool/ROOT
    state: present
    refreservation: 1G
  - dataset: rpool/ROOT/pbs-1
    state: present
    refreservation: 1G
  # backup-pool
  - dataset: backup/pbs_data
    state: present
    quota: 10GB
  - dataset: backup/pve5
    state: present
    canmount: off

# Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
zfs_extra_arc_max_size: "1073741824" # 1GB in Bytes
zfs_extra_zfs_pools:
  - name: "rpool"
    systemd_timer_schedule: "*-01,04,07,10-01 23:00" # jeden ersten eines jeden Quartals
  - name: "backup"
    systemd_timer_schedule: "*-01,04,07,10-01 23:00"

### mgrote.zfs_sanoid
sanoid_snaps_enable: true
## syncoid
sanoid_syncoid_destination_host: false
sanoid_syncoid_ssh_privkey: "{{ lookup('keepass', 'sanoid_syncoid_private_key', 'notes') }}"
sanoid_syncoid_timer: '*-*-* *:00:00' # jede Stunde
sanoid_syncoid_bwlimit: 30m # 30MB/s
sanoid_syncoid_datasets_sync:
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/pve_backup
    source_dataset: hdd_data_raidz/pve_backup
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/videos
    source_dataset: hdd_data_raidz/videos
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/music
    source_dataset: hdd_data_raidz/music
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/tmp
    source_dataset: hdd_data_raidz/tmp
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/archiv
    source_dataset: hdd_data_raidz/archiv
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/bilder
    source_dataset: hdd_data_raidz/bilder
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/hm
    source_dataset: hdd_data_raidz/hm
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/scans
    source_dataset: hdd_data_raidz/scans
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/restic
    source_dataset: hdd_data_raidz/restic
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/backup
    source_dataset: hdd_data_raidz/backup
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/buecher
    source_dataset: hdd_data_raidz/buecher
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/programme
    source_dataset: hdd_data_raidz/programme
  - source_host: pve5.grote.lan
    destination_mount_check: backup
    destination_dataset: backup/pve5/vm
    source_dataset: hdd_data_raidz/vm

# sanoid
sanoid_datasets:
  ### rpool
  - path: rpool
    recursive: 'no'
    snapshots: true
    template: 'pve3tage'
  - path: rpool/ROOT
    recursive: 'no'
    snapshots: true
    template: 'pve3tage'
  - path: rpool/ROOT/pbs-1
    recursive: 'no'
    snapshots: true
    template: 'pve3tage'
  ### backup
  - path: backup
    recursive: 'no'
    snapshots: true
    template: '14tage'
  - path: backup/pbs_data
    recursive: 'no'
    snapshots: true
    template: '14tage'
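For orientation, the pbs_* variables in this new host_vars file correspond roughly to the following Proxmox Backup Server CLI calls. This is a hedged sketch of the manual equivalent; the mgrote.pbs_* roles may use the API or other flags, and these are not the commands the roles actually run:

```shell
# Hypothetical manual equivalent of pbs_datastores / pbs_users / pbs_permissions on pbs-test
proxmox-backup-manager datastore create zfs_backup /backup/pbs_data --gc-schedule "sat 19:00"
proxmox-backup-manager user create user_pve5-test@pbs
proxmox-backup-manager acl update /datastore/zfs_backup DatastoreBackup --auth-id user_pve5-test@pbs
```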
@@ -10,9 +10,9 @@
  schedule: "sat 18:15"
  store: zfs_backup
  keep_last: 3
- keep_hourly: 24
+ keep_hourly: 12
  keep_daily: 7
- keep_weekly: 2
+ keep_weekly: 1

  pbs_permissions:
  - user: user_pve5@pbs
@@ -21,9 +21,8 @@

  pbs_users:
  - name: user_pve5
- password: hallowelt
+ password: "{{ lookup('keepass', 'pbs_pve_user', 'password') }}"
  realm: pbs

  # rpool ist unverschlüsselt als Boot-Medium
  # entschlüsseln nach Boot mit: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l
@@ -55,7 +54,9 @@
  - dataset: backup/pbs_data
  state: present
  quota: 1TB
+ - dataset: backup/pve5
+ state: present
+ canmount: off
  # Variablen für mgrote.zfs_health/trim/scrub/zed/arc_mem/ sind zusammengefasst unter zfs_extra_*
  zfs_extra_arc_max_size: "4294967296" # 4GB in Bytes
  zfs_extra_zfs_pools:
@@ -65,6 +66,80 @@
  systemd_timer_schedule: "*-01,04,07,10-01 23:00"

  ### mgrote.zfs_sanoid
+ sanoid_snaps_enable: true
+ ## syncoid
+ sanoid_syncoid_destination_host: true
+ sanoid_syncoid_ssh_privkey: "{{ lookup('keepass', 'sanoid_syncoid_private_key', 'notes') }}"
+ sanoid_syncoid_timer: '*-*-* *:00:00' # jede Stunde
+ sanoid_syncoid_bwlimit: 50M # 30MB/s
+ sanoid_syncoid_datasets_sync:
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/pve_backup
+   source_dataset: hdd_data_raidz/pve_backup
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/videos
+   source_dataset: hdd_data_raidz/videos
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/music
+   source_dataset: hdd_data_raidz/music
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/tmp
+   source_dataset: hdd_data_raidz/tmp
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/archiv
+   source_dataset: hdd_data_raidz/archiv
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/bilder
+   source_dataset: hdd_data_raidz/bilder
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/hm
+   source_dataset: hdd_data_raidz/hm
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/scans
+   source_dataset: hdd_data_raidz/scans
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/restic
+   source_dataset: hdd_data_raidz/restic
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/backup
+   source_dataset: hdd_data_raidz/backup
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/buecher
+   source_dataset: hdd_data_raidz/buecher
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/programme
+   source_dataset: hdd_data_raidz/programme
+ - source_host: pve5.grote.lan
+   destination_mount_check: backup
+   destination_dataset: backup/pve5/vm
+   source_dataset: hdd_data_raidz/vm
+
+ # sanoid
  sanoid_datasets:
  ### rpool
  - path: rpool
@@ -80,11 +155,7 @@
  snapshots: true
  template: 'pve3tage'
  ### backup
- - path: backup
- recursive: 'no'
- snapshots: true
- template: '14tage'
  - path: backup/pbs_data
  recursive: 'no'
  snapshots: true
- template: '14tage'
+ template: '3tage'
@@ -221,3 +221,12 @@
  mp_nr: 16
  mp_path_host: /hdd_data_raidz/vm
  mp_path_guest: /shares_vm
+
+ # mgrote.pbs_pve_integration
+ pve_pbs_datastore:
+ - name: pbs
+ server: 192.168.2.18
+ datastore: zfs_backup
+ username: user_pve5-test@pbs
+ password: "{{ lookup('keepass', 'pbs_pve_user-test', 'password') }}"
+ fingerprint: "7F:AC:54:75:1C:33:55:84:1E:1E:3A:15:5A:5E:AF:79:33:C9:D4:E1:C0:A0:1C:0D:9E:6A:EA:82:F9:27:57:79"
@@ -90,6 +90,10 @@
  systemd_timer_schedule: "*-01,04,07,10-01 23:00"

  ### mgrote.zfs_sanoid
+ sanoid_snaps_enable: true
+ ## enable sending snaps
+ sanoid_syncoid_source_host: true
+ sanoid_syncoid_ssh_pubkey: "{{ lookup('keepass', 'sanoid_syncoid_public_key', 'notes') }}"
  sanoid_datasets:
  ### hdd_data_raidz
  - path: 'hdd_data_raidz/videos'
@@ -228,3 +232,12 @@
  mp_nr: 16
  mp_path_host: /hdd_data_raidz/vm
  mp_path_guest: /shares_vm
+
+ # mgrote.pbs_pve_integration
+ pve_pbs_datastore:
+ - name: pbs
+ server: 192.168.2.239
+ datastore: zfs_backup
+ username: user_pve5@pbs
+ password: "{{ lookup('keepass', 'pbs_pve_user', 'password') }}"
+ fingerprint: "7F:AC:54:75:1C:33:55:84:1E:1E:3A:15:5A:5E:AF:79:33:C9:D4:E1:C0:A0:1C:0D:9E:6A:EA:82:F9:27:57:79"
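Done by hand, attaching this PBS datastore to PVE would look roughly like the following `pvesm` call — a sketch of what mgrote.pbs_pve_integration presumably automates; the role's actual implementation is not part of this diff:

```shell
# Hypothetical manual equivalent of the pve_pbs_datastore entry above (run on pve5)
pvesm add pbs pbs \
  --server 192.168.2.239 \
  --datastore zfs_backup \
  --username user_pve5@pbs \
  --password '<pbs user password>' \
  --fingerprint 7F:AC:54:75:1C:33:55:84:1E:1E:3A:15:5A:5E:AF:79:33:C9:D4:E1:C0:A0:1C:0D:9E:6A:EA:82:F9:27:57:79
```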
@@ -27,7 +27,7 @@ all:
  pbs:
  hosts:
  pbs.grote.lan:
- #pbs-test.grote.lan:
+ pbs-test.grote.lan:
  physical:
  hosts:
  pve5.grote.lan:
@@ -50,4 +50,4 @@ all:
  hosts:
  vm-test-2204.grote.lan:
  pve5-test.grote.lan:
- #pbs-test.grote.lan:
+ pbs-test.grote.lan:
keepass_db.kdbx (binary file not shown)
@@ -1,4 +1,4 @@
  ---
- - hosts: all,!pbs
+ - hosts: all
  roles:
- - { role: mgrote.systemd-timesyncd, tags: "ntp"}
+ - { role: mgrote.ntp_chrony_client, tags: "ntp"}
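After switching hosts from systemd-timesyncd to chrony, time synchronisation can be verified with standard tooling (illustrative commands, not part of the playbook):

```shell
# quick check that chrony has taken over time synchronisation
systemctl is-active chrony
chronyc tracking
timedatectl show --property=NTPSynchronized
```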
@@ -4,4 +4,5 @@
  systemd:
  name: chrony
  enabled: yes
+ masked: false
  state: restarted
@@ -1,12 +1,17 @@
  ---
- - name: mask systemd-timesyncd service
- become: yes
- systemd:
+ - name: Check systemd-timesyncd status
+ become: true
+ shell: systemctl is-active --quiet systemd-timesyncd && echo "Active" || echo "Inactive"
+ register: timesyncd_status
+ changed_when: false
+
+ - name: Disable systemd-timesyncd
+ become: true
+ ansible.builtin.systemd:
  name: systemd-timesyncd
- state: stopped
  masked: yes
- when:
- - not "'proxmox' in group_names"
+ state: stopped
+ when: timesyncd_status.stdout == "Active"

  - name: gather package facts
  become: yes
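The new check task keys off the exit code of `systemctl is-active`: it is 0 only while the unit is active, so the `&&`/`||` chain prints exactly one of the two words that the `when:` condition later compares against (illustrative shell behaviour):

```shell
systemctl is-active --quiet systemd-timesyncd; echo $?   # 0 while active, non-zero otherwise
systemctl is-active --quiet systemd-timesyncd && echo "Active" || echo "Inactive"
```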
@@ -1,9 +1,11 @@
  ---
+ # datatsores to create
  pbs_datastores:
  - name: zfs_backup
  path: /backup/pbs_data
  gc_schedule: "sat 19:00"

+ # prune jobs to create
  pbs_prune_jobs:
  - name: standard
  schedule: "sat 18:15"
@@ -13,6 +15,8 @@ pbs_prune_jobs:
  keep_daily: 7
  keep_weekly: 2

+ # user permissions per datastore
+ # for user creation see mgrote.pbs_users
  pbs_permissions:
  - user: user_pve5@pbs
  datastore: zfs_backup
@@ -1,4 +1,5 @@
  ---
+ # add this datastore to pve
  pve_pbs_datastore:
  - name: pbs
  server: 192.168.2.239
@@ -1,4 +1,5 @@
  ---
+ # pbs users to create
  pbs_users:
  - name: user_pve5
  password: hallowelt
@@ -1,10 +1,85 @@
  ## mgrote.zfs_sanoid

  ### Beschreibung
- Installiert und konfiguriert sanoid.
+ Installiert und konfiguriert ``sanoid`` + ``syncoid``.
+
+ Es gibt 3 Funktionen:
+
+ 1. Snapshots erstellen und entfernen
+ 2. Snapshots senden
+ 3. Snapshots empfangen
+
  ### getestet auf
- - [x] ProxMox 7*
+ - ProxMox 7.*
+ - Ubuntu 20.04
+
  ### Variablen + Defaults
  - see [defaults](./defaults/main.yml)
+
+ ### Beispiel Playbook
+
+ ```yaml
+ ---
+ - hosts: host1,host2
+   roles:
+     - { role: mgrote.zfs_sanoid, tags: "sanoid" }
+ ```
+
+ ### Beispiel - Snapshots erstellen
+
+ #### Variablen
+
+ ```yaml
+ ---
+ sanoid_snaps_enable: true
+ sanoid_datasets:
+   - path: 'hdd_data_raidz/videos'
+     template: '31tage'
+     recursive: 'yes'
+     snapshots: true
+ sanoid_templates:
+   - name: '31tage'
+     keep_hourly: '24' # Aufheben (Stunde)
+     keep_daily: '31' # Aufheben (Tage)
+     keep_monthly: '3' # Aufheben (Monate)
+     keep_yearly: '0' # Aufheben (Jahre)
+     frequently: '16' # Aufheben (Minuten)
+     frequent_period: '15' # Intervall (alle 5 Minuten)
+     autosnap: 'yes' # Automatisches erstellen von Snapshots
+     autoprune: 'yes'
+ ```
+
+ ### Beispiel - Snapshots senden und empfangen
+
+ - Host 1 = Source
+ - Host 2 = Destination
+
+ #### Variablen - Host 1
+
+ ```yaml
+ sanoid_syncoid_source_host: true
+ sanoid_syncoid_ssh_pubkey: |
+   ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC3U37DGPRPDLlgxZcM0Zj/x6RVZxs7hcWBYfPywujH4+mjbpzJckr2tx3QLfxsCCjQVb4LNSEB0xsOvzDjfDsaPuG4wzqFVyZOtjI4iWg/it4ARndun33r+xSlWc5JKHH9GRK8SBOd4lXv5ylENdhWQ7z5ZF/FtCysb1JHTTYlobgXfTZ4NswJj6BBk669l13uL6zSXq6x6vm1GWiFIcIYqwM5WGSGHFoD2RNn0TJKI9A3AULPloMzWeHG3fJhoVfNY6ZB0kqpTHGoAmJUURkBFki1cJkzx3tyto4VpTzZmUyYg+qqIWbv7Me3YVJCln8JYD10uDb2oPRx6G3C9DlnzRmAVVbqCHzwvOY0H5TLTW7AXCHHgSdHaRym4oTUY9dDS/XFU3rHgexerBbi3sy1Tm0/dEU3cZFm4YOJXY/l4TeTRlhg2VbctsWE1BN1CZcoJRR+qNdJzM7Vl70Y6RGU92Y1rzSpooYVuyCFDrEIp0hAHidb5rs4paCvoxtVqak+LK8dcq0IbWxcxomEimeRG4+Opd3vo+U6subp5jqkOY0uYkFVJXaMHkP5ZIxlCFgif2A3YAPhz9IczRJaaNY3pbVgU7ybOBp+S8KRK8Ysk6OP5ApOTQVTlRhYeNqo7mpuW6139VRY5luekSCy3ehHCI9/MObhu2juF1Nz0HMeMQ== mg@irantu
+ ```
+
+ #### Variablen - Host 2
+
+ ```yaml
+ sanoid_syncoid_timer: '*:*'
+ sanoid_syncoid_bwlimit: 30m
+ sanoid_syncoid_datasets_sync:
+   - source_host: host1.lan
+     source_dataset: hdd_data_mirror
+     destination_mount_check: hdd_data_raidz/encrypted # Wenn dieses Dataset nicht gemountet ist(z.B. durch Verschlüsselung, dann bricht syncoid ab)
+     destination_dataset: hdd_data_raidz/encrypted/syncoid/zfs1
+     skip_parent: false
+ sanoid_syncoid_ssh_privkey: "{{ lookup('keepass', 'sanoid_syncoid_private_key', 'notes') }}"
+ sanoid_syncoid_destination_host: true
+ ```
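To sanity-check the README examples above on a configured host, snapshots and timers can be inspected directly; the unit names are assumed from this role's templates and the sanoid package:

```shell
# verify that snapshots are being taken and that the timers are scheduled (names assumed)
zfs list -t snapshot -o name,creation hdd_data_raidz/videos | tail -n 5
systemctl list-timers 'sanoid*' 'syncoid*'
```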
@@ -1,6 +1,10 @@
  ---
- ### when should the script be run
+ ### when should sanoid be run (every 5 minutes)
  sanoid_timer: '*-*-* *:00/5'
+ ### when should syncoid be run
+ sanoid_syncoid_timer: '*-*-* *:00:00'
+ ### where to download the package
+ sanoid_deb_url: http://docker10.grote.lan:3344/sanoid_2.1.0.deb

  # ### "Default" Datasets
  # sanoid_datasets: # dictionary
@@ -8,10 +12,6 @@
  # template: 'fiveminutes' # name
  # recursive: 'no' # recursive snapshotting
  # snapshots: true # (de)activate; can be used to disable snapshotting of subdatasets if recursive is set
- # - path: 'hdd_data_raidz/videos'
- # template: 'hourly'
- # recursive: 'yes'
- # snapshots: true
  # - path: 'hdd_data_raidz/test'
  # snapshots: false # deaktiviert sanoid für das dataset
  #
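The two OnCalendar expressions can be decoded with systemd-analyze, which also prints the next elapse time (purely illustrative; the output depends on the current clock):

```shell
systemd-analyze calendar '*-*-* *:00/5'    # sanoid: every 5 minutes
systemd-analyze calendar '*-*-* *:00:00'   # syncoid: on the hour
```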
@@ -26,22 +26,27 @@
  # frequent_period: '5' # Intervall (alle 5 Minuten)
  # autosnap: 'yes' # Automatisches erstellen von Snapshots
  # autoprune: 'yes'
- # - name: 'hourly'
- # keep_hourly: '24'
- # keep_daily: '31'
- # keep_monthly: '6'
- # keep_yearly: '1'
- # frequently: '0'
- # frequent_period: '0'
- # autosnap: 'yes'
- # autoprune: 'yes'
- # - name: 'daily'
- # keep_hourly: '0'
- # keep_daily: '31'
- # keep_monthly: '6'
- # keep_yearly: '1'
- # frequently: '0'
- # frequent_period: '0'
- # autosnap: 'yes'
- # autoprune: 'yes'
- #
+ ### user and group for sanoid
+ sanoid_user: sanoid
+ sanoid_user_group: sanoid
+
+ ### enable/disable features
+ ## enable snapshotting
+ # sanoid_snaps_enable: true
+ ## enable sending snaps
+ # sanoid_syncoid_source_host: true
+ ## enable receiving snaps
+ # sanoid_syncoid_destination_host: true
+
+ # syncoid
+ #sanoid_syncoid_ssh_privkey: "{{ lookup('keepass', 'sanoid_syncoid_private_key', 'notes') }}"
+ #sanoid_syncoid_ssh_pubkey: "{{ lookup('keepass', 'sanoid_syncoid_public_key', 'notes') }}"
+
+ ### mgrote.sanoid
+ #sanoid_syncoid_datasets_sync:
+ # - source_host: pve5.grote.lan
+ #   source_dataset: hdd_data_raidz/tmp
+ #   destination_mount_check: hdd_data_raidz/tmp # zielpool
+ #   destination_dataset: backup/pve5/tmp
roles/mgrote.zfs_sanoid/tasks/destination.yml (new file, 77 lines)
@@ -0,0 +1,77 @@
---
- name: template ssh private key
  become: true
  ansible.builtin.copy:
    content: "{{ sanoid_syncoid_ssh_privkey }}"
    dest: "/etc/sanoid/.ssh/id_sanoid"
    owner: "{{ sanoid_user }}"
    group: "{{ sanoid_user_group }}"
    mode: 0400
  no_log: true
  when:
    - sanoid_syncoid_destination_host

- name: add user to sudoers
  become: true
  ansible.builtin.blockinfile:
    path: /etc/sudoers
    state: present
    block: |
      {{ sanoid_user }} ALL=(ALL) NOPASSWD:ALL
    validate: '/usr/sbin/visudo -cf %s'
    backup: yes
    marker_begin: sanoid-sudoers BEGIN
    marker_end: sanoid-sudoers END
  when:
    - sanoid_syncoid_destination_host

- name: template syncoid.service
  become: yes
  ansible.builtin.template:
    src: "syncoid.service.j2"
    dest: /etc/systemd/system/syncoid.service
    owner: root
    group: root
    mode: 0644
  notify:
    - systemctl daemon-reload
  when:
    - sanoid_syncoid_destination_host

- name: template syncoid_mail.service
  become: yes
  ansible.builtin.template:
    src: "syncoid_mail.service.j2"
    dest: /etc/systemd/system/syncoid_mail.service
    owner: root
    group: root
    mode: 0644
  notify:
    - systemctl daemon-reload
  when:
    - sanoid_syncoid_destination_host

- name: template syncoid.timer
  become: yes
  ansible.builtin.template:
    src: "syncoid.timer.j2"
    dest: "/etc/systemd/system/syncoid.timer"
    owner: root
    group: root
    mode: 0644
  notify:
    - systemctl daemon-reload
  when:
    - sanoid_syncoid_destination_host

- name: enable syncoid.timer
  become: yes
  ansible.builtin.systemd:
    name: "syncoid.timer"
    enabled: yes
    masked: no
    state: started
  notify:
    - systemctl daemon-reload
  when:
    - sanoid_syncoid_destination_host
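Once destination.yml has placed the key and the sudoers entry, a single transfer can be tested by hand before relying on the timer. The command below loosely mirrors the syncoid invocation templated further down, with the user name and bandwidth limit taken from the defaults and host_vars in this commit (a sketch, not a command the role runs):

```shell
# manual one-off test of the destination setup (run as root on the backup host)
/usr/bin/syncoid \
  --sshkey /etc/sanoid/.ssh/id_sanoid \
  --sshoption=StrictHostKeyChecking=no \
  --source-bwlimit 30m \
  sanoid@pve5.grote.lan:hdd_data_raidz/tmp backup/pve5/tmp
```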
@@ -1,66 +1,49 @@
  ---
- - name: install packages
- become: true
- ansible.builtin.apt:
- name:
- - sanoid
- state: present
-
- - name: Create Sanoid Directory
- become: true
- ansible.builtin.file:
- path: "/etc/sanoid"
- state: directory
- owner: root
- group: root
- recurse: true
-
- - name: Generate Sanoid Configuration
- become: true
- ansible.builtin.template:
- src: sanoid.conf.j2
- dest: "/etc/sanoid/sanoid.conf"
- owner: root
- group: root
- mode: 0644
- when: sanoid_datasets is defined and sanoid_templates is defined
-
- - name: template sanoid_mail.service
- become: yes
- ansible.builtin.template:
- src: "sanoid_mail.service.j2"
- dest: /etc/systemd/system/sanoid_mail.service
- owner: root
- group: root
- mode: 0644
- notify:
- - systemctl daemon-reload
-
- - name: add sanoid_mail.service to sanoid.service
- become: true
- ansible.builtin.blockinfile:
- create: yes
- mode: 0644
- owner: root
- group: root
- path: /lib/systemd/system/sanoid.service.d/override.conf
- block: |
- [Unit]
- OnFailure = sanoid_mail.service
- notify:
- - systemctl daemon-reload
-
- - name: set timer
- become: true
- ansible.builtin.blockinfile:
- create: yes
- mode: 0644
- owner: root
- group: root
- path: /lib/systemd/system/sanoid.timer.d/override.conf
- block: |
- [Timer]
- OnCalendar = {{ sanoid_timer }}
- when: sanoid_timer is defined
- notify:
- - systemctl daemon-reload
+ - name: include user tasks
+ include_tasks: user.yml
+
+ - name: install packages from repo
+ become: true
+ ansible.builtin.apt:
+ name:
+ - mbuffer
+ - lzop
+ state: present
+
+ - name: install packages from self-build
+ become: true
+ ansible.builtin.apt:
+ deb: "{{ sanoid_deb_url }}"
+ state: present
+
+ - name: create sanoid directories
+ become: true
+ ansible.builtin.file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ sanoid_user }}"
+ group: "{{ sanoid_user_group }}"
+ mode: 0700
+ with_items:
+ - "/etc/sanoid"
+ - "/etc/sanoid/.ssh"
+
+ - name: include snaps tasks
+ include_tasks: snaps.yml
+ when:
+ - sanoid_datasets is defined
+ - sanoid_templates is defined
+ - sanoid_snaps_enable is defined
+ - sanoid_snaps_enable
+
+ - name: include source-host tasks
+ include_tasks: source.yml
+ when:
+ - sanoid_syncoid_source_host is defined and sanoid_syncoid_source_host is true
+ - sanoid_syncoid_ssh_pubkey is defined
+
+ - name: include destination-host tasks
+ include_tasks: destination.yml
+ when:
+ - sanoid_syncoid_destination_host is defined and sanoid_syncoid_destination_host is true
+ - sanoid_syncoid_ssh_privkey is defined
roles/mgrote.zfs_sanoid/tasks/snaps.yml (new file, 49 lines)
@@ -0,0 +1,49 @@
---
- name: Generate Sanoid Configuration
  become: true
  ansible.builtin.template:
    src: sanoid.conf.j2
    dest: "/etc/sanoid/sanoid.conf"
    owner: "{{ sanoid_user }}"
    group: "{{ sanoid_user_group }}"
    mode: 0400

- name: template sanoid_mail.service
  become: yes
  ansible.builtin.template:
    src: "sanoid_mail.service.j2"
    dest: /etc/systemd/system/sanoid_mail.service
    owner: root
    group: root
    mode: 0644
  notify:
    - systemctl daemon-reload

- name: add sanoid_mail.service to sanoid.service
  become: true
  ansible.builtin.blockinfile:
    create: yes
    mode: 0644
    owner: root
    group: root
    path: /lib/systemd/system/sanoid.service.d/override.conf
    block: |
      [Unit]
      OnFailure = sanoid_mail.service
  notify:
    - systemctl daemon-reload

- name: set timer
  become: true
  ansible.builtin.blockinfile:
    create: yes
    mode: 0644
    owner: root
    group: root
    path: /lib/systemd/system/sanoid.timer.d/override.conf
    block: |
      [Timer]
      OnCalendar = {{ sanoid_timer }}
  when: sanoid_timer is defined
  notify:
    - systemctl daemon-reload
roles/mgrote.zfs_sanoid/tasks/source.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
---
- name: template ssh public key
  become: true
  ansible.posix.authorized_key:
    user: "{{ sanoid_user }}"
    key: "{{ sanoid_syncoid_ssh_pubkey }}"
    state: present
  when:
    - sanoid_syncoid_source_host

- name: add user to sudoers
  become: true
  ansible.builtin.blockinfile:
    path: /etc/sudoers
    state: present
    block: |
      {{ sanoid_user }} ALL=(ALL) NOPASSWD:ALL
    validate: '/usr/sbin/visudo -cf %s'
    backup: yes
    marker_begin: sanoid-sudoers BEGIN
    marker_end: sanoid-sudoers END
  when:
    - sanoid_syncoid_source_host
roles/mgrote.zfs_sanoid/tasks/user.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
---
- name: ensure group exists
  become: true
  ansible.builtin.group:
    name: "{{ sanoid_user_group }}"
    state: present
  when:
    - sanoid_user_group is defined
    - sanoid_user is defined

- name: ensure user exists
  become: true
  ansible.builtin.user:
    name: "{{ sanoid_user }}"
    group: "{{ sanoid_user_group }}"
    create_home: yes
  when:
    - sanoid_user_group is defined
    - sanoid_user is defined
roles/mgrote.zfs_sanoid/templates/syncoid.service.j2 (new file, 15 lines)
@@ -0,0 +1,15 @@
{{ file_header | default () }}

[Unit]
Description=Send zfs snapshots with sanoid/syncoid.
OnFailure=syncoid_mail.service


[Service]
Type=oneshot
# check if dest-dataset is mounted (sed: entferne 1. Zeile; awk: zeige nur yes/no; grep: RC1 when != yes)
{% for item in sanoid_syncoid_datasets_sync %}
ExecStart=/bin/sh -c '/usr/sbin/zfs get mounted {{ item.destination_mount_check }} | sed 1d | awk "{print $3}" | grep yes'
# syncoid
ExecStart=/usr/bin/syncoid --sshoption=StrictHostKeyChecking=no --delete-target-snapshots --use-hold --preserve-recordsize --sshkey "/etc/sanoid/.ssh/id_sanoid" --source-bwlimit {{ sanoid_syncoid_bwlimit }} {{ sanoid_user }}@{{ item.source_host }}:{{ item.source_dataset }} {{ item.destination_dataset }}
{% endfor %}
roles/mgrote.zfs_sanoid/templates/syncoid.timer.j2 (new file, 9 lines)
@@ -0,0 +1,9 @@
{{ file_header | default () }}
[Unit]
Description=Timer for syncoid.

[Timer]
OnCalendar={{ sanoid_syncoid_timer }}

[Install]
WantedBy=timers.target multi-user.target zfs.target
@@ -0,0 +1,8 @@
{{ file_header | default () }}

[Unit]
Description=Send a Mail in case of an error in sanoid.service.

[Service]
Type=oneshot
ExecStart=/bin/bash -c '/bin/systemctl status syncoid.service | mail -s "[ERROR] syncoid - %H" {{ empfaenger_mail }}'