fileserver/zfs: each share as its own dataset (#212)

Co-authored-by: Michael Grote <michael.grote@posteo.de>
Reviewed-on: mg/ansible#212
Co-authored-by: mg <mg@noreply.git.mgrote.net>
Co-committed-by: mg <mg@noreply.git.mgrote.net>
This commit is contained in:
Michael Grote 2021-10-09 20:23:23 +02:00
parent edc60f173f
commit 71aa41cb24
16 changed files with 222 additions and 88 deletions


@@ -66,7 +66,7 @@
### mgrote.restic
restic_folders_to_backup: "/usr/local /etc /root /home"
restic_cron_hours: "19"
restic_repository: "//fileserver2.grote.lan/backup/restic"
restic_repository: "//fileserver2.grote.lan/restic"
restic_repository_password: "{{ lookup('keepass', 'restic_repository_password', 'password') }}"
restic_mount: "/mnt/restic"
restic_mount_user: restic
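
As a reference, a minimal sketch of what these variables presumably amount to on a client; the CIFS mount call is an assumption, only the share, mount point and user come from the vars above:

# mount the SMB repository and point restic at it (sketch)
sudo mount -t cifs //fileserver2.grote.lan/restic /mnt/restic -o username=restic,password=...
restic --repo /mnt/restic snapshots   # verify the repository is reachable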
@@ -142,6 +142,7 @@
groups: root, docker
servers:
- production
- test
- username: root
password: "{{ lookup('keepass', 'root_linux_password_hash_proxmox', 'password') }}"
update_password: always


@@ -7,8 +7,7 @@
pip_install_packages:
- name: docker # for the munin plugin docker_
### mgrote.restic
restic_folders_to_backup: /usr/local /etc /root /home /var/lib/docker
restic_cron_hours: "*/4"
restic_cron_hours: "*/6"
restic_exclude: |
._*
desktop.ini
@@ -18,7 +17,6 @@
**/**AppData***/**
/var/lib/docker/volumes/***Musik***
/var/lib/docker/volumes/***musik***
/var/lib/docker/volumes/docker-photoprism_pp_smb_bilder***/**
# https://github.com/restic/restic/issues/1005
# https://forum.restic.net/t/exclude-syntax-confusion/1531/12
### geerlingguy.munin-node
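
The two links above cover restic's exclude-pattern semantics. As a hedged usage sketch (the exclude-file path is hypothetical; --exclude-file is restic's standard flag):

# pass the patterns above to restic via an exclude file (sketch)
restic --repo /mnt/restic backup /var/lib/docker --exclude-file /etc/restic/excludes.txt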


@@ -22,7 +22,7 @@
## playbook
pihole_homer_fqdn: docker.grote.lan # under which host is docker reachable? needed for the pihole stats in homer and for the CORS request
### mgrote.restic
restic_repository: "//192.168.2.36/backup/restic" # weil pihole den fqdn nicht auflösen kann
restic_repository: "//192.168.2.36/restic"
### mgrote.ntp_chrony_server
ntp_chrony_servers: # because pihole cannot resolve the fqdn
- address: pool.ntp.org
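
Whether chrony picked up the configured sources can be checked on the host with chrony's own CLI:

chronyc sources   # lists the configured NTP sources and their reachability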


@@ -7,8 +7,6 @@
smart_smartctlmail_cron_minutes: "15"
smart_smartctlmail_cron_hours: "6"
smart_smartctlmail_cron_weekday: "3"
### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are grouped under zfs_extra_*
zfs_extra_max_usage_health: "80"
### geerlingguy.munin-node
munin_node_disabled_plugins:
- name: meminfo # load too high


@@ -13,12 +13,6 @@
repository_user: mg
repository_user_password: "{{ lookup('keepass', 'gitea_mg_https_password', 'password') }}"
state: present
- name: munin-master
dir_name: docker-munin-master
repository_url: git.mgrote.net/mg/docker-munin-master
repository_user: mg
repository_user_password: "{{ lookup('keepass', 'gitea_mg_https_password', 'password') }}"
state: absent
- name: lazydocker
dir_name: docker-lazydocker
repository_url: git.mgrote.net/mg/docker-lazydocker
@@ -45,3 +39,6 @@
protocol: tcp
comment: 'munin'
from_ip: 0.0.0.0/0
### mgrote.restic
restic_folders_to_backup: /usr/local /etc /root /home


@@ -144,3 +144,6 @@
env.enable_xfer_radio no
# Show detailed graphs for each radio
env.enable_detail_xfer_radio no
### mgrote.restic
restic_folders_to_backup: /usr/local /etc /root /home /var/lib/docker


@@ -30,3 +30,6 @@
dir_name: docker-watchtower
repository_url: git.mgrote.net/mg/docker-watchtower
state: present
### mgrote.restic
restic_folders_to_backup: /usr/local /etc /root /home /var/lib/docker


@@ -9,9 +9,12 @@
dir_name: docker-ansible-ara
repository_url: git.mgrote.net/mg/docker-ansible-ara
state: present
- name: photoprism
- name: photoprism # if the container is moved elsewhere, re-add the restic exclusions, or remove /var/lib/docker from restic
dir_name: docker-photoprism
repository_url: git.mgrote.net/mg/docker-photoprism
state: present
repository_user: mg
repository_user_password: "{{ lookup('keepass', 'gitea_mg_https_password', 'password') }}"
### mgrote.restic
restic_folders_to_backup: /usr/local /etc /root /home


@@ -0,0 +1,129 @@
---
# Bind Mounts - fileserver
# runs without bind mounts
### mgrote.smb_fileserver
smb_users:
- name: 'annemariedroessler2'
password: "{{ lookup('keepass', 'fileserver_smb_user_amd', 'password') }}"
- name: 'restic'
password: "{{ lookup('keepass', 'fileserver_smb_user_restic', 'password') }}"
- name: 'win10'
password: "{{ lookup('keepass', 'fileserver_smb_user_win10', 'password') }}"
- name: 'kodi'
password: "{{ lookup('keepass', 'fileserver_smb_user_kodi', 'password') }}"
- name: 'michaelgrote'
password: "{{ lookup('keepass', 'fileserver_smb_user_mg', 'password') }}"
- name: 'navidrome'
password: "{{ lookup('keepass', 'fileserver_smb_user_navidrome', 'password') }}"
- name: 'docker'
password: "{{ lookup('keepass', 'fileserver_smb_user_docker', 'password') }}"
- name: 'pve'
password: "{{ lookup('keepass', 'fileserver_smb_user_pve', 'password') }}"
- name: 'brother_ads2700w'
password: "{{ lookup('keepass', 'fileserver_smb_user_brother_ads2700w', 'password') }}"
- name: 'photoprism'
password: "{{ lookup('keepass', 'fileserver_smb_user_photoprism', 'password') }}"
smb_shares:
- name: 'videos'
path: '/shares_videos'
users_ro: 'kodi'
users_rw: 'annemariedroessler2 michaelgrote win10'
- name: 'scans'
path: '/shares_scans'
users_ro: 'annemariedroessler2 michaelgrote'
users_rw: 'brother_ads2700w'
- name: 'papa_backup'
path: '/shares_papa_backup'
users_ro: 'michaelgrote'
users_rw: 'win10'
- name: 'amd'
path: '/shares_amd'
users_ro: 'michaelgrote win10'
users_rw: 'annemariedroessler2'
- name: 'backup'
path: '/shares_backup'
users_ro: 'annemariedroessler2'
users_rw: 'win10 michaelgrote'
- name: 'archiv'
path: '/shares_archiv'
users_ro: ''
users_rw: 'michaelgrote win10'
- name: 'hm'
path: '/shares_hm'
users_ro: 'win10'
users_rw: 'michaelgrote'
- name: 'mg'
path: '/shares_data_crypt'
users_ro: ''
users_rw: 'win10 michaelgrote'
- name: 'musik'
path: '/shares_music'
users_ro: 'navidrome kodi annemariedroessler2'
users_rw: 'win10 michaelgrote'
- name: 'tmp'
path: '/shares_tmp'
users_ro: 'win10'
users_rw: 'kodi annemariedroessler2 win10 michaelgrote'
- name: 'bilder'
path: '/shares_bilder'
users_ro: 'photoprism'
users_rw: 'annemariedroessler2 michaelgrote win10'
- name: 'proxmox'
path: '/shares_pve_backup'
users_ro: 'michaelgrote'
users_rw: 'pve win10'
- name: 'restic'
path: '/shares_restic'
users_ro: ''
users_rw: 'annemariedroessler2 restic win10 michaelgrote'
smb_workgroup: WORKGROUP
smb_min_protocol: "SMB2"
smb_client_min_protocol: "SMB2"
smb_client_max_protocol: "SMB3_11"
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 445
comment: 'smb'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 139
comment: 'smb'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.144/24
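
For reference, one of the rules above expressed as a plain ufw call; how oefenweb.ufw actually orders the arguments is not shown here, so treat this as a sketch:

# rough ufw equivalent of the ssh rule above (sketch)
ufw allow from 0.0.0.0/0 to any port 22 proto tcp comment 'ssh'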
### geerlingguy.munin-node
munin_node_disabled_plugins:
- name: meminfo # load too high
- name: hddtemp2 # replaced by hddtemp_smartctl
- name: ntp # causes too many dns ptr requests
- name: hddtempd # replaced by hddtemp_smartctl
- name: ipmi_power # for pve2, empty graph
- name: lvm_
- name: samba_locked
- name: samba_users
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/chrony
- name: systemd_status
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/systemd_status
- name: samba
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/samba
config: |
[samba]
user root
group root
env.smbstatus /usr/bin/smbstatus
env.ignoreipcshare 1
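
Each smb_shares entry above presumably renders to one smb.conf stanza; the template belongs to mgrote.smb_fileserver, so the read list/write list mapping below is an assumption:

# assumed smb.conf rendering of the 'videos' share (sketch)
cat <<'EOF' >> /etc/samba/smb.conf
[videos]
    path = /shares_videos
    read list = kodi
    write list = annemariedroessler2 michaelgrote win10
EOF
testparm -s   # let samba validate the resulting config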


@@ -1,20 +1,18 @@
---
# Bind Mounts - fileserver
# pct set 109 -mp0 /hdd_data_raidz/videos,mp=/shares_videos
# pct set 109 -mp1 /hdd_data_raidz/data_crypt,mp=/shares
# pct set 109 -mp2 /hdd_data_raidz/pve_backup,mp=/shares_pve_backup
# pct set 109 -mp3 /hdd_data_raidz/papa_backup,mp=/shares_papa_backup
# pct set 109 -mp4 /hdd_data_raidz/music,mp=/shares_music
# pct set 109 -mp5 /hdd_data_raidz/tmp,mp=/shares_tmp
# pct set 109 -mp6 /hdd_data_raidz/archiv,mp=/shares_archiv
# Bind Mounts - fileserver-test
# pct set 158 -mp0 /rpool/vm/dir/vm-158/videos,mp=/shares_videos
# pct set 158 -mp1 /rpool/vm/dir/vm-158/data,mp=/shares
# pct set 158 -mp2 /rpool/vm/dir/vm-158/proxmox,mp=/shares_pve_backup
# pct set 158 -mp3 /rpool/vm/dir/vm-158/papa,mp=/shares_papa_backup
# pct set 158 -mp4 /rpool/vm/dir/vm-158/music,mp=/shares_music
# pct set 158 -mp5 /rpool/vm/dir/vm-158/tmp,mp=/shares_tmp
# pct set 158 -mp6 /rpool/vm/dir/vm-158/archiv,mp=/shares_archiv
# pct set 127 -mp0 /hdd_data_raidz/videos,mp=/shares_videos
# pct set 127 -mp1 /hdd_data_raidz/data_crypt,mp=/shares_data_crypt
# pct set 127 -mp2 /hdd_data_raidz/pve_backup,mp=/shares_pve_backup
# pct set 127 -mp3 /hdd_data_raidz/papa_backup,mp=/shares_papa_backup
# pct set 127 -mp4 /hdd_data_raidz/music,mp=/shares_music
# pct set 127 -mp5 /hdd_data_raidz/tmp,mp=/shares_tmp
# pct set 127 -mp6 /hdd_data_raidz/archiv,mp=/shares_archiv
# pct set 127 -mp7 /hdd_data_raidz/bilder,mp=/shares_bilder
# pct set 127 -mp8 /hdd_data_raidz/hm,mp=/shares_hm
# pct set 127 -mp9 /hdd_data_raidz/scans,mp=/shares_scans
# pct set 127 -mp10 /hdd_data_raidz/restic,mp=/shares_restic
# pct set 127 -mp11 /hdd_data_raidz/amd,mp=/shares_amd
# pct set 127 -mp12 /hdd_data_raidz/backup,mp=/shares_backup
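
The pct commands in the comments above attach one dataset per share to container 127; the result can be verified on the Proxmox host:

pct config 127 | grep ^mp   # list the configured mount points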
### mgrote.smb_fileserver
smb_users:
@@ -45,31 +43,31 @@
users_ro: 'kodi'
users_rw: 'annemariedroessler2 michaelgrote win10'
- name: 'scans'
path: '/shares/scans'
path: '/shares_scans'
users_ro: 'annemariedroessler2 michaelgrote'
users_rw: 'brother_ads2700w'
- name: 'papa_backup'
path: '/shares_papa_backup'
users_ro: ''
users_rw: 'win10 michaelgrote'
users_ro: 'michaelgrote'
users_rw: 'win10'
- name: 'amd'
path: '/shares/amd'
users_ro: 'navidrome michaelgrote'
path: '/shares_amd'
users_ro: 'michaelgrote win10'
users_rw: 'annemariedroessler2 win10'
- name: 'backup'
path: '/shares/Backup'
users_ro: ''
users_rw: 'annemariedroessler2 restic win10 michaelgrote'
path: '/shares_backup'
users_ro: 'annemariedroessler2'
users_rw: 'win10 michaelgrote'
- name: 'archiv'
path: '/shares_archiv'
users_ro: ''
users_rw: 'michaelgrote win10'
- name: 'hm'
path: '/shares/hm'
path: '/shares_hm'
users_ro: 'win10'
users_rw: 'michaelgrote'
- name: 'mg'
path: '/shares/mg'
path: '/shares_data_crypt'
users_ro: ''
users_rw: 'win10 michaelgrote'
- name: 'musik'
@@ -79,15 +77,19 @@
- name: 'tmp'
path: '/shares_tmp'
users_ro: 'win10'
users_rw: 'kodi annemariedroessler2 restic win10 michaelgrote'
users_rw: 'kodi annemariedroessler2 win10 michaelgrote'
- name: 'bilder'
path: '/shares/bilder'
path: '/shares_bilder'
users_ro: 'photoprism'
users_rw: 'annemariedroessler2 michaelgrote win10'
- name: 'proxmox'
path: '/shares_pve_backup'
users_ro: 'michaelgrote'
users_rw: 'pve win10'
- name: 'restic'
path: '/shares_restic'
users_ro: ''
users_rw: 'annemariedroessler2 restic win10 michaelgrote'
smb_workgroup: WORKGROUP
smb_min_protocol: "SMB2"
smb_client_min_protocol: "SMB2"


@@ -7,6 +7,10 @@
# zpool create -f -o ashift=12 rpool mirror ata-SAMSUNG_MZ7LH960HAJR-00005_S45NNC0R105094 /dev/disk/by-id/ata-SAMSUNG_MZ7LH960HAJR-00005_S45NNC0R105095
# HDD_DATA_RAIDZ
# zpool create -f -o ashift=12 hdd_data_raidz raidz /dev/disk/by-id/ata-WDC_WD80EZAZ-11TDBA0_2SG991TJ /dev/disk/by-id/ata-WDC_WD80EZAZ-11TDBA0_2SGA23EJ /dev/disk/by-id/ata-ST8000DM004-2CX188_ZCT1AK0F
### mgrote.zfs_extra
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are grouped under zfs_extra_*
zfs_datasets: # data pools are not managed here
- dataset: rpool/vm
state: present
@@ -56,6 +60,7 @@
dnodesize: auto
atime: on
snapdir: hidden
recordsize: 1M
- dataset: hdd_data_raidz/videos
state: present
compression: lz4
@@ -64,6 +69,7 @@
dnodesize: auto
atime: on
snapdir: hidden
recordsize: 1M
- dataset: hdd_data_raidz/music
state: present
compression: lz4
@@ -96,66 +102,55 @@
dnodesize: auto
atime: on
snapdir: hidden
- dataset: rpool/vm/dir/vm-158/data
- dataset: hdd_data_raidz/amd
state: present
compression: lz4
compression: zstd
sync: disabled
xattr: sa
dnodesize: auto
atime: on
snapdir: hidden
- dataset: rpool/vm/dir/vm-158/papa
- dataset: hdd_data_raidz/bilder
state: present
compression: lz4
compression: zstd
sync: disabled
xattr: sa
dnodesize: auto
atime: on
snapdir: hidden
- dataset: rpool/vm/dir/vm-158/proxmox
- dataset: hdd_data_raidz/hm
state: present
compression: lz4
compression: zstd
sync: disabled
xattr: sa
dnodesize: auto
atime: on
snapdir: hidden
- dataset: rpool/vm/dir/vm-158/videos
- dataset: hdd_data_raidz/scans
state: present
compression: lz4
compression: zstd
sync: disabled
xattr: sa
dnodesize: auto
atime: on
snapdir: hidden
- dataset: rpool/vm/dir/vm-158/music
- dataset: hdd_data_raidz/restic
state: present
compression: lz4
compression: zstd
sync: disabled
xattr: sa
dnodesize: auto
atime: on
snapdir: hidden
- dataset: rpool/vm/dir/vm-158/tmp
- dataset: hdd_data_raidz/backup
state: present
compression: lz4
compression: zstd
sync: disabled
xattr: sa
dnodesize: auto
atime: on
snapdir: hidden
- dataset: rpool/vm/dir/vm-158/archiv
state: present
compression: lz4
sync: disabled
xattr: sa
dnodesize: auto
atime: on
snapdir: hidden
### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are grouped under zfs_extra_*
zfs_extra_arc_max_size: "8589934592" # 8 GiB in bytes
zfs_extra_max_usage_health: "90"
zfs_extra_zfs_pools:
- name: "rpool"
cron_minutes_zfs_scrub: "0"
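
On the host, each dataset entry above boils down to ordinary ZFS property settings; a sketch using the values from the new per-share datasets (whether the role creates or merely adjusts them is not shown here):

# create one of the new per-share datasets with the listed properties (sketch)
zfs create -o compression=zstd -o sync=disabled -o xattr=sa -o dnodesize=auto -o atime=on -o snapdir=hidden hdd_data_raidz/amd
zfs get compression,recordsize hdd_data_raidz/videos   # recordsize=1M was set further up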
@@ -195,9 +190,32 @@
- path: 'hdd_data_raidz/pve_backup'
snapshots: false # disables sanoid for this dataset
- path: 'hdd_data_raidz/archiv'
template: '3monate'
template: '14tage'
recursive: 'yes'
snapshots: true
- path: hdd_data_raidz/amd
recursive: 'no'
snapshots: true
template: '31tage'
- path: hdd_data_raidz/bilder
recursive: 'no'
snapshots: true
template: '14tage'
- path: hdd_data_raidz/hm
recursive: 'no'
snapshots: true
template: '14tage'
- path: hdd_data_raidz/scans
recursive: 'no'
snapshots: true
template: '3tage'
- path: hdd_data_raidz/backup
recursive: 'no'
snapshots: true
template: '31tage'
- path: hdd_data_raidz/restic
recursive: 'no'
snapshots: false
sanoid_templates:
- name: '31tage'
keep_hourly: '24' # keep (hourly snapshots)
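
sanoid reads these settings from sanoid.conf; a hedged sketch of what one entry and its template presumably render to (section and key names follow sanoid's documented config format, the keep_hourly-to-hourly mapping is an assumption about the role):

# assumed sanoid.conf rendering (sketch)
cat <<'EOF' >> /etc/sanoid/sanoid.conf
[hdd_data_raidz/amd]
    use_template = 31tage
    recursive = no

[template_31tage]
    hourly = 24
EOF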


@@ -7,7 +7,7 @@ all:
fileserver:
hosts:
fileserver2.grote.lan:
fileserver-test.grote.lan:
fileserver2-test.grote.lan:
pihole:
hosts:
pihole2-test.grote.lan:
@@ -68,7 +68,6 @@ all:
test:
hosts:
dokuwiki-test.grote.lan:
fileserver-test.grote.lan:
acng-test.grote.lan:
ansible-test.grote.lan:
docker-test.grote.lan:
@@ -77,3 +76,4 @@ all:
gitea-test.grote.lan:
pihole2-test.grote.lan:
ntp-server-test.grote.lan:
fileserver2-test.grote.lan:


@@ -2,8 +2,8 @@
restic_anzahl_versuche_backup: "3" # how many times restic should try to start a backup
restic_wartezeit: "60" # wait time between the attempts
restic_folders_to_backup: "/usr/local /etc /root /var/www /home" # which folders to back up
restic_cron_hours: "19" # at which hour the script is started (nibute is generated from the hostname)
restic_repository: "ANY.SMB.SHARE" # smb share holding the repository, e.g. "//fileserver2.grote.lan/backup/restic"
restic_cron_hours: "19" # at which hour the script is started (minute is generated from the hostname)
restic_repository: "ANY.SMB.SHARE" # smb share holding the repository, e.g. "//fileserver2.grote.lan/restic"
restic_repository_password: XXXXX # password for the repo
restic_mount: "/mnt/restic" # where the repo is mounted
restic_mount_user: restic # user for the share/mount


@@ -1,4 +1,3 @@
---
zfs_extra_cron_minutes_zfs_health: "0,15,30,45"
zfs_extra_cron_hours_zfs_health: "*"
zfs_extra_max_usage_health: "80"


@@ -15,24 +15,6 @@ if [ "${condition}" ]; then
fi
# Capacity - Make sure pool capacities are below 80% for best performance. The
# percentage really depends on how large your volume is. If you have a 128GB
# SSD then 80% is reasonable. If you have a 60TB raid-z2 array then you can
# probably set the warning closer to 95%.
maxCapacity={{ zfs_extra_max_usage_health }}
if [ ${problems} -eq 0 ]; then
capacity=$(/sbin/zpool list -H -o capacity)
for line in ${capacity//%/}
do
if [ $line -ge $maxCapacity ]; then
emailSubject="$emailSubject - Capacity Exceeded"
problems=1
fi
done
fi
# Errors - Check the columns for READ, WRITE and CKSUM (checksum) drive errors
# on all volumes and all drives using "zpool status". If any non-zero errors
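
With the capacity block gone from the script, the same information stays available as a one-liner when needed:

zpool list -H -o name,capacity   # e.g. "rpool   42%"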


@@ -19,5 +19,6 @@
dnodesize: "{{ item.dnodesize | default('auto') }}"
atime: "{{ item.atime | default('off') }}"
snapdir: "{{ item.snapdir | default('hidden') }}"
recordsize: "{{ item.recordsize | default('128K') }}"
loop: "{{ zfs_datasets }}"
when: zfs_datasets is defined # run only if the variable is set
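
The default filter above means any dataset entry without an explicit recordsize keeps ZFS's stock 128K; the effect is easy to confirm per dataset:

zfs get -H -o value recordsize rpool/vm               # -> 128K (default)
zfs get -H -o value recordsize hdd_data_raidz/videos  # -> 1M (set in the host vars)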