Dismantle Proxmox cluster (#191)

Co-authored-by: Michael Grote <michael.grote@posteo.de>
Reviewed-on: mg/ansible#191
Co-authored-by: mg <mg@noreply.git.mgrote.net>
Co-committed-by: mg <mg@noreply.git.mgrote.net>
Michael Grote 2021-08-20 08:42:14 +02:00
parent 85405f8315
commit 137814c6ba
6 changed files with 1 addition and 213 deletions

@@ -21,7 +21,7 @@
     local_path: "/home/mg/.config/flameshot/flameshot.ini"
   - repo_path: "{{ dotfiles_repo_path}}/.ssh/config"
     local_path: "/home/mg/.ssh/config"
-  - repo_path: "/home/mg/Nextcloud/Rest/ssh-keys/heimserver/private_key" # smylink in .ssh-dir
+  - repo_path: "/home/mg/Nextcloud/Rest/ssh-keys/heimserver/private_key" # symlink in .ssh-dir
     local_path: "/home/mg/.ssh/private_key_heimserver"
 dotfiles_dirs:
   - path: /home/mg/.config/i3
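Note: this hunk only fixes a comment typo ("smylink" to "symlink"). The dotfiles role itself is not part of this diff; a minimal sketch of how each repo_path/local_path pair could be applied (the task name and the list variable name dotfiles_files are assumptions, only the item keys appear above):

- name: link dotfiles into place  # hypothetical task; the real role may differ
  ansible.builtin.file:
    src: "{{ item.repo_path }}"    # file tracked in the dotfiles repo
    dest: "{{ item.local_path }}"  # symlink location in the home directory
    state: link
    force: true
  loop: "{{ dotfiles_files }}"     # assumed name of the list edited above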

@@ -1,49 +0,0 @@
----
-### mgrote.zfs_manage_datasets
-# rpool is created by pve during installation if the installation is done with zfs
-zfs_datasets: # data pools are not managed here
-  - dataset: rpool/vm
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-  - dataset: rpool/vm/dir
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-  - dataset: rpool/vm/zvol
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-  - dataset: rpool/vm/qcow
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are grouped under zfs_extra_*
-zfs_extra_arc_max_size: "2147483648" # 2 GiB in bytes
-zfs_extra_zfs_pools:
-  - name: "rpool"
-    cron_minutes_zfs_scrub: "15"
-    cron_hour_zfs_scrub: "23"
-    cron_day_of_month_zfs_scrub: "14"
-    cron_day_of_week_zfs_scrub: "*"
-    cron_month_zfs_scrub: "*/2"
-### mgrote.apcupsd
-apcupsd_nis_master: false
-apcupsd_nis_master_hostname: pve2-test.grote.lan
-apcupsd_slave_polltime: 360 # in seconds
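The zfs_datasets entries deleted here map one-to-one onto ZFS dataset properties. A minimal sketch of how a role like mgrote.zfs_manage_datasets could consume them, assuming it wraps the community.general.zfs module (the role's real implementation is not shown in this diff):

- name: manage ZFS datasets from the zfs_datasets list
  community.general.zfs:
    name: "{{ item.dataset }}"      # e.g. rpool/vm
    state: "{{ item.state }}"       # present/absent
    extra_zfs_properties:
      compression: "{{ item.compression }}"
      sync: "{{ item.sync }}"
      xattr: "{{ item.xattr }}"
      dnodesize: "{{ item.dnodesize }}"
      atime: "{{ item.atime }}"
      snapdir: "{{ item.snapdir }}"
  loop: "{{ zfs_datasets }}"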

@@ -1,50 +0,0 @@
----
-### mgrote.zfs_manage_datasets
-# rpool is created by pve during installation if the installation is done with zfs
-zfs_datasets: # data pools are not managed here
-  - dataset: rpool/vm
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-  - dataset: rpool/vm/dir
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-  - dataset: rpool/vm/zvol
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-  - dataset: rpool/vm/qcow
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are grouped under zfs_extra_*
-zfs_extra_arc_max_size: "6442450944" # 6 GiB in bytes
-zfs_extra_zfs_pools:
-  - name: "rpool"
-    cron_minutes_zfs_scrub: "20"
-    cron_hour_zfs_scrub: "23"
-    cron_day_of_month_zfs_scrub: "14"
-    cron_day_of_week_zfs_scrub: "*"
-    cron_month_zfs_scrub: "*/2"
-### mgrote.apcupsd
-apcupsd_nis_master: false
-apcupsd_nis_master_hostname: pve2.grote.lan
-### mgrote.r8152_kernel_module
-r8152_module_needed: true
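The cron_*_zfs_scrub values above describe one crontab entry per pool, here a scrub of rpool at 23:20 on the 14th of every second month. A sketch of that schedule as an ansible.builtin.cron task, assuming this is roughly what the scrub role renders (the entry name is hypothetical):

- name: schedule a periodic scrub for rpool
  ansible.builtin.cron:
    name: "zfs-scrub-rpool"   # hypothetical cron entry name
    minute: "20"
    hour: "23"
    day: "14"
    month: "*/2"
    weekday: "*"
    job: "zpool scrub rpool"
  become: true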

@@ -1,49 +0,0 @@
----
-### mgrote.zfs_manage_datasets
-# rpool is created by pve during installation if the installation is done with zfs
-zfs_datasets: # data pools are not managed here
-  - dataset: rpool/vm
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-  - dataset: rpool/vm/dir
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-  - dataset: rpool/vm/zvol
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-  - dataset: rpool/vm/qcow
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are grouped under zfs_extra_*
-zfs_extra_arc_max_size: "2147483648" # 2 GiB in bytes
-zfs_extra_zfs_pools:
-  - name: "rpool"
-    cron_minutes_zfs_scrub: "25"
-    cron_hour_zfs_scrub: "23"
-    cron_day_of_month_zfs_scrub: "14"
-    cron_day_of_week_zfs_scrub: "*"
-    cron_month_zfs_scrub: "*/2"
-### mgrote.apcupsd
-apcupsd_nis_master: false
-apcupsd_nis_master_hostname: pve2-test.grote.lan
-apcupsd_slave_polltime: 360 # in seconds

@@ -1,50 +0,0 @@
----
-### mgrote.zfs_manage_datasets
-# rpool is created by pve during installation if the installation is done with zfs
-zfs_datasets: # data pools are not managed here
-  - dataset: rpool/vm
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-  - dataset: rpool/vm/dir
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-  - dataset: rpool/vm/zvol
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-  - dataset: rpool/vm/qcow
-    state: present
-    compression: lz4
-    sync: disabled
-    xattr: sa
-    dnodesize: auto
-    atime: on
-    snapdir: hidden
-### mgrote.zfs_extra # variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are grouped under zfs_extra_*
-zfs_extra_arc_max_size: "6442450944" # 6 GiB in bytes
-zfs_extra_zfs_pools:
-  - name: "rpool"
-    cron_minutes_zfs_scrub: "30"
-    cron_hour_zfs_scrub: "23"
-    cron_day_of_month_zfs_scrub: "14"
-    cron_day_of_week_zfs_scrub: "*"
-    cron_month_zfs_scrub: "*/2"
-### mgrote.apcupsd
-apcupsd_nis_master: false
-apcupsd_nis_master_hostname: pve2.grote.lan
-### mgrote.r8152_kernel_module
-r8152_module_needed: true
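zfs_extra_arc_max_size caps the ZFS ARC; 6442450944 bytes is 6 * 1024^3, i.e. 6 GiB (and 2147483648 on the test hosts is 2 GiB). On Linux this limit is conventionally set through the zfs_arc_max module parameter; a sketch of that mechanism (whether the arc_mem role does exactly this is an assumption):

- name: cap the ZFS ARC via a module option
  ansible.builtin.copy:
    dest: /etc/modprobe.d/zfs.conf
    content: "options zfs zfs_arc_max={{ zfs_extra_arc_max_size }}\n"
  become: true
  # takes effect once the zfs module is reloaded or the host is rebooted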

@@ -36,27 +36,17 @@ all:
     proxmox:
       hosts:
         pve2-test.grote.lan:
-        pve3-test.grote.lan:
-        pve4-test.grote.lan:
         pve2.grote.lan:
-        pve3.grote.lan:
-        pve4.grote.lan:
       children:
         proxmoxprod:
           hosts:
             pve2.grote.lan:
-            pve3.grote.lan:
-            pve4.grote.lan:
         proxmoxtest:
           hosts:
             pve2-test.grote.lan:
-            pve3-test.grote.lan:
-            pve4-test.grote.lan:
     physical:
       hosts:
         pve2.grote.lan:
-        pve3.grote.lan:
-        pve4.grote.lan:
     gitea:
       hosts:
         gitea-test.grote.lan:
@@ -69,8 +59,6 @@ all:
         acng.grote.lan:
         ansible2.grote.lan:
         pve2.grote.lan:
-        pve3.grote.lan:
-        pve4.grote.lan:
         gitea.grote.lan:
         pihole2.grote.lan:
         ntp-server.grote.lan:
@@ -86,8 +74,6 @@ all:
         docker-test.grote.lan:
         vm-test2.grote.lan:
         pve2-test.grote.lan:
-        pve3-test.grote.lan:
-        pve4-test.grote.lan:
         gitea-test.grote.lan:
         pihole2-test.grote.lan:
         ntp-server-test.grote.lan:
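Reconstructed from the context lines above, the trimmed proxmox grouping now contains only the pve2 machines:

proxmox:
  hosts:
    pve2-test.grote.lan:
    pve2.grote.lan:
  children:
    proxmoxprod:
      hosts:
        pve2.grote.lan:
    proxmoxtest:
      hosts:
        pve2-test.grote.lan: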