Compare commits


14 Commits
master ... mfd

Author SHA1 Message Date
Michael Grote 5dc4fc0afc s 2023-11-25 13:56:28 +01:00
Michael Grote 49a3f3671d sonmderzeichen 2023-11-25 13:39:38 +01:00
Michael Grote a51f140b5b t 2023-11-25 13:35:03 +01:00
Michael Grote f98aa74606 syntax 2023-11-25 13:21:50 +01:00
Michael Grote ce6d199408 test 2023-11-25 13:20:38 +01:00
Michael Grote 64d4cddfda nach id 2023-11-25 13:17:36 +01:00
Michael Grote 84779c3f6e add files 2023-11-25 12:54:39 +01:00
Michael Grote 3261591cc7 a 2023-11-25 12:07:32 +01:00
Michael Grote ca6544c0f5 anderer 2023-11-25 12:06:12 +01:00
Michael Grote 5c658fe12a typo 2023-11-25 11:51:27 +01:00
Michael Grote c9a7de4a19 user 2023-11-25 11:50:31 +01:00
Michael Grote bb2ecaa937 port 2023-11-25 11:49:07 +01:00
Michael Grote 0641cb8af8 change cleanup 2023-11-25 11:46:28 +01:00
Michael Grote 14d518af25 oxi 2023-11-25 11:43:55 +01:00
149 changed files with 7654 additions and 2554 deletions


@ -12,14 +12,15 @@ exclude_paths:
- .gitlab-ci.yml
- friedhof/
- playbooks/on-off
- roles/ansible-role-pip
- roles/ansible-role-bootstrap
- roles/ansible_role_ctop
- roles/ansible-role-docker
- roles/ansible-role-helm
- roles/ansible-role-nfs
- roles/ansible-role-unattended-upgrades
- roles/ansible-manage-lvm
- roles/ansible-ufw
- roles/ansible_role_gitea
- roles/ansible-role-postgresql
- roles/geerlingguy-ansible-role-pip
- roles/pyratlabs-ansible-role-k3s
- roles/robertdebock-ansible-role-bootstrap
- roles/gantsign-ansible-role-ctop
- roles/geerlingguy-ansible-role-docker
- roles/geerlingguy-ansible-role-helm
- roles/geerlingguy-ansible-role-nfs
- roles/hifis-net-ansible-role-unattended-upgrades
- roles/mrlesmithjr-ansible-manage-lvm
- roles/oefenweb-ansible-ufw
- roles/pandemonium1986-ansible-role-k9s
- roles/pyratlabs-ansible-role-gitea

.gitignore

@ -2,19 +2,16 @@
vault-pass.yml
id_ed25519
id_ed25519.pub
roles/ansible-role-pip
roles/ansible-role-k3s
roles/ansible-role-bootstrap
roles/ansible_role_ctop
roles/ansible-role-docker
roles/ansible-role-helm
roles/ansible-role-nfs
roles/ansible_role_gitea
roles/ansible-role-unattended-upgrades
roles/ansible-manage-lvm
roles/ansible-ufw
roles/geerlingguy-ansible-role-pip
roles/pyratlabs-ansible-role-k3s
roles/robertdebock-ansible-role-bootstrap
roles/gantsign-ansible-role-ctop
roles/geerlingguy-ansible-role-docker
roles/geerlingguy-ansible-role-helm
roles/geerlingguy-ansible-role-nfs
roles/hifis-net-ansible-role-unattended-upgrades
roles/mrlesmithjr-ansible-manage-lvm
roles/oefenweb-ansible-ufw
roles/pandemonium1986-ansible-role-k9s
roles/ansible_role_gitea
roles/pyratlabs-ansible-role-gitea
collections/
plugins/lookup/__pycache__/
roles/ansible-role-postgresql


@ -1,18 +1,19 @@
---
kind: pipeline
type: docker
name: ansible-lint
depends_on:
- gitleaks
steps:
ansible-lint:
image: quay.io/ansible/creator-ee:v24.2.0
image: quay.io/ansible/creator-ee
commands:
- ansible-lint --version
- echo $${VAULTPASS} > ./vault-pass.yml # name of the secret, uppercased
- ansible-galaxy install -r requirements.yaml
- echo $VAULT-PASS > ./vault-pass.yml # name of the secret, uppercased
- ansible-galaxy install -r requirements.yml
- ansible-lint --force-color --format pep8
# https://woodpecker-ci.org/docs/usage/secrets#use-secrets-in-commands
secrets: [vaultpass]
when:
- event: [push, pull_request, cron]
evaluate: 'CI_COMMIT_AUTHOR_EMAIL != "renovate@mgrote.net"'
...
event:
exclude:
- tag
secret: [vault-pass] # this secret may be used
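
A note on the secret handling above (convention summary, not part of the diff): Woodpecker exposes a declared secret to the step as an uppercase environment variable, and a doubled $$ defers expansion to the step's shell instead of Woodpecker's own pre-run substitution. A minimal sketch of the $${VAULTPASS} variant:

    steps:
      ansible-lint:
        commands:
          # $$ escapes Woodpecker's interpolation; the step's shell expands it
          - echo $${VAULTPASS} > ./vault-pass.yml
        # the secret "vaultpass" is injected as the env var VAULTPASS
        secrets: [vaultpass]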


@ -1,10 +1,13 @@
---
kind: pipeline
type: docker
name: gitleaks
steps:
gitleaks:
image: zricethezav/gitleaks:v8.18.2
image: zricethezav/gitleaks:latest
commands:
- gitleaks detect --no-git --verbose --source $CI_WORKSPACE
when:
- event: [push, pull_request, cron]
evaluate: 'CI_COMMIT_AUTHOR_EMAIL != "renovate@mgrote.net"'
...
event:
exclude:
- tag


@ -4,7 +4,7 @@ nocows = 1
retry_files_enabled = False
roles_path = ./roles
lookup_plugins = ./plugins/lookup
collections_path = ./collections
collections_paths = ./collections
private_key_file = ./id_ed25519
vault_password_file = vault-pass.yml
gathering = smart
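
The collections_path toggle in this hunk mirrors ansible-core's rename: the plural collections_paths spelling is deprecated in favor of the singular collections_path, with identical behavior. Current spelling (the plural form still works but logs a deprecation warning):

    collections_path = ./collections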


@ -2,22 +2,19 @@ version: '3'
services:
httpd-registry:
container_name: "httpd-registry"
image: "registry.mgrote.net/httpd:latest"
image: httpd:bullseye
restart: always
volumes:
- uploads:/usr/local/apache2/htdocs/
- "{{ compose_dest_basedir }}/httpd/httpd.conf:/usr/local/apache2/conf/httpd.conf:ro"
ports:
- 3344:80
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
labels:
com.centurylinklabs.watchtower.enable: true
python-api-server:
container_name: httpd-api
image: "registry.mgrote.net/python-api-server:latest"
image: registry.mgrote.net/python-api-server:latest
restart: always
ports:
- "5040:5000"
@ -28,8 +25,10 @@ services:
# FLASK_APP: app # for debugging
MAX_CONTENT_LENGTH: 500
UPLOAD_DIRECTORY: /uploads
AUTH_TOKEN: "{{ lookup('keepass', 'httpd-api-server-token', 'password') }}"
AUTH_TOKEN: {{ lookup('keepass', 'httpd-api-server-token', 'password') }}
ENABLE_WEBSERVER: false
labels:
com.centurylinklabs.watchtower.enable: true
volumes:
uploads:
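
The quoting change on AUTH_TOKEN recurs throughout this compare. These compose files are Ansible templates, and in YAML a scalar that starts with { is parsed as a flow mapping, so any tool that reads the unrendered file as YAML (a linter, for example) fails on an unquoted Jinja2 expression. A minimal sketch of the two spellings:

    environment:
      # unquoted: the YAML parser treats {{ ... }} as a nested mapping
      # AUTH_TOKEN: {{ lookup('keepass', 'httpd-api-server-token', 'password') }}
      # quoted: parsed as a plain string, rendered later by Ansible
      AUTH_TOKEN: "{{ lookup('keepass', 'httpd-api-server-token', 'password') }}"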


@ -1,33 +1,25 @@
version: '3.3'
services:
postfix:
image: "registry.mgrote.net/postfix:latest"
container_name: mail-relay
restart: always
ports:
- 1025:25
environment:
SMTP_SERVER: smtp.strato.de
SMTP_USERNAME: info@mgrote.net
SMTP_PASSWORD: "{{ lookup('keepass', 'strato_smtp_password', 'password') }}"
SERVER_HOSTNAME: mgrote.net
# DEBUG: "yes" # as string not boolean
ALWAYS_ADD_MISSING_HEADERS: "no" # as string not boolean
# LOG_SUBJECT: "yes" # as string not boolean
INET_PROTOCOL: ipv4
SMTP_GENERIC_MAP: |
/nobody@lldap/ lldap@mgrote.net
/mg@pbs.localdomain/ pbs@mgrote.net
/root@pbs.localdomain/ pbs@mgrote.net
# rewrite FROM "nobody@lldap" to "lldap@mgrote.net"
# /.*/ would replace all sender addresses
networks:
- mail-relay
healthcheck:
test: ["CMD", "sh", "-c", "echo 'EHLO localhost' | nc -w 1 127.0.0.1 25 | grep -q '220 '"]
interval: 30s
timeout: 10s
retries: 3
postfix:
image: registry.mgrote.net/postfix:master
container_name: mail-relay
restart: always
labels:
com.centurylinklabs.watchtower.enable: true
ports:
- 1025:25
environment:
SMTP_SERVER: smtp.strato.de
SMTP_USERNAME: info@mgrote.net
SMTP_PASSWORD: {{ lookup('keepass', 'strato_smtp_password', 'password') }}
SERVER_HOSTNAME: mgrote.net
# DEBUG: "yes" # literal
ALWAYS_ADD_MISSING_HEADERS: "no" # literal
# LOG_SUBJECT: "yes" # literal
INET_PROTOCOL: ipv4
SMTP_GENERIC_MAP: "/.*/ info@mgrote.net"
networks:
- mail-relay
######## Networks ########
networks:
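
For context on SMTP_GENERIC_MAP (values taken from the two sides of this hunk): postfix generic maps are regexp tables with one "/pattern/ replacement" rule per line, matched against sender addresses, so the catch-all /.*/ variant rewrites every sender while the explicit variant only rewrites the listed ones:

    environment:
      # one rule per line: /regex matched against the sender/ replacement
      SMTP_GENERIC_MAP: |
        /nobody@lldap/ lldap@mgrote.net
        /root@pbs.localdomain/ pbs@mgrote.net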


@ -1,18 +1,17 @@
version: '3'
services:
######## Miniflux ########
miniflux:
container_name: "mf-frontend"
image: "ghcr.io/miniflux/miniflux:2.1.3"
container_name: "mf-app"
image: miniflux/miniflux:latest
restart: always
depends_on:
- mf-db16
- db
environment:
DATABASE_URL: "postgres://miniflux:{{ lookup('keepass', 'miniflux_postgres_password', 'password') }}@mf-db16/miniflux?sslmode=disable"
DATABASE_URL: postgres://miniflux:{{ lookup('keepass', 'miniflux_postgres_password', 'password') }}@mf-db/miniflux?sslmode=disable
RUN_MIGRATIONS: 1
# CREATE_ADMIN: 1
# ADMIN_USERNAME: adminmf
# ADMIN_PASSWORD: "{{ lookup('keepass', 'miniflux_admin_password', 'password') }}"
# ADMIN_PASSWORD: {{ lookup('keepass', 'miniflux_admin_password', 'password') }}
WORKER_POOL_SIZE: 10
POLLING_FREQUENCY: 10
CLEANUP_ARCHIVE_UNREAD_DAYS: -1
@ -21,8 +20,6 @@ services:
networks:
- intern
- traefik
healthcheck:
test: ["CMD", "/usr/bin/miniflux", "-healthcheck", "auto"]
labels:
traefik.http.routers.miniflux.rule: Host(`miniflux.mgrote.net`)
traefik.enable: true
@ -31,46 +28,48 @@ services:
traefik.http.routers.miniflux.entrypoints: entry_https
traefik.http.services.miniflux.loadbalancer.server.port: 8080
######## Postgres ########
mf-db16:
container_name: "mf-db16"
image: "postgres:16.3"
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: mf-db
db:
container_name: "mf-db"
image: postgres:13
restart: always
environment:
POSTGRES_USER: miniflux
POSTGRES_PASSWORD: "{{ lookup('keepass', 'miniflux_postgres_password', 'password') }}"
POSTGRES_PASSWORD: {{ lookup('keepass', 'miniflux_postgres_password', 'password') }}
TZ: Europe/Berlin
POSTGRES_HOST_AUTH_METHOD: "md5" # workaround for the 13 -> 16 migration; https://eelkevdbos.medium.com/upgrade-postgresql-with-docker-compose-99d995e464
volumes:
- db16:/var/lib/postgresql/data
- db:/var/lib/postgresql/data
networks:
- intern
healthcheck:
test: ["CMD", "pg_isready", "-U", "miniflux"]
interval: 10s
start_period: 30s
labels:
com.centurylinklabs.watchtower.enable: true
######## Miniflux-Filter ########
mf-filter:
miniflux-filter:
image: tborychowski/miniflux-filter:latest
container_name: mf-filter
depends_on:
- miniflux
restart: always
environment:
TZ: Europe/Berlin
MF_AUTH_TOKEN: "{{ lookup('keepass', 'miniflux_auth_token', 'password') }}"
MF_API_URL: https://miniflux.mgrote.net/v1
MF_SLEEP: 600
#MF_DEBUG: 1
image: "registry.mgrote.net/miniflux-filter:latest"
- TZ=Europe/Berlin
# if not present - there will be no auth
# - ADMIN_PASSWORD=admin1
# ERROR, WARNING, INFO, DEBUG
- LOG_LEVEL=DEBUG
ports:
- "5020:80"
volumes:
- ./filter.txt:/data/filter.txt
- filter:/var/www/html/store
networks:
- intern
labels:
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: miniflux
######## Volumes ########
volumes:
db16:
db:
filter:
######## Networks ########
networks:
traefik:


@ -1,117 +1,97 @@
9to5linux.com::9to5Linux Weekly Roundup:
apnic.net::Podcast
apnic.net::Event Wrap
astralcodexten.substack.com::Open Thread
astralcodexten.substack.com::Book Review Contest
augengeradeaus.net::Sicherheitshalber der Podcast
axios.com::Axios on HBO
axios.com::football
axios.com::PGA
computerbase.de::Adrenalin 2020 Edition
computerbase.de::Adrenalin 2021 Edition
computerbase.de::CB-Funk
computerbase.de::CB-Funk-Podcast
computerbase.de::Community: Fotowettbewerb
computerbase.de::Community-Umfrage
computerbase.de::Community
computerbase.de::Elon Musk
computerbase.de::Fotowettbewerb:
computerbase.de::Fotowettbewerb
computerbase.de::Fussball
computerbase.de::Fußball
computerbase.de::Screenshot-Wettbewerb
computerbase.de::Sonntagsfrage:
computerbase.de::Twitter
computerbase.de::wettbewerb
computerbase.de::WM
computerbase.de::Wochenrück- und Ausblick:
computerbase.de::Xbox Game Pass
computerbase.de::GeForce
facebook.com::Bridge returned error
golem.de::Anzeige
computerbase.de::wettbewerb
golem.de::(g+)
golem.de::Aus dem Verlag:
golem.de::Elon Musk
golem.de::Fifa
golem.de::Fussball
golem.de::Fußball
golem.de::(g+)
golem.de::Golem Karrierewelt
www.thedrive.com::Bunker Talk:
golem.de::in aller Kürze
golem.de::In eigener Sache
golem.de::kurznews
golem.de::Marvel
golem.de::Podcast
golem.de::PODCAST BESSER WISSEN:
golem.de::Podcast
golem.de::Sonst noch was?
golem.de::Star Trek:
golem.de::Tech Talks:
golem.de::Twitter
golem.de::Wochenrückblick
golem.de::in aller Kürze
golem.de::kurznews
hardwareluxx.de::Der Hardwareluxx-Webwatch:
hardwareluxx.de::Die Artikel unserer Partner
hardwareluxx.de::Shopping Club
hardwareluxx.de::KW
hardwareluxx.de::Unsere Artikel der letzten Woche
heise.de::#TGIQF
heise.de::#heiseshow
heise.de::Anzeige
heise.de::Auslegungssache
heise.de::Bit-Rauschen
heise.de::Bit-Rauschen, der Prozessor-Podcast
heise.de::c't <webdev>
heise.de::ct-Webinar:
heise.de::Desinfec
heise.de::Die Bilder der Woche
heise.de::Die Bilder der Woche (KW
heise.de::Die Highlights bei
heise.de::Die Produktwerker
heise.de::Dienstag
heise.de::Dienstag:
heise.de::Die Produktwerker
heise.de::Elon Musk
heise.de::Ferris Talk
heise.de::FIFA
heise.de::Ferris Talk
heise.de::Freitag
heise.de::Fußball-WM
heise.de::heise+
heise.de::heise-Angebot:
heise.de::heise Jobs IT Tag
heise.de::Heise-Konferenz
heise.de::heise meets
heise.de::heise meets…
heise.de::#heiseshow
heise.de::heiseshow
heise.de::Heise spielt
heise.de::iX-Workshop
heise.de::Heise-Konferenz
heise.de::Kurz informiert:
heise.de::Mac & i Extra:
heise.de::Missing Link
heise.de::Mittwoch
heise.de::Montag
heise.de::Die Bilder der Woche
heise.de::Podcast
heise.de::Podcast "Die Hupe"
heise.de::Podcast "Die Produktwerker"
heise.de::samstag
heise.de::SoftwareArchitekTOUR
heise.de::Sonderheft
heise.de::sonntag
heise.de::t 3003
heise.de::TGIQF
heise.de::Tech2go-Podcast:
heise.de::TechStage
heise.de::TechStage |
heise.de::Twitter
heise.de::WM 2022
heise.de::Was war. Was wird.
heise.de::Zugriff auf alle Inhalte von heise+
heise.de::c't <webdev>
heise.de::ct-Webinar:
heise.de::heise Jobs IT Tag
heise.de::heise meets
heise.de::heise meets…
heise.de::heise+
heise.de::heise-Angebot:
heise.de::heiseshow
heise.de::iX-Workshop
heise.de::samstag
heise.de::sonntag
heise.de::t 3003
heise.de::t Fotografie
heise.de::t Fotografie-Wettbewerb
heise.de::#TGIQF
heise.de::TGIQF
heise.de::t uplink
heise.de::Twitter
heise.de::t zockt
heise.de::uplink
heise.de::Was war. Was wird.
heise.de::WM 2022
heise.de::zockt
heise.de::Zugriff auf alle Inhalte von heise+
instagram.com::Bridge returned error
ipspace.net::Built.fm
ipspace.net::Podcast
mdr.de::Schwimm-WM
mdr.de::DSV
mdr.de::#MDRklärt:
mdr.de::Basketball
mdr.de::Volleyball
mdr.de::DFB
mdr.de::DFB-Pokal
mdr.de::Fussball
@ -121,15 +101,13 @@ mdr.de::Leichtathletik:
mdr.de::Link des Audios
mdr.de::Link des Videos
mdr.de::Livestream
mdr.de::#MDRklärt:
mdr.de::Para-WM
mdr.de::Pferdesport:
mdr.de::Podcast:
mdr.de::Podcast "digital Leben"
mdr.de::Podcast "digital Leben":
mdr.de::Podcast:
mdr.de::Podcastserie
mdr.de::Schwimmen:
falseknees.tumblr.com::Kneesvember
monkeyuser.com::AdLitteram
netzpolitik.org::KW
netzpolitik.org::NPP
@ -148,11 +126,10 @@ planet3dnow.de::KiTTY
planet3dnow.de::LibreOffice 7
planet3dnow.de::MC Extractor
planet3dnow.de::Media Player Classic
planet3dnow.de::NVCleanstall v
planet3dnow.de::Neue Downloads der KW
planet3dnow.de::Notepad++
planet3dnow.de::NVCleanstall v
planet3dnow.de::Nvidia GeForce-Treiber
planet3dnow.de::paint.net
planet3dnow.de::PowerToys v
planet3dnow.de::Prime95
planet3dnow.de::Process Lasso
@ -168,16 +145,355 @@ planet3dnow.de::Universal Media Server
planet3dnow.de::WinRAR
planet3dnow.de::WinSCP
planet3dnow.de::ZenTimings
planet3dnow.de::paint.net
portuguesegeese.com::portuguesegeese.com
reddit.com::UEFA
stackoverflow.blog::Podcast
stackoverflow.blog::The Overflow
stadt-bremerhaven.de::(Werbung)
stadt-bremerhaven.de::Basketball-WM
stadt-bremerhaven.de::Bundesliga
stadt-bremerhaven.de::Cloud-Gaming-Rückblick
stadt-bremerhaven.de::DAZN
stadt-bremerhaven.de::Disney+
stadt-bremerhaven.de::Eishockey
stadt-bremerhaven.de::Elon Musk
stadt-bremerhaven.de::FIFA
stadt-bremerhaven.de::FUSSBALL
stadt-bremerhaven.de::Formel 1
stadt-bremerhaven.de::Immer wieder sonntags KW
stadt-bremerhaven.de::MagentaSport
stadt-bremerhaven.de::Podcast
stadt-bremerhaven.de::Rückblick
stadt-bremerhaven.de::Sky Ticket
stadt-bremerhaven.de::Twitter
stadt-bremerhaven.de::WM 2022
stadt-bremerhaven.de::eFootball
tagesschau.de::11KM
tagesschau.de::11KM-Podcast
tagesschau.de::Achtelfinale
tagesschau.de::Alpine-Super-Kombination:
tagesschau.de::American Football:
tagesschau.de::Auslandspodcast
tagesschau.de::BVB
tagesschau.de::Bahnrad
tagesschau.de::Basketball
tagesschau.de::Bayern München
@ -186,19 +502,18 @@ tagesschau.de::Boateng
tagesschau.de::Bremer SV
tagesschau.de::Bundesliga
tagesschau.de::Bundesliga:
tagesschau.de::BVB
tagesschau.de::Carlsen
tagesschau.de::Champions League
tagesschau.de::Darts-
tagesschau.de::Darts-WM:
tagesschau.de::DFB
tagesschau.de::DFB-Bundesgericht
tagesschau.de::Darts-
tagesschau.de::Darts-WM:
tagesschau.de::Dressurreit
tagesschau.de::Eintracht
tagesschau.de::Eishockey:
tagesschau.de::Eishockey-WM:
tagesschau.de::EM:
tagesschau.de::ESC-Finale
tagesschau.de::Eintracht
tagesschau.de::Eishockey-WM:
tagesschau.de::Eishockey:
tagesschau.de::European Championships
tagesschau.de::Eurovision Song Contest
tagesschau.de::Fashion Week
@ -211,8 +526,8 @@ tagesschau.de::Fußball-Bundesliga
tagesschau.de::Fußball-EM
tagesschau.de::Gladbach
tagesschau.de::Halbfinale
tagesschau.de::Handball:
tagesschau.de::Handball-EM:
tagesschau.de::Handball:
tagesschau.de::Hertha BSC
tagesschau.de::Hockey
tagesschau.de::Hoffenheim
@ -234,23 +549,23 @@ tagesschau.de::Neymar
tagesschau.de::Nordische Kombination:
tagesschau.de::Olympia-
tagesschau.de::Olympia:
tagesschau.de::Olympischen Winterspiele:
tagesschau.de::Olympische Winterspiele:
tagesschau.de::Olympischen Winterspiele:
tagesschau.de::Paralympics
tagesschau.de::Podcast
tagesschau.de::Podcast 11KM
tagesschau.de::Profisport
tagesschau.de::RKI meldet
tagesschau.de::Remis
tagesschau.de::Riesenslalom:
tagesschau.de::RKI meldet
tagesschau.de::Rodel-
tagesschau.de::Schach-WM:
tagesschau.de::Schalke
tagesschau.de::Schwimm-EM
tagesschau.de::Schwimm-WM:
tagesschau.de::Ski Alpin:
tagesschau.de::Skispring
tagesschau.de::Ski-WM
tagesschau.de::Skispring
tagesschau.de::Sondersendung:
tagesschau.de::Springreiter
tagesschau.de::Sprintstaffel
@ -265,25 +580,20 @@ tagesschau.de::Viererbob
tagesschau.de::Vierschanzentournee
tagesschau.de::Viertelfinale
tagesschau.de::Volleyball-WM
tagesschau.de::WM-Auftakt
tagesschau.de::WM-Finale:
tagesschau.de::WM-Gold
tagesschau.de::WM-Qualifikation
tagesschau.de::WM-Viertelfinale
tagesschau.de::Wasserspringen:
tagesschau.de::Weitsprung
tagesschau.de::Weltcup
tagesschau.de::Weltcup-Sieg
tagesschau.de::Weltmeister
tagesschau.de::Werders
tagesschau.de::WM-Auftakt
tagesschau.de::WM-Finale:
tagesschau.de::WM-Gold
tagesschau.de::WM-Qualifikation
tagesschau.de::WM-Viertelfinale
tagesschau.de::Zukunftspodcast
tagesschau.de::Zweierbob:
theguardian.com::Guardiola
theguardian.com::Manchester United
theycantalk.com::Tinyview
toonhole.com::Bernai
www.army-technology.com::who are the leaders
www.army-technology.com::files patent
www.army-technology.com::sees highest patent filings
www.army-technology.com::theme innovation strategy
www.army-technology.com::gets grant

File diff suppressed because it is too large


@ -0,0 +1,122 @@
221 Mr. Lovenstein
365 Otterly Human
344 Recent Writing - Mark Story
285 The New Stack
208 CommitStrip
346 ADHDinos
325 NetworkProfile.org
272 Richtersicht
287 top scoring links : SCPDeclassified
302 1lumen.com
317 Anonleaks
357 apenwarr
321 Astral Codex Ten
304 Austin's Nerdy Things
243 Axios
244 bit-booster
382 Blog on Dalton Durst
326 bundeswehr-journal
375 Burg Theater - Aktuelles
195 But a Jape
380 but she's a girl...
245 Cabin Porn
206 Channelate
322 Chefkoch.de: Rezepte des Tages
247 Chris Custom Works
248 Colossal
209 Danby Draws Comics
315 Dave's Tech Reviews
249 DevOps Dudes - Medium
250 Die Gewissensfrage - SZ-Magazin
376 Discord Blog
300 Dwarf Fortress Development Log
251 EleFacts
309 Everyday Commentary
253 Experiencing Technology
360 Explosm.net
343 extrafabulouscomics
213 Extra Ordinary
354 False Knees
254 Fefes Blog
255 Finanztip.de
390 Freifall Xpress
371 GoodBearComics©2021
369 Graphography
303 Grizzly's Reviews
215 Invisible Bread
368 IPng Networks
389 irq5.io
216 Jake Likes Onions
348 Janes news RSS
308 Jorge Israel Peña
257 Josef Adamčík
217 Joy of Tech (RSS Feed)
258 Keyboard Builders' Digest
386 Knot Handcrafted Knives
336 kofler.info
335 Krebs on Security
260 Last Place Comics
297 Last Place Comics
261 law blog
262 Lexikaliker
263 lxkr
219 Mandatory Roller Coaster
264 Masterzen&#39;s Blog
352 Michael Stapelbergs Website
265 MikroTik blog
220 Moonbeard
333 Munin Plugin Gallery
222 Nedroid Picture Diary 2
268 Netflix TechBlog - Medium
269 Netzfrequenz.info
318 News Desk: Breaking News, Reporting, and Political Analysis
270 Outdoormesser.net
345 Pid Eins
363 Pizza Cake Comics
223 Poorly Drawn Lines
PortugueseGeese
305 Posteo.de - Aktuelles
307 Posts on YetiOps
381 Posts on λ ryan. himmelwright. net
329 Recent Commits to contrib:master
331 Recent Commits to xdripswift:master
353 Release notes from sanoid
330 Release notes from xdripswift
340 Rühlemann's Blog
385 Sarah's Scribbles
196 Saturday Morning Breakfast Cereal
273 scinexx | Das Wissensmagazin
274 ServeTheHome
275 Shady Characters
276 SPARTANAT
277 splitkb.com Blog - Everything split keyboard.
294 Stack Overflow Blog
378 Stadtmagazin DATEs
278 stallmer
197 sticky comics
279 Stories by Dane Bulat on Medium
280 Stories by Erez Zukerman on Medium
355 Stories by Pinterest Engineering on Medium
296 Switch And Click
281 Techniktagebuch
282 The Bartleby
301 The GitHub Blog
370 The Jenkins
388 The Life of Kenneth
198 The Perry Bible Fellowship
393 The War Zone | The Drive
199 they can talk
364 Things in Squares
286 Thomas Baart
295 Toonhole
310 Updates from the Tor Project
202 VectorBelly
293 War Is Boring
358 Weberblog.net
383 We're Out of Cornflakes
290 Wikipedia - Beobachtungsliste [de]
299 Windows Command Line
203 Wumo
204 xkcd
291 Zak Reviews
292 ZeroAir Reviews


@ -1,43 +0,0 @@
version: '3'
services:
munin:
container_name: "munin-master"
image: registry.mgrote.net/munin-server:latest
restart: always
environment:
MAILCONTACT: michael.grote@posteo.de
MAILSERVER: mail-relay
MAILPORT: 25
MAILFROM: munin@mgrote.net
MAILUSER: munin@mgrote.net
MAILNAME: Munin
MAILDOMAIN: mgrote.net
TZ: Europe/Berlin
CRONDELAY: 5
NODES: |
fileserver3.mgrote.net:fileserver3.mgrote.net
ansible2.mgrote.net:ansible2.mgrote.net
pve5.mgrote.net:pve5.mgrote.net
forgejo.mgrote.net:forgejo.mgrote.net
docker10.mgrote.net:docker10.mgrote.net
pbs.mgrote.net:pbs.mgrote.net
blocky.mgrote.net:blocky.mgrote.net
ldap.mgrote.net:ldap.mgrote.net
# e.g.
# computer-test.mgrote.net.test:192.68.2.4
# computer.mgrote.net:computer.mgrote.net
volumes:
- db:/var/lib/munin
- logs:/var/log/munin
- cache:/var/cache/munin
ports:
- 1234:80
volumes:
db:
logs:
cache:
networks:
mail-relay:
external: true


@ -3,7 +3,7 @@ services:
######## navidrome-mg ########
navidrome-mg:
container_name: "navidrome-mg"
image: "deluan/navidrome:0.52.5"
image: deluan/navidrome:latest
restart: always
environment:
ND_LOGLEVEL: info
@ -35,6 +35,8 @@ services:
traefik.http.routers.navidrome-mg.tls.certresolver: resolver_letsencrypt
traefik.http.routers.navidrome-mg.entrypoints: entry_https
traefik.http.services.navidrome-mg.loadbalancer.server.port: 4533
com.centurylinklabs.watchtower.enable: true
ports:
- "4533:4533"


@ -2,7 +2,7 @@ version: '3.3'
services:
######## Database ########
nextcloud-db:
image: "mariadb:11.3.2"
image: mariadb:10
container_name: nextcloud-db
command: --transaction-isolation=READ-COMMITTED --log-bin=ROW --innodb_read_only_compressed=OFF
restart: unless-stopped
@ -11,75 +11,59 @@ services:
- /etc/timezone:/etc/timezone:ro
- db:/var/lib/mysql
environment:
MYSQL_ROOT_PASSWORD: "{{ lookup('keepass', 'nextcloud_mysql_root_password', 'password') }}"
MYSQL_PASSWORD: "{{ lookup('keepass', 'nextcloud_mysql_password', 'password') }}"
MYSQL_ROOT_PASSWORD: {{ lookup('keepass', 'nextcloud_mysql_root_password', 'password') }}
MYSQL_PASSWORD: {{ lookup('keepass', 'nextcloud_mysql_password', 'password') }}
MYSQL_DATABASE: nextcloud
MYSQL_USER: nextcloud
MYSQL_INITDB_SKIP_TZINFO: 1
networks:
- intern
healthcheck:
interval: 30s
retries: 3
test:
[
"CMD",
"healthcheck.sh",
"--su-mysql",
"--connect"
]
timeout: 30s
# Error
## [ERROR] Incorrect definition of table mysql.column_stats: expected column 'histogram' at position 10 to have type longblob, found type varbinary(255).
## [ERROR] Incorrect definition of table mysql.column_stats: expected column 'hist_type' at position 9 to have type enum('SINGLE_PREC_HB','DOUBLE_PREC_HB','JSON_HB'), found type enum('SINGLE_PREC_HB','DOUBLE_PREC_HB').
# Fix
## docker exec nextcloud-db mysql nextcloud -p<MySQL-Root-Pw> -e "ALTER TABLE mysql.column_stats MODIFY histogram longblob;"
## docker exec nextcloud-db mysql nextcloud -p<MySQL-Root-Pw> -e "ALTER TABLE mysql.column_stats MODIFY hist_type enum('SINGLE_PREC_HB','DOUBLE_PREC_HB','JSON_HB');"
labels:
com.centurylinklabs.watchtower.enable: true
######## Redis ########
nextcloud-redis:
image: "redis:7.2.4"
image: redis:7-alpine
container_name: nextcloud-redis
hostname: nextcloud-redis
networks:
- intern
restart: unless-stopped
command: "redis-server --requirepass {{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}"
healthcheck:
test: ["CMD", "redis-cli", "--pass", "{{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}", "--no-auth-warning", "ping"]
interval: 5s
timeout: 2s
retries: 3
command: redis-server --requirepass {{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}
labels:
com.centurylinklabs.watchtower.enable: true
######## cron ########
nextcloud-cron:
container_name: nextcloud-cron
image: "registry.mgrote.net/nextcloud-cronjob:latest"
image: registry.mgrote.net/nextcloud-cronjob:master
restart: unless-stopped
network_mode: none
depends_on:
- nextcloud-app
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- /etc/localtime:/etc/localtime:ro
environment:
NEXTCLOUD_CONTAINER_NAME: nextcloud-app
NEXTCLOUD_CRON_MINUTE_INTERVAL: 1
labels:
com.centurylinklabs.watchtower.enable: true
######## Nextcloud ########
nextcloud-app:
image: "nextcloud:29.0.0"
image: nextcloud:27
container_name: nextcloud-app
restart: unless-stopped
depends_on:
- nextcloud-db
- nextcloud-redis
- nextcloud-cron
environment:
REDIS_HOST: nextcloud-redis
REDIS_HOST_PASSWORD: "{{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}"
REDIS_HOST_PASSWORD: {{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}
MYSQL_DATABASE: nextcloud
MYSQL_USER: nextcloud
MYSQL_PASSWORD: "{{ lookup('keepass', 'nextcloud_mysql_password', 'password') }}"
MYSQL_PASSWORD: {{ lookup('keepass', 'nextcloud_mysql_password', 'password') }}
MYSQL_HOST: nextcloud-db
NEXTCLOUD_TRUSTED_DOMAINS: "nextcloud.mgrote.net"
SMTP_HOST: mail-relay
@ -87,15 +71,12 @@ services:
SMTP_PORT: 25
#SMTP_AUTHTYPE: LOGIN
SMTP_NAME: info@mgrote.net
#SMTP_PASSWORD: "{{ lookup('keepass', 'strato_smtp_password', 'password') }}"
#SMTP_PASSWORD: {{ lookup('keepass', 'strato_smtp_password', 'password') }}
MAIL_FROM_ADDRESS: info@mgrote.net
PHP_MEMORY_LIMIT: 1024M
PHP_UPLOAD_LIMIT: 10G
APACHE_DISABLE_REWRITE_IP: 1
TRUSTED_PROXIES: "192.168.48.0/24" # subnet where traefik lives
NEXTCLOUD_UPLOAD_LIMIT: 10G
NEXTCLOUD_MAX_TIME: 3600
APACHE_BODY_LIMIT: 0 # unlimited, https://github.com/nextcloud/docker/issues/1796
volumes:
- app:/var/www/html
- data:/var/www/html/data
@ -103,12 +84,10 @@ services:
- intern
- traefik
- mail-relay
healthcheck:
test: ["CMD", "curl", "-f", "--insecure", "http://localhost:80"]
interval: 30s
timeout: 10s
retries: 3
labels:
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: nextcloud-redis,nextcloud-db
traefik.http.routers.nextcloud.rule: Host(`nextcloud.mgrote.net`)
traefik.enable: true
traefik.http.routers.nextcloud.tls: true
@ -143,6 +122,3 @@ volumes:
######## Docs ########
# phone region
# docker exec --user www-data nextcloud-app php occ config:system:set default_phone_region --value="DE"
# https://help.nextcloud.com/t/nextcloud-wont-load-any-mixed-content/13565/3
# docker exec --user www-data nextcloud-app php occ config:system:set overwriteprotocol --value="https"
# docker exec --user www-data nextcloud-app php occ config:system:set overwrite.cli.url --value="http://nextcloud.mgrote.net"


@ -0,0 +1,88 @@
version: '3.5'
# ------------------------------------------------------------------
# DOCKER COMPOSE COMMAND REFERENCE
# ------------------------------------------------------------------
# Start | docker-compose up -d
# Stop | docker-compose stop
# Update | docker-compose pull
# Logs | docker-compose logs --tail=25 -f
# Terminal | docker-compose exec photoprism bash
# Help | docker-compose exec photoprism photoprism help
# Config | docker-compose exec photoprism photoprism config
# Reset | docker-compose exec photoprism photoprism reset
# Backup | docker-compose exec photoprism photoprism backup -a -i
# Restore | docker-compose exec photoprism photoprism restore -a -i
# Index | docker-compose exec photoprism photoprism index
# Reindex | docker-compose exec photoprism photoprism index -a
# Import | docker-compose exec photoprism photoprism import
# -------------------------------------------------------------------
services:
photoprism:
# Use photoprism/photoprism:preview instead for testing preview builds:
image: photoprism/photoprism:latest
container_name: photoprism-frontend
restart: always
security_opt:
- seccomp:unconfined
- apparmor:unconfined
ports:
- 2342:2342
environment:
PHOTOPRISM_ADMIN_PASSWORD: "{{ lookup('keepass', 'photoprism_admin_password', 'password') }}"
PHOTOPRISM_HTTP_PORT: 2342
PHOTOPRISM_HTTP_COMPRESSION: "gzip" # none or gzip
PHOTOPRISM_DEBUG: "false"
PHOTOPRISM_PUBLIC: "false" # No authentication required (disables password protection)
PHOTOPRISM_READONLY: "true" # Don't modify originals directory (reduced functionality)
PHOTOPRISM_EXPERIMENTAL: "false"
PHOTOPRISM_DISABLE_WEBDAV: "true"
PHOTOPRISM_DISABLE_SETTINGS: "false"
PHOTOPRISM_DISABLE_TENSORFLOW: "false"
PHOTOPRISM_DARKTABLE_PRESETS: "false"
PHOTOPRISM_DETECT_NSFW: "true"
PHOTOPRISM_UPLOAD_NSFW: "true"
PHOTOPRISM_DATABASE_DRIVER: "mysql"
PHOTOPRISM_DATABASE_SERVER: "mariadb:3306"
PHOTOPRISM_DATABASE_NAME: "photoprism"
PHOTOPRISM_DATABASE_USER: "photoprism"
PHOTOPRISM_DATABASE_PASSWORD: "{{ lookup('keepass', 'photoprism_database_password', 'password') }}"
PHOTOPRISM_SITE_URL: "http://docker10.grote.lan:2342/"
PHOTOPRISM_SITE_TITLE: "PhotoPrism"
PHOTOPRISM_SITE_CAPTION: "Browse Your Life"
PHOTOPRISM_SITE_DESCRIPTION: ""
PHOTOPRISM_SITE_AUTHOR: "mgrote"
# You may optionally set a user / group id using environment variables if your Docker version or NAS does not
# support this natively (see next example):
UID: 5000
GID: 5000
# UMASK: 0000
# Uncomment and edit the following line to set a specific user / group id (native):
user: "5000:5000"
volumes:
- /mnt/fileserver3_photoprism_bilder_ro:/photoprism/originals/:ro
- "storage:/photoprism/storage"
labels:
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: photoprism-db
mariadb:
image: mariadb:10
container_name: photoprism-db
restart: always
security_opt:
- seccomp:unconfined
- apparmor:unconfined
command: mysqld --transaction-isolation=READ-COMMITTED --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci --max-connections=512 --innodb-rollback-on-timeout=OFF --innodb-lock-wait-timeout=50
volumes: # Don't remove permanent storage for index database files!
- "database:/var/lib/mysql"
environment:
MYSQL_ROOT_PASSWORD: {{ lookup('keepass', 'photoprism_mysql_root_password', 'password') }}
MYSQL_DATABASE: photoprism
MYSQL_USER: photoprism
MYSQL_PASSWORD: {{ lookup('keepass', 'photoprism_database_password', 'password') }}
labels:
com.centurylinklabs.watchtower.enable: true
volumes:
storage:
database:


@ -3,7 +3,7 @@ services:
oci-registry:
restart: always
container_name: oci-registry
image: "registry:2.8.3"
image: registry:2
volumes:
- oci:/var/lib/registry
- ./htpasswd:/auth/htpasswd
@ -11,25 +11,15 @@ services:
- traefik
- intern
depends_on:
- oci-registry-ui
- oci-registry-redis
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:5000/v2/"]
interval: 30s
timeout: 10s
retries: 3
environment:
TZ: Europe/Berlin
REGISTRY_AUTH: none
REGISTRY_REDIS_ADDR: oci-registry-redis:6379
REGISTRY_REDIS_PASSWORD: "{{ lookup('keepass', 'oci-registry-redis-pw', 'password') }}"
REGISTRY_REDIS_PASSWORD: {{ lookup('keepass', 'oci-registry-redis-pw', 'password') }}
REGISTRY_STORAGE_DELETE_ENABLED: true
REGISTRY_CATALOG_MAXENTRIES: 100000 # https://github.com/Joxit/docker-registry-ui/issues/306
# https://joxit.dev/docker-registry-ui/#using-cors
REGISTRY_HTTP_HEADERS_Access-Control-Allow-Origin: '[https://registry.mgrote.net/ui/]'
REGISTRY_HTTP_HEADERS_Access-Control-Allow-Methods: '[HEAD,GET,OPTIONS,DELETE]'
REGISTRY_HTTP_HEADERS_Access-Control-Allow-Credentials: '[true]'
REGISTRY_HTTP_HEADERS_Access-Control-Allow-Headers: '[Authorization,Accept,Cache-Control]'
REGISTRY_HTTP_HEADERS_Access-Control-Expose-Headers: '[Docker-Content-Digest]'
labels:
traefik.http.routers.registry.rule: Host(`registry.mgrote.net`)
traefik.enable: true
@ -38,10 +28,13 @@ services:
traefik.http.routers.registry.entrypoints: entry_https
traefik.http.services.registry.loadbalancer.server.port: 5000
traefik.http.routers.registry.middlewares: registry-ipallowlist
traefik.http.routers.registry.middlewares: registry-ipwhitelist
traefik.http.middlewares.registry-ipallowlist.ipallowlist.sourcerange: 192.168.2.0/24,10.25.25.0/24,192.168.48.0/24,172.18.0.0/16 # .48. is Docker
traefik.http.middlewares.registry-ipallowlist.ipallowlist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipallowlist/#ipstrategydepth
traefik.http.middlewares.registry-ipwhitelist.ipwhitelist.sourcerange: 192.168.2.0/24,10.25.25.0/24,192.168.48.0/24,172.18.0.0/16 # .48. is Docker
traefik.http.middlewares.registry-ipwhitelist.ipwhitelist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipwhitelist/#ipstrategydepth
com.centurylinklabs.watchtower.depends-on: oci-registry-redis
com.centurylinklabs.watchtower.enable: true
# clean up the registry: docker exec -it oci-registry /bin/registry garbage-collect --delete-untagged=true /etc/docker/registry/config.yml
@ -52,24 +45,21 @@ services:
# docker pull registry.mgrote.net/myfirstimage
oci-registry-redis:
image: "redis:7.2.4"
image: redis:7
container_name: oci-registry-redis
networks:
- intern
restart: always
environment:
REDIS_PASSWORD: "{{ lookup('keepass', 'oci-registry-redis-pw', 'password') }}"
REDIS_PASSWORD: {{ lookup('keepass', 'oci-registry-redis-pw', 'password') }}
MAXMEMORY POLICY: allkeys-lru
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
timeout: 10s
retries: 3
labels:
com.centurylinklabs.watchtower.enable: true
oci-registry-ui:
restart: always
# url: registry.mgrote.net/ui/index.html
image: "joxit/docker-registry-ui:2.5.7"
image: joxit/docker-registry-ui:latest
container_name: oci-registry-ui
environment:
DELETE_IMAGES: true
@ -77,20 +67,12 @@ services:
NGINX_PROXY_PASS_URL: http://oci-registry:5000
SHOW_CONTENT_DIGEST: true # https://github.com/Joxit/docker-registry-ui/issues/297
SHOW_CATALOG_NB_TAGS: true
PULL_URL: registry.mgrote.net
depends_on:
- oci-registry
networks:
- traefik
- intern
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://127.0.0.1"]
interval: 30s
timeout: 10s
retries: 3
labels:
traefik.http.routers.registry-ui.rule: Host(`registry.mgrote.net`)&&PathPrefix(`/ui`) # expose under /ui; this adds the prefix to the path, but the application itself does not listen there
traefik.http.routers.registry-ui.middlewares: registry-ui-strip-prefix,registry-ui-ipallowlist # so strip the prefix again afterwards
traefik.http.routers.registry-ui.middlewares: registry-ui-strip-prefix,registry-ui-ipwhitelist # so strip the prefix again afterwards
traefik.http.middlewares.registry-ui-strip-prefix.stripprefix.prefixes: /ui # the middleware is defined here
traefik.enable: true
traefik.http.routers.registry-ui.tls: true
@ -98,8 +80,13 @@ services:
traefik.http.routers.registry-ui.entrypoints: entry_https
traefik.http.services.registry-ui.loadbalancer.server.port: 80
traefik.http.middlewares.registry-ui-ipallowlist.ipallowlist.sourcerange: 192.168.2.0/24,10.25.25.0/24 # .48. is Docker
traefik.http.middlewares.registry-ui-ipallowlist.ipallowlist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipallowlist/#ipstrategydepth
traefik.http.middlewares.registry-ui-ipwhitelist.ipwhitelist.sourcerange: 192.168.2.0/24,10.25.25.0/24 # .48. is Docker
traefik.http.middlewares.registry-ui-ipwhitelist.ipwhitelist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipwhitelist/#ipstrategydepth
com.centurylinklabs.watchtower.depends-on: oci-registry-redis,oci-registry
com.centurylinklabs.watchtower.enable: true
######## Networks ########
networks:
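
The whitelist/allowlist churn in this file tracks Traefik's v2-to-v3 rename of the ipWhiteList middleware to ipAllowList; the sourcerange semantics are unchanged, only the key names move. A sketch using one of the ranges above:

    labels:
      traefik.http.routers.registry.middlewares: registry-ipallowlist
      # v2 spelling of the same rule: ...ipwhitelist.ipwhitelist.sourcerange
      traefik.http.middlewares.registry-ipallowlist.ipallowlist.sourcerange: 192.168.2.0/24,10.25.25.0/24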


@ -3,7 +3,7 @@ services:
routeros-config-export:
container_name: routeros-config-export
restart: always
image: "registry.mgrote.net/routeros-config-export:latest"
image: registry.mgrote.net/oxidized-selfmade:check # change back!
volumes:
- ./key_rb5009:/key_rb5009:ro
- ./key_hex:/key_hex:ro
@ -11,11 +11,11 @@ services:
- ./deploy_token:/deploy_token:ro
environment:
DEVICES: |-
rb5009.mgrote.net,routeros-config-backup,/key_rb5009
hex.mgrote.net,routeros-config-backup,/key_hex
crs305.mgrote.net,routeros-config-backup,/key_crs305
rb5009.grote.lan,routeros-config-backup,/key_rb5009
hex.grote.lan,routeros-config-backup,/key_hex
crs305.grote.lan,routeros-config-backup,/key_crs305
GIT_REPO_BRANCH: "master"
GIT_REPO_URL: "ssh://gitea@forgejo.mgrote.net:2222/mg/routeros-configs.git"
GIT_REPO_URL: "ssh://gitea@gitea.grote.lan:2222/mg/routeros-configs.git"
GIT_REPO_DEPLOY_KEY: "/deploy_token"
GIT_USERNAME: oxidized-selfmade
GIT_USER_MAIL: michael.grote@posteo.de


@ -0,0 +1,27 @@
version: '2.3'
services:
statping:
container_name: statping
image: adamboutcher/statping-ng:latest
restart: always
volumes:
- statping_data:/app
environment:
DB_CONN: sqlite
ALLOW_REPORT: false
ADMIN_USER: statadmin
ADMIN_PASSWORD: {{ lookup('keepass', 'statping_admin_password', 'password') }}
SAMPLE_DATA: false
ports:
- 8083:8080
networks:
- mail-relay
labels:
com.centurylinklabs.watchtower.enable: true
volumes:
statping_data:
networks:
mail-relay:
external: true


@ -3,7 +3,7 @@ services:
######## traefik ########
traefik:
container_name: traefik
image: "traefik:v3.0.0"
image: traefik:latest
restart: always
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
@ -19,19 +19,16 @@ services:
- "2222:2222" # SSH
environment:
TZ: Europe/Berlin
healthcheck:
test: ["CMD", "traefik", "healthcheck", "--ping"]
interval: 30s
timeout: 10s
retries: 3
labels:
com.centurylinklabs.watchtower.enable: true
######## nforwardauth ########
nforwardauth:
restart: always
image: "nosduco/nforwardauth:v1.4.0"
image: nosduco/nforwardauth:v1
container_name: traefik-nforwardauth
environment:
TOKEN_SECRET: "{{ lookup('keepass', 'nforwardauth_token_secret', 'password') }}"
TOKEN_SECRET: {{ lookup('keepass', 'nforwardauth_token_secret', 'password') }}
AUTH_HOST: auth.mgrote.net
labels:
traefik.enable: true
@ -43,15 +40,13 @@ services:
traefik.http.routers.nforwardauth.tls: true
traefik.http.routers.nforwardauth.tls.certresolver: resolver_letsencrypt
traefik.http.routers.nforwardauth.entrypoints: entry_https
com.centurylinklabs.watchtower.depends-on: traefik
com.centurylinklabs.watchtower.enable: true
volumes:
- "./passwd:/passwd:ro" # Mount local passwd file at /passwd as read only
networks:
- traefik
healthcheck:
test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://127.0.0.1:3000/login"]
interval: 30s
timeout: 10s
retries: 3
######## Networks ########
networks:


@ -14,4 +14,4 @@ http:
service_gitea:
loadBalancer:
servers:
- url: "http://forgejo.mgrote.net:3000/"
- url: "http://gitea.grote.lan:3000/"


@ -37,8 +37,6 @@ api:
insecure: true
dashboard: true # reachable on port 8081
ping: {} # for the healthcheck
#experimental:
# plugins:
# ldapAuth:


@ -2,14 +2,14 @@
version: "2.1"
services:
unifi-network-application:
image: "lscr.io/linuxserver/unifi-network-application:8.0.28-ls27"
image: lscr.io/linuxserver/unifi-network-application:latest
container_name: unifi-network-application
environment:
PUID: 1000
PGID: 1000
TZ: Etc/UTC
MONGO_USER: unifiuser
MONGO_PASS: "{{ lookup('keepass', 'unifi-mongodb-pass', 'password') }}"
MONGO_PASS: {{ lookup('keepass', 'unifi-mongodb-pass', 'password') }}
MONGO_HOST: unifi-db
MONGO_PORT: 27017
MONGO_DBNAME: unifidb
@ -28,37 +28,28 @@ services:
- 6789:6789 #optional
- 5514:5514/udp #optional
restart: always
labels:
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: unifi-db
networks:
- mail-relay
- unifi-internal
healthcheck:
test: ["CMD", "curl", "-f", "--insecure", "https://localhost:8443"]
interval: 30s
timeout: 10s
retries: 3
depends_on:
- unifi-db
unifi-db:
# start the container WITHOUT the init script
# inside the container:
# 1. mongosh
# inside the container:
# 1. mongo
# 2. db.getSiblingDB("unifidb").createUser({user: "unifiuser", pwd: "GEHEIM", roles: [{role: "dbOwner", db: "unifidb"}, {role: "dbOwner", db: "unifidb_stat"}]});
# https://discourse.linuxserver.io/t/cant-connect-to-mongodb-for-unifi-network-application/8166
image: "docker.io/mongo:7.0.9"
image: docker.io/mongo:4
container_name: unifi-db
volumes:
- db-data:/data/db
restart: always
environment:
MARIADB_AUTO_UPGRADE: "1"
labels:
com.centurylinklabs.watchtower.enable: true
networks:
- unifi-internal
healthcheck:
test: ["CMD", "mongosh", "--eval", "db.stats().ok"]
interval: 30s
timeout: 10s
retries: 3
######## Volumes ########
volumes:
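
The mongosh/mongo difference in the init comments above follows the pinned image: the legacy mongo shell was removed in MongoDB 6.0, so against the mongo:7 image the user has to be created via mongosh (invocation assumed; the createUser call itself is from the diff):

    # docker exec -it unifi-db mongosh
    # > db.getSiblingDB("unifidb").createUser({user: "unifiuser", pwd: "GEHEIM", roles: [{role: "dbOwner", db: "unifidb"}, {role: "dbOwner", db: "unifidb_stat"}]});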


@ -0,0 +1,42 @@
version: "3"
services:
watchtower:
restart: always
container_name: watchtower
image: containrrr/watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
TZ: Europe/Berlin
WATCHTOWER_CLEANUP: true
WATCHTOWER_INCLUDE_RESTARTING: true
WATCHTOWER_INCLUDE_STOPPED: true
WATCHTOWER_REVIVE_STOPPED: false
WATCHTOWER_SCHEDULE: "0 20 3 * * *" # every day at 03:20
WATCHTOWER_LABEL_ENABLE: true
WATCHTOWER_NOTIFICATIONS: email
WATCHTOWER_NOTIFICATION_EMAIL_FROM: info@mgrote.net
WATCHTOWER_NOTIFICATION_EMAIL_TO: info@mgrote.net
WATCHTOWER_NOTIFICATION_EMAIL_SERVER: mail-relay # "container_name" of the relay
# WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PORT: 25 # not needed, kept only as a reference
# WATCHTOWER_NOTIFICATION_EMAIL_SERVER_USER: "" # not needed, kept only as a reference
# WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PASSWORD: "" # not needed, kept only as a reference
WATCHTOWER_NOTIFICATION_EMAIL_DELAY: 2
WATCHTOWER_NO_STARTUP_MESSAGE: true
labels:
com.centurylinklabs.watchtower.enable: true
networks:
- mail-relay # attach the external network to the container
# only monitor this container:
# labels:
# com.centurylinklabs.watchtower.monitor-only: true
# this container depends on x:
# com.centurylinklabs.watchtower.depends-on: mf-db
# update this container:
# com.centurylinklabs.watchtower.enable: true
######## Networks ########
networks:
mail-relay: # so the mail-relay can be reached from the other container
external: true
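
Because WATCHTOWER_LABEL_ENABLE is set, watchtower only updates containers that opt in via label, which is why com.centurylinklabs.watchtower.enable appears on nearly every service in this compare. A minimal sketch (service names hypothetical):

    services:
      some-app:
        image: registry.mgrote.net/example:latest
        labels:
          com.centurylinklabs.watchtower.enable: true         # opt in to updates
          com.centurylinklabs.watchtower.depends-on: some-db  # restart together with its DB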


@ -2,7 +2,7 @@ version: '3'
services:
wiki-webserver:
container_name: wiki-webserver
image: "registry.mgrote.net/httpd:latest"
image: httpd:2.4
restart: always
networks:
- traefik
@ -13,11 +13,6 @@ services:
# /docker/wiki/site is a local directory on docker10
# this directory is mounted directly by the wiki CI
# and the data is written into it
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
labels:
traefik.http.routers.wiki.rule: Host(`wiki.mgrote.net`)
traefik.enable: true
@ -28,6 +23,8 @@ services:
traefik.http.routers.wiki.middlewares: nforwardauth
com.centurylinklabs.watchtower.enable: true
######## Networks ########
networks:
traefik:


@ -5,7 +5,7 @@ services:
woodpecker-server:
restart: always
container_name: woodpecker-server
image: "woodpeckerci/woodpecker-server:v2.4.1"
image: woodpeckerci/woodpecker-server:v2.0
ports:
- 8000:8000
volumes:
@ -13,12 +13,12 @@ services:
environment:
WOODPECKER_OPEN: false
WOODPECKER_HOST: https://ci.mgrote.net
WOODPECKER_WEBHOOK_HOST: http://docker10.mgrote.net:8000
WOODPECKER_WEBHOOK_HOST: http://docker10.grote.lan:8000
WOODPECKER_GITEA: true
WOODPECKER_GITEA_URL: https://git.mgrote.net
WOODPECKER_GITEA_CLIENT: "{{ lookup('keepass', 'woodpecker-oauth2-client-id', 'password') }}"
WOODPECKER_GITEA_SECRET: "{{ lookup('keepass', 'woodpecker-oauth2-client-secret', 'password') }}"
WOODPECKER_AGENT_SECRET: "{{ lookup('keepass', 'woodpecker-agent-secret', 'password') }}"
WOODPECKER_GITEA_CLIENT: {{ lookup('keepass', 'woodpecker-oauth2-client-id', 'password') }}
WOODPECKER_GITEA_SECRET: {{ lookup('keepass', 'woodpecker-oauth2-client-secret', 'password') }}
WOODPECKER_AGENT_SECRET: {{ lookup('keepass', 'woodpecker-agent-secret', 'password') }}
WOODPECKER_ADMIN: mg
WOODPECKER_LOG_LEVEL: info
WOODPECKER_DEBUG_PRETTY: true
@ -26,6 +26,8 @@ services:
- intern
- traefik
labels:
com.centurylinklabs.watchtower.enable: true
traefik.http.routers.woodpecker.rule: Host(`ci.mgrote.net`)
traefik.enable: true
traefik.http.routers.woodpecker.tls: true
@ -33,15 +35,15 @@ services:
traefik.http.routers.woodpecker.entrypoints: entry_https
traefik.http.services.woodpecker.loadbalancer.server.port: 8000
traefik.http.routers.woodpecker.middlewares: woodpecker-ipallowlist
traefik.http.routers.woodpecker.middlewares: woodpecker-ipwhitelist
traefik.http.middlewares.woodpecker-ipallowlist.ipallowlist.sourcerange: "192.168.2.0/24,10.25.25.0/24"
traefik.http.middlewares.woodpecker-ipallowlist.ipallowlist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipallowlist/#ipstrategydepth
traefik.http.middlewares.woodpecker-ipwhitelist.ipwhitelist.sourcerange: 192.168.2.0/24
traefik.http.middlewares.woodpecker-ipwhitelist.ipwhitelist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipwhitelist/#ipstrategydepth
woodpecker-agent:
container_name: woodpecker-agent
image: "woodpeckerci/woodpecker-agent:v2.4.1"
image: woodpeckerci/woodpecker-agent:v2.0
command: agent
restart: always
depends_on:
@ -53,12 +55,14 @@ services:
- /var/run/docker.sock:/var/run/docker.sock
environment:
WOODPECKER_SERVER: woodpecker-server:9000
WOODPECKER_AGENT_SECRET: "{{ lookup('keepass', 'woodpecker-agent-secret', 'password') }}"
WOODPECKER_MAX_WORKFLOWS: 20
WOODPECKER_AGENT_SECRET: {{ lookup('keepass', 'woodpecker-agent-secret', 'password') }}
WOODPECKER_MAX_WORKFLOWS: 4
WOODPECKER_DEBUG_PRETTY: true
WOODPECKER_LOG_LEVEL: info
WOODPECKER_HEALTHCHECK: true
WOODPECKER_BACKEND: docker
labels:
com.centurylinklabs.watchtower.enable: true
networks:
- intern
@ -68,8 +72,8 @@ volumes:
agent-config:
# git.mgrote.net -> Settings -> Applications -> woodpecker
# WOODPECKER_GITEA_CLIENT: "{{ lookup('keepass', 'woodpecker-oauth2-client-id', 'password') }}"
# WOODPECKER_GITEA_SECRET: "{{ lookup('keepass', 'woodpecker-oauth2-client-secret', 'password') }}"
# WOODPECKER_GITEA_CLIENT: {{ lookup('keepass', 'woodpecker-oauth2-client-id', 'password') }}
# WOODPECKER_GITEA_SECRET: {{ lookup('keepass', 'woodpecker-oauth2-client-secret', 'password') }}
# Redirect URL: https://ci.mgrote.net/authorize
######## Networks ########


@ -42,7 +42,7 @@ services:
- com.centurylinklabs.watchtower.depends-on=lldap-db
######## DB ########
lldap-db:
image: mariadb:10.6.14
image: mariadb:10
container_name: lldap-db
restart: always
volumes:


@ -2,7 +2,7 @@ version: '3'
services:
wiki-webserver:
container_name: wiki-webserver
image: httpd:2.4@sha256:ba846154ade27292d216cce2d21f1c7e589f3b66a4a643bff0cdd348efd17aa3
image: httpd:2.4
restart: always
networks:
- traefik


@ -22,7 +22,7 @@ munin_plugin_dest_path: /etc/munin/plugins/
munin_plugin_conf_dest_path: /etc/munin/plugin-conf.d/
# munin_node_plugins: #plugins to install
# - name: docker_volumes # name
# src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_ #src
# src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/docker/docker_ #src
# config_file_name: /etc/munin/plugin-conf.d/docker # where to put plugin config
# content of config
# config: |


@ -10,6 +10,6 @@
changed_when: "output_conf.rc != 0"
ignore_errors: true # ignore errors
- name: munin-node-configure --shell - 2 # noqa ignore-errors no-changed-when
- name: munin-node-configure --shell - 2 # noqa ignore-errors
ansible.builtin.command: munin-node-configure --shell --families=contrib,auto | sh -x
ignore_errors: true # ignore errors


@ -0,0 +1,12 @@
## mgrote.munin-node
### Description
Installs munin-node plus plugins.
### Tested on
- [x] Ubuntu (>=18.04)
- [ ] Debian
- [x] ProxMox 6.1
### Variables + defaults
See [defaults](./defaults/main.yml)


@ -13,7 +13,7 @@
state: directory
owner: root
group: root
mode: "0755"
mode: "0644"
loop:
- /etc/munin
- /etc/munin/plugin-conf.d
@ -25,5 +25,5 @@
dest: /etc/munin/munin-node.conf
owner: root
group: root
mode: "0755"
mode: "0644"
notify: restart munin-node
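
On the 0755/0644 switch above: directories need the execute bit to be traversable, so 0755 keeps /etc/munin and its plugin directories readable for the node, while a plain file like munin-node.conf is fine with 0644. A sketch of the distinction (template name assumed):

    - name: create config directory
      ansible.builtin.file:
        path: /etc/munin/plugin-conf.d
        state: directory
        mode: "0755"  # rwx for root, r-x (traversable) for everyone else
    - name: deploy munin-node.conf
      ansible.builtin.template:
        src: munin-node.conf.j2
        dest: /etc/munin/munin-node.conf
        mode: "0644"  # plain file, no execute bit needed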


@ -1,7 +1,7 @@
---
- name: remove unwanted plugins
ansible.builtin.file:
path: "{{ munin_plugin_dest_path }}{{ item }}"
path: "{{ munin_plugin_dest_path }}{{ item.name }}"
state: absent
loop: "{{ munin_node_disabled_plugins }}"
notify: restart munin-node
@ -10,7 +10,7 @@
- name: remove additional plugin-config
ansible.builtin.file:
state: absent
dest: "{{ munin_plugin_conf_dest_path }}{{ item }}"
dest: "{{ munin_plugin_conf_dest_path }}{{ item.name }}"
notify: restart munin-node
loop: "{{ munin_node_disabled_plugins }}"
when: munin_node_disabled_plugins is defined
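
The item/item.name switch in these two tasks implies a change in the shape of munin_node_disabled_plugins: plain strings versus dicts with a name key (the dict form is what the group_vars later in this compare use). The two shapes side by side:

    # referenced as {{ item }}:
    munin_node_disabled_plugins:
      - meminfo
      - ntp
    # referenced as {{ item.name }}:
    munin_node_disabled_plugins:
      - name: meminfo
      - name: ntp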


@ -1,6 +1,5 @@
---
### used in many roles
ansible_facts_parallel: true
ssh_public_key_mg: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKL8opSQ0rWVw9uCfbuiqmXq188OP4xh66MBTO3zV5jo heimserver_mg_v3
my_mail: michael.grote@posteo.de
file_header: |
@ -8,19 +7,33 @@ file_header: |
# This file is managed with ansible! #
#----------------------------------------------------------------#
### mgrote_user_setup
dotfiles:
- user: mg
home: /home/mg
- user: root
home: /root
dotfiles_repo_url: https://git.mgrote.net/mg/dotfiles
dotfiles_vim_vundle_repo_url: https://git.mgrote.net/mirrors/Vundle.vim.git
### mgrote_netplan
netplan_configure: true
### mgrote_user
### mgrote.restic
restic_user: root
restic_group: restic
restic_conf_dir: /etc/restic
restic_exclude: |
._*
desktop.ini
.Trash-*
**/**cache***/**
**/**Cache***/**
**/**AppData***/**
# https://github.com/restic/restic/issues/1005
# https://forum.restic.net/t/exclude-syntax-confusion/1531/12
restic_mount_timeout: "10 min"
restic_failure_delay: "30 s"
restic_schedule: "0/6:00" # every 6 hours
restic_folders_to_backup: "/" # --one-file-system is set, so additional filesystems are not included unless they are listed explicitly here; https://restic.readthedocs.io/en/latest/040_backup.html#excluding-files
restic_repository: "//fileserver3.grote.lan/restic"
restic_repository_password: "{{ lookup('keepass', 'restic_repository_password', 'password') }}"
restic_mount_user: restic
restic_mount_password: "{{ lookup('keepass', 'fileserver_smb_user_restic', 'password') }}"
restic_fail_mail: "{{ my_mail }}"
### mgrote.user
users:
- username: mg
password: "{{ lookup('keepass', 'mg_linux_password_hash', 'password') }}"
@ -39,6 +52,18 @@ users:
allow_sudo: true
allow_passwordless_sudo: true
### mgrote.dotfiles
dotfiles_repo_url: https://git.mgrote.net/mg/dotfiles
dotfiles_repo_path: /home/mg/dotfiles
dotfiles_files:
- repo_path: "{{ dotfiles_repo_path }}/.vimrc"
local_path: "/home/mg/.vimrc"
- repo_path: "{{ dotfiles_repo_path }}/.tmux.conf"
local_path: "/home/mg/.tmux.conf"
- repo_path: "{{ dotfiles_repo_path }}/.gitconfig"
local_path: "/home/mg/.gitconfig"
dotfiles_owner: mg
### jnv.unattended_upgrades
unattended_mail: "{{ my_mail }}"
unattended_mail_only_on_error: true
@ -47,7 +72,7 @@ unattended_origins_patterns:
- 'origin=Ubuntu,archive=${distro_codename}-security'
- 'o=Ubuntu,a=${distro_codename}-updates'
### mgrote_ntp_chrony_client
### mgrote.ntp_chrony_client
ntp_chrony_timezone: "Europe/Berlin" # timezone the machine is in
ntp_chrony_driftfile_directory: "/var/lib/chrony" # directory for the driftfile
ntp_chrony_servers: # which servers to query
@ -57,11 +82,16 @@ ntp_chrony_user: _chrony # user + group for the service
ntp_chrony_group: _chrony # user + group for the service
ntp_chrony_logging: false
### mgrote_postfix
postfix_smtp_server: docker10.mgrote.net
### mgrote.postfix
postfix_smtp_server: docker10.grote.lan
postfix_smtp_server_port: 1025
### mgrote_fail2ban
### mgrote.tmux
tmux_conf_destination: "/home/mg/.tmux.conf"
tmux_bashrc_destination: "/home/mg/.bashrc"
tmux_standardsession_name: "default"
### mgrote.fail2ban
f2b_bantime: 300
f2b_findtime: 300
f2b_maxretry: 5
@ -75,15 +105,10 @@ ufw_rules:
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
ufw_default_incoming_policy: deny
ufw_default_outgoing_policy: allow
### mgrote_apt_manage_packages
### mgrote.apt_manage_packages
apt_packages_common:
- locales
- python3
@ -114,8 +139,6 @@ apt_packages_common:
- pwgen
- keychain
- fwupd
- bc
- jq
apt_packages_physical:
- s-tui
- smartmontools
@ -128,8 +151,10 @@ apt_packages_absent:
- nano
- snapd
- ubuntu-advantage-tools
apt_packages_internet:
- http://docker10.grote.lan:3344/bash-helper-scripts-mgrote-latest.deb
### mgrote_zfs_sanoid
### mgrote.zfs_sanoid
sanoid_templates:
- name: '31tage'
keep_hourly: '24' # keep (hourly)
@ -177,45 +202,8 @@ sanoid_templates:
autosnap: 'yes'
autoprune: 'yes'
### mgrote_zfs_sanoid
sanoid_deb_url: http://docker10.mgrote.net:3344/sanoid_v2.2.0.deb
### mgrote_munin_node
munin_node_bind_host: "0.0.0.0"
munin_node_bind_port: "4949"
munin_node_allowed_cidrs: [192.168.2.0/24]
munin_node_disabled_plugins:
- name: meminfo # too much load
- name: hddtemp2 # replaced by hddtemp_smartctl
- name: ntp # causes too many DNS PTR requests
- name: hddtempd # replaced by hddtemp_smartctl
- name: squid_cache # proxmox
- name: squid_objectsize # proxmox
- name: squid_requests # proxmox
- name: squid_traffic # proxmox
- name: timesync
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: lvm_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/disk/lvm_
config: |
[lvm_*]
user root
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
### mgrote.zfs_sanoid
sanoid_deb_url: http://docker10.grote.lan:3344/sanoid_3.0.4.deb
# Ansible variables
### User


@ -9,6 +9,6 @@ pip_install_packages:
- name: ansible
- name: docker-compose
### mgrote_apt_manage_packages
### mgrote.apt_manage_packages
apt_packages_extra:
- sshpass


@ -1,125 +0,0 @@
---
### mgrote_systemd_resolved
systemd_resolved_nameserver: 9.9.9.9
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
to_port: 53
comment: 'dns'
from_ip: 0.0.0.0/0
### mgrote.apt_manage_packages
apt_packages_extra:
- libnet-dns-perl # for munin: dnsresponse_
### mgrote_user_setup
dotfiles_vim_vundle_repo_url: http://192.168.2.42:3000/mirrors/Vundle.vim.git
dotfiles:
- user: mg
home: /home/mg
- user: root
home: /root
dotfiles_repo_url: http://192.168.2.42:3000/mg/dotfiles
### mgrote_blocky
blocky_version: v0.23
blocky_block_type: zeroIp
blocky_local_upstream: 192.168.2.1
blocky_conditional_mapping: # optional
- domain: mgrote.net
resolver: 192.168.2.1
blocky_dns_upstream:
- 9.9.9.9
- 1.1.1.1
- 8.8.8.8
- 5.9.164.112
blocky_dns_blocklists:
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
- https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
- http://sysctl.org/cameleon/hosts
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
blocky_custom_lookups: # optional
# Internet
- name: wiki.mgrote.net
ip: 192.168.2.43
- name: audio.mgrote.net
ip: 192.168.2.43
- name: auth.mgrote.net
ip: 192.168.2.43
- name: ci.mgrote.net
ip: 192.168.2.43
- name: miniflux.mgrote.net
ip: 192.168.2.43
- name: nextcloud.mgrote.net
ip: 192.168.2.43
- name: registry.mgrote.net
ip: 192.168.2.43
- name: git.mgrote.net
ip: 192.168.2.43
# Internal
- name: ads2700w.mgrote.net
ip: 192.168.2.147
- name: crs305.mgrote.net
ip: 192.168.2.225
- name: hex.mgrote.net
ip: 192.168.3.144
- name: pbs-test.mgrote.net
ip: 192.168.2.18
- name: pbs.mgrote.net
ip: 192.168.3.239
- name: pve5-test.mgrote.net
ip: 192.168.2.17
- name: pve5.mgrote.net # also stays configured in the router, because if pve is down there is no blocky either ;-)
ip: 192.168.2.16
- name: rb5009.mgrote.net
ip: 192.168.2.1
- name: fritz.box
ip: 192.168.5.1
- name: ldap.mgrote.net
ip: 192.168.2.47
### mgrote_munin_node
# cannot resolve git.mgrote.net, hence IPs here
munin_node_plugins:
- name: chrony
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: lvm_
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/disk/lvm_
config: |
[lvm_*]
user root
- name: fail2ban
src: http://192.168.2.42:3000/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: dnsresponse_192.168.2.1
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/network/dns/dnsresponse_
- name: dnsresponse_192.168.2.37
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/network/dns/dnsresponse_
- name: dnsresponse_127.0.0.1
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/network/dns/dnsresponse_
config: |
[dnsresponse_*]
env.site www.heise.de
env.times 20
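The three dnsresponse_ entries are one munin wildcard plugin installed once and then linked per target; the usual munin convention looks roughly like this (paths assumed):
curl -o /usr/share/munin/plugins/dnsresponse_ http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/network/dns/dnsresponse_
chmod +x /usr/share/munin/plugins/dnsresponse_
ln -s /usr/share/munin/plugins/dnsresponse_ /etc/munin/plugins/dnsresponse_192.168.2.1 # the suffix is the server to query
ln -s /usr/share/munin/plugins/dnsresponse_ /etc/munin/plugins/dnsresponse_127.0.0.1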

View File

@ -15,16 +15,10 @@ lvm_groups:
manage_lvm: true
pvresize_to_max: true
### geerlingguy.pip
pip_package: python3-pip
pip_install_packages:
- name: docker # für munin-plugin docker_
### mgrote.restic
restic_folders_to_backup: "/ /var/lib/docker" # --one-file-system is set, so additional filesystems are not included unless they are listed here explicitly; https://restic.readthedocs.io/en/latest/040_backup.html#excluding-files
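A minimal sketch of the resulting backup call, assuming the role passes the folder list straight through (repository and credentials omitted):
restic backup --one-file-system / /var/lib/docker
Because --one-file-system stops at mount-point boundaries, a separately mounted /var/lib/docker has to be listed explicitly.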
### mgrote.apt_manage_packages
apt_packages_extra:
- libnet-dns-perl # for munin: dnsresponse_*
### mgrote_user
### mgrote.user
users:
- username: mg
password: "{{ lookup('keepass', 'mg_linux_password_hash', 'password') }}"
@ -56,12 +50,12 @@ docker_users:
- mg
- docker-user
docker_install_compose: true
docker_add_repo: false # create no repo entry under /etc/apt/sources.list.d/; it is listed explicitly under "repos_override" and only needed for the install
docker_add_repo: false # create no repo entry under /etc/apt/sources.list.d/; it is listed explicitly under "repos_override"
### mgrote_docker-compose-deploy
### mgrote.docker-compose-deploy
docker_compose_base_dir: /home/docker-user
### mgrote_apt_manage_sources
### mgrote.apt_manage_sources
repos_override: # with docker repos
- deb [arch=amd64] https://download.docker.com/linux/ubuntu jammy stable
- "deb http://de.archive.ubuntu.com/ubuntu/ {{ ansible_distribution_release }} main restricted"
@ -74,66 +68,3 @@ repos_override: # mit docker-repos
- "deb http://security.ubuntu.com/ubuntu {{ ansible_distribution_release }}-security main restricted"
- "deb http://security.ubuntu.com/ubuntu {{ ansible_distribution_release }}-security universe"
- "deb http://security.ubuntu.com/ubuntu {{ ansible_distribution_release }}-security multiverse"
### mgrote_systemd_resolved
systemd_resolved_nameserver: 192.168.2.37
### mgrote_munin_node
munin_node_allowed_cidrs: [0.0.0.0/0] # because the munin server connects from a different subnet
munin_node_plugins:
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: lvm_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/disk/lvm_
config: |
[lvm_*]
user root
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: docker_containers
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
config: |
[docker_*]
user root
env.DOCKER_HOST unix://run/docker.sock
- name: docker_cpu
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_memory
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_network
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_volumes
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_volumesize
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_volumesize
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
from_ip: 192.168.0.0/16
comment: 'docker networks'
- rule: allow
from_ip: 172.0.0.0/8
comment: 'docker networks'

View File

@ -9,11 +9,6 @@ ufw_rules:
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
to_port: 445
comment: 'smb'
@ -23,37 +18,13 @@ ufw_rules:
comment: 'smb'
from_ip: 0.0.0.0/0
### mgrote_fileserver_smb
### mgrote.apt_manage_packages
apt_packages_internet:
- http://docker10.grote.lan:3344/bash-helper-scripts-mgrote-latest.deb
### mgrote.fileserver_smb
smb_workgroup: WORKGROUP
smb_min_protocol: "SMB2"
smb_client_min_protocol: "SMB2"
smb_client_max_protocol: "SMB3_11"
smb_enable_snapshots_dir: true
smb_enable_snapshots_shadow: true
### mgrote_munin_node
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: samba
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/samba
config: |
[samba]
user root
group root
env.smbstatus /usr/bin/smbstatus
env.ignoreipcshare 1

View File

@ -1,154 +0,0 @@
---
### mrlesmithjr.ansible-manage-lvm
lvm_groups:
- vgname: vg_data
disks:
- /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1
create: true
lvnames:
- lvname: lv_data
size: +100%FREE
create: true
filesystem: xfs
mount: true
mntp: /var/lib/gitea
manage_lvm: true
pvresize_to_max: true
### mgrote_apt_manage_packages
apt_packages_extra:
- fail2ban
### geerlingguy_postgres
postgresql_databases:
- name: "{{ gitea_db_name }}"
postgresql_users:
- name: "{{ gitea_db_user }}"
password: "{{ gitea_db_password }}"
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
to_port: "{{ gitea_http_port }}"
protocol: tcp
comment: 'gitea'
from_ip: 0.0.0.0/0
- rule: allow
to_port: "{{ gitea_ssh_port }}"
protocol: tcp
comment: 'gitea'
from_ip: 0.0.0.0/0
### ansible_role_gitea
# https://git.mgrote.net/ansible-roles-mirrors/ansible_role_gitea
gitea_fork: "forgejo"
# gitea update
gitea_version: "1.21.7-0" # deliberately old, for testing renovate
gitea_version_check: true
gitea_backup_on_upgrade: false
# gitea in the linux world
gitea_group: "gitea"
gitea_user: "gitea"
gitea_home: "/var/lib/gitea"
gitea_user_home: "{{ gitea_home }}"
# config lives in /etc/gitea/gitea.ini
gitea_configuration_path: "/etc/gitea" # adjust
gitea_app_name: "forgejo"
gitea_fqdn: "git.mgrote.net"
# ssh
gitea_ssh_port: 2222
gitea_start_ssh: true
gitea_shell: "/bin/false"
# Repository
gitea_default_branch: "master"
gitea_default_private: "public"
gitea_repository_root: "{{ gitea_home }}/repos"
# ui
gitea_show_user_email: false
# server
gitea_protocol: "http"
gitea_http_domain: "{{ gitea_fqdn }}"
gitea_http_port: "3000"
gitea_http_listen: "0.0.0.0"
gitea_root_url: "https://git.mgrote.net"
gitea_landing_page: "login"
# database
gitea_db_type: "postgres"
gitea_db_host: "localhost"
gitea_db_name: "gitea"
gitea_db_user: "gitea"
gitea_db_password: "{{ lookup('keepass', 'forgejo_db_password', 'password') }}"
# indexer
gitea_repo_indexer_enabled: true
# security
gitea_disable_webhooks: false
gitea_password_check_pwn: false
gitea_internal_token: "{{ lookup('keepass', 'forgejo_internal_token', 'password') }}"
gitea_secret_key: "{{ lookup('keepass', 'forgejo_secret_key', 'password') }}"
# service
gitea_disable_registration: true
gitea_register_email_confirm: true
gitea_require_signin: false
gitea_default_keep_mail_private: true
gitea_enable_captcha: false
gitea_show_registration_button: false
gitea_enable_notify_mail: true
gitea_default_user_visibility: "public"
gitea_show_milestones_dashboard_page: false
gitea_default_allow_create_organization: true
gitea_default_org_visibility: "public"
gitea_default_user_is_restricted: false
# Mailer
gitea_mailer_enabled: true
gitea_mailer_protocol: "smtp"
gitea_mailer_smtp_addr: "docker10.mgrote.net"
gitea_mailer_smtp_port: 1025
gitea_mailer_from: "gitea@mgrote.net"
gitea_subject_prefix: "git.mgrote.net - "
# log
gitea_log_systemd: true
gitea_log_level: "Info"
# Metrics
gitea_metrics_enabled: false
# Federation
gitea_federation_enabled: false
# Packages
gitea_packages_enabled: false
# actions
gitea_actions_enabled: false
gitea_extra_config: |
; webhook: required for drone, otherwise the webhook is not "sent"
[webhook]
ALLOWED_HOST_LIST = *.mgrote.net
; for import/migration from other git systems
[migrations]
ALLOWED_DOMAINS = *
; disabled; see: https://github.com/go-gitea/gitea/issues/25992
[repo-archive]
ENABLED = false
# oauth2
gitea_oauth2_jwt_secret: "{{ lookup('keepass', 'forgejo_oauth2_jwt_secret', 'password') }}"
# Fail2Ban configuration
gitea_fail2ban_enabled: true
gitea_fail2ban_jail_maxretry: "3"
gitea_fail2ban_jail_findtime: "300"
gitea_fail2ban_jail_bantime: "600"
gitea_fail2ban_jail_action: "iptables-allports"
### mgrote_gitea_setup
gitea_ldap_host: "ldap.mgrote.net"
gitea_ldap_base_path: "dc=mgrote,dc=net"
gitea_ldap_bind_user: "forgejo_bind_user"
gitea_ldap_bind_pass: "{{ lookup('keepass', 'lldap_forgejo_bind_user', 'password') }}"
gitea_admin_user: "fadmin"
gitea_admin_user_pass: "{{ lookup('keepass', 'forgejo_admin_user_pass', 'password') }}"
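All secrets in this file go through the repo's custom keepass lookup; a single entry can be checked ad hoc with something like the following (hypothetical invocation, assuming the plugin and the vault password file are in place):
ansible localhost -m ansible.builtin.debug -a "msg={{ lookup('keepass', 'forgejo_db_password', 'password') }}"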

101
group_vars/gitea.yml Normal file
View File

@ -0,0 +1,101 @@
---
### mrlesmithjr.ansible-manage-lvm
lvm_groups:
- vgname: vg_gitea_data
disks:
- /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1
create: true
lvnames:
- lvname: lv_gitea_data
size: +100%FREE
create: true
filesystem: xfs
mount: true
mntp: /var/lib/gitea
manage_lvm: true
pvresize_to_max: true
### mgrote.restic
restic_folders_to_backup: "/ /var/lib/gitea" # --one-file-system is set, so additional filesystems are not included unless they are listed here explicitly; https://restic.readthedocs.io/en/latest/040_backup.html#excluding-files
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: "{{ gitea_http_port }}"
protocol: tcp
comment: 'gitea'
from_ip: 0.0.0.0/0
- rule: allow
to_port: "{{ gitea_ssh_port }}"
protocol: tcp
comment: 'gitea'
from_ip: 0.0.0.0/0
### l3d.gitea
# config lives in /etc/gitea/gitea.ini
gitea_version: "1.21.0"
gitea_app_name: "Gitea"
gitea_user: "gitea"
gitea_home: "/var/lib/gitea"
gitea_repository_root: "{{ gitea_home }}"
gitea_user_repo_limit: 300
gitea_root_url: https://git.mgrote.net
gitea_offline_mode: true
gitea_lfs_server_enabled: false
gitea_secret_key: "{{ lookup('keepass', 'gitea_secret_key', 'password') }}"
gitea_internal_token: "{{ lookup('keepass', 'gitea_internal_token', 'password') }}"
gitea_disable_git_hooks: false
gitea_show_user_email: false
gitea_disable_gravatar: true
gitea_enable_captcha: true
gitea_only_allow_external_registration: false
gitea_enable_notify_mail: false
gitea_force_private: false
gitea_oauth2_enabled: true
gitea_repo_indexer_enabled: true
gitea_mailer_enabled: true
gitea_mailer_skip_verify: false
gitea_mailer_tls_enabled: true
gitea_mailer_host: smtp.strato.de:465
gitea_mailer_from: info@mgrote.net
gitea_mailer_user: "info@mgrote.net"
gitea_mailer_password: "{{ lookup('keepass', 'strato_smtp_password', 'password') }}"
gitea_mailer_type: smtp
gitea_default_branch: 'master'
gitea_db_type: sqlite3
gitea_db_path: "{{ gitea_home }}/data/gitea.db" # for sqlite3
gitea_ssh_listen: 0.0.0.0
gitea_ssh_domain: gitea.grote.lan
gitea_ssh_port: 2222
gitea_start_ssh: true
gitea_http_domain: git.mgrote.net
gitea_http_listen: 0.0.0.0
gitea_http_port: 3000
gitea_disable_http_git: false
gitea_protocol: http
gitea_show_registration_button: false
gitea_require_signin: false
gitea_disable_registration: true
gitea_fail2ban_enabled: true
gitea_fail2ban_jail_maxretry: 3
gitea_fail2ban_jail_findtime: 300
gitea_fail2ban_jail_bantime: 600
# required for drone, otherwise the webhook is not "sent"
gitea_extra_config: |
[webhook]
ALLOWED_HOST_LIST = *.grote.lan
gitea_backup_on_upgrade: false
gitea_backup_location: "{{ gitea_home }}/backups/"

View File

@ -1,58 +0,0 @@
---
### geerlingguy_postgres
postgresql_databases:
- name: "{{ lldap_db_name }}"
postgresql_users:
- name: "{{ lldap_db_user }}"
password: "{{ lldap_db_pass }}"
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
to_port: "{{ lldap_http_port }}"
protocol: tcp
comment: 'lldap'
from_ip: 192.168.2.0/24
- rule: allow
to_port: 3890
protocol: tcp
comment: 'lldap'
from_ip: 192.168.2.0/24
### mgrote_lldap
lldap_package_url: "https://download.opensuse.org/repositories/home:/Masgalor:/LLDAP/xUbuntu_22.04/amd64/lldap_0.5.0-1+3.1_amd64.deb"
lldap_logging_verbose: "true" # must be a string not a boolean
lldap_http_port: 17170
lldap_http_host: "0.0.0.0"
lldap_ldap_host: "0.0.0.0"
lldap_public_url: http://ldap.mgrote.net:17170
lldap_jwt_secret: "{{ lookup('keepass', 'lldap_jwt_secret', 'password') }}"
lldap_ldap_base_dn: "dc=mgrote,dc=net"
lldap_admin_username: ladmin # only used on setup
lldap_admin_password: "{{ lookup('keepass', 'lldap_ldap_user_pass', 'password') }}" # only used on setup; also bind-secret
lldap_admin_mailaddress: lldap-admin@mgrote.net # only used on setup
lldap_database_url: "postgres://{{ lldap_db_user }}:{{ lldap_db_pass }}@{{ lldap_db_host }}/{{ lldap_db_name }}"
lldap_key_seed: "{{ lookup('keepass', 'lldap_key_seed', 'password') }}"
#lldap_smtp_from: "lldap@mgrote.net" # unused in role
lldap_smtp_reply_to: "Do not reply <info@mgrote.net>"
lldap_smtp_server: "docker10.mgrote.net"
lldap_smtp_port: "1025"
lldap_smtp_smtp_encryption: "NONE"
#lldap_smtp_user: "info@mgrote.net" # unused in role
lldap_smtp_enable_password_reset: "true" # must be a string not a boolean
# "meta vars"; daraus werden die db-url und die postgres-db abgeleitet
lldap_db_name: "lldap"
lldap_db_user: "lldap"
lldap_db_pass: "{{ lookup('keepass', 'lldap_db_pass', 'password') }}"
lldap_db_host: "localhost"
...
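With the values above, the templated lldap_database_url resolves to (password elided):
postgres://lldap:<lldap_db_pass>@localhost/lldap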

View File

@ -2,10 +2,13 @@
### mgrote_netplan
netplan_configure: false
### mgrote_postfix
### mgrote.postfix
postfix_erlaubte_netzwerke: "127.0.0.0/8 192.168.2.0/24 192.168.3.0/24"
### mgrote_user
### mgrote.restic
restic_folders_to_backup: "/ /etc/proxmox-backup"
### mgrote.user
users:
- username: root
password: "{{ lookup('keepass', 'root_linux_password_hash_proxmox', 'password') }}"
@ -30,55 +33,3 @@ users:
public_ssh_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJcBwOjanQV6sFWaTetqpl20SVe3aRzGjKbsp7hKkDCE mg@irantu
allow_sudo: true
allow_passwordless_sudo: true
### mgrote_munin_node
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: zfs_arcstats
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_arcstats
- name: zfsonlinux_stats_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfsonlinux_stats_
- name: zpool_iostat
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_iostat
- name: zfs_list
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_list
config: |
[zfs_list]
env.ignore_datasets_pattern autodaily
- name: zfs_count
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_pool_dataset_count
- name: zpool_iostat
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_iostat
- name: zpool_capacity
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_capacity
munin_node_disabled_plugins:
- meminfo # too much load
- hddtemp2 # replaced by hddtemp_smartctl
- ntp # causes too many DNS PTR requests
- hddtempd # replaced by hddtemp_smartctl
- squid_cache # proxmox
- squid_objectsize # proxmox
- squid_requests # proxmox
- squid_traffic # proxmox
- lvm_
- timesync
- lxc_guests
munin_node_allowed_cidrs:
- 192.168.3.0/24
- 192.168.2.0/24
...

View File

@ -2,7 +2,10 @@
### mgrote_netplan
netplan_configure: false
### mgrote_user
### mgrote.restic
restic_folders_to_backup: "/ /etc/pve"
### mgrote.user
users:
- username: root
password: "{{ lookup('keepass', 'root_linux_password_hash_proxmox', 'password') }}"
@ -28,14 +31,7 @@ users:
allow_sudo: true
allow_passwordless_sudo: true
### mgrote_cv4pve_autosnap
cv4pve_api_user: root@pam!cv4pve-autosnap
cv4pve_api_token: "{{ lookup('keepass', 'cv4pve_api_token', 'password') }}"
cv4pve_vmid: all,-115
cv4pve_keep_snapshots: 5
cv4pve_version: "v1.14.8"
### mgrote_apt_manage_packages
### mgrote.apt_manage_packages
apt_packages_extra:
- ifupdown2
- bmon
@ -43,73 +39,6 @@ apt_packages_extra:
- open-vm-tools
- systemd-boot
### mgrote_munin_node
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: zfs_arcstats
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_arcstats
- name: zfsonlinux_stats_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfsonlinux_stats_
- name: zpool_iostat
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_iostat
- name: zfs_list
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_list
config: |
[zfs_list]
env.ignore_datasets_pattern autodaily
- name: zpool_capacity
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_capacity
- name: kvm_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/libvirt/kvm_mem
- name: kvm_net
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/libvirt/kvm_net
- name: kvm_io
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/libvirt/kvm_io
config: |
[kvm_io]
user root
- name: kvm_cpu
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/libvirt/kvm_cpu
- name: proxmox_count
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/proxmox/proxmox_vm_count
config: |
[proxmox_count]
user root
group root
- name: zfs_count
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_pool_dataset_count
- name: ksm_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/system/kernel_same_page_merging
munin_node_disabled_plugins:
- meminfo # too much load
- hddtemp2 # replaced by hddtemp_smartctl
- ntp # causes too many DNS PTR requests
- hddtempd # replaced by hddtemp_smartctl
- squid_cache # proxmox
- squid_objectsize # proxmox
- squid_requests # proxmox
- squid_traffic # proxmox
- lvm_
- slab
- timesync
- lxc_guests
# Ansible variables
### sudo
sudo: false
...

View File

@ -15,21 +15,24 @@ lvm_groups:
manage_lvm: true
pvresize_to_max: true
### mgrote_mount_cifs # remove
### mgrote_mount_cifs
cifs_mounts:
- name: bilder
type: cifs
state: absent
state: present
dest: /mnt/fileserver3_photoprism_bilder_ro
src: //fileserver3.mgrote.net/bilder
src: //fileserver3.grote.lan/bilder
user: photoprism
password: "{{ lookup('keepass', 'fileserver_smb_user_photoprism', 'password') }}"
domain: mgrote.net
domain: grote.lan
uid: 5000
gid: 5000
extra_opts: ",ro" # the leading comma is required because the option is appended at the end
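In other words, if the role joins its base options with extra_opts verbatim, the leading comma yields a valid option string such as uid=5000,gid=5000,ro; without it the result would be the broken uid=5000,gid=5000ro.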
### mgrote_docker-compose-inline
### mgrote.restic
restic_folders_to_backup: "/ /var/lib/docker /mnt/oci-registry" # --one-file-system is set, so additional filesystems are not included unless they are listed here explicitly
### mgrote.docker-compose-inline
compose_owner: "docker-user"
compose_group: "docker-user"
compose_file_permissions: "644"
@ -56,6 +59,8 @@ compose_files:
- name: navidrome
state: present
network: traefik
- name: watchtower
state: present
- name: routeros-config-export
state: present
- name: mail-relay
@ -64,9 +69,13 @@ compose_files:
- name: woodpecker
state: present
network: traefik
- name: photoprism
state: present
- name: wiki
state: present
network: traefik
- name: statping-ng
state: present
### oefenweb.ufw
ufw_rules:

View File

@ -1,5 +1,5 @@
---
### mgrote_youtubedl
### mgrote.youtubedl
ytdl_dl_url: "https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp"
ytdl_timer: "Tue,Sat 03:00"
ytdl_bin_path: /usr/local/bin/yt-dlp
@ -23,21 +23,22 @@ ytdl_video_urls:
- https://www.youtube.com/watch?v=TowKvEJcYDw&list=PLlQWnS27jXh9aEp7hl54xrk5CgiVbvMBy # arte - zu Tisch in...
- https://www.youtube.com/playlist?list=PLs4hTtftqnlAkiQNdWn6bbKUr-P1wuSm0 # jimmy kimmel mean tweets
- https://www.youtube.com/tomstantonengineering
- https://www.youtube.com/@liamcarps # England videos, ironically
ytdl_podcast_urls:
- https://sternengeschichten.podigee.io/feed/aac # Sternengeschichten
- https://feeds.br.de/radiowissen/feed.xml # BR2 RadioWissen
ytdl_video_output: "/shares_videos/Youtube/%(uploader)s/%(title)s-%(id)s.%(ext)s" # videos are now ALWAYS written as "Uploader/Name.ext"
ytdl_enable_video_download: true
ytdl_enable_podcast_download: false
ytdl_video_output: "/shares_videos/Youtube/%(uploader)s/%(title)s-%(id)s.%(ext)s" # videos are now ALWAYS written as "Uploader/Name.ext"
ytdl_podcast_output: "/shares_music/Podcasts/%(playlist)s/%(id)s.%(ext)s"
ytdl_video_log_output: "/shares_videos/Youtube/archive-youtube.log"
ytdl_podcast_log_output: "/shares_music/Podcasts/archive-podcast.log"
ytdl_youtube_username: "{{ lookup('keepass', 'youtubedl_youtube_login', 'username') }}"
ytdl_youtube_password: "{{ lookup('keepass', 'youtubedl_youtube_login', 'password') }}"
ytdl_conf_dir: "/etc/youtubedl" # without trailing /
ytdl_download_limit: "10000K"
### mgrote_fileserver_smb
### mgrote.fileserver_smb
smb_users:
- name: 'restic'
password: "{{ lookup('keepass', 'fileserver_smb_user_restic', 'password') }}"
@ -46,7 +47,7 @@ smb_users:
- name: 'kodi'
password: "{{ lookup('keepass', 'fileserver_smb_user_kodi', 'password') }}"
- name: 'michaelgrote'
password: "{{ lookup('keepass', 'fileserver_smb_user_michaelgrote', 'password') }}"
password: "{{ lookup('keepass', 'fileserver_smb_user_mg', 'password') }}"
- name: 'navidrome'
password: "{{ lookup('keepass', 'fileserver_smb_user_navidrome', 'password') }}"
- name: 'docker'
@ -55,6 +56,8 @@ smb_users:
password: "{{ lookup('keepass', 'fileserver_smb_user_pve', 'password') }}"
- name: 'brother_ads2700w'
password: "{{ lookup('keepass', 'fileserver_smb_user_brother_ads2700w', 'password') }}"
- name: 'photoprism'
password: "{{ lookup('keepass', 'fileserver_smb_user_photoprism', 'password') }}"
smb_shares:
- name: 'videos'
@ -77,6 +80,10 @@ smb_shares:
path: '/shares_archiv'
users_ro: ''
users_rw: 'michaelgrote win10'
- name: 'hm'
path: '/shares_hm'
users_ro: ''
users_rw: 'michaelgrote win10'
- name: 'musik'
path: '/shares_music'
users_ro: 'navidrome kodi '
@ -87,7 +94,7 @@ smb_shares:
users_rw: 'kodi win10 michaelgrote'
- name: 'bilder'
path: '/shares_bilder'
users_ro: ''
users_ro: 'photoprism'
users_rw: ' michaelgrote win10'
- name: 'proxmox'
path: '/shares_pve_backup'
@ -96,7 +103,7 @@ smb_shares:
- name: 'restic'
path: '/shares_restic'
users_ro: ''
users_rw: 'restic win10 michaelgrote'
users_rw: ' restic win10 michaelgrote'
- name: 'buecher'
path: '/shares_buecher'
users_ro: ''

View File

@ -33,7 +33,7 @@ pbs_users:
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase backup /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1
# mgrote.zfs_manage_datasets
### mgrote_zfs_extra
### mgrote.zfs_extra
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are grouped under zfs_extra_*
zfs_datasets: # data pools are not managed here
# rpool - System-Datasets
@ -47,15 +47,12 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
snapdir: hidden
reservation: 1G
refreservation: 1G
acltype: posix
- dataset: rpool/ROOT
state: present
refreservation: 1G
- dataset: rpool/ROOT/pbs-1
state: present
refreservation: 1G
acltype: posix # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
# backup-pool
- dataset: backup/pbs_data
state: present
@ -63,7 +60,6 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
- dataset: backup/pve5
state: present
canmount: off # noqa yaml[truthy]
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are grouped under zfs_extra_*
zfs_extra_arc_max_size: "1073741824" # 1GB in Bytes
zfs_extra_zfs_pools:
@ -72,7 +68,7 @@ zfs_extra_zfs_pools:
- name: "backup"
systemd_timer_schedule: "*-01,04,07,10-01 23:00"
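The OnCalendar expression can be sanity-checked with systemd-analyze, which prints the normalized form and the next elapse time:
systemd-analyze calendar "*-01,04,07,10-01 23:00"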
### mgrote_zfs_sanoid
### mgrote.zfs_sanoid
sanoid_snaps_enable: true
## syncoid
@ -81,65 +77,70 @@ sanoid_syncoid_ssh_privkey: "{{ lookup('keepass', 'sanoid_syncoid_private_key',
sanoid_syncoid_timer: '*-*-* *:00:00' # jede Stunde
sanoid_syncoid_bwlimit: 30m # 30MB/s
sanoid_syncoid_datasets_sync:
- source_host: pve5.mgrote.net
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/pve_backup
source_dataset: hdd_data/pve_backup
source_dataset: hdd_data_raidz/pve_backup
- source_host: pve5.mgrote.net
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/videos
source_dataset: hdd_data/videos
source_dataset: hdd_data_raidz/videos
- source_host: pve5.mgrote.net
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/music
source_dataset: hdd_data/music
source_dataset: hdd_data_raidz/music
- source_host: pve5.mgrote.net
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/tmp
source_dataset: hdd_data/tmp
source_dataset: hdd_data_raidz/tmp
- source_host: pve5.mgrote.net
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/archiv
source_dataset: hdd_data/archiv
source_dataset: hdd_data_raidz/archiv
- source_host: pve5.mgrote.net
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/bilder
source_dataset: hdd_data/bilder
source_dataset: hdd_data_raidz/bilder
- source_host: pve5.mgrote.net
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/hm
source_dataset: hdd_data_raidz/hm
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/scans
source_dataset: hdd_data/scans
source_dataset: hdd_data_raidz/scans
- source_host: pve5.mgrote.net
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/restic
source_dataset: hdd_data/restic
source_dataset: hdd_data_raidz/restic
- source_host: pve5.mgrote.net
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/backup
source_dataset: hdd_data/backup
source_dataset: hdd_data_raidz/backup
- source_host: pve5.mgrote.net
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/buecher
source_dataset: hdd_data/buecher
source_dataset: hdd_data_raidz/buecher
- source_host: pve5.mgrote.net
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/programme
source_dataset: hdd_data/programme
source_dataset: hdd_data_raidz/programme
- source_host: pve5.mgrote.net
- source_host: pve5.grote.lan
destination_mount_check: backup
destination_dataset: backup/pve5/vm
source_dataset: hdd_data/vm
source_dataset: hdd_data_raidz/vm
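Each list entry presumably becomes one pull-style syncoid transfer on this host, roughly as follows (a sketch; syncoid's --source-bwlimit is assumed to carry the 30m limit):
syncoid --source-bwlimit=30m root@pve5.grote.lan:hdd_data_raidz/videos backup/pve5/videos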
# sanoid
sanoid_datasets:

View File

@ -29,7 +29,7 @@ pbs_users:
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase backup /dev/disk/by-id/ata-TOSHIBA_MG09ACA18TE_Z1B0A28LFJDH
# mgrote.zfs_manage_datasets
### mgrote_zfs_extra
### mgrote.zfs_extra
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are grouped under zfs_extra_*
zfs_datasets: # data pools are not managed here
# rpool - System-Datasets
@ -43,14 +43,12 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
snapdir: hidden
reservation: 1G
refreservation: 10G
acltype: posix
- dataset: rpool/ROOT
state: present
refreservation: 10G
- dataset: rpool/ROOT/pbs-1
state: present
refreservation: 10G
acltype: posix # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
# backup-pool
- dataset: backup/pbs_data
state: present
@ -66,7 +64,7 @@ zfs_extra_zfs_pools:
- name: "backup"
systemd_timer_schedule: "*-01,04,07,10-01 23:00"
### mgrote_zfs_sanoid
### mgrote.zfs_sanoid
sanoid_snaps_enable: true
## syncoid
sanoid_syncoid_destination_host: true
@ -77,62 +75,67 @@ sanoid_syncoid_datasets_sync:
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/pve_backup
source_dataset: hdd_data/pve_backup
source_dataset: hdd_data_raidz/pve_backup
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/videos
source_dataset: hdd_data/videos
source_dataset: hdd_data_raidz/videos
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/music
source_dataset: hdd_data/music
source_dataset: hdd_data_raidz/music
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/tmp
source_dataset: hdd_data/tmp
source_dataset: hdd_data_raidz/tmp
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/archiv
source_dataset: hdd_data/archiv
source_dataset: hdd_data_raidz/archiv
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/bilder
source_dataset: hdd_data/bilder
source_dataset: hdd_data_raidz/bilder
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/hm
source_dataset: hdd_data_raidz/hm
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/scans
source_dataset: hdd_data/scans
source_dataset: hdd_data_raidz/scans
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/restic
source_dataset: hdd_data/restic
source_dataset: hdd_data_raidz/restic
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/backup
source_dataset: hdd_data/backup
source_dataset: hdd_data_raidz/backup
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/buecher
source_dataset: hdd_data/buecher
source_dataset: hdd_data_raidz/buecher
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/programme
source_dataset: hdd_data/programme
source_dataset: hdd_data_raidz/programme
- source_host: 192.168.2.16 # pve5, because pbs cannot resolve the fqdn
destination_mount_check: backup
destination_dataset: backup/pve5/vm
source_dataset: hdd_data/vm
source_dataset: hdd_data_raidz/vm
# sanoid
sanoid_datasets:

View File

@ -3,11 +3,11 @@
# the storage location for the VMs is encrypted
# zfs create -o encryption=aes-256-gcm -o keyformat=passphrase rpool/vm
# unlock after boot with: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l
## hdd_data
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data mirror /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi2
## HDD_DATA_RAIDZ
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data_raidz mirror /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi3 /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi2
# mgrote.zfs_manage_datasets
### mgrote_zfs_extra
### mgrote.zfs_extra
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are grouped under zfs_extra_*
zfs_datasets: # data pools are not managed here
# rpool - System-Datasets
@ -21,14 +21,12 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
snapdir: hidden
reservation: 1G
refreservation: 1G
acltype: posix
- dataset: rpool/ROOT
state: present
refreservation: 1G
- dataset: rpool/ROOT/pve-1
state: present
refreservation: 1G
acltype: posix # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
# rpool - VMs
- dataset: rpool/vm
@ -37,9 +35,8 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
state: present
- dataset: rpool/vm/lxc
state: present
# hdd_data
- dataset: hdd_data
# hdd_data_raidz
- dataset: hdd_data_raidz
state: present
compression: zstd
sync: disabled
@ -48,95 +45,101 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
atime: on # noqa yaml[truthy]
snapdir: hidden
reservation: 1G
- dataset: hdd_data/papa_backup
- dataset: hdd_data_raidz/papa_backup
state: present
- dataset: hdd_data/pve_backup
- dataset: hdd_data_raidz/pve_backup
state: present
recordsize: 1M
- dataset: hdd_data/videos
- dataset: hdd_data_raidz/videos
state: present
recordsize: 1M
- dataset: hdd_data/music
- dataset: hdd_data_raidz/music
state: present
recordsize: 1M
- dataset: hdd_data/tmp
- dataset: hdd_data_raidz/tmp
state: present
- dataset: hdd_data/archiv
- dataset: hdd_data_raidz/archiv
state: present
- dataset: hdd_data/bilder
- dataset: hdd_data_raidz/bilder
state: present
recordsize: 1M
- dataset: hdd_data/scans
- dataset: hdd_data_raidz/hm
state: present
- dataset: hdd_data/restic
- dataset: hdd_data_raidz/scans
state: present
- dataset: hdd_data/backup
- dataset: hdd_data_raidz/restic
state: present
- dataset: hdd_data/buecher
- dataset: hdd_data_raidz/backup
state: present
- dataset: hdd_data/programme
- dataset: hdd_data_raidz/buecher
state: present
- dataset: hdd_data/vm
- dataset: hdd_data_raidz/programme
state: present
- dataset: hdd_data_raidz/vm
state: present
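Expressed as plain ZFS commands, the pool-level defaults and one per-dataset override above correspond roughly to this sketch:
zfs set compression=zstd sync=disabled atime=on snapdir=hidden reservation=1G hdd_data_raidz
zfs create -o recordsize=1M hdd_data_raidz/videos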
zfs_extra_arc_max_size: "1073741824" # 1GB in Bytes
zfs_extra_zfs_pools:
- name: "rpool"
systemd_timer_schedule: "*-01,04,07,10-01 23:00" # jeden ersten eines jeden Quartals
- name: "hdd_data"
- name: "hdd_data_raidz"
systemd_timer_schedule: "*-01,04,07,10-01 23:00"
### mgrote_zfs_sanoid
### mgrote.zfs_sanoid
sanoid_datasets:
- path: 'hdd_data/videos'
- path: 'hdd_data_raidz/videos'
template: '3tage'
recursive: 'yes'
snapshots: true
- path: 'hdd_data/music'
- path: 'hdd_data_raidz/music'
template: '14tage'
recursive: 'yes'
snapshots: true
- path: 'hdd_data/papa_backup'
- path: 'hdd_data_raidz/papa_backup'
template: '14tage'
recursive: 'yes'
snapshots: true
- path: 'hdd_data/tmp'
- path: 'hdd_data_raidz/tmp'
template: '3tage'
recursive: 'yes'
snapshots: true
- path: 'hdd_data/pve_backup'
- path: 'hdd_data_raidz/pve_backup'
template: '3tage'
recursive: 'yes'
snapshots: true
- path: 'hdd_data/archiv'
- path: 'hdd_data_raidz/archiv'
template: '14tage'
recursive: 'yes'
snapshots: true
- path: hdd_data/bilder
- path: hdd_data_raidz/bilder
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '14tage'
- path: hdd_data/scans
- path: hdd_data_raidz/hm
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '14tage'
- path: hdd_data_raidz/scans
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '3tage'
- path: hdd_data/backup
- path: hdd_data_raidz/backup
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '31tage'
- path: hdd_data/restic
- path: hdd_data_raidz/restic
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '3tage'
- path: hdd_data/programme
- path: hdd_data_raidz/programme
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '14tage'
- path: hdd_data/buecher
- path: hdd_data_raidz/buecher
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '14tage'
- path: hdd_data/vm
- path: hdd_data_raidz/vm
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: 'pve3tage'
@ -153,59 +156,70 @@ sanoid_datasets:
snapshots: true
template: '3tage'
### mgrote_proxmox_bind_mounts
### mgrote.cv4pve-autosnap
cv4pve_api_user: root@pam!cv4pve-autosnap
cv4pve_api_token: "{{ lookup('keepass', 'cv4pve_api_token', 'password') }}"
cv4pve_vmid: all
cv4pve_keep_snapshots: 5
cv4pve_dl_link: "https://github.com/Corsinvest/cv4pve-autosnap/releases/download/v1.10.0/cv4pve-autosnap-linux-x64.zip"
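Put together, these variables would drive an invocation along the lines of the following sketch (based on cv4pve-autosnap's documented CLI; host and label are assumptions):
cv4pve-autosnap --host=127.0.0.1 --api-token='root@pam!cv4pve-autosnap=<token>' --vmid=all snap --label=daily --keep=5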
### mgrote.proxmox_bind_mounts
pve_bind_mounts:
- vmid: 100
mp_nr: 0
mp_path_host: /hdd_data/videos
mp_path_host: /hdd_data_raidz/videos
mp_path_guest: /shares_videos
- vmid: 100
mp_nr: 2
mp_path_host: /hdd_data/pve_backup
mp_path_host: /hdd_data_raidz/pve_backup
mp_path_guest: /shares_pve_backup
- vmid: 100
mp_nr: 3
mp_path_host: /hdd_data/papa_backup
mp_path_host: /hdd_data_raidz/papa_backup
mp_path_guest: /shares_papa_backup
- vmid: 100
mp_nr: 4
mp_path_host: /hdd_data/music
mp_path_host: /hdd_data_raidz/music
mp_path_guest: /shares_music
- vmid: 100
mp_nr: 5
mp_path_host: /hdd_data/tmp
mp_path_host: /hdd_data_raidz/tmp
mp_path_guest: /shares_tmp
- vmid: 100
mp_nr: 6
mp_path_host: /hdd_data/archiv
mp_path_host: /hdd_data_raidz/archiv
mp_path_guest: /shares_archiv
- vmid: 100
mp_nr: 7
mp_path_host: /hdd_data/bilder
mp_path_host: /hdd_data_raidz/bilder
mp_path_guest: /shares_bilder
- vmid: 100
mp_nr: 8
mp_path_host: /hdd_data_raidz/hm
mp_path_guest: /shares_hm
- vmid: 100
mp_nr: 9
mp_path_host: /hdd_data/scans
mp_path_host: /hdd_data_raidz/scans
mp_path_guest: /shares_scans
- vmid: 100
mp_nr: 10
mp_path_host: /hdd_data/restic
mp_path_host: /hdd_data_raidz/restic
mp_path_guest: /shares_restic
- vmid: 100
mp_nr: 12
mp_path_host: /hdd_data/backup
mp_path_host: /hdd_data_raidz/backup
mp_path_guest: /shares_backup
- vmid: 100
mp_nr: 14
mp_path_host: /hdd_data/buecher
mp_path_host: /hdd_data_raidz/buecher
mp_path_guest: /shares_buecher
- vmid: 100
mp_nr: 15
mp_path_host: /hdd_data/programme
mp_path_host: /hdd_data_raidz/programme
mp_path_guest: /shares_programme
- vmid: 100
mp_nr: 16
mp_path_host: /hdd_data/vm
mp_path_host: /hdd_data_raidz/vm
mp_path_guest: /shares_vm
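Each entry maps to one Proxmox pct property, e.g. for the first mount point (sketch):
pct set 100 -mp0 /hdd_data_raidz/videos,mp=/shares_videos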
# mgrote.pbs_pve_integration

View File

@ -3,15 +3,11 @@
# the storage location for the VMs is encrypted
# zfs create -o encryption=aes-256-gcm -o keyformat=passphrase rpool/vm
# unlock after boot with: sudo zpool import -d /dev/disk/by-id/ -a && sudo zfs mount -a -l
## hdd_data
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data mirror /dev/disk/by-id/ata-TOSHIBA_MG09ACA18TE_Z1B0A27KFJDH /dev/disk/by-id/ata-ST18000NM003D-3DL103_ZVTBSAYS
## hdd_data "neu"
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data /dev/disk/by-id/ata-ST18000NM003D-3DL103_ZVTBSAYS
## HDD_DATA_RAIDZ
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data_raidz mirror /dev/disk/by-id/ata-TOSHIBA_MG09ACA18TE_Z1B0A27KFJDH /dev/disk/by-id/ata-TOSHIBA_MG09ACA18TE_Z1B0A28LFJDH
# mgrote.zfs_manage_datasets
### mgrote_zfs_extra
### mgrote.zfs_extra
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem/ are grouped under zfs_extra_*
zfs_datasets: # data pools are not managed here
# rpool - System-Datasets
@ -25,19 +21,15 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
snapdir: hidden
reservation: 1G
refreservation: 10G
acltype: posix
- dataset: rpool/ROOT
state: present
refreservation: 10G
- dataset: rpool/ROOT/pve-1
state: present
refreservation: 10G
acltype: posix # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
# rpool - Data
- dataset: rpool/data
state: present
# rpool - VMs
- dataset: rpool/vm
state: present
@ -47,9 +39,8 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
state: present
- dataset: rpool/data
state: present
# hdd_data
- dataset: hdd_data
# hdd_data_raidz
- dataset: hdd_data_raidz
state: present
compression: zstd
sync: disabled
@ -58,101 +49,106 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
atime: on # noqa yaml[truthy]
snapdir: hidden
reservation: 1G
acltype: posix # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
- dataset: hdd_data/papa_backup
- dataset: hdd_data_raidz/papa_backup
state: present
- dataset: hdd_data/pve_backup
- dataset: hdd_data_raidz/pve_backup
state: present
recordsize: 1M
- dataset: hdd_data/videos
- dataset: hdd_data_raidz/videos
state: present
recordsize: 1M
- dataset: hdd_data/music
- dataset: hdd_data_raidz/music
state: present
recordsize: 1M
- dataset: hdd_data/tmp
- dataset: hdd_data_raidz/tmp
state: present
- dataset: hdd_data/archiv
- dataset: hdd_data_raidz/archiv
state: present
- dataset: hdd_data/bilder
- dataset: hdd_data_raidz/bilder
state: present
recordsize: 1M
- dataset: hdd_data/scans
- dataset: hdd_data_raidz/hm
state: present
- dataset: hdd_data/restic
- dataset: hdd_data_raidz/scans
state: present
- dataset: hdd_data/backup
- dataset: hdd_data_raidz/restic
state: present
- dataset: hdd_data/buecher
- dataset: hdd_data_raidz/backup
state: present
- dataset: hdd_data/programme
- dataset: hdd_data_raidz/buecher
state: present
- dataset: hdd_data/vm
- dataset: hdd_data_raidz/programme
state: present
- dataset: hdd_data_raidz/vm
state: present
zfs_extra_arc_max_size: "8589934592" # 8GB in Bytes
zfs_extra_zfs_pools:
- name: "rpool"
systemd_timer_schedule: "*-01,04,07,10-01 23:00" # jeden ersten eines jeden Quartals
- name: "hdd_data"
- name: "hdd_data_raidz"
systemd_timer_schedule: "*-01,04,07,10-01 23:00"
### mgrote_zfs_sanoid
### mgrote.zfs_sanoid
sanoid_snaps_enable: true
## enable sending snaps
sanoid_syncoid_source_host: true
sanoid_syncoid_ssh_pubkey: "{{ lookup('keepass', 'sanoid_syncoid_public_key', 'notes') }}"
sanoid_datasets:
### hdd_data
- path: 'hdd_data/videos'
### hdd_data_raidz
- path: 'hdd_data_raidz/videos'
template: '3tage'
recursive: 'yes'
snapshots: true
- path: 'hdd_data/music'
- path: 'hdd_data_raidz/music'
template: '14tage'
recursive: 'yes'
snapshots: true
- path: 'hdd_data/papa_backup'
- path: 'hdd_data_raidz/papa_backup'
template: '14tage'
recursive: 'yes'
snapshots: true
- path: 'hdd_data/tmp'
- path: 'hdd_data_raidz/tmp'
template: '3tage'
recursive: 'yes'
snapshots: true
- path: 'hdd_data/pve_backup'
- path: 'hdd_data_raidz/pve_backup'
template: '3tage'
recursive: 'yes'
snapshots: true
- path: 'hdd_data/archiv'
- path: 'hdd_data_raidz/archiv'
template: '14tage'
recursive: 'yes'
snapshots: true
- path: hdd_data/bilder
- path: hdd_data_raidz/bilder
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '14tage'
- path: hdd_data/scans
- path: hdd_data_raidz/hm
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '14tage'
- path: hdd_data_raidz/scans
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '3tage'
- path: hdd_data/backup
- path: hdd_data_raidz/backup
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '31tage'
- path: hdd_data/restic
- path: hdd_data_raidz/restic
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '3tage'
- path: hdd_data/programme
- path: hdd_data_raidz/programme
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '14tage'
- path: hdd_data/buecher
- path: hdd_data_raidz/buecher
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '14tage'
- path: hdd_data/vm
- path: hdd_data_raidz/vm
recursive: 'no' # noqa yaml[truthy]
snapshots: true
template: '3tage'
@ -170,66 +166,77 @@ sanoid_datasets:
snapshots: true
template: 'pve3tage'
### mgrote_proxmox_bind_mounts
### mgrote.cv4pve-autosnap
cv4pve_api_user: root@pam!cv4pve-autosnap
cv4pve_api_token: "{{ lookup('keepass', 'cv4pve_api_token', 'password') }}"
cv4pve_vmid: all,-115
cv4pve_keep_snapshots: 5
cv4pve_dl_link: "https://github.com/Corsinvest/cv4pve-autosnap/releases/download/v1.14.7/cv4pve-autosnap-linux-x64.zip"
### mgrote.proxmox_bind_mounts
pve_bind_mounts:
### fileserver3
- vmid: 115
mp_nr: 0
mp_path_host: /hdd_data/videos
mp_path_host: /hdd_data_raidz/videos
mp_path_guest: /shares_videos
- vmid: 115
mp_nr: 2
mp_path_host: /hdd_data/pve_backup
mp_path_host: /hdd_data_raidz/pve_backup
mp_path_guest: /shares_pve_backup
- vmid: 115
mp_nr: 3
mp_path_host: /hdd_data/papa_backup
mp_path_host: /hdd_data_raidz/papa_backup
mp_path_guest: /shares_papa_backup
- vmid: 115
mp_nr: 4
mp_path_host: /hdd_data/music
mp_path_host: /hdd_data_raidz/music
mp_path_guest: /shares_music
- vmid: 115
mp_nr: 5
mp_path_host: /hdd_data/tmp
mp_path_host: /hdd_data_raidz/tmp
mp_path_guest: /shares_tmp
- vmid: 115
mp_nr: 6
mp_path_host: /hdd_data/archiv
mp_path_host: /hdd_data_raidz/archiv
mp_path_guest: /shares_archiv
- vmid: 115
mp_nr: 7
mp_path_host: /hdd_data/bilder
mp_path_host: /hdd_data_raidz/bilder
mp_path_guest: /shares_bilder
- vmid: 115
mp_nr: 8
mp_path_host: /hdd_data_raidz/hm
mp_path_guest: /shares_hm
- vmid: 115
mp_nr: 9
mp_path_host: /hdd_data/scans
mp_path_host: /hdd_data_raidz/scans
mp_path_guest: /shares_scans
- vmid: 115
mp_nr: 10
mp_path_host: /hdd_data/restic
mp_path_host: /hdd_data_raidz/restic
mp_path_guest: /shares_restic
- vmid: 115
mp_nr: 12
mp_path_host: /hdd_data/backup
mp_path_host: /hdd_data_raidz/backup
mp_path_guest: /shares_backup
- vmid: 115
mp_nr: 14
mp_path_host: /hdd_data/buecher
mp_path_host: /hdd_data_raidz/buecher
mp_path_guest: /shares_buecher
- vmid: 115
mp_nr: 15
mp_path_host: /hdd_data/programme
mp_path_host: /hdd_data_raidz/programme
mp_path_guest: /shares_programme
- vmid: 115
mp_nr: 16
mp_path_host: /hdd_data/vm
mp_path_host: /hdd_data_raidz/vm
mp_path_guest: /shares_vm
# mgrote.pbs_pve_integration
pve_pbs_datastore:
- name: pbs
server: pbs.mgrote.net
server: pbs.grote.lan
datastore: zfs_backup
username: user_pve5@pbs
password: "{{ lookup('keepass', 'pbs_pve_user', 'password') }}"

View File

@ -2,55 +2,47 @@ all:
children:
fileserver:
hosts:
fileserver3.mgrote.net:
blocky:
hosts:
blocky.mgrote.net:
ldap:
hosts:
ldap.mgrote.net:
fileserver3.grote.lan:
lxc:
hosts:
fileserver3.mgrote.net:
fileserver3.grote.lan:
ansible:
hosts:
ansible2.mgrote.net:
ansible2.grote.lan:
docker:
hosts:
docker10.mgrote.net:
docker10.grote.lan:
vmtest:
hosts:
vm-test-2204.mgrote.net:
pbs-test.mgrote.net:
pve5-test.mgrote.net:
vm-test-2204.grote.lan:
pbs-test.grote.lan:
pve5-test.grote.lan:
pve:
hosts:
pve5.mgrote.net:
pve5-test.mgrote.net:
pve5.grote.lan:
pve5-test.grote.lan:
pbs:
hosts:
pbs.mgrote.net:
pbs-test.mgrote.net:
pbs.grote.lan:
pbs-test.grote.lan:
physical:
hosts:
pve5.mgrote.net:
pbs.mgrote.net:
git:
pve5.grote.lan:
pbs.grote.lan:
gitea:
hosts:
forgejo.mgrote.net:
gitea.grote.lan:
production:
hosts:
fileserver3.mgrote.net:
ansible2.mgrote.net:
pve5.mgrote.net:
forgejo.mgrote.net:
docker10.mgrote.net:
pbs.mgrote.net:
blocky.mgrote.net:
ldap.mgrote.net:
fileserver3.grote.lan:
ansible2.grote.lan:
pve5.grote.lan:
gitea.grote.lan:
docker10.grote.lan:
pbs.grote.lan:
test:
hosts:
vm-test-2204.mgrote.net:
pve5-test.mgrote.net:
pbs-test.mgrote.net:
vm-test-2204.grote.lan:
pve5-test.grote.lan:
pbs-test.grote.lan:

Binary file not shown.

View File

@ -2,7 +2,7 @@
- hosts: all
gather_facts: false
roles:
- role: ansible-role-bootstrap
- role: robertdebock-ansible-role-bootstrap
tags: "bootstrap"
become: true
- role: mgrote_apt_manage_sources
@ -34,7 +34,7 @@
ansible_password: hallowelt
ansible_become_password: hallowelt
ansible_ssh_common_args: "'-o StrictHostKeyChecking=no'"
### mgrote_user
### mgrote.user
users:
- username: ansible-user
password: "{{ lookup('keepass', 'ansible_user_linux_password_hash', 'password') }}"

View File

@ -3,4 +3,3 @@
- ansible.builtin.import_playbook: base/system.yml
- ansible.builtin.import_playbook: base/users.yml
- ansible.builtin.import_playbook: base/ufw.yml
- ansible.builtin.import_playbook: base/monitoring.yml

View File

@ -1,6 +1,4 @@
---
- hosts: ansible
roles:
- role: ansible-role-pip
tags: "pip"
become: true
- { role: geerlingguy-ansible-role-pip, tags: "pip", become: true }

View File

@ -1,7 +0,0 @@
---
- hosts: blocky
roles:
- role: mgrote_systemd_resolved
tags: "resolved"
- role: mgrote_blocky
tags: "blocky"

View File

@ -1,21 +1,10 @@
---
- hosts: docker
roles:
- role: mgrote_systemd_resolved
tags: "dns"
become: true
- role: ansible-role-pip
tags: "pip"
become: true
- role: ansible-role-docker
tags: "docker"
become: true
- role: ansible_role_ctop
tags: "ctop"
become: true
- role: mgrote_set_permissions
tags: "perm"
become: true
- role: mgrote_docker_compose_inline
tags: "compose"
become: true
- { role: mgrote_systemd_resolved, tags: "dns", become: true }
- { role: mgrote_mount_cifs, tags: "cifs", become: true }
- { role: geerlingguy-ansible-role-pip, tags: "pip", become: true }
- { role: geerlingguy-ansible-role-docker, tags: "docker", become: true }
- { role: gantsign-ansible-role-ctop, tags: "ctop", become: true }
- { role: mgrote_set_permissions, tags: "perm", become: true }
- { role: mgrote_docker_compose_inline, tags: "compose", become: true }

View File

@ -6,9 +6,6 @@
---
- hosts: fileserver
roles:
- role: mgrote_fileserver_smb
tags: "smb"
- role: mgrote_youtubedl
tags: "youtubedl"
- role: mgrote_disable_oom_killer
tags: "oom"
- { role: mgrote_fileserver_smb, tags: "fileserver_smb" }
- { role: mgrote_youtubedl, tags: "youtubedl" }
- { role: mgrote_disable_oom_killer, tags: "oom" }

View File

@ -1,12 +0,0 @@
---
- hosts: git
roles:
- role: ansible-role-postgresql
tags: "db"
become: true
- role: ansible_role_gitea
tags: "gitea"
become: true
- role: mgrote_gitea_setup
tags: "setup"
become: true

View File

@ -0,0 +1,4 @@
---
- hosts: gitea
roles:
- { role: pyratlabs-ansible-role-gitea, tags: "gitea", become: true }

View File

@ -1,11 +0,0 @@
---
- hosts: ldap
roles:
- role: ansible-role-postgresql
tags: "db"
become: true
- role: mgrote_lldap
tags:
- lldap
- ldap
become: true

View File

@ -1,21 +1,12 @@
---
- hosts: pbs
roles:
- role: mgrote_zfs_packages
tags: "zfs_packages"
- role: mgrote_zfs_arc_mem
tags: "zfs_arc_mem"
- role: mgrote_zfs_manage_datasets
tags: "datasets"
- role: mgrote_zfs_scrub
tags: "zfs_scrub"
- role: mgrote_zfs_zed
tags: "zfs_zed"
- role: mgrote_zfs_sanoid
tags: "sanoid"
- role: mgrote_smart
tags: "smart"
- role: mgrote_pbs_users
tags: "pbs_users"
- role: mgrote_pbs_datastores
tags: "pbs_datastores"
- { role: mgrote_zfs_packages, tags: "zfs_packages" }
- { role: mgrote_zfs_arc_mem, tags: "zfs_arc_mem" }
- { role: mgrote_zfs_manage_datasets, tags: "datasets" }
- { role: mgrote_zfs_scrub, tags: "zfs_scrub" }
- { role: mgrote_zfs_zed, tags: "zfs_zed" }
- { role: mgrote_zfs_sanoid, tags: "sanoid" }
- { role: mgrote_smart, tags: "smart" }
- { role: mgrote_pbs_users, tags: "pbs_users" }
- { role: mgrote_pbs_datastores, tags: "pbs_datastores" }

View File

@ -1,26 +1,14 @@
---
- hosts: pve
roles:
- role: mgrote_zfs_packages
tags: "zfs_packages"
- role: mgrote_zfs_arc_mem
tags: "zfs_arc_mem"
- role: mgrote_zfs_manage_datasets
tags: "datasets"
- role: mgrote_zfs_scrub
tags: "zfs_scrub"
- role: mgrote_zfs_zed
tags: "zfs_zed"
- role: mgrote_zfs_sanoid
tags: "sanoid"
- role: mgrote_smart
tags: "smart"
- role: mgrote_cv4pve_autosnap
tags: cv4pve
become: true
- role: mgrote_proxmox_bind_mounts
tags: "bindmounts"
- role: mgrote_proxmox_lxc_profiles
tags: "lxc-profile"
- role: mgrote_pbs_pve_integration
tags: "pbs"
- { role: mgrote_zfs_packages, tags: "zfs_packages" }
- { role: mgrote_zfs_arc_mem, tags: "zfs_arc_mem" }
- { role: mgrote_zfs_manage_datasets, tags: "datasets" }
- { role: mgrote_zfs_scrub, tags: "zfs_scrub" }
- { role: mgrote_zfs_zed, tags: "zfs_zed" }
- { role: mgrote_zfs_sanoid, tags: "sanoid" }
- { role: mgrote_smart, tags: "smart" }
- { role: mgrote_cv4pve_autosnap, tags: "cv4pve" }
- { role: mgrote_proxmox_bind_mounts, tags: "bindmounts" }
- { role: mgrote_proxmox_lxc_profiles, tags: "lxc-profile" }
- { role: mgrote_pbs_pve_integration, tags: "pbs" }

View File

@ -1,11 +0,0 @@
---
- hosts: all
roles:
- role: mgrote_munin_node
become: true
tags: "munin"
when: "not 'laptop' in group_names"
### The hosts must also be registered in the "munin-master" docker container.
### only runs on physical machines.
### If a plugin does not work: munin-node-configure --shell --families=contrib,auto | sh -x

View File

@ -5,12 +5,14 @@
tags: "apt_sources"
- role: mgrote_apt_manage_packages
tags: "install"
- role: mgrote_exa
tags: "exa"
- role: mgrote_remove_snapd
become: true
tags: "snapd"
- role: mgrote_apt_update_packages
tags: "updates"
- role: ansible-role-unattended-upgrades
- role: hifis-net-ansible-role-unattended-upgrades
become: true
tags: unattended
when: "ansible_facts['distribution'] == 'Ubuntu'"

View File

@ -3,21 +3,21 @@
roles:
- role: mgrote_ntp_chrony_client
tags: "ntp"
- role: mgrote_etckeeper
tags: "etckeeper"
- role: mgrote_postfix
tags: "postfix"
- role: mgrote_restic
tags: "restic"
- role: mgrote_fail2ban
tags: "f2b"
- role: mgrote_fwupd_settings
become: true
tags: fwupd
when: "ansible_facts['distribution'] == 'Ubuntu'"
- role: ansible-manage-lvm
- role: mrlesmithjr-ansible-manage-lvm
tags: "lvm"
become: true
when: manage_lvm is defined and manage_lvm
# $manage_lvm belongs to this role but is queried separately to "activate" the playbook
- role: mgrote_ssh
tags: "ssh"
- role: mgrote_netplan

View File

@ -1,6 +1,6 @@
---
- hosts: all:!pve:!pbs
roles:
- role: ansible-ufw # rules are set in the group/host vars
tags: ufw
become: true
- { role: oefenweb-ansible-ufw, # rules are set in the group/host vars
tags: "ufw",
become: true}

View File

@ -1,10 +1,10 @@
---
- hosts: all
become: true
roles:
- role: mgrote_users
tags: users
become: true
- role: mgrote_user_setup
tags:
- user_setup
- dotfiles
tags: "user"
- role: mgrote_dotfiles
tags: "dotfiles"
- role: mgrote_vim
tags: "vim"

View File

@ -1,7 +0,0 @@
---
- hosts: all
tasks:
- name: apt autoremove
become: true
ansible.builtin.apt:
autoremove: true

View File

@ -1,22 +0,0 @@
---
- hosts: all
become: true
tasks:
- name: remove files
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /home/mg/.gitconfig
- /home/mg/.tmux.conf
- /home/mg/.vimrc
- /home/mg/dotfiles
- name: remove from .bashrc
ansible.builtin.lineinfile:
path: "{{ item }}"
state: absent
line: "source /home/mg/dotfiles/.bash_extra"
loop:
- /root/.bashrc
- /home/mg/.bashrc

View File

@ -1,18 +0,0 @@
---
- hosts: all
tasks:
- name: remove user
become: true
ansible.builtin.user:
name: "{{ item }}"
state: absent
remove: true
loop:
- drone
- drone-user
- name: Ensure dir is removed
become: true
ansible.builtin.file:
path: /home/drone
state: absent

View File

@ -1,22 +0,0 @@
---
- hosts: all
become: yes
tasks:
- name: Ensure packages are absent
become: yes
ansible.builtin.apt:
autoremove: yes
autoclean: yes
purge: yes
name:
- munin-node
state: absent
- name: Ensure directories are absent
become: yes
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /usr/share/munin
- /etc/munin

View File

@ -1,78 +0,0 @@
---
- hosts: all
tasks:
- name: ensure group exists
become: true
ansible.builtin.group:
name: restic
state: absent
- name: install restic-packages
become: true
ansible.builtin.package:
name:
- restic
state: absent
- name: create "/etc/restic"
become: true
ansible.builtin.file:
path: "/etc/restic"
state: absent
- name: systemctl start restic.timer
become: true
ansible.builtin.systemd:
name: restic.timer
state: stopped
enabled: false
- name: systemctl enable units
become: true
ansible.builtin.systemd:
name: "{{ item }}"
enabled: false
masked: true
with_items:
- media-restic.automount
- media-restic.mount
- restic.service
- restic.timer
- restic_mail.service
- name: template restic.mount
become: true
ansible.builtin.file:
state: absent
path: /etc/systemd/system/media-restic.mount # media-restic == /media/restic
- name: template restic.automount
become: true
ansible.builtin.file:
path: /etc/systemd/system/media-restic.automount
state: absent
- name: template restic.service
become: true
ansible.builtin.file:
path: /etc/systemd/system/restic.service
state: absent
- name: template restic.timer
become: true
ansible.builtin.file:
path: /etc/systemd/system/restic.timer
state: absent
- name: template restic_mail.service
become: true
ansible.builtin.file:
path: /etc/systemd/system/restic_mail.service
state: absent
- name: template restic_mail.service
become: true
ansible.builtin.file:
path: /etc/systemd/system/media-restic.automount
state: absent

View File

@ -1,26 +0,0 @@
---
- hosts: all
become: true
tasks:
- name: update apt cache
ansible.builtin.apt:
update_cache: true
- name: update installed packages
ansible.builtin.package:
upgrade: dist
register: upgrade
- name: apt autoremove
ansible.builtin.apt:
autoremove: true
clean: yes
- name: reboot
ansible.builtin.reboot:
when: (upgrade.changed and (inventory_hostname != 'pve5.mgrote.net' and inventory_hostname != 'ansible2.mgrote.net'))
- name: Info
ansible.builtin.debug:
msg: Mount the pool on pbs.mgrote.net!
when: (upgrade.changed and inventory_hostname == 'pbs.mgrote.net')

View File

@ -1,5 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["config:recommended"],
"ignorePaths": ["**/friedhof/**"]
}

View File

@ -1,30 +0,0 @@
collections:
- name: community.general
version: "8.6.0"
- name: community.crypto
version: "2.19.1"
- name: ansible.posix
version: "1.5.4"
- name: community.docker
version: "3.9.0"
roles:
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-bootstrap
version: "6.2.5"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-ufw
version: "v4.1.13"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-manage-lvm
version: "v0.2.11"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-unattended-upgrades
version: "v4.1.0"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-pip
version: "3.0.3"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-nfs
version: "2.0.0"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-docker
version: "7.1.0"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible_role_ctop
version: "1.1.6"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible_role_gitea
version: "v3.4.2"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-postgresql
version: "3.5.1"

30
requirements.yml Normal file
View File

@ -0,0 +1,30 @@
collections:
- git+https://git.mgrote.net/ansible-collections-mirrors/community.general
- git+https://git.mgrote.net/ansible-collections-mirrors/community.crypto
- git+https://git.mgrote.net/ansible-collections-mirrors/ansible.posix
- git+https://git.mgrote.net/ansible-collections-mirrors/community.docker
roles:
- src: https://git.mgrote.net/ansible-roles-mirrors/pyratlabs-ansible-role-k3s
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/robertdebock-ansible-role-bootstrap
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/pandemonium1986-ansible-role-k9s
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/oefenweb-ansible-ufw
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/mrlesmithjr-ansible-manage-lvm
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/hifis-net-ansible-role-unattended-upgrades
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-pip
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-nfs
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-helm
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-docker
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/gantsign-ansible-role-ctop
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/pyratlabs-ansible-role-gitea
scm: git
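
Unlike the old requirements.yaml, these git sources are unpinned. If a role ever needs to be held at a tag again, ansible-galaxy role entries with `scm: git` also accept a `version` key; a sketch, with the tag value copied from the old file above:

```yaml
roles:
  - src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-docker
    scm: git
    version: "7.1.0"  # tag, branch, or commit
```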

View File

@ -15,7 +15,7 @@ aus.
- extra parameters for individual hosts
apt_packages_internet:
- for URLs
- http://docker10.mgrote.net:3344/bash-helper-scripts-mgrote-latest.deb
- http://docker10.grote.lan:3344/bash-helper-scripts-mgrote-latest.deb
```

View File

@ -29,9 +29,10 @@
- name: install packages from the internet
become: true
ansible.builtin.apt:
deb: "{{ apt_packages_internet }}"
deb: "{{ item }}"
state: present
when: apt_packages_internet is defined
loop: "{{ apt_packages_internet }}"
- name: remove packages
become: true
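
For context, a sketch of the inventory list this loop consumes; the URL is the one documented in the role's README above:

```yaml
# host_vars entry, example taken from the README
apt_packages_internet:
  - http://docker10.mgrote.net:3344/bash-helper-scripts-mgrote-latest.deb
```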

View File

@ -1,6 +1,10 @@
---
- name: update apt cache and installed packages
- name: update apt cache
become: true
ansible.builtin.apt:
update_cache: true
- name: update installed packages
become: true
ansible.builtin.package:
upgrade: dist
update_cache: true

View File

@ -1,68 +0,0 @@
---
# Docs in config.yml and https://0xerr0r.github.io/blocky/configuration/
blocky_user: blocky
blocky_group: blocky
blocky_version: v0.22
blocky_arch: x86_64
blocky_download_url: "https://github.com/0xERR0R/blocky/releases/download/{{ blocky_version }}/blocky_{{ blocky_version }}_Linux_{{ blocky_arch }}.tar.gz"
blocky_conf_dir: /etc/blocky
blocky_block_type: zeroIp
blocky_block_ttl: 1m
blocky_blacklists_strategy: failOnError
blocky_local_upstream: 192.168.2.1
blocky_prometheus: false
blocky_fqdn_only: false
blocky_port_dns: 53
blocky_log_level: info
blocky_dns_upstream:
- 9.9.9.9
- 1.1.1.1
- 8.8.8.8
- 5.9.164.112
blocky_dns_blocklists:
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
- https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
- http://sysctl.org/cameleon/hosts
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
#blocky_custom_lookups: # optional
# # Internet
# - name: wiki.mgrote.net
# ip: 192.168.2.43
# - name: audio.mgrote.net
# ip: 192.168.2.43
# - name: auth.mgrote.net
# ip: 192.168.2.43
# - name: ci.mgrote.net
# ip: 192.168.2.43
# - name: git.mgrote.net
# ip: 192.168.2.43
# - name: miniflux.mgrote.net
# ip: 192.168.2.43
# - name: nextcloud.mgrote.net
# ip: 192.168.2.43
# - name: registry.mgrote.net
# ip: 192.168.2.43
# # Internal
# - name: ads2700w.mgrote.net
# ip: 192.168.2.147
# - name: crs305.mgrote.net
# ip: 192.168.2.225
# - name: hex.mgrote.net
# ip: 192.168.3.144
# - name: pbs-test.mgrote.net
# ip: 192.168.2.18
# - name: pbs.mgrote.net
# ip: 192.168.3.239
# - name: pve5-test.mgrote.net
# ip: 192.168.2.17
# - name: pve5.mgrote.net # also kept in the router, because if pve is off, blocky is down too ;-)
# ip: 192.168.2.16
# - name: rb5009.mgrote.net
# ip: 192.168.2.1
# - name: fritz.box
# ip: 192.168.5.1
#blocky_conditional_mapping: # optional
# - domain: mgrote.net
# resolver: 192.168.2.1
#

View File

@ -1,17 +0,0 @@
---
- name: set cap_net_bind_service # noqa no-changed-when
become: true
ansible.builtin.command: setcap 'cap_net_bind_service=+ep' /usr/local/bin/blocky
- name: systemctl daemon-reload
become: true
ansible.builtin.systemd:
daemon_reload: true
- name: restart service unit
become: true
ansible.builtin.systemd:
name: blocky.service
state: restarted
enabled: true

View File

@ -1,61 +0,0 @@
---
- name: ensure group exists
become: true
ansible.builtin.group:
name: "{{ blocky_group }}"
state: present
- name: ensure user exists
become: true
ansible.builtin.user:
name: "{{ blocky_user }}"
state: present
create_home: false
- name: ensure binaries are installed
become: true
ansible.builtin.unarchive:
src: "{{ blocky_download_url }}"
dest: /usr/local/bin
remote_src: true
owner: "{{ blocky_user }}"
group: "{{ blocky_group }}"
mode: "0755"
exclude:
- LICENSE
- README.md
notify:
- set cap_net_bind_service
- restart service unit
- name: ensure conf dir exists
become: true
ansible.builtin.file:
path: "{{ blocky_conf_dir }}"
state: directory
owner: "{{ blocky_user }}"
group: "{{ blocky_group }}"
mode: "0755"
- name: template configuration
become: true
ansible.builtin.template:
src: "config.yml.j2"
dest: "{{ blocky_conf_dir }}/config.yml"
owner: "{{ blocky_user }}"
group: "{{ blocky_group }}"
mode: "0600"
notify:
- restart service unit
- name: template service
become: true
ansible.builtin.template:
src: "blocky.service.j2"
dest: /etc/systemd/system/blocky.service
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- restart service unit

View File

@ -1,15 +0,0 @@
{{ file_header | default () }}
[Unit]
Description=Blocky is a DNS proxy and ad-blocker for the local network written in Go.
Requires=network.target
Wants=nss-lookup.target
Before=nss-lookup.target
After=network.target
[Service]
Type=simple
ExecStart=/usr/local/bin/blocky --config {{ blocky_conf_dir }}/config.yml
[Install]
WantedBy=multi-user.target

View File

@ -1,196 +0,0 @@
{{ file_header | default () }}
upstreams:
init:
# Configure startup behavior.
# accepted: blocking, failOnError, fast
# default: blocking
strategy: fast
groups:
default:
{% for item in blocky_dns_upstream %}
- {{ item }}
{% endfor %}
strategy: parallel_best
timeout: 2s
# optional: Determines how blocky will create outgoing connections. This impacts both upstreams, and lists.
# accepted: dual, v4, v6
# default: dual
connectIPVersion: v4
# optional: use black and white lists to block queries (for example ads, trackers, adult pages etc.)
blocking:
# definition of blacklist groups. Can be external link (http/https) or local file
blackLists:
ads:
{% for item in blocky_dns_blocklists %}
- {{ item }}
{% endfor %}
# which response will be sent, if query is blocked:
# zeroIp: 0.0.0.0 will be returned (default)
# nxDomain: return NXDOMAIN as return code
# comma separated list of destination IP addresses (for example: 192.100.100.15, 2001:0db8:85a3:08d3:1319:8a2e:0370:7344). Should contain ipv4 and ipv6 to cover all query types. Useful with running web server on this address to display the "blocked" page.
blockType: {{ blocky_block_type | default ("zeroIp") }}
# optional: TTL for answers to blocked domains
# default: 6h
blockTTL: {{ blocky_block_ttl | default ("6h") }}
clientGroupsBlock:
# default will be used, if no special definition for a client name exists
default:
- ads # see blocking.blacklists.ads
# optional: Configure how lists, AKA sources, are loaded
loading:
# optional: list refresh period in duration format.
# Set to a value <= 0 to disable.
# default: 4h
refreshPeriod: 4h
# optional: Applies only to lists that are downloaded (HTTP URLs).
downloads:
# optional: timeout for list download (each url). Use large values for big lists or slow internet connections
# default: 5s
timeout: 60s
# optional: Maximum download attempts
# default: 3
attempts: 5
# optional: Time between the download attempts
# default: 500ms
cooldown: 10s
# optional: Maximum number of lists to process in parallel.
# default: 4
concurrency: 16
# Configure startup behavior.
# accepted: blocking, failOnError, fast
# default: blocking
strategy: {{ blocky_blacklists_strategy | default ("blocking") }}
# Number of errors allowed in a list before it is considered invalid.
# A value of -1 disables the limit.
# default: 5
maxErrorsPerSource: 5
{% if blocky_conditional_mapping is defined %}
# optional: definition, which DNS resolver(s) should be used for queries to the domain (with all sub-domains). Multiple resolvers must be separated by a comma
# Example: Query client.fritz.box will ask DNS server 192.168.178.1. This is necessary for local network, to resolve clients by host name
conditional:
# optional: if false (default), return empty result if after rewrite, the mapped resolver returned an empty answer. If true, the original query will be sent to the upstream resolver
# Example: The query "blog.example.com" will be rewritten to "blog.fritz.box" and also redirected to the resolver at 192.168.178.1. If not found and if `fallbackUpstream` was set to `true`, the original query "blog.example.com" will be sent upstream.
# Usage: One usecase when having split DNS for internal and external (internet facing) users, but not all subdomains are listed in the internal domain.
fallbackUpstream: false
mapping:
{% for item in blocky_conditional_mapping %}
{{ item.domain }}: {{ item.resolver }}
{% endfor %}
{% endif %}
{% if blocky_custom_lookups is defined %}
# optional: custom IP address(es) for domain name (with all sub-domains). Multiple addresses must be separated by a comma
# example: query "printer.lan" or "my.printer.lan" will return 192.168.178.3
customDNS:
customTTL: 1h
# optional: if true (default), return empty result for unmapped query types (for example TXT, MX or AAAA if only IPv4 address is defined).
# if false, queries with unmapped types will be forwarded to the upstream resolver
filterUnmappedTypes: true
# optional: replace domain in the query with other domain before resolver lookup in the mapping
# rewrite:
# example.com: printer.lan
mapping:
{% for item in blocky_custom_lookups %}
{{ item.name }}: {{ item.ip }}
{% endfor %}
{% endif %}
# optional: configuration for caching of DNS responses
caching:
# duration how long a response must be cached (min value).
# If <=0, use response's TTL, if >0 use this value, if TTL is smaller
# Default: 0
minTime: 0
# duration how long a response must be cached (max value).
# If <0, do not cache responses
# If 0, use TTL
# If > 0, use this value, if TTL is greater
# Default: 0
maxTime: 0
# Max number of cache entries (responses) to be kept in cache (soft limit). Useful on systems with limited amount of RAM.
# Default (0): unlimited
maxItemsCount: 0
# if true, will preload DNS results for often used queries (default: names queried more than 5 times in a 2-hour time window)
# this improves the response time for often used queries, but significantly increases external traffic
# default: false
prefetching: true
# prefetch track time window (in duration format)
# default: 120
prefetchExpires: 2h
# name queries threshold for prefetch
# default: 5
prefetchThreshold: 5
# Max number of domains to be kept in cache for prefetching (soft limit). Useful on systems with limited amount of RAM.
# Default (0): unlimited
prefetchMaxItemsCount: 0
# Time how long negative results (NXDOMAIN response or empty result) are cached. A value of -1 will disable caching for negative results.
# Default: 30m
cacheTimeNegative: -1
# optional: configuration of client name resolution
clientLookup:
# optional: this DNS resolver will be used to perform reverse DNS lookup (typically local router)
upstream: {{ blocky_local_upstream | default ("192.168.2.1") }}
# optional: some routers return multiple names for client (host name and user defined name). Define which single name should be used.
# Example: take second name if present, if not take first name
# singleNameOrder:
# - 2
# - 1
# optional: configuration for prometheus metrics endpoint
prometheus:
# enabled if true
enable: {{ blocky_prometheus | default ("false") }}
# url path, optional (default '/metrics')
path: /metrics
# optional: Minimal TLS version that the DoH and DoT server will use
# minTlsServeVersion: 1.3
# if https port > 0: path to cert and key file for SSL encryption. if not set, self-signed certificate will be generated
#certFile: server.crt
#keyFile: server.key
# optional: use these DNS servers to resolve blacklist urls and upstream DNS servers. It is useful if no system DNS resolver is configured, and/or to encrypt the bootstrap queries.
bootstrapDns:
- tcp+udp:9.9.9.9
# optional: drop all queries with following query types. Default: empty
filtering:
queryTypes:
- AAAA
# optional: return NXDOMAIN for queries that are not FQDNs.
fqdnOnly:
# default: false
enable: {{ blocky_fqdn_only | default ("false") }}
# optional: ports configuration
ports:
# optional: DNS listener port(s) and bind ip address(es), default 53 (UDP and TCP). Example: 53, :53, "127.0.0.1:5353,[::1]:5353"
dns: {{ blocky_port_dns | default ("53") }}
# optional: Port(s) and bind ip address(es) for DoT (DNS-over-TLS) listener. Example: 853, 127.0.0.1:853
# tls: 853
# optional: Port(s) and optional bind ip address(es) to serve HTTPS used for prometheus metrics, pprof, REST API, DoH... If you wish to specify a specific IP, you can do so such as 192.168.0.1:443. Example: 443, :443, 127.0.0.1:443,[::1]:443
# https: 443
# optional: Port(s) and optional bind ip address(es) to serve HTTP used for prometheus metrics, pprof, REST API, DoH... If you wish to specify a specific IP, you can do so such as 192.168.0.1:4000. Example: 4000, :4000, 127.0.0.1:4000,[::1]:4000
http: 4000
# optional: logging configuration
log:
# optional: Log level (one from debug, info, warn, error). Default: info
level: {{ blocky_log_level | default ("info") }}
# optional: Log format (text or json). Default: text
format: text
# optional: log timestamps. Default: true
timestamp: true
# optional: obfuscate log output (replace all alphanumeric characters with *) for user sensitive data like request domains or responses to increase privacy. Default: false
privacy: false

View File

@ -0,0 +1,11 @@
## mgrote.cv4pve
### Description
Installs [cv4pve-autosnap](https://github.com/Corsinvest/cv4pve-autosnap).
Creates a systemd timer.
### Tested on
- [x] Proxmox 7*
### Variables + defaults
- see [defaults](./defaults/main.yml)
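
A hedged sketch of the host variables the role consumes (names and defaults from defaults/main.yml; the token is a placeholder, real values belong in the vault):

```yaml
# host_vars/pve5.mgrote.net.yml, illustrative values
cv4pve_api_user: "root@pam!test2"
cv4pve_api_token: "XXXXXXXXXXXXXXXXXXXXXX"  # placeholder
cv4pve_vmid: all            # snapshot all VMs
cv4pve_keep_snapshots: 3
cv4pve_cron_hour: "5"       # timer fires daily at 05:39
cv4pve_cron_minute: "39"
```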

View File

@ -3,7 +3,7 @@
cv4pve_cron_minute: "39"
cv4pve_cron_hour: "5"
# proxmox api-token and user
cv4pve_api_token: "supersecret"
cv4pve_api_token: "XXXXXXXXXXXXXXXXXXXXXX"
cv4pve_api_user: "root@pam!test2"
# which vm to snapshot
cv4pve_vmid: all
@ -12,7 +12,3 @@ cv4pve_keep_snapshots: 3
# under which user the script is run
cv4pve_user_group: cv4pve
cv4pve_user: cv4pve
# url
cv4pve_dl_link: https://github.com/Corsinvest/cv4pve-autosnap/releases/download/{{ cv4pve_version }}/cv4pve-autosnap-linux-x64.zip
cv4pve_version: "v1.14.8"
cv4pve_base_path: /usr/local/bin/cv4pve

View File

@ -1,42 +0,0 @@
---
- name: Ensure needed directories exist
ansible.builtin.file:
path: "{{ cv4pve_base_path }}"
state: directory
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
mode: "0644"
- name: Download specified version
ansible.builtin.unarchive:
src: "{{ cv4pve_dl_link }}"
dest: "{{ cv4pve_base_path }}"
mode: '0755'
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
remote_src: true
creates: "{{ cv4pve_base_path }}/cv4pve-autosnap-{{ cv4pve_version }}"
list_files: true
register: download
- name: Rename binary # noqa no-changed-when no-handler
ansible.builtin.command: |
mv "{{ cv4pve_base_path }}/cv4pve-autosnap" "{{ cv4pve_base_path }}/cv4pve-autosnap-{{ cv4pve_version }}"
when: download.changed
# https://stackoverflow.com/questions/20252057/using-ansible-how-would-i-delete-all-items-except-for-a-specified-set-in-a-dire
- name: Find old versions
ansible.builtin.find:
paths: "{{ cv4pve_base_path }}"
file_type: file
use_regex: false
excludes:
- "cv4pve-autosnap-{{ cv4pve_version }}"
register: found_files
- name: Ensure old versions are absent
ansible.builtin.file:
path: "{{ item.path }}"
state: absent
with_items: "{{ found_files['files'] }}"
...

View File

@ -2,9 +2,74 @@
- name: include user tasks
ansible.builtin.include_tasks: user.yml
- name: include install tasks
ansible.builtin.include_tasks: install.yml
- name: include systemd tasks
ansible.builtin.include_tasks: systemd.yml
...
- name: create directories
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
mode: "0644"
loop:
- '/tmp/cv4pve'
- '/usr/local/bin/cv4pve'
- name: download archives
become: true
ansible.builtin.get_url:
url: "{{ cv4pve_dl_link }}"
dest: /tmp/cv4pve/cv4pve-autosnap-linux-x64.zip
mode: '0775'
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
- name: extract archives
become: true
ansible.builtin.unarchive:
src: /tmp/cv4pve/cv4pve-autosnap-linux-x64.zip
dest: /usr/local/bin/cv4pve
remote_src: true
mode: a+x
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
- name: template cv4pve.service
become: true
ansible.builtin.template:
src: cv4pve.service.j2
dest: /etc/systemd/system/cv4pve.service
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: template cv4pve_mail.service
become: true
ansible.builtin.template:
src: cv4pve_mail.service.j2
dest: /etc/systemd/system/cv4pve_mail.service
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: template cv4pve.timer
become: true
ansible.builtin.template:
src: cv4pve.timer.j2
dest: /etc/systemd/system/cv4pve.timer
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: systemctl start cv4pve.timer
become: true
ansible.builtin.systemd:
name: cv4pve.timer
state: started
enabled: true

View File

@ -1,38 +0,0 @@
---
- name: Ensure service-unit (cv4pve) is templated
ansible.builtin.template:
src: cv4pve.service.j2
dest: /etc/systemd/system/cv4pve.service
owner: root
group: root
mode: "0644"
no_log: true
notify:
- systemctl daemon-reload
- name: Ensure service-unit (mail) is templated
ansible.builtin.template:
src: cv4pve_mail.service.j2
dest: /etc/systemd/system/cv4pve_mail.service
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: Ensure service-unit (timer) is templated
ansible.builtin.template:
src: cv4pve.timer.j2
dest: /etc/systemd/system/cv4pve.timer
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: Ensure timer is started and enabled
ansible.builtin.systemd:
name: cv4pve.timer
state: started
enabled: true
...

View File

@ -1,5 +1,5 @@
---
- name: Ensure group exists
- name: ensure group exists
become: true
ansible.builtin.group:
name: "{{ cv4pve_user_group }}"
@ -7,7 +7,7 @@
when:
- cv4pve_user_group is defined
- name: Ensure user exists
- name: ensure user exists
become: true
ansible.builtin.user:
name: "{{ cv4pve_user }}"
@ -17,4 +17,3 @@
when:
- cv4pve_user_group is defined
- cv4pve_user is defined
...

View File

@ -6,4 +6,4 @@ OnFailure=cv4pve_mail.service
[Service]
Type=simple
ExecStart={{ cv4pve_base_path }}/cv4pve-autosnap-{{ cv4pve_version }} --host=127.0.0.1 --api-token {{ cv4pve_api_user }}={{ cv4pve_api_token }} --vmid="{{ cv4pve_vmid }}" snap --label='daily' --keep="{{ cv4pve_keep_snapshots }}" --state
ExecStart=/usr/local/bin/cv4pve/cv4pve-autosnap --host=127.0.0.1 --api-token {{ cv4pve_api_user }}={{ cv4pve_api_token }} --vmid="{{ cv4pve_vmid }}" snap --label='daily' --keep="{{ cv4pve_keep_snapshots }}" --state

View File

@ -6,5 +6,6 @@ Description=Timer: Trigger VM-Snapshots in PVE with cv4pve.
OnCalendar=*-*-* {{ cv4pve_cron_hour }}:{{ cv4pve_cron_minute }}:00
RandomizedDelaySec=10 min
[Install]
WantedBy=timers.target multi-user.target

View File

@ -1,4 +1,5 @@
{{ file_header | default () }}
[Unit]
Description=Send a Mail in case of an error in cv4pve.service.

View File

@ -0,0 +1,11 @@
## mgrote.dotfiles
### Description
Clones the dotfiles repo and creates the necessary directories.
### Tested on
- [x] Ubuntu (>=18.04)
- [x] Linux Mint
### Variables + defaults
see [defaults](./defaults/main.yml)
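
A minimal sketch of overriding the role for another user (keys from the defaults below; the user name is hypothetical):

```yaml
# host_vars/example.yml, illustrative values
dotfiles_owner: alice
dotfiles_repo_path: /home/alice/dotfiles
dotfiles_files:
  - repo_path: /home/alice/dotfiles/.vimrc
    local_path: /home/alice/.vimrc
dotfiles_dirs:
  - path: /home/alice/.config/i3
```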

View File

@ -0,0 +1,11 @@
---
dotfiles_repo_url: https://git.mgrote.net/mg/dotfiles # URL of the repo
dotfiles_repo_path: /home/mg/dotfiles # where the repo is stored locally
dotfiles_repo_branch: master # default branch to check out
dotfiles_files: # which files get linked where (ln -s)
- repo_path: "{{ dotfiles_repo_path }}/.vimrc"
local_path: "/home/mg/.vimrc"
dotfiles_dirs: # which directories get created
- path: /home/mg/.config/i3
- path: /home/mg/.config/polybar
dotfiles_owner: mg # chown

View File

@ -0,0 +1,8 @@
---
- name: set owner recursive for repo
ansible.builtin.file:
path: "{{ dotfiles_repo_path }}"
owner: "{{ dotfiles_owner }}"
group: "{{ dotfiles_owner }}"
recurse: true
...

View File

@ -0,0 +1,63 @@
---
- name: Ensure package acl is installed
become: true
ansible.builtin.package:
name: acl
state: present
- name: check if repo exists
ansible.builtin.stat:
path: "{{ dotfiles_repo_path }}"
register: repo_exists
- name: set safe directory
become: true
ansible.builtin.command: # noqa command-instead-of-module
cmd: git config --global --add safe.directory "{{ dotfiles_repo_path }}"
changed_when: false
- name: stash changes
ansible.builtin.command: git stash # noqa command-instead-of-module no-handler
args:
chdir: "{{ dotfiles_repo_path }}"
changed_when: false
when: repo_exists.stat.exists
- name: Ensure dotfiles repository is cloned locally.
ansible.builtin.git:
repo: "{{ dotfiles_repo_url }}"
dest: "{{ dotfiles_repo_path }}"
depth: 1
version: "{{ dotfiles_repo_branch }}"
notify: set owner recursive for repo
- name: Ensure needed dirs exist.
ansible.builtin.file:
path: "{{ item.path }}"
state: directory
owner: "{{ dotfiles_owner }}"
group: "{{ dotfiles_owner }}"
mode: "0644"
with_items: "{{ dotfiles_dirs }}"
- name: Link dotfiles into home folder
ansible.builtin.file:
src: "{{ item.repo_path }}"
dest: "{{ item.local_path }}"
state: link
force: true
owner: "{{ dotfiles_owner }}"
group: "{{ dotfiles_owner }}"
with_items: "{{ dotfiles_files }}"
- name: add .bash_extra to .bashrc
ansible.builtin.lineinfile:
path: /home/{{ dotfiles_owner }}/.bashrc
line: "source {{ dotfiles_repo_path }}/.bash_extra"
state: present
- name: root - add .bash_extra to .bashrc
ansible.builtin.lineinfile:
path: /root/.bashrc
line: "source {{ dotfiles_repo_path }}/.bash_extra"
state: present

Some files were not shown because too many files have changed in this diff.