Compare commits


3 Commits
master ... b2

Author SHA1 Message Date
Michael Grote 01e3663eb2 pn 2023-11-29 22:43:38 +01:00
Michael Grote 181880277a s 2023-11-29 22:34:22 +01:00
Michael Grote c95b9b0ab1 inv 2023-11-29 22:30:36 +01:00
131 changed files with 1250 additions and 1811 deletions

View File

@ -12,14 +12,15 @@ exclude_paths:
- .gitlab-ci.yml
- friedhof/
- playbooks/on-off
- roles/ansible-role-pip
- roles/ansible-role-bootstrap
- roles/ansible_role_ctop
- roles/ansible-role-docker
- roles/ansible-role-helm
- roles/ansible-role-nfs
- roles/ansible-role-unattended-upgrades
- roles/ansible-manage-lvm
- roles/ansible-ufw
- roles/ansible_role_gitea
- roles/ansible-role-postgresql
- roles/geerlingguy-ansible-role-pip
- roles/pyratlabs-ansible-role-k3s
- roles/robertdebock-ansible-role-bootstrap
- roles/gantsign-ansible-role-ctop
- roles/geerlingguy-ansible-role-docker
- roles/geerlingguy-ansible-role-helm
- roles/geerlingguy-ansible-role-nfs
- roles/hifis-net-ansible-role-unattended-upgrades
- roles/mrlesmithjr-ansible-manage-lvm
- roles/oefenweb-ansible-ufw
- roles/pandemonium1986-ansible-role-k9s
- roles/pyratlabs-ansible-role-gitea

.gitignore
View File

@ -2,19 +2,16 @@
vault-pass.yml
id_ed25519
id_ed25519.pub
roles/ansible-role-pip
roles/ansible-role-k3s
roles/ansible-role-bootstrap
roles/ansible_role_ctop
roles/ansible-role-docker
roles/ansible-role-helm
roles/ansible-role-nfs
roles/ansible_role_gitea
roles/ansible-role-unattended-upgrades
roles/ansible-manage-lvm
roles/ansible-ufw
roles/geerlingguy-ansible-role-pip
roles/pyratlabs-ansible-role-k3s
roles/robertdebock-ansible-role-bootstrap
roles/gantsign-ansible-role-ctop
roles/geerlingguy-ansible-role-docker
roles/geerlingguy-ansible-role-helm
roles/geerlingguy-ansible-role-nfs
roles/hifis-net-ansible-role-unattended-upgrades
roles/mrlesmithjr-ansible-manage-lvm
roles/oefenweb-ansible-ufw
roles/pandemonium1986-ansible-role-k9s
roles/ansible_role_gitea
roles/pyratlabs-ansible-role-gitea
collections/
plugins/lookup/__pycache__/
roles/ansible-role-postgresql

View File

@ -1,18 +1,19 @@
---
kind: pipeline
type: docker
name: ansible-lint
depends_on:
- gitleaks
steps:
ansible-lint:
image: quay.io/ansible/creator-ee:v24.2.0
image: quay.io/ansible/creator-ee
commands:
- ansible-lint --version
- echo $${VAULTPASS} > ./vault-pass.yml # name of the secret in upper case
- ansible-galaxy install -r requirements.yaml
- echo $VAULT-PASS > ./vault-pass.yml # name of the secret in upper case
- ansible-galaxy install -r requirements.yml
- ansible-lint --force-color --format pep8
# https://woodpecker-ci.org/docs/usage/secrets#use-secrets-in-commands
secrets: [vaultpass]
when:
- event: [push, pull_request, cron]
evaluate: 'CI_COMMIT_AUTHOR_EMAIL != "renovate@mgrote.net"'
...
event:
exclude:
- tag
secret: [vault-pass] # this secret may be used
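# A hedged sketch of registering that secret so the step above can read it
# (assumes the Woodpecker CLI is installed and authenticated; the repo slug is an
# example and flag spelling varies between CLI versions):
#   woodpecker-cli secret add --repository mg/ansible --name vault-pass --value "<vault password>"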

View File

@ -1,10 +1,13 @@
---
kind: pipeline
type: docker
name: gitleaks
steps:
gitleaks:
image: zricethezav/gitleaks:v8.18.2
image: zricethezav/gitleaks:latest
commands:
- gitleaks detect --no-git --verbose --source $CI_WORKSPACE
when:
- event: [push, pull_request, cron]
evaluate: 'CI_COMMIT_AUTHOR_EMAIL != "renovate@mgrote.net"'
...
event:
exclude:
- tag
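# A local dry run equivalent to this CI step (a sketch; the bind-mount path is an
# assumption, the flags are taken from the step above):
#   docker run --rm -v "$(pwd):/repo" zricethezav/gitleaks:latest detect --no-git --verbose --source /repo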

View File

@ -4,7 +4,7 @@ nocows = 1
retry_files_enabled = False
roles_path = ./roles
lookup_plugins = ./plugins/lookup
collections_path = ./collections
collections_paths = ./collections
private_key_file = ./id_ed25519
vault_password_file = vault-pass.yml
gathering = smart

View File

@ -2,22 +2,19 @@ version: '3'
services:
httpd-registry:
container_name: "httpd-registry"
image: "registry.mgrote.net/httpd:latest"
image: httpd:bullseye
restart: always
volumes:
- uploads:/usr/local/apache2/htdocs/
- "{{ compose_dest_basedir }}/httpd/httpd.conf:/usr/local/apache2/conf/httpd.conf:ro"
ports:
- 3344:80
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
labels:
com.centurylinklabs.watchtower.enable: true
python-api-server:
container_name: httpd-api
image: "registry.mgrote.net/python-api-server:latest"
image: registry.mgrote.net/python-api-server:latest
restart: always
ports:
- "5040:5000"
@ -28,8 +25,10 @@ services:
# FLASK_APP: app # for debugging
MAX_CONTENT_LENGTH: 500
UPLOAD_DIRECTORY: /uploads
AUTH_TOKEN: "{{ lookup('keepass', 'httpd-api-server-token', 'password') }}"
AUTH_TOKEN: {{ lookup('keepass', 'httpd-api-server-token', 'password') }}
ENABLE_WEBSERVER: false
labels:
com.centurylinklabs.watchtower.enable: true
volumes:
uploads:
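# The {{ lookup('keepass', ...) }} expressions above are Jinja2; Ansible renders them
# before the compose file lands on the host. A minimal sketch of such a deploy task
# (task name and paths are assumptions, not this repo's actual role):
#   - name: Render compose file
#     ansible.builtin.template:
#       src: docker-compose.yml.j2
#       dest: /home/docker-user/httpd/docker-compose.yml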

View File

@ -1,33 +1,25 @@
version: '3.3'
services:
postfix:
image: "registry.mgrote.net/postfix:latest"
container_name: mail-relay
restart: always
ports:
- 1025:25
environment:
SMTP_SERVER: smtp.strato.de
SMTP_USERNAME: info@mgrote.net
SMTP_PASSWORD: "{{ lookup('keepass', 'strato_smtp_password', 'password') }}"
SERVER_HOSTNAME: mgrote.net
# DEBUG: "yes" # as string not boolean
ALWAYS_ADD_MISSING_HEADERS: "no" # as string not boolean
# LOG_SUBJECT: "yes" # as string not boolean
INET_PROTOCOL: ipv4
SMTP_GENERIC_MAP: |
/nobody@lldap/ lldap@mgrote.net
/mg@pbs.localdomain/ pbs@mgrote.net
/root@pbs.localdomain/ pbs@mgrote.net
# rewrite FROM "nobody@lldap" to "lldap@mgrote.net"
# /.*/ would rewrite all sender addresses
networks:
- mail-relay
healthcheck:
test: ["CMD", "sh", "-c", "echo 'EHLO localhost' | nc -w 1 127.0.0.1 25 | grep -q '220 '"]
interval: 30s
timeout: 10s
retries: 3
postfix:
image: registry.mgrote.net/postfix:master
container_name: mail-relay
restart: always
labels:
com.centurylinklabs.watchtower.enable: true
ports:
- 1025:25
environment:
SMTP_SERVER: smtp.strato.de
SMTP_USERNAME: info@mgrote.net
SMTP_PASSWORD: {{ lookup('keepass', 'strato_smtp_password', 'password') }}
SERVER_HOSTNAME: mgrote.net
# DEBUG: "yes" # literal
ALWAYS_ADD_MISSING_HEADERS: "no" # literal
# LOG_SUBJECT: "yes" # literal
INET_PROTOCOL: ipv4
SMTP_GENERIC_MAP: "/.*/ info@mgrote.net"
networks:
- mail-relay
######## Networks ########
networks:

View File

@ -3,16 +3,16 @@ services:
######## Miniflux ########
miniflux:
container_name: "mf-frontend"
image: "ghcr.io/miniflux/miniflux:2.1.3"
image: miniflux/miniflux:latest
restart: always
depends_on:
- mf-db16
- db
environment:
DATABASE_URL: "postgres://miniflux:{{ lookup('keepass', 'miniflux_postgres_password', 'password') }}@mf-db16/miniflux?sslmode=disable"
DATABASE_URL: postgres://miniflux:{{ lookup('keepass', 'miniflux_postgres_password', 'password') }}@mf-db/miniflux?sslmode=disable
RUN_MIGRATIONS: 1
# CREATE_ADMIN: 1
# ADMIN_USERNAME: adminmf
# ADMIN_PASSWORD: "{{ lookup('keepass', 'miniflux_admin_password', 'password') }}"
# ADMIN_PASSWORD: {{ lookup('keepass', 'miniflux_admin_password', 'password') }}
WORKER_POOL_SIZE: 10
POLLING_FREQUENCY: 10
CLEANUP_ARCHIVE_UNREAD_DAYS: -1
@ -21,8 +21,6 @@ services:
networks:
- intern
- traefik
healthcheck:
test: ["CMD", "/usr/bin/miniflux", "-healthcheck", "auto"]
labels:
traefik.http.routers.miniflux.rule: Host(`miniflux.mgrote.net`)
traefik.enable: true
@ -31,24 +29,25 @@ services:
traefik.http.routers.miniflux.entrypoints: entry_https
traefik.http.services.miniflux.loadbalancer.server.port: 8080
######## Postgres ########
mf-db16:
container_name: "mf-db16"
image: "postgres:16.3"
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: mf-db
######## PostGreSQL ########
db:
container_name: "mf-db"
image: postgres:13
restart: always
environment:
POSTGRES_USER: miniflux
POSTGRES_PASSWORD: "{{ lookup('keepass', 'miniflux_postgres_password', 'password') }}"
POSTGRES_PASSWORD: {{ lookup('keepass', 'miniflux_postgres_password', 'password') }}
TZ: Europe/Berlin
POSTGRES_HOST_AUTH_METHOD: "md5" # workaround for the 13 -> 16 migration; https://eelkevdbos.medium.com/upgrade-postgresql-with-docker-compose-99d995e464
volumes:
- db16:/var/lib/postgresql/data
- db:/var/lib/postgresql/data
networks:
- intern
healthcheck:
test: ["CMD", "pg_isready", "-U", "miniflux"]
interval: 10s
start_period: 30s
labels:
com.centurylinklabs.watchtower.enable: true
######## Miniflux-Filter ########
mf-filter:
@ -58,19 +57,22 @@ services:
restart: always
environment:
TZ: Europe/Berlin
MF_AUTH_TOKEN: "{{ lookup('keepass', 'miniflux_auth_token', 'password') }}"
MF_AUTH_TOKEN: {{ lookup('keepass', 'miniflux_auth_token', 'password') }}
MF_API_URL: https://miniflux.mgrote.net/v1
MF_SLEEP: 600
#MF_DEBUG: 1
image: "registry.mgrote.net/miniflux-filter:latest"
image: registry.mgrote.net/miniflux-filter:latest
volumes:
- ./filter.txt:/data/filter.txt
networks:
- intern
labels:
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: mf-frontend
######## Volumes ########
volumes:
db16:
db:
######## Networks ########
networks:
traefik:

View File

@ -1,8 +1,6 @@
9to5linux.com::9to5Linux Weekly Roundup:
apnic.net::Podcast
apnic.net::Event Wrap
astralcodexten.substack.com::Open Thread
astralcodexten.substack.com::Book Review Contest
augengeradeaus.net::Sicherheitshalber der Podcast
axios.com::Axios on HBO
axios.com::football
@ -23,8 +21,6 @@ computerbase.de::Twitter
computerbase.de::wettbewerb
computerbase.de::WM
computerbase.de::Wochenrück- und Ausblick:
computerbase.de::Xbox Game Pass
computerbase.de::GeForce
facebook.com::Bridge returned error
golem.de::Anzeige
golem.de::Aus dem Verlag:
@ -34,7 +30,6 @@ golem.de::Fussball
golem.de::Fußball
golem.de::(g+)
golem.de::Golem Karrierewelt
www.thedrive.com::Bunker Talk:
golem.de::in aller Kürze
golem.de::In eigener Sache
golem.de::kurznews
@ -49,7 +44,6 @@ golem.de::Wochenrückblick
hardwareluxx.de::Der Hardwareluxx-Webwatch:
hardwareluxx.de::Die Artikel unserer Partner
hardwareluxx.de::Shopping Club
hardwareluxx.de::KW
heise.de::Anzeige
heise.de::Auslegungssache
heise.de::Bit-Rauschen
@ -108,10 +102,7 @@ heise.de::Zugriff auf alle Inhalte von heise+
instagram.com::Bridge returned error
ipspace.net::Built.fm
ipspace.net::Podcast
mdr.de::Schwimm-WM
mdr.de::DSV
mdr.de::Basketball
mdr.de::Volleyball
mdr.de::DFB
mdr.de::DFB-Pokal
mdr.de::Fussball
@ -129,7 +120,8 @@ mdr.de::Podcast "digital Leben"
mdr.de::Podcast "digital Leben":
mdr.de::Podcastserie
mdr.de::Schwimmen:
falseknees.tumblr.com::Kneesvember
mf-bridge::Bridge encountered an unexpected situation
mf-bridge::Bridge returned error 401
monkeyuser.com::AdLitteram
netzpolitik.org::KW
netzpolitik.org::NPP
@ -172,8 +164,344 @@ portuguesegeese.com::portuguesegeese.com
reddit.com::UEFA
stackoverflow.blog::Podcast
stackoverflow.blog::The Overflow
stadt-bremerhaven.de::[Anzeige]
stadt-bremerhaven.de::Basketball-WM
stadt-bremerhaven.de::Black-Friday
stadt-bremerhaven.de::Bundesliga
stadt-bremerhaven.de::Cloud-Gaming-Rückblick
stadt-bremerhaven.de::DAZN
stadt-bremerhaven.de::Disney+
stadt-bremerhaven.de::eFootball
stadt-bremerhaven.de::Eishockey
stadt-bremerhaven.de::Elon Musk
stadt-bremerhaven.de::EM 2024
stadt-bremerhaven.de::FIFA
stadt-bremerhaven.de::Formel 1
stadt-bremerhaven.de::FUSSBALL
stadt-bremerhaven.de::Immer wieder sonntags KW
stadt-bremerhaven.de::MagentaSport
stadt-bremerhaven.de::Podcast
stadt-bremerhaven.de::Rückblick
stadt-bremerhaven.de::Sky Ticket
stadt-bremerhaven.de::Twitter
stadt-bremerhaven.de::WM 2022
sueddeutsche.de::1:1
sueddeutsche.de::1860 München
sueddeutsche.de::1. FC Kaiserslautern:
sueddeutsche.de::1. FC Köln
sueddeutsche.de::1. FC Nürnberg
sueddeutsche.de::1. FC Union
sueddeutsche.de::2. Liga
sueddeutsche.de::2. Liga:
sueddeutsche.de::3. Liga
sueddeutsche.de::Achtelfinale
sueddeutsche.de::Afrika-Cup:
sueddeutsche.de::Ajax
sueddeutsche.de::Alonso:
sueddeutsche.de::Alphonso Davies
sueddeutsche.de::American Football:
sueddeutsche.de::Amos Pieper
sueddeutsche.de::Arminia Bielefeld:
sueddeutsche.de::Athlet
sueddeutsche.de::ATP-
sueddeutsche.de::ATP Finals
sueddeutsche.de::Australian Open
sueddeutsche.de::Australian Open:
sueddeutsche.de::Auswärtstor
sueddeutsche.de::Barça
sueddeutsche.de::Baseball
sueddeutsche.de::Basketball
sueddeutsche.de::Basketball-WM
sueddeutsche.de::Bayern
sueddeutsche.de::Belenenses
sueddeutsche.de::Berlinale
sueddeutsche.de::Biathlon
sueddeutsche.de::Boateng
sueddeutsche.de::Bobfahr
sueddeutsche.de::Borussia
sueddeutsche.de::Borussia Dortmund
sueddeutsche.de::Borussia Dortmund:
sueddeutsche.de::Bundesliga
sueddeutsche.de::Bundestrainer
sueddeutsche.de::BVB
sueddeutsche.de::Carlos Alcaraz
sueddeutsche.de::CB-Funk-Podcast
sueddeutsche.de::Champions League:
sueddeutsche.de::Champions-League
sueddeutsche.de::Chelsea
sueddeutsche.de::Claudio Pizarro
sueddeutsche.de::Claudio Pizarro:
sueddeutsche.de::Conference League
sueddeutsche.de::Conference-League
sueddeutsche.de::Copa Libertadores
sueddeutsche.de::Daniil Medwedew
sueddeutsche.de::Darts
sueddeutsche.de::Davis Cup
sueddeutsche.de::Derby
sueddeutsche.de::Deutsche Fußball Liga
sueddeutsche.de::Deutsche Nationalmannschaft
sueddeutsche.de::Deutsche U21:
sueddeutsche.de::DFB
sueddeutsche.de::DFB-
sueddeutsche.de::DFB:
sueddeutsche.de::DFB-Elf
sueddeutsche.de::DFB-Frauen:
sueddeutsche.de::DFB-Pokal
sueddeutsche.de::DFL
sueddeutsche.de::DFL-Supercup
sueddeutsche.de::Djokovic
sueddeutsche.de::Dressur-Weltmeister
sueddeutsche.de::Dritte Liga:
sueddeutsche.de::Drittliga
sueddeutsche.de::Dschungelcamp
sueddeutsche.de::Eintracht
sueddeutsche.de::Eintracht Frankfurt
sueddeutsche.de::Eisbären Berlin
sueddeutsche.de::Eishockey
sueddeutsche.de::Eishockey:
sueddeutsche.de::Eishockey-WM:
sueddeutsche.de::Eiskunstl
sueddeutsche.de::Eisschnelllauf
sueddeutsche.de::Elfmeter
sueddeutsche.de::EM:
sueddeutsche.de::EM-Aus
sueddeutsche.de::Emil Forsberg
sueddeutsche.de::EM-Qualifikation
sueddeutsche.de::Englisches Nationalteam:
sueddeutsche.de::Eröffnungsspiel
sueddeutsche.de::ESC-Finale
sueddeutsche.de::ESC-Vorentscheid
sueddeutsche.de::Euroleague
sueddeutsche.de::Europa League
sueddeutsche.de::Europa-League
sueddeutsche.de::Europameister
sueddeutsche.de::Europapokal
sueddeutsche.de::European Championships
sueddeutsche.de::Eurovision Song Contest
sueddeutsche.de::Eurovision Song Contest:
sueddeutsche.de::FC Augsburg
sueddeutsche.de::FC Barcelona
sueddeutsche.de::FC Chelsea:
sueddeutsche.de::FC Homburg
sueddeutsche.de::FC Sevilla
sueddeutsche.de::Fifa
sueddeutsche.de::Fifa:
sueddeutsche.de::Formel 1
sueddeutsche.de::Frankfurt
sueddeutsche.de::French Open
sueddeutsche.de::Fussball
sueddeutsche.de::Fußball:
sueddeutsche.de::Fußball-EM
sueddeutsche.de::Fußballerinnen
sueddeutsche.de::Fußballern
sueddeutsche.de::Fußball in England:
sueddeutsche.de::Fußball-Legende
sueddeutsche.de::Fußballtorwart
sueddeutsche.de::Fußballverband
sueddeutsche.de::Fußball-Verband
sueddeutsche.de::Fußball-Weltmeisterschaft
sueddeutsche.de::Fußball-WM
sueddeutsche.de::Galopp
sueddeutsche.de::Gerard López
sueddeutsche.de::Gladbach
sueddeutsche.de::Glasgow Rangers
sueddeutsche.de::Golf:
sueddeutsche.de::Guardiola
sueddeutsche.de::Hamburger SV
sueddeutsche.de::Handball-EM:
sueddeutsche.de::Handball-WM
sueddeutsche.de::Hannover 96:
sueddeutsche.de::Heldenfußball
sueddeutsche.de::Hertha BSC
sueddeutsche.de::Hertha BSC:
sueddeutsche.de::Hinspiel
sueddeutsche.de::Hochspring
sueddeutsche.de::Hockey
sueddeutsche.de::Hockey-EM
sueddeutsche.de::Hockey-WM
sueddeutsche.de::Hoeneß:
sueddeutsche.de::HSV-Verteidiger
sueddeutsche.de::Ibrahimović
sueddeutsche.de::"Ich bin ein Star"
sueddeutsche.de::Infantino
sueddeutsche.de::Inter Mailand
sueddeutsche.de::Joachim Löw
sueddeutsche.de::Jugendwort des Jahres
sueddeutsche.de::Julian Nagelsmann
sueddeutsche.de::Juve
sueddeutsche.de::Kevin Trapp
sueddeutsche.de::Kinderfußball
sueddeutsche.de::Klopp
sueddeutsche.de::Klosterhalfen
sueddeutsche.de::Kolumne
sueddeutsche.de::Kroos
sueddeutsche.de::La Boum:
sueddeutsche.de::La Liga
sueddeutsche.de::Länderspiel
sueddeutsche.de::Lazio Rom
sueddeutsche.de::Leichtathletik:
sueddeutsche.de::Leichtathletik-Weltmeisterschaft
sueddeutsche.de::Leichtathletik-WM
sueddeutsche.de::Leichtathletik-WM:
sueddeutsche.de::Leon Draisaitl
sueddeutsche.de::leute:
sueddeutsche.de::Leverkusen
sueddeutsche.de::Lewandowski
sueddeutsche.de::Lewis Hamilton
sueddeutsche.de::Linda Dallmann
sueddeutsche.de::LIV Tour
sueddeutsche.de::Los Angeles Lakers
sueddeutsche.de::Manchester City
sueddeutsche.de::Manchester United
sueddeutsche.de::Mancini
sueddeutsche.de::ManCity
sueddeutsche.de::ManUnited
sueddeutsche.de::Markus Weinzierl
sueddeutsche.de::Mbappé
sueddeutsche.de::Messi
sueddeutsche.de::Miroslav Klose
sueddeutsche.de::Monza
sueddeutsche.de::Mourinho
sueddeutsche.de::Musiala
sueddeutsche.de::Nadal
sueddeutsche.de::nationalelf
sueddeutsche.de::Nationalmannschaft
sueddeutsche.de::Nationalspieler
sueddeutsche.de::Nationalteam
sueddeutsche.de::Nations League
sueddeutsche.de::NBA
sueddeutsche.de::NBA:
sueddeutsche.de::Netzkolumne
sueddeutsche.de::Neujahrsspringen
sueddeutsche.de::Neymar
sueddeutsche.de::NFL
sueddeutsche.de::Niklas Süle:
sueddeutsche.de::Nordische Ski-WM
sueddeutsche.de::Olympia
sueddeutsche.de::Olympia:
sueddeutsche.de::Olympia 2022:
sueddeutsche.de::Olympia-Ticker:
sueddeutsche.de::Olympischen Spielen:
sueddeutsche.de::Olympische Spiele:
sueddeutsche.de::Olympische Winterspiele:
sueddeutsche.de::OSC Lille
sueddeutsche.de::Paralympics:
sueddeutsche.de::Paris Saint-Germain
sueddeutsche.de::Pep Guardiola
sueddeutsche.de::Pferdesport
sueddeutsche.de::Playoffs
sueddeutsche.de::Podcast "Lanz & Precht"
sueddeutsche.de::Podcasts-Tipps im
sueddeutsche.de::Podcast-Tipps
sueddeutsche.de::Pokal
sueddeutsche.de::Pokal-Sieg
sueddeutsche.de::Pokalspiel
sueddeutsche.de::Polizeiruf 110
sueddeutsche.de::Prantls Blick:
sueddeutsche.de::Premier
sueddeutsche.de::PSV Eindhoven
sueddeutsche.de::Qualifikationsspiel
sueddeutsche.de::quoted.
sueddeutsche.de::Raducanu
sueddeutsche.de::Rangnick
sueddeutsche.de::RB Leipzig
sueddeutsche.de::RB Leipzig:
sueddeutsche.de::Real Madrid
sueddeutsche.de::Reiten
sueddeutsche.de::Reit-WM
sueddeutsche.de::Relegation
sueddeutsche.de::Rennrodl
sueddeutsche.de::Robin Gosens
sueddeutsche.de::Rodeln:
sueddeutsche.de::Ronaldo
sueddeutsche.de::Rudern
sueddeutsche.de::SC Freiburg
sueddeutsche.de::Schach-WM
sueddeutsche.de::Schiedsrichter
sueddeutsche.de::Schwimmen:
sueddeutsche.de::Schwimm-WM
sueddeutsche.de::Serien des Monats
sueddeutsche.de::Ski alpin
sueddeutsche.de::Ski alpin:
sueddeutsche.de::Skilanglauf
sueddeutsche.de::Ski-nordisch-WM:
sueddeutsche.de::Skirenn
sueddeutsche.de::Skispringen
sueddeutsche.de::Ski-Weltcup
sueddeutsche.de::Ski-Weltverband
sueddeutsche.de::Ski-WM
sueddeutsche.de::Slalomfahr
sueddeutsche.de::-Spiel
sueddeutsche.de::SSC Neapel
sueddeutsche.de::Stanley Cup
sueddeutsche.de::Stimmen zum Spiel
sueddeutsche.de::Stürmer
sueddeutsche.de::Super Bowl
sueddeutsche.de::Super Bowl Sunday:
sueddeutsche.de::Supercup
sueddeutsche.de::Supercup live
sueddeutsche.de::Super League
sueddeutsche.de::SZ-Audioreihe
sueddeutsche.de::SZ-Kolumne "Bester Dinge":
sueddeutsche.de::SZ-Kolumne "Mitten in ...":
sueddeutsche.de::SZ-Plus-Abonnenten lesen auch
sueddeutsche.de::SZ-Podcast
sueddeutsche.de::SZ-Podcast "Und nun zum Sport":
sueddeutsche.de::Tabellenspitze
sueddeutsche.de::"Tatort"
sueddeutsche.de::Tatort aus
sueddeutsche.de::Tatort München:
sueddeutsche.de::Teamspringen
sueddeutsche.de::Team-WM
sueddeutsche.de::Tennis
sueddeutsche.de::Tennis:
sueddeutsche.de::Thomas Müller
sueddeutsche.de::Three Lions:
sueddeutsche.de::Timo Boll
sueddeutsche.de::Tischtennis WM:
sueddeutsche.de::Toni Kroos
sueddeutsche.de::Tore
sueddeutsche.de::Torhüter
sueddeutsche.de::torwart
sueddeutsche.de::Tottenham
sueddeutsche.de::Transfermarkt
sueddeutsche.de::Trikots
sueddeutsche.de::TSG Hoffenheim
sueddeutsche.de::Tuchel
sueddeutsche.de::Türkgücü München
sueddeutsche.de::Turn-WM
sueddeutsche.de::U21-Europameisterschaft
sueddeutsche.de::ückenkemper
sueddeutsche.de::Ukrainisches Tagebuch
sueddeutsche.de::Uli Hoeneß
sueddeutsche.de::Union Berlin
sueddeutsche.de::Urs Fischer
sueddeutsche.de::US Open
sueddeutsche.de::VfB
sueddeutsche.de::VfB Stuttgart
sueddeutsche.de::VfL
sueddeutsche.de::VfL Wolfsburg
sueddeutsche.de::Vierschanzentournee
sueddeutsche.de::Viertelfinal
sueddeutsche.de::Viertelfinale
sueddeutsche.de::Volleyball-WM:
sueddeutsche.de::Vuskovic
sueddeutsche.de::Weitspr
sueddeutsche.de::Werder
sueddeutsche.de::Wiegman
sueddeutsche.de::Wimbledon
sueddeutsche.de::Wintersport:
sueddeutsche.de::WM-
sueddeutsche.de::WM:
sueddeutsche.de::WM-Kader
sueddeutsche.de::WM-Qualifikation
sueddeutsche.de::WM-Silber
sueddeutsche.de::WM-Ticket
sueddeutsche.de::WM-Titel
sueddeutsche.de::Zlatan
sueddeutsche.de::zweite Liga
sueddeutsche.de::zweiten Liga
tagesschau.de::11KM
tagesschau.de::11KM-Podcast
tagesschau.de::Achtelfinale
tagesschau.de::Alpine-Super-Kombination:
tagesschau.de::American Football:
@ -282,8 +610,3 @@ theguardian.com::Guardiola
theguardian.com::Manchester United
theycantalk.com::Tinyview
toonhole.com::Bernai
www.army-technology.com::who are the leaders
www.army-technology.com::files patent
www.army-technology.com::sees highest patent filings
www.army-technology.com::theme innovation strategy
www.army-technology.com::gets grant

View File

@ -1,43 +0,0 @@
version: '3'
services:
munin:
container_name: "munin-master"
image: registry.mgrote.net/munin-server:latest
restart: always
environment:
MAILCONTACT: michael.grote@posteo.de
MAILSERVER: mail-relay
MAILPORT: 25
MAILFROM: munin@mgrote.net
MAILUSER: munin@mgrote.net
MAILNAME: Munin
MAILDOMAIN: mgrote.net
TZ: Europe/Berlin
CRONDELAY: 5
NODES: |
fileserver3.mgrote.net:fileserver3.mgrote.net
ansible2.mgrote.net:ansible2.mgrote.net
pve5.mgrote.net:pve5.mgrote.net
forgejo.mgrote.net:forgejo.mgrote.net
docker10.mgrote.net:docker10.mgrote.net
pbs.mgrote.net:pbs.mgrote.net
blocky.mgrote.net:blocky.mgrote.net
ldap.mgrote.net:ldap.mgrote.net
# e.g.
# computer-test.mgrote.net.test:192.68.2.4
# computer.mgrote.net:computer.mgrote.net
volumes:
- db:/var/lib/munin
- logs:/var/log/munin
- cache:/var/cache/munin
ports:
- 1234:80
volumes:
db:
logs:
cache:
networks:
mail-relay:
external: true

View File

@ -3,7 +3,7 @@ services:
######## navidrome-mg ########
navidrome-mg:
container_name: "navidrome-mg"
image: "deluan/navidrome:0.52.5"
image: deluan/navidrome:latest
restart: always
environment:
ND_LOGLEVEL: info
@ -35,6 +35,8 @@ services:
traefik.http.routers.navidrome-mg.tls.certresolver: resolver_letsencrypt
traefik.http.routers.navidrome-mg.entrypoints: entry_https
traefik.http.services.navidrome-mg.loadbalancer.server.port: 4533
com.centurylinklabs.watchtower.enable: true
ports:
- "4533:4533"

View File

@ -2,7 +2,7 @@ version: '3.3'
services:
######## Datenbank ########
nextcloud-db:
image: "mariadb:11.3.2"
image: mariadb:10
container_name: nextcloud-db
command: --transaction-isolation=READ-COMMITTED --log-bin=ROW --innodb_read_only_compressed=OFF
restart: unless-stopped
@ -11,75 +11,59 @@ services:
- /etc/timezone:/etc/timezone:ro
- db:/var/lib/mysql
environment:
MYSQL_ROOT_PASSWORD: "{{ lookup('keepass', 'nextcloud_mysql_root_password', 'password') }}"
MYSQL_PASSWORD: "{{ lookup('keepass', 'nextcloud_mysql_password', 'password') }}"
MYSQL_ROOT_PASSWORD: {{ lookup('keepass', 'nextcloud_mysql_root_password', 'password') }}
MYSQL_PASSWORD: {{ lookup('keepass', 'nextcloud_mysql_password', 'password') }}
MYSQL_DATABASE: nextcloud
MYSQL_USER: nextcloud
MYSQL_INITDB_SKIP_TZINFO: 1
networks:
- intern
healthcheck:
interval: 30s
retries: 3
test:
[
"CMD",
"healthcheck.sh",
"--su-mysql",
"--connect"
]
timeout: 30s
# Error
## [ERROR] Incorrect definition of table mysql.column_stats: expected column 'histogram' at position 10 to have type longblob, found type varbinary(255).
## [ERROR] Incorrect definition of table mysql.column_stats: expected column 'hist_type' at position 9 to have type enum('SINGLE_PREC_HB','DOUBLE_PREC_HB','JSON_HB'), found type enum('SINGLE_PREC_HB','DOUBLE_PREC_HB').
# Fix
## docker exec nextcloud-db mysql nextcloud -p<MySQL-Root-Pw> -e "ALTER TABLE mysql.column_stats MODIFY histogram longblob;"
## docker exec nextcloud-db mysql nextcloud -p<MySQL-Root-Pw> -e "ALTER TABLE mysql.column_stats MODIFY hist_type enum('SINGLE_PREC_HB','DOUBLE_PREC_HB','JSON_HB');"
labels:
com.centurylinklabs.watchtower.enable: true
######## Redis ########
nextcloud-redis:
image: "redis:7.2.4"
image: redis:7-alpine
container_name: nextcloud-redis
hostname: nextcloud-redis
networks:
- intern
restart: unless-stopped
command: "redis-server --requirepass {{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}"
healthcheck:
test: ["CMD", "redis-cli", "--pass", "{{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}", "--no-auth-warning", "ping"]
interval: 5s
timeout: 2s
retries: 3
command: redis-server --requirepass {{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}
labels:
com.centurylinklabs.watchtower.enable: true
######## cron ########
nextcloud-cron:
container_name: nextcloud-cron
image: "registry.mgrote.net/nextcloud-cronjob:latest"
image: registry.mgrote.net/nextcloud-cronjob:master
restart: unless-stopped
network_mode: none
depends_on:
- nextcloud-app
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- /etc/localtime:/etc/localtime:ro
environment:
NEXTCLOUD_CONTAINER_NAME: nextcloud-app
NEXTCLOUD_CRON_MINUTE_INTERVAL: 1
labels:
com.centurylinklabs.watchtower.enable: true
######## Nextcloud ########
nextcloud-app:
image: "nextcloud:29.0.0"
image: nextcloud:27
container_name: nextcloud-app
restart: unless-stopped
depends_on:
- nextcloud-db
- nextcloud-redis
- nextcloud-cron
environment:
REDIS_HOST: nextcloud-redis
REDIS_HOST_PASSWORD: "{{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}"
REDIS_HOST_PASSWORD: {{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}
MYSQL_DATABASE: nextcloud
MYSQL_USER: nextcloud
MYSQL_PASSWORD: "{{ lookup('keepass', 'nextcloud_mysql_password', 'password') }}"
MYSQL_PASSWORD: {{ lookup('keepass', 'nextcloud_mysql_password', 'password') }}
MYSQL_HOST: nextcloud-db
NEXTCLOUD_TRUSTED_DOMAINS: "nextcloud.mgrote.net"
SMTP_HOST: mail-relay
@ -87,15 +71,12 @@ services:
SMTP_PORT: 25
#SMTP_AUTHTYPE: LOGIN
SMTP_NAME: info@mgrote.net
#SMTP_PASSWORD: "{{ lookup('keepass', 'strato_smtp_password', 'password') }}"
#SMTP_PASSWORD: {{ lookup('keepass', 'strato_smtp_password', 'password') }}
MAIL_FROM_ADDRESS: info@mgrote.net
PHP_MEMORY_LIMIT: 1024M
PHP_UPLOAD_LIMIT: 10G
APACHE_DISABLE_REWRITE_IP: 1
TRUSTED_PROXIES: "192.168.48.0/24" # the subnet traefik lives in
NEXTCLOUD_UPLOAD_LIMIT: 10G
NEXTCLOUD_MAX_TIME: 3600
APACHE_BODY_LIMIT: 0 # unlimited, https://github.com/nextcloud/docker/issues/1796
volumes:
- app:/var/www/html
- data:/var/www/html/data
@ -103,12 +84,10 @@ services:
- intern
- traefik
- mail-relay
healthcheck:
test: ["CMD", "curl", "-f", "--insecure", "http://localhost:80"]
interval: 30s
timeout: 10s
retries: 3
labels:
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: nextcloud-redis,nextcloud-db
traefik.http.routers.nextcloud.rule: Host(`nextcloud.mgrote.net`)
traefik.enable: true
traefik.http.routers.nextcloud.tls: true
@ -143,6 +122,3 @@ volumes:
######## Docs ########
# telephone region
# docker exec --user www-data nextcloud-app php occ config:system:set default_phone_region --value="DE"
# https://help.nextcloud.com/t/nextcloud-wont-load-any-mixed-content/13565/3
# docker exec --user www-data nextcloud-app php occ config:system:set overwriteprotocol --value="https"
# docker exec --user www-data nextcloud-app php occ config:system:set overwrite.cli.url --value="http://nextcloud.mgrote.net"

View File

@ -0,0 +1,88 @@
version: '3.5'
# ------------------------------------------------------------------
# DOCKER COMPOSE COMMAND REFERENCE
# ------------------------------------------------------------------
# Start | docker-compose up -d
# Stop | docker-compose stop
# Update | docker-compose pull
# Logs | docker-compose logs --tail=25 -f
# Terminal | docker-compose exec photoprism bash
# Help | docker-compose exec photoprism photoprism help
# Config | docker-compose exec photoprism photoprism config
# Reset | docker-compose exec photoprism photoprism reset
# Backup | docker-compose exec photoprism photoprism backup -a -i
# Restore | docker-compose exec photoprism photoprism restore -a -i
# Index | docker-compose exec photoprism photoprism index
# Reindex | docker-compose exec photoprism photoprism index -a
# Import | docker-compose exec photoprism photoprism import
# -------------------------------------------------------------------
services:
photoprism:
# Use photoprism/photoprism:preview instead for testing preview builds:
image: photoprism/photoprism:latest
container_name: photoprism-frontend
restart: always
security_opt:
- seccomp:unconfined
- apparmor:unconfined
ports:
- 2342:2342
environment:
PHOTOPRISM_ADMIN_PASSWORD: "{{ lookup('keepass', 'photoprism_admin_password', 'password') }}"
PHOTOPRISM_HTTP_PORT: 2342
PHOTOPRISM_HTTP_COMPRESSION: "gzip" # none or gzip
PHOTOPRISM_DEBUG: "false"
PHOTOPRISM_PUBLIC: "false" # No authentication required (disables password protection)
PHOTOPRISM_READONLY: "true" # Don't modify originals directory (reduced functionality)
PHOTOPRISM_EXPERIMENTAL: "false"
PHOTOPRISM_DISABLE_WEBDAV: "true"
PHOTOPRISM_DISABLE_SETTINGS: "false"
PHOTOPRISM_DISABLE_TENSORFLOW: "false"
PHOTOPRISM_DARKTABLE_PRESETS: "false"
PHOTOPRISM_DETECT_NSFW: "true"
PHOTOPRISM_UPLOAD_NSFW: "true"
PHOTOPRISM_DATABASE_DRIVER: "mysql"
PHOTOPRISM_DATABASE_SERVER: "mariadb:3306"
PHOTOPRISM_DATABASE_NAME: "photoprism"
PHOTOPRISM_DATABASE_USER: "photoprism"
PHOTOPRISM_DATABASE_PASSWORD: "{{ lookup('keepass', 'photoprism_database_password', 'password') }}"
PHOTOPRISM_SITE_URL: "http://docker10.mgrote.net:2342/"
PHOTOPRISM_SITE_TITLE: "PhotoPrism"
PHOTOPRISM_SITE_CAPTION: "Browse Your Life"
PHOTOPRISM_SITE_DESCRIPTION: ""
PHOTOPRISM_SITE_AUTHOR: "mgrote"
# You may optionally set a user / group id using environment variables if your Docker version or NAS does not
# support this natively (see next example):
UID: 5000
GID: 5000
# UMASK: 0000
# Uncomment and edit the following line to set a specific user / group id (native):
user: "5000:5000"
volumes:
- /mnt/fileserver3_photoprism_bilder_ro:/photoprism/originals/:ro
- "storage:/photoprism/storage"
labels:
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: photoprism-db
mariadb:
image: mariadb:10
container_name: photoprism-db
restart: always
security_opt:
- seccomp:unconfined
- apparmor:unconfined
command: mysqld --transaction-isolation=READ-COMMITTED --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci --max-connections=512 --innodb-rollback-on-timeout=OFF --innodb-lock-wait-timeout=50
volumes: # Don't remove permanent storage for index database files!
- "database:/var/lib/mysql"
environment:
MYSQL_ROOT_PASSWORD: {{ lookup('keepass', 'photoprism_mysql_root_password', 'password') }}
MYSQL_DATABASE: photoprism
MYSQL_USER: photoprism
MYSQL_PASSWORD: {{ lookup('keepass', 'photoprism_database_password', 'password') }}
labels:
com.centurylinklabs.watchtower.enable: true
volumes:
storage:
database:

View File

@ -3,7 +3,7 @@ services:
oci-registry:
restart: always
container_name: oci-registry
image: "registry:2.8.3"
image: registry:2
volumes:
- oci:/var/lib/registry
- ./htpasswd:/auth/htpasswd
@ -11,25 +11,15 @@ services:
- traefik
- intern
depends_on:
- oci-registry-ui
- oci-registry-redis
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:5000/v2/"]
interval: 30s
timeout: 10s
retries: 3
environment:
TZ: Europe/Berlin
REGISTRY_AUTH: none
REGISTRY_REDIS_ADDR: oci-registry-redis:6379
REGISTRY_REDIS_PASSWORD: "{{ lookup('keepass', 'oci-registry-redis-pw', 'password') }}"
REGISTRY_REDIS_PASSWORD: {{ lookup('keepass', 'oci-registry-redis-pw', 'password') }}
REGISTRY_STORAGE_DELETE_ENABLED: true
REGISTRY_CATALOG_MAXENTRIES: 100000 # https://github.com/Joxit/docker-registry-ui/issues/306
# https://joxit.dev/docker-registry-ui/#using-cors
REGISTRY_HTTP_HEADERS_Access-Control-Allow-Origin: '[https://registry.mgrote.net/ui/]'
REGISTRY_HTTP_HEADERS_Access-Control-Allow-Methods: '[HEAD,GET,OPTIONS,DELETE]'
REGISTRY_HTTP_HEADERS_Access-Control-Allow-Credentials: '[true]'
REGISTRY_HTTP_HEADERS_Access-Control-Allow-Headers: '[Authorization,Accept,Cache-Control]'
REGISTRY_HTTP_HEADERS_Access-Control-Expose-Headers: '[Docker-Content-Digest]'
labels:
traefik.http.routers.registry.rule: Host(`registry.mgrote.net`)
traefik.enable: true
@ -38,10 +28,13 @@ services:
traefik.http.routers.registry.entrypoints: entry_https
traefik.http.services.registry.loadbalancer.server.port: 5000
traefik.http.routers.registry.middlewares: registry-ipallowlist
traefik.http.routers.registry.middlewares: registry-ipwhitelist
traefik.http.middlewares.registry-ipallowlist.ipallowlist.sourcerange: 192.168.2.0/24,10.25.25.0/24,192.168.48.0/24,172.18.0.0/16 # .48. is Docker
traefik.http.middlewares.registry-ipallowlist.ipallowlist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipallowlist/#ipstrategydepth
traefik.http.middlewares.registry-ipwhitelist.ipwhitelist.sourcerange: 192.168.2.0/24,10.25.25.0/24,192.168.48.0/24,172.18.0.0/16 # .48. is Docker
traefik.http.middlewares.registry-ipwhitelist.ipwhitelist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipwhitelist/#ipstrategydepth
com.centurylinklabs.watchtower.depends-on: oci-registry-redis
com.centurylinklabs.watchtower.enable: true
# registry cleanup: docker exec -it oci-registry /bin/registry garbage-collect --delete-untagged=true /etc/docker/registry/config.yml
@ -52,24 +45,21 @@ services:
# docker pull registry.mgrote.net/myfirstimage
oci-registry-redis:
image: "redis:7.2.4"
image: redis:7
container_name: oci-registry-redis
networks:
- intern
restart: always
environment:
REDIS_PASSWORD: "{{ lookup('keepass', 'oci-registry-redis-pw', 'password') }}"
REDIS_PASSWORD: {{ lookup('keepass', 'oci-registry-redis-pw', 'password') }}
MAXMEMORY POLICY: allkeys-lru
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
timeout: 10s
retries: 3
labels:
com.centurylinklabs.watchtower.enable: true
oci-registry-ui:
restart: always
# url: registry.mgrote.net/ui/index.html
image: "joxit/docker-registry-ui:2.5.7"
image: joxit/docker-registry-ui:latest
container_name: oci-registry-ui
environment:
DELETE_IMAGES: true
@ -77,20 +67,12 @@ services:
NGINX_PROXY_PASS_URL: http://oci-registry:5000
SHOW_CONTENT_DIGEST: true # https://github.com/Joxit/docker-registry-ui/issues/297
SHOW_CATALOG_NB_TAGS: true
PULL_URL: registry.mgrote.net
depends_on:
- oci-registry
networks:
- traefik
- intern
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://127.0.0.1"]
interval: 30s
timeout: 10s
retries: 3
labels:
traefik.http.routers.registry-ui.rule: Host(`registry.mgrote.net`)&&PathPrefix(`/ui`) # expose it under /ui; this adds the prefix to the path, but the application does not actually listen there
traefik.http.routers.registry-ui.middlewares: registry-ui-strip-prefix,registry-ui-ipallowlist # so strip the prefix again afterwards
traefik.http.routers.registry-ui.middlewares: registry-ui-strip-prefix,registry-ui-ipwhitelist # so strip the prefix again afterwards
traefik.http.middlewares.registry-ui-strip-prefix.stripprefix.prefixes: /ui # the middleware is defined here
traefik.enable: true
traefik.http.routers.registry-ui.tls: true
@ -98,8 +80,13 @@ services:
traefik.http.routers.registry-ui.entrypoints: entry_https
traefik.http.services.registry-ui.loadbalancer.server.port: 80
traefik.http.middlewares.registry-ui-ipallowlist.ipallowlist.sourcerange: 192.168.2.0/24,10.25.25.0/24 # .48. is Docker
traefik.http.middlewares.registry-ui-ipallowlist.ipallowlist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipallowlist/#ipstrategydepth
traefik.http.middlewares.registry-ui-ipwhitelist.ipwhitelist.sourcerange: 192.168.2.0/24,10.25.25.0/24 # .48. is Docker
traefik.http.middlewares.registry-ui-ipwhitelist.ipwhitelist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipwhitelist/#ipstrategydepth
com.centurylinklabs.watchtower.depends-on: oci-registry-redis,oci-registry
com.centurylinklabs.watchtower.enable: true
######## Networks ########
networks:

View File

@ -3,7 +3,7 @@ services:
routeros-config-export:
container_name: routeros-config-export
restart: always
image: "registry.mgrote.net/routeros-config-export:latest"
image: registry.mgrote.net/oxidized-selfmade:check
volumes:
- ./key_rb5009:/key_rb5009:ro
- ./key_hex:/key_hex:ro
@ -15,7 +15,7 @@ services:
hex.mgrote.net,routeros-config-backup,/key_hex
crs305.mgrote.net,routeros-config-backup,/key_crs305
GIT_REPO_BRANCH: "master"
GIT_REPO_URL: "ssh://gitea@forgejo.mgrote.net:2222/mg/routeros-configs.git"
GIT_REPO_URL: "ssh://gitea@gitea.mgrote.net:2222/mg/routeros-configs.git"
GIT_REPO_DEPLOY_KEY: "/deploy_token"
GIT_USERNAME: oxidized-selfmade
GIT_USER_MAIL: michael.grote@posteo.de

View File

@ -0,0 +1,27 @@
version: '2.3'
services:
statping:
container_name: statping
image: adamboutcher/statping-ng:latest
restart: always
volumes:
- statping_data:/app
environment:
DB_CONN: sqlite
ALLOW_REPORT: false
ADMIN_USER: statadmin
ADMIN_PASSWORD: {{ lookup('keepass', 'statping_admin_password', 'password') }}
SAMPLE_DATA: false
ports:
- 8083:8080
networks:
- mail-relay
labels:
com.centurylinklabs.watchtower.enable: true
volumes:
statping_data:
networks:
mail-relay:
external: true

View File

@ -3,7 +3,7 @@ services:
######## traefik ########
traefik:
container_name: traefik
image: "traefik:v3.0.0"
image: traefik:latest
restart: always
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
@ -19,19 +19,16 @@ services:
- "2222:2222" # SSH
environment:
TZ: Europe/Berlin
healthcheck:
test: ["CMD", "traefik", "healthcheck", "--ping"]
interval: 30s
timeout: 10s
retries: 3
labels:
com.centurylinklabs.watchtower.enable: true
######## nforwardauth ########
nforwardauth:
restart: always
image: "nosduco/nforwardauth:v1.4.0"
image: nosduco/nforwardauth:v1
container_name: traefik-nforwardauth
environment:
TOKEN_SECRET: "{{ lookup('keepass', 'nforwardauth_token_secret', 'password') }}"
TOKEN_SECRET: {{ lookup('keepass', 'nforwardauth_token_secret', 'password') }}
AUTH_HOST: auth.mgrote.net
labels:
traefik.enable: true
@ -43,15 +40,13 @@ services:
traefik.http.routers.nforwardauth.tls: true
traefik.http.routers.nforwardauth.tls.certresolver: resolver_letsencrypt
traefik.http.routers.nforwardauth.entrypoints: entry_https
com.centurylinklabs.watchtower.depends-on: traefik
com.centurylinklabs.watchtower.enable: true
volumes:
- "./passwd:/passwd:ro" # Mount local passwd file at /passwd as read only
networks:
- traefik
healthcheck:
test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://127.0.0.1:3000/login"]
interval: 30s
timeout: 10s
retries: 3
######## Networks ########
networks:

View File

@ -14,4 +14,4 @@ http:
service_gitea:
loadBalancer:
servers:
- url: "http://forgejo.mgrote.net:3000/"
- url: "http://gitea.mgrote.net:3000/"

View File

@ -37,8 +37,6 @@ api:
insecure: true
dashboard: true # reachable on port 8081
ping: {} # for the healthcheck
#experimental:
# plugins:
# ldapAuth:

View File

@ -2,14 +2,14 @@
version: "2.1"
services:
unifi-network-application:
image: "lscr.io/linuxserver/unifi-network-application:8.0.28-ls27"
image: lscr.io/linuxserver/unifi-network-application:latest
container_name: unifi-network-application
environment:
PUID: 1000
PGID: 1000
TZ: Etc/UTC
MONGO_USER: unifiuser
MONGO_PASS: "{{ lookup('keepass', 'unifi-mongodb-pass', 'password') }}"
MONGO_PASS: {{ lookup('keepass', 'unifi-mongodb-pass', 'password') }}
MONGO_HOST: unifi-db
MONGO_PORT: 27017
MONGO_DBNAME: unifidb
@ -28,37 +28,28 @@ services:
- 6789:6789 #optional
- 5514:5514/udp #optional
restart: always
labels:
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: unifi-db
networks:
- mail-relay
- unifi-internal
healthcheck:
test: ["CMD", "curl", "-f", "--insecure", "https://localhost:8443"]
interval: 30s
timeout: 10s
retries: 3
depends_on:
- unifi-db
unifi-db:
# start the container WITHOUT the init script
# inside the container:
# 1. mongosh
# inside the container:
# 1. mongo
# 2. db.getSiblingDB("unifidb").createUser({user: "unifiuser", pwd: "GEHEIM", roles: [{role: "dbOwner", db: "unifidb"}, {role: "dbOwner", db: "unifidb_stat"}]});
# https://discourse.linuxserver.io/t/cant-connect-to-mongodb-for-unifi-network-application/8166
image: "docker.io/mongo:7.0.9"
image: docker.io/mongo:4
container_name: unifi-db
volumes:
- db-data:/data/db
restart: always
environment:
MARIADB_AUTO_UPGRADE: "1"
labels:
com.centurylinklabs.watchtower.enable: true
networks:
- unifi-internal
healthcheck:
test: ["CMD", "mongosh", "--eval", "db.stats().ok"]
interval: 30s
timeout: 10s
retries: 3
######## Volumes ########
volumes:

View File

@ -0,0 +1,42 @@
version: "3"
services:
watchtower:
restart: always
container_name: watchtower
image: containrrr/watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
TZ: Europe/Berlin
WATCHTOWER_CLEANUP: true
WATCHTOWER_INCLUDE_RESTARTING: true
WATCHTOWER_INCLUDE_STOPPED: true
WATCHTOWER_REVIVE_STOPPED: false
WATCHTOWER_SCHEDULE: "0 20 3 * * *" # every day at 03:20 (six-field cron, seconds first)
WATCHTOWER_LABEL_ENABLE: true
WATCHTOWER_NOTIFICATIONS: email
WATCHTOWER_NOTIFICATION_EMAIL_FROM: info@mgrote.net
WATCHTOWER_NOTIFICATION_EMAIL_TO: info@mgrote.net
WATCHTOWER_NOTIFICATION_EMAIL_SERVER: mail-relay # the relay's "container_name"
# WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PORT: 25 # not needed, kept only as a reference
# WATCHTOWER_NOTIFICATION_EMAIL_SERVER_USER: "" # not needed, kept only as a reference
# WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PASSWORD: "" # not needed, kept only as a reference
WATCHTOWER_NOTIFICATION_EMAIL_DELAY: 2
WATCHTOWER_NO_STARTUP_MESSAGE: true
labels:
com.centurylinklabs.watchtower.enable: true
networks:
- mail-relay # attach the external network to the container
# only monitor this container:
# labels:
# com.centurylinklabs.watchtower.monitor-only: true
# this container depends on x:
# com.centurylinklabs.watchtower.depends-on: mf-db
# update this container:
# com.centurylinklabs.watchtower.enable: true
######## Networks ########
networks:
mail-relay: # so the mail-relay can be reached from the other containers
external: true
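# For an immediate update pass outside the schedule (a sketch; --run-once is an
# upstream watchtower flag):
#   docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower --run-once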

View File

@ -2,7 +2,7 @@ version: '3'
services:
wiki-webserver:
container_name: wiki-webserver
image: "registry.mgrote.net/httpd:latest"
image: httpd:2.4
restart: always
networks:
- traefik
@ -13,11 +13,6 @@ services:
# /docker/wiki/site is a local directory on docker10
# this directory is mounted directly in the wiki CI
# and the data is written into it there
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
labels:
traefik.http.routers.wiki.rule: Host(`wiki.mgrote.net`)
traefik.enable: true
@ -28,6 +23,8 @@ services:
traefik.http.routers.wiki.middlewares: nforwardauth
com.centurylinklabs.watchtower.enable: true
######## Networks ########
networks:
traefik:

View File

@ -5,7 +5,7 @@ services:
woodpecker-server:
restart: always
container_name: woodpecker-server
image: "woodpeckerci/woodpecker-server:v2.4.1"
image: woodpeckerci/woodpecker-server:v2.0
ports:
- 8000:8000
volumes:
@ -16,9 +16,9 @@ services:
WOODPECKER_WEBHOOK_HOST: http://docker10.mgrote.net:8000
WOODPECKER_GITEA: true
WOODPECKER_GITEA_URL: https://git.mgrote.net
WOODPECKER_GITEA_CLIENT: "{{ lookup('keepass', 'woodpecker-oauth2-client-id', 'password') }}"
WOODPECKER_GITEA_SECRET: "{{ lookup('keepass', 'woodpecker-oauth2-client-secret', 'password') }}"
WOODPECKER_AGENT_SECRET: "{{ lookup('keepass', 'woodpecker-agent-secret', 'password') }}"
WOODPECKER_GITEA_CLIENT: {{ lookup('keepass', 'woodpecker-oauth2-client-id', 'password') }}
WOODPECKER_GITEA_SECRET: {{ lookup('keepass', 'woodpecker-oauth2-client-secret', 'password') }}
WOODPECKER_AGENT_SECRET: {{ lookup('keepass', 'woodpecker-agent-secret', 'password') }}
WOODPECKER_ADMIN: mg
WOODPECKER_LOG_LEVEL: info
WOODPECKER_DEBUG_PRETTY: true
@ -26,6 +26,8 @@ services:
- intern
- traefik
labels:
com.centurylinklabs.watchtower.enable: true
traefik.http.routers.woodpecker.rule: Host(`ci.mgrote.net`)
traefik.enable: true
traefik.http.routers.woodpecker.tls: true
@ -33,15 +35,15 @@ services:
traefik.http.routers.woodpecker.entrypoints: entry_https
traefik.http.services.woodpecker.loadbalancer.server.port: 8000
traefik.http.routers.woodpecker.middlewares: woodpecker-ipallowlist
traefik.http.routers.woodpecker.middlewares: woodpecker-ipwhitelist
traefik.http.middlewares.woodpecker-ipallowlist.ipallowlist.sourcerange: "192.168.2.0/24,10.25.25.0/24"
traefik.http.middlewares.woodpecker-ipallowlist.ipallowlist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipallowlist/#ipstrategydepth
traefik.http.middlewares.woodpecker-ipwhitelist.ipwhitelist.sourcerange: 192.168.2.0/24
traefik.http.middlewares.woodpecker-ipwhitelist.ipwhitelist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipwhitelist/#ipstrategydepth
woodpecker-agent:
container_name: woodpecker-agent
image: "woodpeckerci/woodpecker-agent:v2.4.1"
image: woodpeckerci/woodpecker-agent:v2.0
command: agent
restart: always
depends_on:
@ -53,12 +55,14 @@ services:
- /var/run/docker.sock:/var/run/docker.sock
environment:
WOODPECKER_SERVER: woodpecker-server:9000
WOODPECKER_AGENT_SECRET: "{{ lookup('keepass', 'woodpecker-agent-secret', 'password') }}"
WOODPECKER_MAX_WORKFLOWS: 20
WOODPECKER_AGENT_SECRET: {{ lookup('keepass', 'woodpecker-agent-secret', 'password') }}
WOODPECKER_MAX_WORKFLOWS: 4
WOODPECKER_DEBUG_PRETTY: true
WOODPECKER_LOG_LEVEL: info
WOODPECKER_HEALTHCHECK: true
WOODPECKER_BACKEND: docker
labels:
com.centurylinklabs.watchtower.enable: true
networks:
- intern
@ -68,8 +72,8 @@ volumes:
agent-config:
# git.mgrote.net -> Settings -> Applications -> woodpecker
# WOODPECKER_GITEA_CLIENT: "{{ lookup('keepass', 'woodpecker-oauth2-client-id', 'password') }}"
# WOODPECKER_GITEA_SECRET: "{{ lookup('keepass', 'woodpecker-oauth2-client-secret', 'password') }}"
# WOODPECKER_GITEA_CLIENT: {{ lookup('keepass', 'woodpecker-oauth2-client-id', 'password') }}
# WOODPECKER_GITEA_SECRET: {{ lookup('keepass', 'woodpecker-oauth2-client-secret', 'password') }}
# Redirect URL: https://ci.mgrote.net/authorize
######## Networks ########

View File

@ -42,7 +42,7 @@ services:
- com.centurylinklabs.watchtower.depends-on=lldap-db
######## DB ########
lldap-db:
image: mariadb:10.6.14
image: mariadb:10
container_name: lldap-db
restart: always
volumes:

View File

@ -2,7 +2,7 @@ version: '3'
services:
wiki-webserver:
container_name: wiki-webserver
image: httpd:2.4@sha256:ba846154ade27292d216cce2d21f1c7e589f3b66a4a643bff0cdd348efd17aa3
image: httpd:2.4
restart: always
networks:
- traefik

View File

@ -0,0 +1,11 @@
## mgrote.dotfiles
### Description
Clones the dotfiles repo and creates the required directories.
### Tested on
- [x] Ubuntu (>=18.04)
- [x] Linux Mint
### Variables + defaults
see [defaults](./defaults/main.yml)

View File

@ -0,0 +1,11 @@
---
dotfiles_repo_url: https://git.mgrote.net/mg/dotfiles # URL of the repo
dotfiles_repo_path: /home/mg/dotfiles # where the repo is stored locally
dotfiles_repo_branch: master # default branch to check out
dotfiles_files: # which files get linked where (ln -s)
- repo_path: "{{ dotfiles_repo_path }}/.vimrc"
local_path: "/home/mg/.vimrc"
dotfiles_dirs: # which directories get created
- path: /home/mg/.config/i3
- path: /home/mg/.config/polybar
dotfiles_owner: mg # chown

View File

@ -0,0 +1,8 @@
---
- name: set owner recursive for repo
ansible.builtin.file:
path: "{{ dotfiles_repo_path }}"
owner: "{{ dotfiles_owner }}"
group: "{{ dotfiles_owner }}"
recurse: true
...

View File

@ -0,0 +1,63 @@
---
- name: Ensure package acl is installed
become: true
ansible.builtin.package:
name: acl
state: present
- name: check if repo exists
ansible.builtin.stat:
path: "{{ dotfiles_repo_path }}"
register: repo_exists
- name: set safe directory
become: true
ansible.builtin.command: # noqa command-instead-of-module
cmd: git config --global --add safe.directory "{{ dotfiles_repo_path }}"
changed_when: false
- name: stash changes
ansible.builtin.command: git stash # noqa command-instead-of-module no-handler
args:
chdir: "{{ dotfiles_repo_path }}"
changed_when: false
when: repo_exists.stat.exists
- name: Ensure dotfiles repository is cloned locally.
ansible.builtin.git:
repo: "{{ dotfiles_repo_url }}"
dest: "{{ dotfiles_repo_path }}"
depth: 1
version: "{{ dotfiles_repo_branch }}"
notify: set owner recursive for repo
- name: Ensure needed dirs exist.
ansible.builtin.file:
path: "{{ item.path }}"
state: directory
owner: "{{ dotfiles_owner }}"
group: "{{ dotfiles_owner }}"
mode: "0644"
with_items: "{{ dotfiles_dirs }}"
- name: Link dotfiles into home folder
ansible.builtin.file:
src: "{{ item.repo_path }}"
dest: "{{ item.local_path }}"
state: link
force: true
owner: "{{ dotfiles_owner }}"
group: "{{ dotfiles_owner }}"
with_items: "{{ dotfiles_files }}"
- name: add .bash_extra to .bashrc
ansible.builtin.lineinfile:
path: /home/{{ dotfiles_owner }}/.bashrc
line: "source {{ dotfiles_repo_path }}/.bash_extra"
state: present
- name: root - add .bash_extra to .bashrc
ansible.builtin.lineinfile:
path: /root/.bashrc
line: "source {{ dotfiles_repo_path }}/.bash_extra"
state: present
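# Roughly what this role does, as a manual shell sequence (a sketch assembled from
# the values in defaults/main.yml; not part of the role itself):
#   git clone --depth 1 --branch master https://git.mgrote.net/mg/dotfiles /home/mg/dotfiles
#   mkdir -p /home/mg/.config/i3 /home/mg/.config/polybar
#   ln -sf /home/mg/dotfiles/.vimrc /home/mg/.vimrc
#   chown -R mg:mg /home/mg/dotfiles
#   echo "source /home/mg/dotfiles/.bash_extra" >> /home/mg/.bashrc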

View File

@ -22,7 +22,7 @@ munin_plugin_dest_path: /etc/munin/plugins/
munin_plugin_conf_dest_path: /etc/munin/plugin-conf.d/
# munin_node_plugins: #plugins to install
# - name: docker_volumes # name
# src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_ #src
# src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/docker/docker_ #src
# config_file_name: /etc/munin/plugin-conf.d/docker # where to put plugin config
# content of config
# config: |

View File

@ -10,6 +10,6 @@
changed_when: "output_conf.rc != 0"
ignore_errors: true # ignore errors
- name: munin-node-configure --shell - 2 # noqa ignore-errors no-changed-when
- name: munin-node-configure --shell - 2 # noqa ignore-errors
ansible.builtin.command: munin-node-configure --shell --families=contrib,auto | sh -x
ignore_errors: true # ignore errors

View File

@ -0,0 +1,12 @@
## mgrote.munin-node
### Description
Installs munin-node + plugins.
### Tested on
- [x] Ubuntu (>=18.04)
- [ ] Debian
- [x] ProxMox 6.1
### Variables + defaults
see [defaults](./defaults/main.yml)

View File

@ -13,7 +13,7 @@
state: directory
owner: root
group: root
mode: "0755"
mode: "0644"
loop:
- /etc/munin
- /etc/munin/plugin-conf.d
@ -25,5 +25,5 @@
dest: /etc/munin/munin-node.conf
owner: root
group: root
mode: "0755"
mode: "0644"
notify: restart munin-node

View File

@ -1,7 +1,7 @@
---
- name: remove unwanted plugins
ansible.builtin.file:
path: "{{ munin_plugin_dest_path }}{{ item }}"
path: "{{ munin_plugin_dest_path }}{{ item.name }}"
state: absent
loop: "{{ munin_node_disabled_plugins }}"
notify: restart munin-node
@ -10,7 +10,7 @@
- name: remove additional plugin-config
ansible.builtin.file:
state: absent
dest: "{{ munin_plugin_conf_dest_path }}{{ item }}"
dest: "{{ munin_plugin_conf_dest_path }}{{ item.name }}"
notify: restart munin-node
loop: "{{ munin_node_disabled_plugins }}"
when: munin_node_disabled_plugins is defined

View File

@ -1,6 +1,5 @@
---
### used in many roles
ansible_facts_parallel: true
ssh_public_key_mg: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKL8opSQ0rWVw9uCfbuiqmXq188OP4xh66MBTO3zV5jo heimserver_mg_v3
my_mail: michael.grote@posteo.de
file_header: |
@ -8,18 +7,32 @@ file_header: |
# This file is managed with ansible! #
#----------------------------------------------------------------#
### mgrote_user_setup
dotfiles:
- user: mg
home: /home/mg
- user: root
home: /root
dotfiles_repo_url: https://git.mgrote.net/mg/dotfiles
dotfiles_vim_vundle_repo_url: https://git.mgrote.net/mirrors/Vundle.vim.git
### mgrote_netplan
netplan_configure: true
### mgrote_restic
restic_user: root
restic_group: restic
restic_conf_dir: /etc/restic
restic_exclude: |
._*
desktop.ini
.Trash-*
**/**cache***/**
**/**Cache***/**
**/**AppData***/**
# https://github.com/restic/restic/issues/1005
# https://forum.restic.net/t/exclude-syntax-confusion/1531/12
restic_mount_timeout: "10 min"
restic_failure_delay: "30 s"
restic_schedule: "0/6:00" # every 6 hours
restic_folders_to_backup: "/" # --one-file-system is set, so additional filesystems are not included unless they are listed here explicitly; https://restic.readthedocs.io/en/latest/040_backup.html#excluding-files
restic_repository: "//fileserver3.mgrote.net/restic"
restic_repository_password: "{{ lookup('keepass', 'restic_repository_password', 'password') }}"
restic_mount_user: restic
restic_mount_password: "{{ lookup('keepass', 'fileserver_smb_user_restic', 'password') }}"
restic_fail_mail: "{{ my_mail }}"
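# A hedged sketch of the backup run these variables imply (the CIFS mount point and
# exclude-file path are assumptions; the role presumably drives this via systemd
# timers, given the OnCalendar-style schedule above):
#   mount -t cifs -o username=restic //fileserver3.mgrote.net/restic /mnt/restic
#   RESTIC_PASSWORD="<repo password>" restic -r /mnt/restic backup / --one-file-system --exclude-file=/etc/restic/excludes.txt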
### mgrote_user
users:
- username: mg
@ -61,6 +74,11 @@ ntp_chrony_logging: false
postfix_smtp_server: docker10.mgrote.net
postfix_smtp_server_port: 1025
### mgrote_tmux
tmux_conf_destination: "/home/mg/.tmux.conf"
tmux_bashrc_destination: "/home/mg/.bashrc"
tmux_standardsession_name: "default"
### mgrote_fail2ban
f2b_bantime: 300
f2b_findtime: 300
@ -75,11 +93,6 @@ ufw_rules:
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
ufw_default_incoming_policy: deny
ufw_default_outgoing_policy: allow
@ -114,8 +127,6 @@ apt_packages_common:
- pwgen
- keychain
- fwupd
- bc
- jq
apt_packages_physical:
- s-tui
- smartmontools
@ -128,6 +139,8 @@ apt_packages_absent:
- nano
- snapd
- ubuntu-advantage-tools
apt_packages_internet:
- http://docker10.mgrote.net:3344/bash-helper-scripts-mgrote-latest.deb
### mgrote_zfs_sanoid
sanoid_templates:
@ -178,44 +191,7 @@ sanoid_templates:
autoprune: 'yes'
### mgrote_zfs_sanoid
sanoid_deb_url: http://docker10.mgrote.net:3344/sanoid_v2.2.0.deb
### mgrote_munin_node
munin_node_bind_host: "0.0.0.0"
munin_node_bind_port: "4949"
munin_node_allowed_cidrs: [192.168.2.0/24]
munin_node_disabled_plugins:
- name: meminfo # too much load
- name: hddtemp2 # replaced by hddtemp_smartctl
- name: ntp # causes too many DNS PTR requests
- name: hddtempd # replaced by hddtemp_smartctl
- name: squid_cache # proxmox
- name: squid_objectsize # proxmox
- name: squid_requests # proxmox
- name: squid_traffic # proxmox
- name: timesync
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: lvm_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/disk/lvm_
config: |
[lvm_*]
user root
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
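# To verify a fetched plugin directly on a node (a sketch; munin-run and
# munin-node-configure ship with munin-node):
#   munin-node-configure --suggest
#   munin-run chrony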
sanoid_deb_url: http://docker10.mgrote.net:3344/sanoid_3.0.4.deb
# Ansible Variablen
### User

View File

@ -9,31 +9,13 @@ ufw_rules:
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
to_port: 53
comment: 'dns'
from_ip: 0.0.0.0/0
### mgrote.apt_manage_packages
apt_packages_extra:
- libnet-dns-perl # for munin: dnsresponse_
### mgrote_user_setup
dotfiles_vim_vundle_repo_url: http://192.168.2.42:3000/mirrors/Vundle.vim.git
dotfiles:
- user: mg
home: /home/mg
- user: root
home: /root
dotfiles_repo_url: http://192.168.2.42:3000/mg/dotfiles
### mgrote_blocky
blocky_version: v0.23
blocky_version: v0.22
blocky_block_type: zeroIp
blocky_local_upstream: 192.168.2.1
blocky_conditional_mapping: # optional
@ -59,14 +41,14 @@ blocky_custom_lookups: # optional
ip: 192.168.2.43
- name: ci.mgrote.net
ip: 192.168.2.43
- name: git.mgrote.net
ip: 192.168.2.43
- name: miniflux.mgrote.net
ip: 192.168.2.43
- name: nextcloud.mgrote.net
ip: 192.168.2.43
- name: registry.mgrote.net
ip: 192.168.2.43
- name: git.mgrote.net
ip: 192.168.2.43
# Intern
- name: ads2700w.mgrote.net
ip: 192.168.2.147
@ -80,46 +62,16 @@ blocky_custom_lookups: # optional
ip: 192.168.3.239
- name: pve5-test.mgrote.net
ip: 192.168.2.17
- name: pve5.mgrote.net # stays configured in the router too, because if pve off also no more blocky ;-)
- name: pve5.mgrote.net # stays configured in the router too, because if pve is off, there is no blocky either ;-)
ip: 192.168.2.16
- name: rb5009.mgrote.net
ip: 192.168.2.1
- name: fritz.box
ip: 192.168.5.1
- name: ldap.mgrote.net
ip: 192.168.2.47
### mgrote_munin_node
# cannot resolve git.mgrote.net, hence IPs here
munin_node_plugins:
- name: chrony
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: lvm_
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/disk/lvm_
config: |
[lvm_*]
user root
- name: fail2ban
src: http://192.168.2.42:3000/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: dnsresponse_192.168.2.1
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/network/dns/dnsresponse_
- name: dnsresponse_192.168.2.37
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/network/dns/dnsresponse_
- name: dnsresponse_127.0.0.1
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/network/dns/dnsresponse_
config: |
[dnsresponse_*]
env.site www.heise.de
env.times 20
### mgrote_apt_manage_packages
apt_packages_internet:
- http://192.168.2.43:3344/bash-helper-scripts-mgrote-latest.deb
### mgrote_restic
restic_repository: "//192.168.2.54/restic"

View File

@ -15,14 +15,8 @@ lvm_groups:
manage_lvm: true
pvresize_to_max: true
### geerlingguy.pip
pip_package: python3-pip
pip_install_packages:
- name: docker # for the munin plugin docker_
### mgrote.apt_manage_packages
apt_packages_extra:
- libnet-dns-perl # for munin: dnsresponse_*
### mgrote_restic
restic_folders_to_backup: "/ /var/lib/docker" # --one-file-system is set, so additional filesystems are not included unless they are listed here explicitly; https://restic.readthedocs.io/en/latest/040_backup.html#excluding-files
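In other words: restic only descends into the filesystems listed here. Assuming the CIFS share from restic_repository is mounted at /media/restic, as the media-restic mount units elsewhere in this diff suggest, the role's effective call is roughly:
restic -r /media/restic backup --one-file-system / /var/lib/docker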
### mgrote_user
users:
@ -56,7 +50,7 @@ docker_users:
- mg
- docker-user
docker_install_compose: true
docker_add_repo: false # do not create a repo entry under /etc/apt/sources.list.d/; it is set explicitly under "repos_override" and is only needed for installation
docker_add_repo: false # do not create a repo entry under /etc/apt/sources.list.d/; it is set explicitly under "repos_override"
### mgrote_docker-compose-deploy
docker_compose_base_dir: /home/docker-user
@ -77,63 +71,3 @@ repos_override: # mit docker-repos
### mgrote_systemd_resolved
systemd_resolved_nameserver: 192.168.2.37
### mgrote_munin_node
munin_node_allowed_cidrs: [0.0.0.0/0] # because the munin server connects from a different subnet
munin_node_plugins:
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: lvm_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/disk/lvm_
config: |
[lvm_*]
user root
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: docker_containers
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
config: |
[docker_*]
user root
env.DOCKER_HOST unix://run/docker.sock
- name: docker_cpu
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_memory
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_network
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_volumes
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_volumesize
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_volumesize
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
from_ip: 192.168.0.0/16
comment: 'docker networks'
- rule: allow
from_ip: 172.0.0.0/8
comment: 'docker networks'

View File

@ -9,11 +9,6 @@ ufw_rules:
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
to_port: 445
comment: 'smb'
@ -29,31 +24,3 @@ smb_min_protocol: "SMB2"
smb_client_min_protocol: "SMB2"
smb_client_max_protocol: "SMB3_11"
smb_enable_snapshots_dir: true
smb_enable_snapshots_shadow: true
### mgrote_munin_node
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: samba
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/samba
config: |
[samba]
user root
group root
env.smbstatus /usr/bin/smbstatus
env.ignoreipcshare 1

View File

@ -1,154 +0,0 @@
---
### mrlesmithjr.ansible-manage-lvm
lvm_groups:
- vgname: vg_data
disks:
- /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1
create: true
lvnames:
- lvname: lv_data
size: +100%FREE
create: true
filesystem: xfs
mount: true
mntp: /var/lib/gitea
manage_lvm: true
pvresize_to_max: true
### mgrote_apt_manage_packages
apt_packages_extra:
- fail2ban
### geerlingguy_postgres
postgresql_databases:
- name: "{{ gitea_db_name }}"
postgresql_users:
- name: "{{ gitea_db_user }}"
password: "{{ gitea_db_password }}"
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
to_port: "{{ gitea_http_port }}"
protocol: tcp
comment: 'gitea'
from_ip: 0.0.0.0/0
- rule: allow
to_port: "{{ gitea_ssh_port }}"
protocol: tcp
comment: 'gitea'
from_ip: 0.0.0.0/0
### ansible_role_gitea
# https://git.mgrote.net/ansible-roles-mirrors/ansible_role_gitea
gitea_fork: "forgejo"
# gitea update
gitea_version: "1.21.7-0" # alt zum renovate testen
gitea_version_check: true
gitea_backup_on_upgrade: false
# gitea in the linux world
gitea_group: "gitea"
gitea_user: "gitea"
gitea_home: "/var/lib/gitea"
gitea_user_home: "{{ gitea_home }}"
# config lives in /etc/gitea/gitea.ini
gitea_configuration_path: "/etc/gitea" # adjust
gitea_app_name: "forgejo"
gitea_fqdn: "git.mgrote.net"
# ssh
gitea_ssh_port: 2222
gitea_start_ssh: true
gitea_shell: "/bin/false"
# Repository
gitea_default_branch: "master"
gitea_default_private: "public"
gitea_repository_root: "{{ gitea_home }}/repos"
# ui
gitea_show_user_email: false
# server
gitea_protocol: "http"
gitea_http_domain: "{{ gitea_fqdn }}"
gitea_http_port: "3000"
gitea_http_listen: "0.0.0.0"
gitea_root_url: "https://git.mgrote.net"
gitea_landing_page: "login"
# database
gitea_db_type: "postgres"
gitea_db_host: "localhost"
gitea_db_name: "gitea"
gitea_db_user: "gitea"
gitea_db_password: "{{ lookup('keepass', 'forgejo_db_password', 'password') }}"
# indexer
gitea_repo_indexer_enabled: true
# security
gitea_disable_webhooks: false
gitea_password_check_pwn: false
gitea_internal_token: "{{ lookup('keepass', 'forgejo_internal_token', 'password') }}"
gitea_secret_key: "{{ lookup('keepass', 'forgejo_secret_key', 'password') }}"
# service
gitea_disable_registration: true
gitea_register_email_confirm: true
gitea_require_signin: false
gitea_default_keep_mail_private: true
gitea_enable_captcha: false
gitea_show_registration_button: false
gitea_enable_notify_mail: true
gitea_default_user_visibility: "public"
gitea_show_milestones_dashboard_page: false
gitea_default_allow_create_organization: true
gitea_default_org_visibility: "public"
gitea_default_user_is_restricted: false
# Mailer
gitea_mailer_enabled: true
gitea_mailer_protocol: "smtp"
gitea_mailer_smtp_addr: "docker10.mgrote.net"
gitea_mailer_smtp_port: 1025
gitea_mailer_from: "gitea@mgrote.net"
gitea_subject_prefix: "git.mgrote.net - "
# log
gitea_log_systemd: true
gitea_log_level: "Info"
# Metrics
gitea_metrics_enabled: false
# Federation
gitea_federation_enabled: false
# Packages
gitea_packages_enabled: false
# actions
gitea_actions_enabled: false
gitea_extra_config: |
; webhook: required for drone, otherwise the webhook is not "sent"
[webhook]
ALLOWED_HOST_LIST = *.mgrote.net
; for import/migration from other git systems
[migrations]
ALLOWED_DOMAINS = *
; disabled; see: https://github.com/go-gitea/gitea/issues/25992
[repo-archive]
ENABLED = false
# oauth2
gitea_oauth2_jwt_secret: "{{ lookup('keepass', 'forgejo_oauth2_jwt_secret', 'password') }}"
# Fail2Ban configuration
gitea_fail2ban_enabled: true
gitea_fail2ban_jail_maxretry: "3"
gitea_fail2ban_jail_findtime: "300"
gitea_fail2ban_jail_bantime: "600"
gitea_fail2ban_jail_action: "iptables-allports"
### mgrote_gitea_setup
gitea_ldap_host: "ldap.mgrote.net"
gitea_ldap_base_path: "dc=mgrote,dc=net"
gitea_ldap_bind_user: "forgejo_bind_user"
gitea_ldap_bind_pass: "{{ lookup('keepass', 'lldap_forgejo_bind_user', 'password') }}"
gitea_admin_user: "fadmin"
gitea_admin_user_pass: "{{ lookup('keepass', 'forgejo_admin_user_pass', 'password') }}"
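The keepass entries referenced throughout these vars are resolved by a custom lookup plugin. A minimal usage sketch, assuming the positional arguments are (entry title, attribute) as in the calls above; the entry name here is hypothetical:
- name: Show a secret resolved via the keepass lookup (sketch; entry name is hypothetical)
  ansible.builtin.debug:
    msg: "{{ lookup('keepass', 'example_entry', 'username') }}"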

101
group_vars/gitea.yml Normal file
View File

@ -0,0 +1,101 @@
---
### mrlesmithjr.ansible-manage-lvm
lvm_groups:
- vgname: vg_gitea_data
disks:
- /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1
create: true
lvnames:
- lvname: lv_gitea_data
size: +100%FREE
create: true
filesystem: xfs
mount: true
mntp: /var/lib/gitea
manage_lvm: true
pvresize_to_max: true
### mgrote_restic
restic_folders_to_backup: "/ /var/lib/gitea" # --one-file-system is set, so additional filesystems are not included unless they are listed here explicitly; https://restic.readthedocs.io/en/latest/040_backup.html#excluding-files
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: "{{ gitea_http_port }}"
protocol: tcp
comment: 'gitea'
from_ip: 0.0.0.0/0
- rule: allow
to_port: "{{ gitea_ssh_port }}"
protocol: tcp
comment: 'gitea'
from_ip: 0.0.0.0/0
### l3d.gitea
# config lives in /etc/gitea/gitea.ini
gitea_version: "1.21.0"
gitea_app_name: "Gitea"
gitea_user: "gitea"
gitea_home: "/var/lib/gitea"
gitea_repository_root: "{{ gitea_home }}"
gitea_user_repo_limit: 300
gitea_root_url: https://git.mgrote.net
gitea_offline_mode: true
gitea_lfs_server_enabled: false
gitea_secret_key: "{{ lookup('keepass', 'gitea_secret_key', 'password') }}"
gitea_internal_token: "{{ lookup('keepass', 'gitea_internal_token', 'password') }}"
gitea_disable_git_hooks: false
gitea_show_user_email: false
gitea_disable_gravatar: true
gitea_enable_captcha: true
gitea_only_allow_external_registration: false
gitea_enable_notify_mail: false
gitea_force_private: false
gitea_oauth2_enabled: true
gitea_repo_indexer_enabled: true
gitea_mailer_enabled: true
gitea_mailer_skip_verify: false
gitea_mailer_tls_enabled: true
gitea_mailer_host: smtp.strato.de:465
gitea_mailer_from: info@mgrote.net
gitea_mailer_user: "info@mgrote.net"
gitea_mailer_password: "{{ lookup('keepass', 'strato_smtp_password', 'password') }}"
gitea_mailer_type: smtp
gitea_default_branch: 'master'
gitea_db_type: sqlite3
gitea_db_path: "{{ gitea_home }}/data/gitea.db" # for sqlite3
gitea_ssh_listen: 0.0.0.0
gitea_ssh_domain: gitea.mgrote.net
gitea_ssh_port: 2222
gitea_start_ssh: true
gitea_http_domain: git.mgrote.net
gitea_http_listen: 0.0.0.0
gitea_http_port: 3000
gitea_disable_http_git: false
gitea_protocol: http
gitea_show_registration_button: false
gitea_require_signin: false
gitea_disable_registration: true
gitea_fail2ban_enabled: true
gitea_fail2ban_jail_maxretry: 3
gitea_fail2ban_jail_findtime: 300
gitea_fail2ban_jail_bantime: 600
# required for drone, otherwise the webhook is not "sent"
gitea_extra_config: |
[webhook]
ALLOWED_HOST_LIST = *.mgrote.net
gitea_backup_on_upgrade: false
gitea_backup_location: "{{ gitea_home }}/backups/"

View File

@ -1,58 +0,0 @@
---
### geerlingguy_postgres
postgresql_databases:
- name: "{{ lldap_db_name }}"
postgresql_users:
- name: "{{ lldap_db_user }}"
password: "{{ lldap_db_pass }}"
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
to_port: "{{ lldap_http_port }}"
protocol: tcp
comment: 'lldap'
from_ip: 192.168.2.0/24
- rule: allow
to_port: 3890
protocol: tcp
comment: 'lldap'
from_ip: 192.168.2.0/24
### mgrote_lldap
lldap_package_url: "https://download.opensuse.org/repositories/home:/Masgalor:/LLDAP/xUbuntu_22.04/amd64/lldap_0.5.0-1+3.1_amd64.deb"
lldap_logging_verbose: "true" # must be a string not a boolean
lldap_http_port: 17170
lldap_http_host: "0.0.0.0"
lldap_ldap_host: "0.0.0.0"
lldap_public_url: http://ldap.mgrote.net:17170
lldap_jwt_secret: "{{ lookup('keepass', 'lldap_jwt_secret', 'password') }}"
lldap_ldap_base_dn: "dc=mgrote,dc=net"
lldap_admin_username: ladmin # only used on setup
lldap_admin_password: "{{ lookup('keepass', 'lldap_ldap_user_pass', 'password') }}" # only used on setup; also bind-secret
lldap_admin_mailaddress: lldap-admin@mgrote.net # only used on setup
lldap_database_url: "postgres://{{ lldap_db_user }}:{{ lldap_db_pass }}@{{ lldap_db_host }}/{{ lldap_db_name }}"
lldap_key_seed: "{{ lookup('keepass', 'lldap_key_seed', 'password') }}"
#lldap_smtp_from: "lldap@mgrote.net" # unused in role
lldap_smtp_reply_to: "Do not reply <info@mgrote.net>"
lldap_smtp_server: "docker10.mgrote.net"
lldap_smtp_port: "1025"
lldap_smtp_smtp_encryption: "NONE"
#lldap_smtp_user: "info@mgrote.net" # unused in role
lldap_smtp_enable_password_reset: "true" # must be a string not a boolean
# "meta vars"; daraus werden die db-url und die postgres-db abgeleitet
lldap_db_name: "lldap"
lldap_db_user: "lldap"
lldap_db_pass: "{{ lookup('keepass', 'lldap_db_pass', 'password') }}"
lldap_db_host: "localhost"
...
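Substituting the meta vars above, lldap_database_url resolves to the following (the password placeholder is left unfilled):
postgres://lldap:<lldap_db_pass>@localhost/lldap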

View File

@ -5,6 +5,9 @@ netplan_configure: false
### mgrote_postfix
postfix_erlaubte_netzwerke: "127.0.0.0/8 192.168.2.0/24 192.168.3.0/24"
### mgrote_restic
restic_folders_to_backup: "/ /etc/proxmox-backup"
### mgrote_user
users:
- username: root
@ -30,55 +33,3 @@ users:
public_ssh_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJcBwOjanQV6sFWaTetqpl20SVe3aRzGjKbsp7hKkDCE mg@irantu
allow_sudo: true
allow_passwordless_sudo: true
### mgrote_munin_node
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: zfs_arcstats
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_arcstats
- name: zfsonlinux_stats_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfsonlinux_stats_
- name: zpool_iostat
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_iostat
- name: zfs_list
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_list
config: |
[zfs_list]
env.ignore_datasets_pattern autodaily
- name: zfs_count
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_pool_dataset_count
- name: zpool_iostat
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_iostat
- name: zpool_capacity
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_capacity
munin_node_disabled_plugins:
- meminfo # zu hohe last
- hddtemp2 # ersetzt durch hddtemp_smartctl
- ntp # verursacht zu viele dns ptr request
- hddtempd # ersetzt durch hddtemp_smartctl
- squid_cache # proxmox
- squid_objectsize # proxmox
- squid_requests # proxmox
- squid_traffic # proxmox
- lvm_
- timesync
- lxc_guests
munin_node_allowed_cidrs:
- 192.168.3.0/24
- 192.168.2.0/24
...

View File

@ -2,6 +2,9 @@
### mgrote_netplan
netplan_configure: false
### mgrote_restic
restic_folders_to_backup: "/ /etc/pve"
### mgrote_user
users:
- username: root
@ -28,13 +31,6 @@ users:
allow_sudo: true
allow_passwordless_sudo: true
### mgrote_cv4pve_autosnap
cv4pve_api_user: root@pam!cv4pve-autosnap
cv4pve_api_token: "{{ lookup('keepass', 'cv4pve_api_token', 'password') }}"
cv4pve_vmid: all,-115
cv4pve_keep_snapshots: 5
cv4pve_version: "v1.14.8"
### mgrote_apt_manage_packages
apt_packages_extra:
- ifupdown2
@ -43,73 +39,6 @@ apt_packages_extra:
- open-vm-tools
- systemd-boot
### mgrote_munin_node
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: zfs_arcstats
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_arcstats
- name: zfsonlinux_stats_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfsonlinux_stats_
- name: zpool_iostat
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_iostat
- name: zfs_list
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_list
config: |
[zfs_list]
env.ignore_datasets_pattern autodaily
- name: zpool_capacity
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_capacity
- name: kvm_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/libvirt/kvm_mem
- name: kvm_net
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/libvirt/kvm_net
- name: kvm_io
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/libvirt/kvm_io
config: |
[kvm_io]
user root
- name: kvm_cpu
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/libvirt/kvm_cpu
- name: proxmox_count
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/proxmox/proxmox_vm_count
config: |
[proxmox_count]
user root
group root
- name: zfs_count
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_pool_dataset_count
- name: ksm_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/system/kernel_same_page_merging
munin_node_disabled_plugins:
- meminfo # zu hohe last
- hddtemp2 # ersetzt durch hddtemp_smartctl
- ntp # verursacht zu viele dns ptr request
- hddtempd # ersetzt durch hddtemp_smartctl
- squid_cache # proxmox
- squid_objectsize # proxmox
- squid_requests # proxmox
- squid_traffic # proxmox
- lvm_
- slab
- timesync
- lxc_guests
# Ansible variables
### sudo
sudo: false
...

View File

@ -15,11 +15,11 @@ lvm_groups:
manage_lvm: true
pvresize_to_max: true
### mgrote_mount_cifs # delete
### mgrote_mount_cifs
cifs_mounts:
- name: bilder
type: cifs
state: absent
state: present
dest: /mnt/fileserver3_photoprism_bilder_ro
src: //fileserver3.mgrote.net/bilder
user: photoprism
@ -29,6 +29,9 @@ cifs_mounts:
gid: 5000
extra_opts: ",ro" # the leading comma is required because the option is appended at the end
### mgrote_restic
restic_folders_to_backup: "/ /var/lib/docker /mnt/oci-registry" # --one-file-system is set, so additional filesystems are not included unless they are listed here explicitly
### mgrote_docker-compose-inline
compose_owner: "docker-user"
compose_group: "docker-user"
@ -56,6 +59,8 @@ compose_files:
- name: navidrome
state: present
network: traefik
- name: watchtower
state: present
- name: routeros-config-export
state: present
- name: mail-relay
@ -64,9 +69,13 @@ compose_files:
- name: woodpecker
state: present
network: traefik
- name: photoprism
state: present
- name: wiki
state: present
network: traefik
- name: statping-ng
state: present
### oefenweb.ufw
ufw_rules:

View File

@ -23,18 +23,19 @@ ytdl_video_urls:
- https://www.youtube.com/watch?v=TowKvEJcYDw&list=PLlQWnS27jXh9aEp7hl54xrk5CgiVbvMBy # arte - zu Tisch in...
- https://www.youtube.com/playlist?list=PLs4hTtftqnlAkiQNdWn6bbKUr-P1wuSm0 # jimmy kimmel mean tweets
- https://www.youtube.com/tomstantonengineering
- https://www.youtube.com/@liamcarps # england videos, ironic
ytdl_podcast_urls:
- https://sternengeschichten.podigee.io/feed/aac # Sternengeschichten
- https://feeds.br.de/radiowissen/feed.xml # BR2 RadioWissen
ytdl_video_output: "/shares_videos/Youtube/%(uploader)s/%(title)s-%(id)s.%(ext)s" # videos are now ALWAYS written to "Uploader/Name.ext"
ytdl_enable_video_download: true
ytdl_enable_podcast_download: false
ytdl_video_output: "/shares_videos/Youtube/%(uploader)s/%(title)s-%(id)s.%(ext)s" # videos are now ALWAYS written to "Uploader/Name.ext"
ytdl_podcast_output: "/shares_music/Podcasts/%(playlist)s/%(id)s.%(ext)s"
ytdl_video_log_output: "/shares_videos/Youtube/archive-youtube.log"
ytdl_podcast_log_output: "/shares_music/Podcasts/archive-podcast.log"
ytdl_youtube_username: "{{ lookup('keepass', 'youtubedl_youtube_login', 'username') }}"
ytdl_youtube_password: "{{ lookup('keepass', 'youtubedl_youtube_login', 'password') }}"
ytdl_conf_dir: "/etc/youtubedl" # without trailing /
ytdl_conf_dir: "/etc/youtubedl" #without trailing /
ytdl_download_limit: "10000K"
### mgrote_fileserver_smb
@ -55,6 +56,8 @@ smb_users:
password: "{{ lookup('keepass', 'fileserver_smb_user_pve', 'password') }}"
- name: 'brother_ads2700w'
password: "{{ lookup('keepass', 'fileserver_smb_user_brother_ads2700w', 'password') }}"
- name: 'photoprism'
password: "{{ lookup('keepass', 'fileserver_smb_user_photoprism', 'password') }}"
smb_shares:
- name: 'videos'
@ -87,7 +90,7 @@ smb_shares:
users_rw: 'kodi win10 michaelgrote'
- name: 'bilder'
path: '/shares_bilder'
users_ro: ''
users_ro: 'photoprism'
users_rw: ' michaelgrote win10'
- name: 'proxmox'
path: '/shares_pve_backup'
@ -96,7 +99,7 @@ smb_shares:
- name: 'restic'
path: '/shares_restic'
users_ro: ''
users_rw: 'restic win10 michaelgrote'
users_rw: ' restic win10 michaelgrote'
- name: 'buecher'
path: '/shares_buecher'
users_ro: ''

View File

@ -47,15 +47,12 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
snapdir: hidden
reservation: 1G
refreservation: 1G
acltype: posix
- dataset: rpool/ROOT
state: present
refreservation: 1G
- dataset: rpool/ROOT/pbs-1
state: present
refreservation: 1G
acltype: posix # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
# backup-pool
- dataset: backup/pbs_data
state: present
@ -63,7 +60,6 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
- dataset: backup/pve5
state: present
canmount: off # noqa yaml[truthy]
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
zfs_extra_arc_max_size: "1073741824" # 1GB in Bytes
zfs_extra_zfs_pools:

View File

@ -43,14 +43,12 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
snapdir: hidden
reservation: 1G
refreservation: 10G
acltype: posix
- dataset: rpool/ROOT
state: present
refreservation: 10G
- dataset: rpool/ROOT/pbs-1
state: present
refreservation: 10G
acltype: posix # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
# backup-pool
- dataset: backup/pbs_data
state: present

View File

@ -21,14 +21,12 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
snapdir: hidden
reservation: 1G
refreservation: 1G
acltype: posix
- dataset: rpool/ROOT
state: present
refreservation: 1G
- dataset: rpool/ROOT/pve-1
state: present
refreservation: 1G
acltype: posix # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
# rpool - VMs
- dataset: rpool/vm
@ -37,7 +35,6 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
state: present
- dataset: rpool/vm/lxc
state: present
# hdd_data
- dataset: hdd_data
state: present
@ -153,6 +150,13 @@ sanoid_datasets:
snapshots: true
template: '3tage'
### mgrote_cv4pve-autosnap
cv4pve_api_user: root@pam!cv4pve-autosnap
cv4pve_api_token: "{{ lookup('keepass', 'cv4pve_api_token', 'password') }}"
cv4pve_vmid: all
cv4pve_keep_snapshots: 5
cv4pve_dl_link: "https://github.com/Corsinvest/cv4pve-autosnap/releases/download/v1.10.0/cv4pve-autosnap-linux-x64.zip"
### mgrote_proxmox_bind_mounts
pve_bind_mounts:
- vmid: 100

View File

@ -6,10 +6,6 @@
## hdd_data
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data mirror /dev/disk/by-id/ata-TOSHIBA_MG09ACA18TE_Z1B0A27KFJDH /dev/disk/by-id/ata-ST18000NM003D-3DL103_ZVTBSAYS
## hdd_data "neu"
### sudo zpool create -o ashift=12 -o feature@encryption=enabled -O encryption=on -O keylocation=prompt -O keyformat=passphrase hdd_data /dev/disk/by-id/ata-ST18000NM003D-3DL103_ZVTBSAYS
# mgrote.zfs_manage_datasets
### mgrote_zfs_extra
# variables for mgrote.zfs_health/trim/scrub/zed/arc_mem are grouped under zfs_extra_*
@ -25,19 +21,15 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
snapdir: hidden
reservation: 1G
refreservation: 10G
acltype: posix
- dataset: rpool/ROOT
state: present
refreservation: 10G
- dataset: rpool/ROOT/pve-1
state: present
refreservation: 10G
acltype: posix # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
# rpool - Data
- dataset: rpool/data
state: present
# rpool - VMs
- dataset: rpool/vm
state: present
@ -47,7 +39,6 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
state: present
- dataset: rpool/data
state: present
# hdd_data
- dataset: hdd_data
state: present
@ -58,7 +49,6 @@ zfs_datasets: # DatenPools werden hier nicht verwaltet
atime: on # noqa yaml[truthy]
snapdir: hidden
reservation: 1G
acltype: posix # https://docs.ansible.com/ansible-core/2.14/playbook_guide/playbooks_privilege_escalation.html#risks-of-becoming-an-unprivileged-user ; otherwise the dotfiles role cannot run setfacl
- dataset: hdd_data/papa_backup
state: present
- dataset: hdd_data/pve_backup
@ -170,6 +160,13 @@ sanoid_datasets:
snapshots: true
template: 'pve3tage'
### mgrote_cv4pve-autosnap
cv4pve_api_user: root@pam!cv4pve-autosnap
cv4pve_api_token: "{{ lookup('keepass', 'cv4pve_api_token', 'password') }}"
cv4pve_vmid: all,-115
cv4pve_keep_snapshots: 5
cv4pve_dl_link: "https://github.com/Corsinvest/cv4pve-autosnap/releases/download/v1.14.7/cv4pve-autosnap-linux-x64.zip"
### mgrote_proxmox_bind_mounts
pve_bind_mounts:
### fileserver3

View File

@ -6,9 +6,7 @@ all:
blocky:
hosts:
blocky.mgrote.net:
ldap:
hosts:
ldap.mgrote.net:
blocky2.mgrote.net:
lxc:
hosts:
fileserver3.mgrote.net:
@ -35,20 +33,20 @@ all:
hosts:
pve5.mgrote.net:
pbs.mgrote.net:
git:
gitea:
hosts:
forgejo.mgrote.net:
gitea.mgrote.net:
production:
hosts:
fileserver3.mgrote.net:
ansible2.mgrote.net:
pve5.mgrote.net:
forgejo.mgrote.net:
gitea.mgrote.net:
docker10.mgrote.net:
pbs.mgrote.net:
blocky.mgrote.net:
ldap.mgrote.net:
blocky2.mgrote.net:
test:
hosts:
vm-test-2204.mgrote.net:

Binary file not shown.

View File

@ -2,7 +2,7 @@
- hosts: all
gather_facts: false
roles:
- role: ansible-role-bootstrap
- role: robertdebock-ansible-role-bootstrap
tags: "bootstrap"
become: true
- role: mgrote_apt_manage_sources

View File

@ -3,4 +3,3 @@
- ansible.builtin.import_playbook: base/system.yml
- ansible.builtin.import_playbook: base/users.yml
- ansible.builtin.import_playbook: base/ufw.yml
- ansible.builtin.import_playbook: base/monitoring.yml

View File

@ -1,6 +1,4 @@
---
- hosts: ansible
roles:
- role: ansible-role-pip
tags: "pip"
become: true
- { role: geerlingguy-ansible-role-pip, tags: "pip", become: true }

View File

@ -1,7 +1,5 @@
---
- hosts: blocky
roles:
- role: mgrote_systemd_resolved
tags: "resolved"
- role: mgrote_blocky
tags: "blocky"
- { role: mgrote_blocky, tags: "blocky" }
- { role: mgrote_systemd_resolved, tags: "resolved" }

View File

@ -1,21 +1,10 @@
---
- hosts: docker
roles:
- role: mgrote_systemd_resolved
tags: "dns"
become: true
- role: ansible-role-pip
tags: "pip"
become: true
- role: ansible-role-docker
tags: "docker"
become: true
- role: ansible_role_ctop
tags: "ctop"
become: true
- role: mgrote_set_permissions
tags: "perm"
become: true
- role: mgrote_docker_compose_inline
tags: "compose"
become: true
- { role: mgrote_systemd_resolved, tags: "dns", become: true }
- { role: mgrote_mount_cifs, tags: "cifs", become: true }
- { role: geerlingguy-ansible-role-pip, tags: "pip", become: true }
- { role: geerlingguy-ansible-role-docker, tags: "docker", become: true }
- { role: gantsign-ansible-role-ctop, tags: "ctop", become: true }
- { role: mgrote_set_permissions, tags: "perm", become: true }
- { role: mgrote_docker_compose_inline, tags: "compose", become: true }

View File

@ -6,9 +6,6 @@
---
- hosts: fileserver
roles:
- role: mgrote_fileserver_smb
tags: "smb"
- role: mgrote_youtubedl
tags: "youtubedl"
- role: mgrote_disable_oom_killer
tags: "oom"
- { role: mgrote_fileserver_smb, tags: "fileserver_smb" }
- { role: mgrote_youtubedl, tags: "youtubedl" }
- { role: mgrote_disable_oom_killer, tags: "oom" }

View File

@ -1,12 +0,0 @@
---
- hosts: git
roles:
- role: ansible-role-postgresql
tags: "db"
become: true
- role: ansible_role_gitea
tags: "gitea"
become: true
- role: mgrote_gitea_setup
tags: "setup"
become: true

View File

@ -0,0 +1,4 @@
---
- hosts: gitea
roles:
- { role: pyratlabs-ansible-role-gitea, tags: "gitea", become: true }

View File

@ -1,11 +0,0 @@
---
- hosts: ldap
roles:
- role: ansible-role-postgresql
tags: "db"
become: true
- role: mgrote_lldap
tags:
- lldap
- ldap
become: true

View File

@ -1,21 +1,12 @@
---
- hosts: pbs
roles:
- role: mgrote_zfs_packages
tags: "zfs_packages"
- role: mgrote_zfs_arc_mem
tags: "zfs_arc_mem"
- role: mgrote_zfs_manage_datasets
tags: "datasets"
- role: mgrote_zfs_scrub
tags: "zfs_scrub"
- role: mgrote_zfs_zed
tags: "zfs_zed"
- role: mgrote_zfs_sanoid
tags: "sanoid"
- role: mgrote_smart
tags: "smart"
- role: mgrote_pbs_users
tags: "pbs_users"
- role: mgrote_pbs_datastores
tags: "pbs_datastores"
- { role: mgrote_zfs_packages, tags: "zfs_packages" }
- { role: mgrote_zfs_arc_mem, tags: "zfs_arc_mem" }
- { role: mgrote_zfs_manage_datasets, tags: "datasets" }
- { role: mgrote_zfs_scrub, tags: "zfs_scrub" }
- { role: mgrote_zfs_zed, tags: "zfs_zed" }
- { role: mgrote_zfs_sanoid, tags: "sanoid" }
- { role: mgrote_smart, tags: "smart" }
- { role: mgrote_pbs_users, tags: "pbs_users" }
- { role: mgrote_pbs_datastores, tags: "pbs_datastores" }

View File

@ -1,26 +1,14 @@
---
- hosts: pve
roles:
- role: mgrote_zfs_packages
tags: "zfs_packages"
- role: mgrote_zfs_arc_mem
tags: "zfs_arc_mem"
- role: mgrote_zfs_manage_datasets
tags: "datasets"
- role: mgrote_zfs_scrub
tags: "zfs_scrub"
- role: mgrote_zfs_zed
tags: "zfs_zed"
- role: mgrote_zfs_sanoid
tags: "sanoid"
- role: mgrote_smart
tags: "smart"
- role: mgrote_cv4pve_autosnap
tags: cv4pve
become: true
- role: mgrote_proxmox_bind_mounts
tags: "bindmounts"
- role: mgrote_proxmox_lxc_profiles
tags: "lxc-profile"
- role: mgrote_pbs_pve_integration
tags: "pbs"
- { role: mgrote_zfs_packages, tags: "zfs_packages" }
- { role: mgrote_zfs_arc_mem, tags: "zfs_arc_mem" }
- { role: mgrote_zfs_manage_datasets, tags: "datasets" }
- { role: mgrote_zfs_scrub, tags: "zfs_scrub" }
- { role: mgrote_zfs_zed, tags: "zfs_zed" }
- { role: mgrote_zfs_sanoid, tags: "sanoid" }
- { role: mgrote_smart, tags: "smart" }
- { role: mgrote_cv4pve_autosnap, tags: "cv4pve" }
- { role: mgrote_proxmox_bind_mounts, tags: "bindmounts" }
- { role: mgrote_proxmox_lxc_profiles, tags: "lxc-profile" }
- { role: mgrote_pbs_pve_integration, tags: "pbs" }

View File

@ -1,11 +0,0 @@
---
- hosts: all
roles:
- role: mgrote_munin_node
become: true
tags: "munin"
when: "not 'laptop' in group_names"
### The hosts must also be registered in the "munin-master" docker container.
### Only runs on physical machines.
### If a plugin does not work: munin-node-configure --shell --families=contrib,auto | sh -x

View File

@ -5,12 +5,14 @@
tags: "apt_sources"
- role: mgrote_apt_manage_packages
tags: "install"
- role: mgrote_exa
tags: "exa"
- role: mgrote_remove_snapd
become: true
tags: "snapd"
- role: mgrote_apt_update_packages
tags: "updates"
- role: ansible-role-unattended-upgrades
- role: hifis-net-ansible-role-unattended-upgrades
become: true
tags: unattended
when: "ansible_facts['distribution'] == 'Ubuntu'"

View File

@ -3,21 +3,21 @@
roles:
- role: mgrote_ntp_chrony_client
tags: "ntp"
- role: mgrote_etckeeper
tags: "etckeeper"
- role: mgrote_postfix
tags: "postfix"
- role: mgrote_restic
tags: "restic"
- role: mgrote_fail2ban
tags: "f2b"
- role: mgrote_fwupd_settings
become: true
tags: fwupd
when: "ansible_facts['distribution'] == 'Ubuntu'"
- role: ansible-manage-lvm
- role: mrlesmithjr-ansible-manage-lvm
tags: "lvm"
become: true
when: manage_lvm is defined and manage_lvm
# $manage_lvm belongs to this role but is queried separately to "activate" the playbook
# $manage_lvm belongs to this role but is queried separately to "activate" the playbook
- role: mgrote_ssh
tags: "ssh"
- role: mgrote_netplan

View File

@ -1,6 +1,6 @@
---
- hosts: all:!pve:!pbs
roles:
- role: ansible-ufw # rules are set in the group/host vars
tags: ufw
become: true
- { role: oefenweb-ansible-ufw, # rules are set in the group/host vars
tags: "ufw",
become: true}

View File

@ -1,10 +1,8 @@
---
- hosts: all
become: true
roles:
- role: mgrote_users
tags: users
become: true
- role: mgrote_user_setup
tags:
- user_setup
- dotfiles
tags: "user"
- role: mgrote_vim
tags: "vim"

View File

@ -1,18 +0,0 @@
---
- hosts: all
tasks:
- name: remove user
become: true
ansible.builtin.user:
name: "{{ item }}"
state: absent
remove: true
loop:
- drone
- drone-user
- name: Ensure dir is removed
become: true
ansible.builtin.file:
path: /home/drone
state: absent

View File

@ -1,22 +0,0 @@
---
- hosts: all
become: yes
tasks:
- name: Ensure packages are absent
become: yes
ansible.builtin.apt:
autoremove: yes
autoclean: yes
purge: yes
name:
- munin-node
state: absent
- name: Ensure directories are absent
become: yes
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /usr/share/munin
- /etc/munin

View File

@ -1,78 +0,0 @@
---
- hosts: all
tasks:
- name: ensure group exists
become: true
ansible.builtin.group:
name: restic
state: absent
- name: install restic-packages
become: true
ansible.builtin.package:
name:
- restic
state: absent
- name: create "/etc/restic"
become: true
ansible.builtin.file:
path: "/etc/restic"
state: absent
- name: systemctl start restic.timer
become: true
ansible.builtin.systemd:
name: restic.timer
state: stopped
enabled: false
- name: systemctl enable units
become: true
ansible.builtin.systemd:
name: "{{ item }}"
enabled: false
masked: true
with_items:
- media-restic.automount
- media-restic.mount
- restic.service
- restic.timer
- restic_mail.service
- name: template restic.mount
become: true
ansible.builtin.file:
state: absent
path: /etc/systemd/system/media-restic.mount # media-restic == /media/restic
- name: template restic.automount
become: true
ansible.builtin.file:
path: /etc/systemd/system/media-restic.automount
state: absent
- name: template restic.service
become: true
ansible.builtin.file:
path: /etc/systemd/system/restic.service
state: absent
- name: template restic.timer
become: true
ansible.builtin.file:
path: /etc/systemd/system/restic.timer
state: absent
- name: template restic_mail.service
become: true
ansible.builtin.file:
path: /etc/systemd/system/restic_mail.service
state: absent
- name: template restic_mail.service
become: true
ansible.builtin.file:
path: /etc/systemd/system/media-restic.automount
state: absent

View File

@ -1,26 +0,0 @@
---
- hosts: all
become: true
tasks:
- name: update apt cache
ansible.builtin.apt:
update_cache: true
- name: update installed packages
ansible.builtin.package:
upgrade: dist
register: upgrade
- name: apt autoremove
ansible.builtin.apt:
autoremove: true
clean: yes
- name: reboot
ansible.builtin.reboot:
when: (upgrade.changed and (inventory_hostname != 'pve5.mgrote.net' and inventory_hostname != 'ansible2.mgrote.net'))
- name: Info
ansible.builtin.debug:
msg: Mount the pool on pbs.mgrote.net!
when: (upgrade.changed and inventory_hostname == 'pbs.mgrote.net')

View File

@ -1,5 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["config:recommended"],
"ignorePaths": ["**/friedhof/**"]
}

View File

@ -1,30 +0,0 @@
collections:
- name: community.general
version: "8.6.0"
- name: community.crypto
version: "2.19.1"
- name: ansible.posix
version: "1.5.4"
- name: community.docker
version: "3.9.0"
roles:
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-bootstrap
version: "6.2.5"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-ufw
version: "v4.1.13"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-manage-lvm
version: "v0.2.11"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-unattended-upgrades
version: "v4.1.0"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-pip
version: "3.0.3"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-nfs
version: "2.0.0"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-docker
version: "7.1.0"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible_role_ctop
version: "1.1.6"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible_role_gitea
version: "v3.4.2"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-postgresql
version: "3.5.1"

30
requirements.yml Normal file
View File

@ -0,0 +1,30 @@
collections:
- git+https://git.mgrote.net/ansible-collections-mirrors/community.general
- git+https://git.mgrote.net/ansible-collections-mirrors/community.crypto
- git+https://git.mgrote.net/ansible-collections-mirrors/ansible.posix
- git+https://git.mgrote.net/ansible-collections-mirrors/community.docker
roles:
- src: https://git.mgrote.net/ansible-roles-mirrors/pyratlabs-ansible-role-k3s
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/robertdebock-ansible-role-bootstrap
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/pandemonium1986-ansible-role-k9s
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/oefenweb-ansible-ufw
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/mrlesmithjr-ansible-manage-lvm
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/hifis-net-ansible-role-unattended-upgrades
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-pip
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-nfs
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-helm
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-docker
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/gantsign-ansible-role-ctop
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/pyratlabs-ansible-role-gitea
scm: git

View File

@ -29,9 +29,10 @@
- name: install packages from the internet
become: true
ansible.builtin.apt:
deb: "{{ apt_packages_internet }}"
deb: "{{ item }}"
state: present
when: apt_packages_internet is defined
loop: "{{ apt_packages_internet }}"
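Assembled from the hunk above, the b2 side of this task reads as one unit (a reconstruction, assuming the when: line is shared context); the loop is presumably needed because the apt module's deb option expects a single package path per call:
- name: install packages from the internet
  become: true
  ansible.builtin.apt:
    deb: "{{ item }}"
    state: present
  when: apt_packages_internet is defined
  loop: "{{ apt_packages_internet }}"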
- name: remove packages
become: true

View File

@ -1,6 +1,10 @@
---
- name: update apt cache and installed packages
- name: update apt cache
become: true
ansible.builtin.apt:
update_cache: true
- name: update installed packages
become: true
ansible.builtin.package:
upgrade: dist
update_cache: true

View File

@ -1,10 +1,5 @@
{{ file_header | default () }}
upstreams:
init:
# Configure startup behavior.
# accepted: blocking, failOnError, fast
# default: blocking
strategy: fast
groups:
default:
{% for item in blocky_dns_upstream %}
@ -13,6 +8,9 @@ upstreams:
strategy: parallel_best
timeout: 2s
# optional: If true, blocky will fail to start unless at least one upstream server per group is reachable. Default: false
startVerifyUpstream: true
# optional: Determines how blocky will create outgoing connections. This impacts both upstreams, and lists.
# accepted: dual, v4, v6
# default: dual
@ -49,18 +47,17 @@ blocking:
downloads:
# optional: timeout for list download (each url). Use large values for big lists or slow internet connections
# default: 5s
timeout: 60s
timeout: 5s
# optional: Maximum download attempts
# default: 3
attempts: 5
attempts: 3
# optional: Time between the download attempts
# default: 500ms
cooldown: 10s
cooldown: 500ms
# optional: Maximum number of lists to process in parallel.
# default: 4
concurrency: 16
# Configure startup behavior.
# accepted: blocking, failOnError, fast
concurrency: 4
# optional: if failOnError, application startup will fail if at least one list can't be downloaded/opened
# default: blocking
strategy: {{ blocky_blacklists_strategy | default ("blocking") }}
# Number of errors allowed in a list before it is considered invalid.
@ -122,7 +119,7 @@ caching:
prefetching: true
# prefetch track time window (in duration format)
# default: 120
prefetchExpires: 2h
prefetchExpires: 120
# name queries threshold for prefetch
# default: 5
prefetchThreshold: 5
@ -131,7 +128,7 @@ caching:
prefetchMaxItemsCount: 0
# Time how long negative results (NXDOMAIN response or empty result) are cached. A value of -1 will disable caching for negative results.
# Default: 30m
cacheTimeNegative: -1
cacheTimeNegative: 30m
# optional: configuration of client name resolution
clientLookup:

View File

@ -0,0 +1,11 @@
## mgrote.cv4pve
### Description
Installs [cv4pve-autosnap](https://github.com/Corsinvest/cv4pve-autosnap).
Sets up a systemd timer.
### Tested on
- [x] ProxMox 7*
### Variables + Defaults
- see [defaults](./defaults/main.yml)

View File

@ -3,7 +3,7 @@
cv4pve_cron_minute: "39"
cv4pve_cron_hour: "5"
# proxmox api-token and user
cv4pve_api_token: "supersecret"
cv4pve_api_token: "XXXXXXXXXXXXXXXXXXXXXX"
cv4pve_api_user: "root@pam!test2"
# which vm to snapshot
cv4pve_vmid: all
@ -12,7 +12,3 @@ cv4pve_keep_snapshots: 3
# under which user the script is run
cv4pve_user_group: cv4pve
cv4pve_user: cv4pve
# url
cv4pve_dl_link: https://github.com/Corsinvest/cv4pve-autosnap/releases/download/{{ cv4pve_version }}/cv4pve-autosnap-linux-x64.zip
cv4pve_version: "v1.14.8"
cv4pve_base_path: /usr/local/bin/cv4pve

View File

@ -1,42 +0,0 @@
---
- name: Ensure needed directories exist
ansible.builtin.file:
path: "{{ cv4pve_base_path }}"
state: directory
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
mode: "0644"
- name: Download specified version
ansible.builtin.unarchive:
src: "{{ cv4pve_dl_link }}"
dest: "{{ cv4pve_base_path }}"
mode: '0755'
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
remote_src: true
creates: "{{ cv4pve_base_path }}/cv4pve-autosnap-{{ cv4pve_version }}"
list_files: true
register: download
- name: Rename binary # noqa no-changed-when no-handler
ansible.builtin.command: |
mv "{{ cv4pve_base_path }}/cv4pve-autosnap" "{{ cv4pve_base_path }}/cv4pve-autosnap-{{ cv4pve_version }}"
when: download.changed
# https://stackoverflow.com/questions/20252057/using-ansible-how-would-i-delete-all-items-except-for-a-specified-set-in-a-dire
- name: Find old versions
ansible.builtin.find:
paths: "{{ cv4pve_base_path }}"
file_type: file
use_regex: false
excludes:
- "cv4pve-autosnap-{{ cv4pve_version }}"
register: found_files
- name: Ensure old versions are absent
ansible.builtin.file:
path: "{{ item.path }}"
state: absent
with_items: "{{ found_files['files'] }}"
...

View File

@ -2,9 +2,74 @@
- name: include user tasks
ansible.builtin.include_tasks: user.yml
- name: include install tasks
ansible.builtin.include_tasks: install.yml
- name: include systemd tasks
ansible.builtin.include_tasks: systemd.yml
...
- name: create directories
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
mode: "0644"
loop:
- '/tmp/cv4pve'
- '/usr/local/bin/cv4pve'
- name: download archives
become: true
ansible.builtin.get_url:
url: "{{ cv4pve_dl_link }}"
dest: /tmp/cv4pve/cv4pve-autosnap-linux-x64.zip
mode: '0775'
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
- name: extract archives
become: true
ansible.builtin.unarchive:
src: /tmp/cv4pve/cv4pve-autosnap-linux-x64.zip
dest: /usr/local/bin/cv4pve
remote_src: true
mode: a+x
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
- name: template cv4pve.service
become: true
ansible.builtin.template:
src: cv4pve.service.j2
dest: /etc/systemd/system/cv4pve.service
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: template cv4pve_mail.service
become: true
ansible.builtin.template:
src: cv4pve_mail.service.j2
dest: /etc/systemd/system/cv4pve_mail.service
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: template cv4pve.timer
become: true
ansible.builtin.template:
src: cv4pve.timer.j2
dest: /etc/systemd/system/cv4pve.timer
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: systemctl start cv4pve.timer
become: true
ansible.builtin.systemd:
name: cv4pve.timer
state: started
enabled: true

View File

@ -1,38 +0,0 @@
---
- name: Ensure service-unit (cv4pve) is templated
ansible.builtin.template:
src: cv4pve.service.j2
dest: /etc/systemd/system/cv4pve.service
owner: root
group: root
mode: "0644"
no_log: true
notify:
- systemctl daemon-reload
- name: Ensure service-unit (mail) is templated
ansible.builtin.template:
src: cv4pve_mail.service.j2
dest: /etc/systemd/system/cv4pve_mail.service
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: Ensure service-unit (timer) is templated
ansible.builtin.template:
src: cv4pve.timer.j2
dest: /etc/systemd/system/cv4pve.timer
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: Ensure timer is started
ansible.builtin.systemd:
name: cv4pve.timer
state: started
enabled: true
...

View File

@ -1,5 +1,5 @@
---
- name: Ensure group exists
- name: ensure group exists
become: true
ansible.builtin.group:
name: "{{ cv4pve_user_group }}"
@ -7,7 +7,7 @@
when:
- cv4pve_user_group is defined
- name: Ensure user exists
- name: ensure user exists
become: true
ansible.builtin.user:
name: "{{ cv4pve_user }}"
@ -17,4 +17,3 @@
when:
- cv4pve_user_group is defined
- cv4pve_user is defined
...

View File

@ -6,4 +6,4 @@ OnFailure=cv4pve_mail.service
[Service]
Type=simple
ExecStart={{ cv4pve_base_path }}/cv4pve-autosnap-{{ cv4pve_version }} --host=127.0.0.1 --api-token {{ cv4pve_api_user }}={{ cv4pve_api_token }} --vmid="{{ cv4pve_vmid }}" snap --label='daily' --keep="{{ cv4pve_keep_snapshots }}" --state
ExecStart=/usr/local/bin/cv4pve/cv4pve-autosnap --host=127.0.0.1 --api-token {{ cv4pve_api_user }}={{ cv4pve_api_token }} --vmid="{{ cv4pve_vmid }}" snap --label='daily' --keep="{{ cv4pve_keep_snapshots }}" --state
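With the role defaults shown further up (cv4pve_api_user root@pam!test2, cv4pve_vmid all, cv4pve_keep_snapshots 3; the token placeholder left unfilled), the b2 line renders roughly to:
ExecStart=/usr/local/bin/cv4pve/cv4pve-autosnap --host=127.0.0.1 --api-token root@pam!test2=<cv4pve_api_token> --vmid="all" snap --label='daily' --keep="3" --state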

View File

@ -6,5 +6,6 @@ Description=Timer: Trigger VM-Snapshots in PVE with cv4pve.
OnCalendar=*-*-* {{ cv4pve_cron_hour }}:{{ cv4pve_cron_minute }}:00
RandomizedDelaySec=10 min
[Install]
WantedBy=timers.target multi-user.target
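With the role defaults cv4pve_cron_hour: "5" and cv4pve_cron_minute: "39", the timer renders roughly to this (a sketch of the relevant lines only):
OnCalendar=*-*-* 5:39:00
RandomizedDelaySec=10 min

[Install]
WantedBy=timers.target multi-user.target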

View File

@ -1,4 +1,5 @@
{{ file_header | default () }}
[Unit]
Description=Send a Mail in case of an error in cv4pve.service.

View File

@ -1,16 +0,0 @@
---
- name: ensure etckeeper is installed
become: true
ansible.builtin.package:
name:
- etckeeper
- git
state: present
install_recommends: false
- name: ensure repository is initialized
ansible.builtin.command: etckeeper init
args:
chdir: /etc/
creates: /etc/.etckeeper
...

View File

@ -83,16 +83,16 @@ smb_packages:
## ensures that folder names are shown as "autosnap_2021-11-04_23÷59÷02_daily" instead of "A0KDC9~F"
## https://www.samba.org/samba/docs/current/man-html/vfs_catia.8.html
# active?
smb_enable_snapshots_dir: false
smb_enable_snapshots_dir: true
# which character replacements catia should perform
smb_catia_mappings: "0x3a:0xf7" # replaces ":" with "÷"
# expose as Windows shadow copies
## https://www.samba.org/samba/docs/current/man-html/vfs_shadow_copy2.8.html
## BUG: Windows sees the shadow copies but cannot open the selected file if it was deleted since the snapshot, presumably because Windows does not use the full snapshot path
## format matches sanoid snapshots
## format matches sanoid snapshots
# active?
smb_enable_snapshots_shadow: true
smb_enable_snapshots_shadow: false
# where the snapshots live
smb_shadow_snapdir: ".zfs/snapshot"
# sort order
@ -104,4 +104,4 @@ smb_shadow_snapprefix: "^autosnap"
# snapshot "delimiter"
smb_shadow_delimiter: "_"
# time format of the snapshots
smb_shadow_localtime: "yes"
smb_shadow_localtime: "no"

View File

@ -11,21 +11,20 @@
##======================= catia =======================
vfs objects = catia
catia: mappings = {{ smb_catia_mappings }}
{% elif smb_enable_snapshots_dir is sameas false and smb_enable_snapshots_shadow is sameas true %}
{% elif smb_enable_snapshots_shadow is sameas true and smb_enable_snapshots_dir is sameas false %}
##======================= shadow_copy2 =======================
vfs objects = shadow_copy2
vfs objects = {{ smb_shadow_vfs_objects }}
shadow: snapdir = {{ smb_shadow_snapdir }}
shadow: sort = {{ smb_shadow_sort }}
shadow: format = {{ smb_shadow_format }}
shadow: snapprefix = {{ smb_shadow_snapprefix }}
shadow: delimiter = {{ smb_shadow_delimiter }}
shadow: localtime = {{ smb_shadow_localtime }}
shadow: snapdirseverywhere = yes
{% elif smb_enable_snapshots_shadow is sameas true and smb_enable_snapshots_dir is sameas true %}
#======================= vfs objects =======================
vfs objects = shadow_copy2, catia
##======================= catia =======================
catia:mappings = {{ smb_catia_mappings }}
catia: mappings = {{ smb_catia_mappings }}
##======================= shadow_copy2 =======================
shadow: snapdir = {{ smb_shadow_snapdir }}
shadow: sort = {{ smb_shadow_sort }}
@ -33,7 +32,6 @@ shadow: format = {{ smb_shadow_format }}
shadow: snapprefix = {{ smb_shadow_snapprefix }}
shadow: delimiter = {{ smb_shadow_delimiter }}
shadow: localtime = {{ smb_shadow_localtime }}
shadow: snapdirseverywhere = yes
{% endif %}
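For the shadow_copy2-only branch, the b2 defaults above render roughly to the following share options; smb_shadow_vfs_objects, smb_shadow_sort and smb_shadow_format come from defaults not shown in this hunk and are left as placeholders:
##======================= shadow_copy2 =======================
vfs objects = <smb_shadow_vfs_objects>
shadow: snapdir = .zfs/snapshot
shadow: sort = <smb_shadow_sort>
shadow: format = <smb_shadow_format>
shadow: snapprefix = ^autosnap
shadow: delimiter = _
shadow: localtime = no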

View File

@ -1,34 +0,0 @@
---
# the variables come from
# - https://docs.gitea.com/administration/command-line
# - https://github.com/lldap/lldap/blob/main/example_configs/gitea.md
# and
# the respective group/host vars!
- name: Check if Admin-User exists
no_log: true
become_user: gitea
become: true
ansible.builtin.command: |
forgejo admin user list \
--config "{{ gitea_configuration_path }}/gitea.ini"
register: check
changed_when: false
- name: Ensure Admin-User exists
#no_log: true
become_user: gitea
become: true
ansible.builtin.command: |
forgejo admin user create \
--config "{{ gitea_configuration_path }}/gitea.ini" \
--username "{{ gitea_admin_user }}" \
--password "{{ gitea_admin_user_pass }}" \
--email "{{ gitea_admin_user }}@mgrote.net" \
--admin
when: 'not "{{ gitea_admin_user }}@mgrote.net" in check.stdout'
- name: Show existing users
ansible.builtin.debug:
msg: "{{ check.stdout_lines }}"
...

View File

@ -1,56 +0,0 @@
---
# the variables come from
# - https://docs.gitea.com/administration/command-line
# - https://github.com/lldap/lldap/blob/main/example_configs/gitea.md
# and
# the respective group/host vars!
- name: Ensure LDAP config is set up
no_log: true
become_user: gitea
become: true
ansible.builtin.command: |
forgejo admin auth add-ldap \
--config "{{ gitea_configuration_path }}/gitea.ini" \
--name "lldap" \
--security-protocol "unencrypted" \
--host "{{ gitea_ldap_host }}" \
--port "3890" \
--bind-dn "uid={{ gitea_ldap_bind_user }},ou=people,{{ gitea_ldap_base_path }}" \
--bind-password "{{ gitea_ldap_bind_pass }}" \
--user-search-base "ou=people,{{ gitea_ldap_base_path }}" \
--user-filter "(&(memberof=cn=gitea,ou=groups,{{ gitea_ldap_base_path }})(|(uid=%[1]s)(mail=%[1]s)))" \
--username-attribute "uid" \
--email-attribute "mail" \
--firstname-attribute "givenName" \
--surname-attribute "sn" \
--avatar-attribute "jpegPhoto" \
--synchronize-users
register: setup
ignore_errors: true
failed_when: 'not "Command error: login source already exists [name: lldap]" in setup.stderr' # fail the task if LDAP is already configured
changed_when: "setup.rc == 0" # changed only when the task rc is 0; should only happen on the first run; untested
- name: Modify LDAP config
no_log: true
become_user: gitea
become: true
ansible.builtin.command: |
forgejo admin auth update-ldap \
--config "{{ gitea_configuration_path }}/gitea.ini" \
--id "1" \
--security-protocol "unencrypted" \
--host "{{ gitea_ldap_host }}" \
--port "3890" \
--bind-dn "uid={{ gitea_ldap_bind_user }},ou=people,{{ gitea_ldap_base_path }}" \
--bind-password "{{ gitea_ldap_bind_pass }}" \
--user-search-base "ou=people,{{ gitea_ldap_base_path }}" \
--user-filter "(&(memberof=cn=gitea,ou=groups,{{ gitea_ldap_base_path }})(|(uid=%[1]s)(mail=%[1]s)))" \
--username-attribute "uid" \
--email-attribute "mail" \
--firstname-attribute "givenName" \
--surname-attribute "sn" \
--avatar-attribute "jpegPhoto" \
--synchronize-users
when: '"Command error: login source already exists [name: lldap]" in setup.stderr' # führe nur aus wenn erster Task fehlgeschlagen ist
changed_when: false # keine idee wie ich changed feststellen kann
...

View File

@ -1,7 +0,0 @@
---
- name: Include LDAP tasks
ansible.builtin.include_tasks: ldap.yml
- name: Include User tasks
ansible.builtin.include_tasks: admin.yml
...

View File

@ -1,21 +0,0 @@
---
lldap_package_url: "https://download.opensuse.org/repositories/home:/Masgalor:/LLDAP/xUbuntu_22.04/amd64/lldap_0.5.0-1+3.1_amd64.deb"
lldap_logging_verbose: "false"
lldap_http_port: "17170"
lldap_http_host: "0.0.0.0"
lldap_ldap_host: "0.0.0.0"
lldap_public_url: http://localhost
lldap_jwt_secret: supersecret
lldap_ldap_base_dn: "dc=example,dc=com"
lldap_admin_username: ladmin # only used on setup
lldap_admin_password: supersecret # also bind-secret; only used on setup
lldap_admin_mailaddress: lldap-admin@mgrote.net # only used on setup
lldap_database_url: "postgres://postgres-user:password@postgres-server/my-database"
lldap_key_seed: supersecretseed
lldap_smtp_from: "LLDAP Admin <info@mgrote.net>"
lldap_smtp_reply_to: "Do not reply <info@mgrote.net>"
lldap_smtp_server: "mail.domain.net"
lldap_smtp_port: "25"
lldap_smtp_smtp_encryption: "NONE"
lldap_smtp_user: "info@mgrote.net"
lldap_smtp_enable_password_reset: "true"

Some files were not shown because too many files have changed in this diff.