Compare commits


71 Commits
master ... w

Author SHA1 Message Date
Michael Grote 65dc8f43a1 d9 2024-01-25 21:00:05 +01:00
Michael Grote fe223d75e6 d8 2024-01-25 20:59:25 +01:00
Michael Grote 8bbb9a6a9c g 2024-01-25 20:57:36 +01:00
Michael Grote 4086efea9e d7 2024-01-25 20:55:45 +01:00
Michael Grote cb49bb51c8 d6 2024-01-25 20:46:41 +01:00
Michael Grote dabf332abd d5 2024-01-25 20:43:59 +01:00
Michael Grote 574f9c7338 clone d4 2024-01-25 20:43:16 +01:00
Michael Grote 2283b3efe7 d3 2024-01-25 20:41:06 +01:00
Michael Grote 44f006ee7c d2 2024-01-25 20:39:26 +01:00
Michael Grote cf6d9d53b1 debug 2024-01-25 20:37:28 +01:00
Michael Grote 9a4b2f8a0f f 2024-01-25 20:36:13 +01:00
Michael Grote add939d434 d 2024-01-25 20:34:35 +01:00
Michael Grote fd311282f0 test ci 2024-01-25 20:26:34 +01:00
Michael Grote d049b8db4c d 2024-01-25 20:16:29 +01:00
Michael Grote 956d0bc6ed g 2024-01-25 20:14:08 +01:00
Michael Grote 2a2c20478e d 2024-01-25 20:12:48 +01:00
Michael Grote 41e7e7371c d 2024-01-25 20:08:53 +01:00
Michael Grote 65a50a3033 d 2024-01-25 20:05:47 +01:00
Michael Grote 07894c2300 d 2024-01-25 19:56:15 +01:00
Michael Grote 2ebaed8551 g 2024-01-25 19:55:22 +01:00
Michael Grote 59d0e13f05 [actions]
ENABLED=true
2024-01-25 19:48:11 +01:00
Michael Grote 2e183480fe g 2024-01-25 19:47:31 +01:00
Michael Grote 0ad6b6ccf4 s 2024-01-25 19:44:50 +01:00
Michael Grote ea053bf386 inventory 2024-01-25 19:41:15 +01:00
Michael Grote bb25b39a05 f 2024-01-25 19:39:41 +01:00
Michael Grote d0de5a5a4b f 2024-01-25 19:39:41 +01:00
Michael Grote 78d47c0319 d 2024-01-25 19:39:41 +01:00
Michael Grote d173d12982 d 2024-01-25 19:39:41 +01:00
Michael Grote 86e53fe7a2 gg 2024-01-25 19:39:41 +01:00
Michael Grote 3a8899e9c0 f 2024-01-25 19:39:41 +01:00
Michael Grote 82758c84e8 f 2024-01-25 19:39:41 +01:00
Michael Grote 910f035529 f 2024-01-25 19:39:41 +01:00
Michael Grote 63d2abc3c7 d 2024-01-25 19:39:41 +01:00
Michael Grote d92af3101f f 2024-01-25 19:39:40 +01:00
Michael Grote 9f69e9e0bd deug 2024-01-25 19:39:40 +01:00
Michael Grote c18716a9eb f 2024-01-25 19:39:40 +01:00
Michael Grote 4ba62a4550 f 2024-01-25 19:39:40 +01:00
Michael Grote 49fddb443d g 2024-01-25 19:39:40 +01:00
Michael Grote ffbe5713b8 t 2024-01-25 19:39:40 +01:00
Michael Grote a651bfbdfc h 2024-01-25 19:39:40 +01:00
Michael Grote 5b541db6d3 f 2024-01-25 19:39:40 +01:00
Michael Grote 32103dc55c recipients_only 2024-01-25 19:39:40 +01:00
Michael Grote 0d9fc50863 g 2024-01-25 19:39:39 +01:00
Michael Grote 8f27c81b6c g 2024-01-25 19:39:39 +01:00
Michael Grote 0df26403ca runs_on: [ success, failure ] 2024-01-25 19:39:39 +01:00
Michael Grote 9ca53e02f6 g 2024-01-25 19:39:39 +01:00
Michael Grote 8e87c4cfee f 2024-01-25 19:39:39 +01:00
Michael Grote 8601b7a6b9 f 2024-01-25 19:39:39 +01:00
Michael Grote fd2229d998 s 2024-01-25 19:39:39 +01:00
Michael Grote d803a2982f g 2024-01-25 19:39:39 +01:00
Michael Grote 51b69a1500 f 2024-01-25 19:39:38 +01:00
Michael Grote 292f8f4edb g 2024-01-25 19:39:38 +01:00
Michael Grote 6187a3b3a7 e 2024-01-25 19:39:38 +01:00
Michael Grote 96d8958bf1 m 2024-01-25 19:39:38 +01:00
Michael Grote 138ce9ac17 f 2024-01-25 19:39:38 +01:00
Michael Grote 2f373d09fb body 2024-01-25 19:39:38 +01:00
Michael Grote a00e9fa892 g 2024-01-25 19:39:38 +01:00
Michael Grote 95b7e24488 g 2024-01-25 19:39:38 +01:00
Michael Grote 97107c62d3 g 2024-01-25 19:39:38 +01:00
Michael Grote 92930e20cb f 2024-01-25 19:39:38 +01:00
Michael Grote 1efc46ce05 t 2024-01-25 19:39:38 +01:00
Michael Grote 124713bb39 g 2024-01-25 19:39:37 +01:00
Michael Grote 96abd42788 g 2024-01-25 19:39:37 +01:00
Michael Grote 060449c82a t 2024-01-25 19:39:37 +01:00
Michael Grote 2adc529cd3 t 2024-01-25 19:39:37 +01:00
Michael Grote 3e6213458b t 2024-01-25 19:39:37 +01:00
Michael Grote bb2964e13a vars 2024-01-25 19:39:37 +01:00
Michael Grote 57b3ebe75e remove exlude paths 2024-01-25 19:39:37 +01:00
Michael Grote 5df65653d2 add mail 2024-01-25 19:39:37 +01:00
Michael Grote 0bc5ebea1a mail 2024-01-25 19:39:36 +01:00
Michael Grote e80c3a2f8d ci 2024-01-25 19:39:36 +01:00
112 changed files with 757 additions and 1548 deletions

View File

@ -6,20 +6,3 @@ skip_list:
- name[play]
- var-naming[no-role-prefix]
quiet: true
exclude_paths:
- .cache/ # implicit unless exclude_paths is defined in config
- collections/
- .gitlab-ci.yml
- friedhof/
- playbooks/on-off
- roles/ansible-role-pip
- roles/ansible-role-bootstrap
- roles/ansible_role_ctop
- roles/ansible-role-docker
- roles/ansible-role-helm
- roles/ansible-role-nfs
- roles/ansible-role-unattended-upgrades
- roles/ansible-manage-lvm
- roles/ansible-ufw
- roles/ansible_role_gitea
- roles/ansible-role-postgresql

.gitea/workflows/lint.yml Normal file (31 lines)
View File

@ -0,0 +1,31 @@
---
name: lint
on:
push
jobs:
clone:
steps:
- name: Check out repository
uses: actions/checkout@v3
gitleaks:
container:
image: zricethezav/gitleaks:latest
steps:
- run: ls -lah
- name: gitleaks
run: gitleaks detect --no-git --verbose --source $GITHUB_WORKSPACE
ansible-lint:
container:
image: quay.io/ansible/creator-ee
env:
VAULT_PASS: ${{ secrets.vault_pass }} # repo secret
steps:
- run: ansible-lint --version
- run: echo $VAULT_PASS > ./vault-pass.yml # name of the secret in uppercase
- run: ls -lah
- run: ansible-galaxy install -r ./requirements.yml
- run: ansible-lint --force-color --format pep8
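
The workflow above (like the removed Woodpecker config further down) installs roles with `ansible-galaxy install -r`. A hypothetical sketch of matching `requirements.yml` entries, with role names taken from the `roles/` paths in the `.gitignore` diff below; the source URLs are assumptions:

```yaml
# hypothetical requirements.yml entries; the names mirror the ignored
# roles/ directories, the src URLs are assumptions
roles:
  - name: geerlingguy-ansible-role-pip
    src: https://github.com/geerlingguy/ansible-role-pip
  - name: oefenweb-ansible-ufw
    src: https://github.com/Oefenweb/ansible-ufw
```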

.gitignore vendored (24 changed lines)
View File

@ -2,19 +2,17 @@
vault-pass.yml
id_ed25519
id_ed25519.pub
roles/ansible-role-pip
roles/ansible-role-k3s
roles/ansible-role-bootstrap
roles/ansible_role_ctop
roles/ansible-role-docker
roles/ansible-role-helm
roles/ansible-role-nfs
roles/ansible_role_gitea
roles/ansible-role-unattended-upgrades
roles/ansible-manage-lvm
roles/ansible-ufw
roles/geerlingguy-ansible-role-pip
roles/pyratlabs-ansible-role-k3s
roles/robertdebock-ansible-role-bootstrap
roles/gantsign-ansible-role-ctop
roles/geerlingguy-ansible-role-docker
roles/geerlingguy-ansible-role-helm
roles/geerlingguy-ansible-role-nfs
roles/hifis-net-ansible-role-unattended-upgrades
roles/mrlesmithjr-ansible-manage-lvm
roles/oefenweb-ansible-ufw
roles/pandemonium1986-ansible-role-k9s
roles/ansible_role_gitea
roles/pyratlabs-ansible-role-gitea
collections/
plugins/lookup/__pycache__/
roles/ansible-role-postgresql

View File

@ -1,18 +0,0 @@
---
depends_on:
- gitleaks
steps:
ansible-lint:
image: quay.io/ansible/creator-ee:v24.2.0
commands:
- ansible-lint --version
- echo $${VAULTPASS} > ./vault-pass.yml # name of the secret in uppercase
- ansible-galaxy install -r requirements.yaml
- ansible-lint --force-color --format pep8
# https://woodpecker-ci.org/docs/usage/secrets#use-secrets-in-commands
secrets: [vaultpass]
when:
- event: [push, pull_request, cron]
evaluate: 'CI_COMMIT_AUTHOR_EMAIL != "renovate@mgrote.net"'
...

View File

@ -1,10 +0,0 @@
---
steps:
gitleaks:
image: zricethezav/gitleaks:v8.18.2
commands:
- gitleaks detect --no-git --verbose --source $CI_WORKSPACE
when:
- event: [push, pull_request, cron]
evaluate: 'CI_COMMIT_AUTHOR_EMAIL != "renovate@mgrote.net"'
...

View File

@ -0,0 +1,18 @@
version: '3'
services:
runner:
container_name: "gitea-act-runner"
image: gitea/act_runner:latest
restart: always
volumes:
- data:/data
- /var/run/docker.sock:/var/run/docker.sock
environment:
GITEA_INSTANCE_URL: "https://git.mgrote.net"
GITEA_RUNNER_REGISTRATION_TOKEN: {{ lookup('keepass', 'gitea_runner_token', 'password') }} # must be generated anew for each runner, is only needed once
labels:
com.centurylinklabs.watchtower.enable: true
volumes:
data:
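
These compose files are not plain docker-compose.yml files: the `{{ lookup('keepass', ...) }}` expressions are Jinja2 and only resolve when Ansible renders the template. A minimal sketch of how such a file is presumably deployed; the task layout and target path are assumptions, `compose_dest_basedir` is borrowed from the httpd file below:

```yaml
# hypothetical deployment task; the keepass lookup resolves secrets at
# render time, so no plaintext tokens are stored in the repository
- name: Deploy act_runner compose file
  ansible.builtin.template:
    src: docker-compose.yml.j2
    dest: "{{ compose_dest_basedir }}/gitea-act-runner/docker-compose.yml"
    mode: "0600"
```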

View File

@ -2,22 +2,19 @@ version: '3'
services:
httpd-registry:
container_name: "httpd-registry"
image: "registry.mgrote.net/httpd:latest"
image: httpd:bullseye
restart: always
volumes:
- uploads:/usr/local/apache2/htdocs/
- "{{ compose_dest_basedir }}/httpd/httpd.conf:/usr/local/apache2/conf/httpd.conf:ro"
ports:
- 3344:80
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
labels:
com.centurylinklabs.watchtower.enable: true
python-api-server:
container_name: httpd-api
image: "registry.mgrote.net/python-api-server:latest"
image: registry.mgrote.net/python-api-server:latest
restart: always
ports:
- "5040:5000"
@ -28,8 +25,10 @@ services:
# FLASK_APP: app # for debugging
MAX_CONTENT_LENGTH: 500
UPLOAD_DIRECTORY: /uploads
AUTH_TOKEN: "{{ lookup('keepass', 'httpd-api-server-token', 'password') }}"
AUTH_TOKEN: {{ lookup('keepass', 'httpd-api-server-token', 'password') }}
ENABLE_WEBSERVER: false
labels:
com.centurylinklabs.watchtower.enable: true
volumes:
uploads:

View File

@ -1,33 +1,25 @@
version: '3.3'
services:
postfix:
image: "registry.mgrote.net/postfix:latest"
container_name: mail-relay
restart: always
ports:
- 1025:25
environment:
SMTP_SERVER: smtp.strato.de
SMTP_USERNAME: info@mgrote.net
SMTP_PASSWORD: "{{ lookup('keepass', 'strato_smtp_password', 'password') }}"
SERVER_HOSTNAME: mgrote.net
# DEBUG: "yes" # as string not boolean
ALWAYS_ADD_MISSING_HEADERS: "no" # as string not boolean
# LOG_SUBJECT: "yes" # as string not boolean
INET_PROTOCOL: ipv4
SMTP_GENERIC_MAP: |
/nobody@lldap/ lldap@mgrote.net
/mg@pbs.localdomain/ pbs@mgrote.net
/root@pbs.localdomain/ pbs@mgrote.net
# rewrite FROM "nobody@lldap" to "lldap@mgrote.net"
# /.*/ would rewrite all sender addresses
networks:
- mail-relay
healthcheck:
test: ["CMD", "sh", "-c", "echo 'EHLO localhost' | nc -w 1 127.0.0.1 25 | grep -q '220 '"]
interval: 30s
timeout: 10s
retries: 3
postfix:
image: registry.mgrote.net/postfix:master
container_name: mail-relay
restart: always
labels:
com.centurylinklabs.watchtower.enable: true
ports:
- 1025:25
environment:
SMTP_SERVER: smtp.strato.de
SMTP_USERNAME: info@mgrote.net
SMTP_PASSWORD: {{ lookup('keepass', 'strato_smtp_password', 'password') }}
SERVER_HOSTNAME: mgrote.net
# DEBUG: "yes" # literal
ALWAYS_ADD_MISSING_HEADERS: "no" # literal
# LOG_SUBJECT: "yes" # literal
INET_PROTOCOL: ipv4
SMTP_GENERIC_MAP: "/.*/ info@mgrote.net"
networks:
- mail-relay
######## Networks ########
networks:

View File

@ -3,16 +3,16 @@ services:
######## Miniflux ########
miniflux:
container_name: "mf-frontend"
image: "ghcr.io/miniflux/miniflux:2.1.3"
image: miniflux/miniflux:latest
restart: always
depends_on:
- mf-db16
- db
environment:
DATABASE_URL: "postgres://miniflux:{{ lookup('keepass', 'miniflux_postgres_password', 'password') }}@mf-db16/miniflux?sslmode=disable"
DATABASE_URL: postgres://miniflux:{{ lookup('keepass', 'miniflux_postgres_password', 'password') }}@mf-db/miniflux?sslmode=disable
RUN_MIGRATIONS: 1
# CREATE_ADMIN: 1
# ADMIN_USERNAME: adminmf
# ADMIN_PASSWORD: "{{ lookup('keepass', 'miniflux_admin_password', 'password') }}"
# ADMIN_PASSWORD: {{ lookup('keepass', 'miniflux_admin_password', 'password') }}
WORKER_POOL_SIZE: 10
POLLING_FREQUENCY: 10
CLEANUP_ARCHIVE_UNREAD_DAYS: -1
@ -21,8 +21,6 @@ services:
networks:
- intern
- traefik
healthcheck:
test: ["CMD", "/usr/bin/miniflux", "-healthcheck", "auto"]
labels:
traefik.http.routers.miniflux.rule: Host(`miniflux.mgrote.net`)
traefik.enable: true
@ -31,24 +29,25 @@ services:
traefik.http.routers.miniflux.entrypoints: entry_https
traefik.http.services.miniflux.loadbalancer.server.port: 8080
######## Postgres ########
mf-db16:
container_name: "mf-db16"
image: "postgres:16.3"
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: mf-db
######## PostgreSQL ########
db:
container_name: "mf-db"
image: postgres:13
restart: always
environment:
POSTGRES_USER: miniflux
POSTGRES_PASSWORD: "{{ lookup('keepass', 'miniflux_postgres_password', 'password') }}"
POSTGRES_PASSWORD: {{ lookup('keepass', 'miniflux_postgres_password', 'password') }}
TZ: Europe/Berlin
POSTGRES_HOST_AUTH_METHOD: "md5" # workaround for the migration from 13 -> 16; https://eelkevdbos.medium.com/upgrade-postgresql-with-docker-compose-99d995e464 ;
volumes:
- db16:/var/lib/postgresql/data
- db:/var/lib/postgresql/data
networks:
- intern
healthcheck:
test: ["CMD", "pg_isready", "-U", "miniflux"]
interval: 10s
start_period: 30s
labels:
com.centurylinklabs.watchtower.enable: true
######## Miniflux-Filter ########
mf-filter:
@ -58,19 +57,22 @@ services:
restart: always
environment:
TZ: Europe/Berlin
MF_AUTH_TOKEN: "{{ lookup('keepass', 'miniflux_auth_token', 'password') }}"
MF_AUTH_TOKEN: {{ lookup('keepass', 'miniflux_auth_token', 'password') }}
MF_API_URL: https://miniflux.mgrote.net/v1
MF_SLEEP: 600
#MF_DEBUG: 1
image: "registry.mgrote.net/miniflux-filter:latest"
image: registry.mgrote.net/miniflux-filter:latest
volumes:
- ./filter.txt:/data/filter.txt
networks:
- intern
labels:
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: mf-frontend
######## Volumes ########
volumes:
db16:
db:
######## Networks ########
networks:
traefik:

View File

@ -1,8 +1,6 @@
9to5linux.com::9to5Linux Weekly Roundup:
apnic.net::Podcast
apnic.net::Event Wrap
astralcodexten.substack.com::Open Thread
astralcodexten.substack.com::Book Review Contest
augengeradeaus.net::Sicherheitshalber der Podcast
axios.com::Axios on HBO
axios.com::football
@ -23,8 +21,6 @@ computerbase.de::Twitter
computerbase.de::wettbewerb
computerbase.de::WM
computerbase.de::Wochenrück- und Ausblick:
computerbase.de::Xbox Game Pass
computerbase.de::GeForce
facebook.com::Bridge returned error
golem.de::Anzeige
golem.de::Aus dem Verlag:
@ -34,7 +30,6 @@ golem.de::Fussball
golem.de::Fußball
golem.de::(g+)
golem.de::Golem Karrierewelt
www.thedrive.com::Bunker Talk:
golem.de::in aller Kürze
golem.de::In eigener Sache
golem.de::kurznews
@ -108,8 +103,6 @@ heise.de::Zugriff auf alle Inhalte von heise+
instagram.com::Bridge returned error
ipspace.net::Built.fm
ipspace.net::Podcast
mdr.de::Schwimm-WM
mdr.de::DSV
mdr.de::Basketball
mdr.de::Volleyball
mdr.de::DFB
@ -173,7 +166,6 @@ reddit.com::UEFA
stackoverflow.blog::Podcast
stackoverflow.blog::The Overflow
tagesschau.de::11KM
tagesschau.de::11KM-Podcast
tagesschau.de::Achtelfinale
tagesschau.de::Alpine-Super-Kombination:
tagesschau.de::American Football:
@ -282,8 +274,3 @@ theguardian.com::Guardiola
theguardian.com::Manchester United
theycantalk.com::Tinyview
toonhole.com::Bernai
www.army-technology.com::who are the leaders
www.army-technology.com::files patent
www.army-technology.com::sees highest patent filings
www.army-technology.com::theme innovation strategy
www.army-technology.com::gets grant

View File

@ -1,43 +0,0 @@
version: '3'
services:
munin:
container_name: "munin-master"
image: registry.mgrote.net/munin-server:latest
restart: always
environment:
MAILCONTACT: michael.grote@posteo.de
MAILSERVER: mail-relay
MAILPORT: 25
MAILFROM: munin@mgrote.net
MAILUSER: munin@mgrote.net
MAILNAME: Munin
MAILDOMAIN: mgrote.net
TZ: Europe/Berlin
CRONDELAY: 5
NODES: |
fileserver3.mgrote.net:fileserver3.mgrote.net
ansible2.mgrote.net:ansible2.mgrote.net
pve5.mgrote.net:pve5.mgrote.net
forgejo.mgrote.net:forgejo.mgrote.net
docker10.mgrote.net:docker10.mgrote.net
pbs.mgrote.net:pbs.mgrote.net
blocky.mgrote.net:blocky.mgrote.net
ldap.mgrote.net:ldap.mgrote.net
# e.g.
# computer-test.mgrote.net.test:192.68.2.4
# computer.mgrote.net:computer.mgrote.net
volumes:
- db:/var/lib/munin
- logs:/var/log/munin
- cache:/var/cache/munin
ports:
- 1234:80
volumes:
db:
logs:
cache:
networks:
mail-relay:
external: true

View File

@ -3,7 +3,7 @@ services:
######## navidrome-mg ########
navidrome-mg:
container_name: "navidrome-mg"
image: "deluan/navidrome:0.52.5"
image: deluan/navidrome:0.51.0
restart: always
environment:
ND_LOGLEVEL: info
@ -35,6 +35,8 @@ services:
traefik.http.routers.navidrome-mg.tls.certresolver: resolver_letsencrypt
traefik.http.routers.navidrome-mg.entrypoints: entry_https
traefik.http.services.navidrome-mg.loadbalancer.server.port: 4533
com.centurylinklabs.watchtower.enable: true
ports:
- "4533:4533"

View File

@ -2,7 +2,7 @@ version: '3.3'
services:
######## Database ########
nextcloud-db:
image: "mariadb:11.3.2"
image: mariadb:10
container_name: nextcloud-db
command: --transaction-isolation=READ-COMMITTED --log-bin=ROW --innodb_read_only_compressed=OFF
restart: unless-stopped
@ -11,24 +11,15 @@ services:
- /etc/timezone:/etc/timezone:ro
- db:/var/lib/mysql
environment:
MYSQL_ROOT_PASSWORD: "{{ lookup('keepass', 'nextcloud_mysql_root_password', 'password') }}"
MYSQL_PASSWORD: "{{ lookup('keepass', 'nextcloud_mysql_password', 'password') }}"
MYSQL_ROOT_PASSWORD: {{ lookup('keepass', 'nextcloud_mysql_root_password', 'password') }}
MYSQL_PASSWORD: {{ lookup('keepass', 'nextcloud_mysql_password', 'password') }}
MYSQL_DATABASE: nextcloud
MYSQL_USER: nextcloud
MYSQL_INITDB_SKIP_TZINFO: 1
networks:
- intern
healthcheck:
interval: 30s
retries: 3
test:
[
"CMD",
"healthcheck.sh",
"--su-mysql",
"--connect"
]
timeout: 30s
labels:
com.centurylinklabs.watchtower.enable: true
# Error
## [ERROR] Incorrect definition of table mysql.column_stats: expected column 'histogram' at position 10 to have type longblob, found type varbinary(255).
@ -39,47 +30,47 @@ services:
######## Redis ########
nextcloud-redis:
image: "redis:7.2.4"
image: redis:7-alpine
container_name: nextcloud-redis
hostname: nextcloud-redis
networks:
- intern
restart: unless-stopped
command: "redis-server --requirepass {{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}"
healthcheck:
test: ["CMD", "redis-cli", "--pass", "{{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}", "--no-auth-warning", "ping"]
interval: 5s
timeout: 2s
retries: 3
command: redis-server --requirepass {{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}
labels:
com.centurylinklabs.watchtower.enable: true
######## cron ########
nextcloud-cron:
container_name: nextcloud-cron
image: "registry.mgrote.net/nextcloud-cronjob:latest"
image: registry.mgrote.net/nextcloud-cronjob:master
restart: unless-stopped
network_mode: none
depends_on:
- nextcloud-app
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- /etc/localtime:/etc/localtime:ro
environment:
NEXTCLOUD_CONTAINER_NAME: nextcloud-app
NEXTCLOUD_CRON_MINUTE_INTERVAL: 1
labels:
com.centurylinklabs.watchtower.enable: true
######## Nextcloud ########
nextcloud-app:
image: "nextcloud:29.0.0"
image: nextcloud:27
container_name: nextcloud-app
restart: unless-stopped
depends_on:
- nextcloud-db
- nextcloud-redis
- nextcloud-cron
environment:
REDIS_HOST: nextcloud-redis
REDIS_HOST_PASSWORD: "{{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}"
REDIS_HOST_PASSWORD: {{ lookup('keepass', 'nextcloud_redis_host_password', 'password') }}
MYSQL_DATABASE: nextcloud
MYSQL_USER: nextcloud
MYSQL_PASSWORD: "{{ lookup('keepass', 'nextcloud_mysql_password', 'password') }}"
MYSQL_PASSWORD: {{ lookup('keepass', 'nextcloud_mysql_password', 'password') }}
MYSQL_HOST: nextcloud-db
NEXTCLOUD_TRUSTED_DOMAINS: "nextcloud.mgrote.net"
SMTP_HOST: mail-relay
@ -87,15 +78,12 @@ services:
SMTP_PORT: 25
#SMTP_AUTHTYPE: LOGIN
SMTP_NAME: info@mgrote.net
#SMTP_PASSWORD: "{{ lookup('keepass', 'strato_smtp_password', 'password') }}"
#SMTP_PASSWORD: {{ lookup('keepass', 'strato_smtp_password', 'password') }}
MAIL_FROM_ADDRESS: info@mgrote.net
PHP_MEMORY_LIMIT: 1024M
PHP_UPLOAD_LIMIT: 10G
APACHE_DISABLE_REWRITE_IP: 1
TRUSTED_PROXIES: "192.168.48.0/24" # subnet that traefik is in
NEXTCLOUD_UPLOAD_LIMIT: 10G
NEXTCLOUD_MAX_TIME: 3600
APACHE_BODY_LIMIT: 0 # unlimited, https://github.com/nextcloud/docker/issues/1796
volumes:
- app:/var/www/html
- data:/var/www/html/data
@ -103,12 +91,10 @@ services:
- intern
- traefik
- mail-relay
healthcheck:
test: ["CMD", "curl", "-f", "--insecure", "http://localhost:80"]
interval: 30s
timeout: 10s
retries: 3
labels:
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: nextcloud-redis,nextcloud-db
traefik.http.routers.nextcloud.rule: Host(`nextcloud.mgrote.net`)
traefik.enable: true
traefik.http.routers.nextcloud.tls: true

View File

@ -0,0 +1,88 @@
version: '3.5'
# ------------------------------------------------------------------
# DOCKER COMPOSE COMMAND REFERENCE
# ------------------------------------------------------------------
# Start | docker-compose up -d
# Stop | docker-compose stop
# Update | docker-compose pull
# Logs | docker-compose logs --tail=25 -f
# Terminal | docker-compose exec photoprism bash
# Help | docker-compose exec photoprism photoprism help
# Config | docker-compose exec photoprism photoprism config
# Reset | docker-compose exec photoprism photoprism reset
# Backup | docker-compose exec photoprism photoprism backup -a -i
# Restore | docker-compose exec photoprism photoprism restore -a -i
# Index | docker-compose exec photoprism photoprism index
# Reindex | docker-compose exec photoprism photoprism index -a
# Import | docker-compose exec photoprism photoprism import
# -------------------------------------------------------------------
services:
photoprism:
# Use photoprism/photoprism:preview instead for testing preview builds:
image: photoprism/photoprism:latest
container_name: photoprism-frontend
restart: always
security_opt:
- seccomp:unconfined
- apparmor:unconfined
ports:
- 2342:2342
environment:
PHOTOPRISM_ADMIN_PASSWORD: "{{ lookup('keepass', 'photoprism_admin_password', 'password') }}"
PHOTOPRISM_HTTP_PORT: 2342
PHOTOPRISM_HTTP_COMPRESSION: "gzip" # none or gzip
PHOTOPRISM_DEBUG: "false"
PHOTOPRISM_PUBLIC: "false" # No authentication required (disables password protection)
PHOTOPRISM_READONLY: "true" # Don't modify originals directory (reduced functionality)
PHOTOPRISM_EXPERIMENTAL: "false"
PHOTOPRISM_DISABLE_WEBDAV: "true"
PHOTOPRISM_DISABLE_SETTINGS: "false"
PHOTOPRISM_DISABLE_TENSORFLOW: "false"
PHOTOPRISM_DARKTABLE_PRESETS: "false"
PHOTOPRISM_DETECT_NSFW: "true"
PHOTOPRISM_UPLOAD_NSFW: "true"
PHOTOPRISM_DATABASE_DRIVER: "mysql"
PHOTOPRISM_DATABASE_SERVER: "mariadb:3306"
PHOTOPRISM_DATABASE_NAME: "photoprism"
PHOTOPRISM_DATABASE_USER: "photoprism"
PHOTOPRISM_DATABASE_PASSWORD: "{{ lookup('keepass', 'photoprism_database_password', 'password') }}"
PHOTOPRISM_SITE_URL: "http://docker10.mgrote.net:2342/"
PHOTOPRISM_SITE_TITLE: "PhotoPrism"
PHOTOPRISM_SITE_CAPTION: "Browse Your Life"
PHOTOPRISM_SITE_DESCRIPTION: ""
PHOTOPRISM_SITE_AUTHOR: "mgrote"
# You may optionally set a user / group id using environment variables if your Docker version or NAS does not
# support this natively (see next example):
UID: 5000
GID: 5000
# UMASK: 0000
# Uncomment and edit the following line to set a specific user / group id (native):
user: "5000:5000"
volumes:
- /mnt/fileserver3_photoprism_bilder_ro:/photoprism/originals/:ro
- "storage:/photoprism/storage"
labels:
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: photoprism-db
mariadb:
image: mariadb:10
container_name: photoprism-db
restart: always
security_opt:
- seccomp:unconfined
- apparmor:unconfined
command: mysqld --transaction-isolation=READ-COMMITTED --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci --max-connections=512 --innodb-rollback-on-timeout=OFF --innodb-lock-wait-timeout=50
volumes: # Don't remove permanent storage for index database files!
- "database:/var/lib/mysql"
environment:
MYSQL_ROOT_PASSWORD: {{ lookup('keepass', 'photoprism_mysql_root_password', 'password') }}
MYSQL_DATABASE: photoprism
MYSQL_USER: photoprism
MYSQL_PASSWORD: {{ lookup('keepass', 'photoprism_database_password', 'password') }}
labels:
com.centurylinklabs.watchtower.enable: true
volumes:
storage:
database:

View File

@ -3,7 +3,7 @@ services:
oci-registry:
restart: always
container_name: oci-registry
image: "registry:2.8.3"
image: registry:2
volumes:
- oci:/var/lib/registry
- ./htpasswd:/auth/htpasswd
@ -11,17 +11,13 @@ services:
- traefik
- intern
depends_on:
- oci-registry-ui
- oci-registry-redis
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:5000/v2/"]
interval: 30s
timeout: 10s
retries: 3
environment:
TZ: Europe/Berlin
REGISTRY_AUTH: none
REGISTRY_REDIS_ADDR: oci-registry-redis:6379
REGISTRY_REDIS_PASSWORD: "{{ lookup('keepass', 'oci-registry-redis-pw', 'password') }}"
REGISTRY_REDIS_PASSWORD: {{ lookup('keepass', 'oci-registry-redis-pw', 'password') }}
REGISTRY_STORAGE_DELETE_ENABLED: true
REGISTRY_CATALOG_MAXENTRIES: 100000 # https://github.com/Joxit/docker-registry-ui/issues/306
# https://joxit.dev/docker-registry-ui/#using-cors
@ -38,10 +34,13 @@ services:
traefik.http.routers.registry.entrypoints: entry_https
traefik.http.services.registry.loadbalancer.server.port: 5000
traefik.http.routers.registry.middlewares: registry-ipallowlist
traefik.http.routers.registry.middlewares: registry-ipwhitelist
traefik.http.middlewares.registry-ipallowlist.ipallowlist.sourcerange: 192.168.2.0/24,10.25.25.0/24,192.168.48.0/24,172.18.0.0/16 # .48. is Docker
traefik.http.middlewares.registry-ipallowlist.ipallowlist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipallowlist/#ipstrategydepth
traefik.http.middlewares.registry-ipwhitelist.ipwhitelist.sourcerange: 192.168.2.0/24,10.25.25.0/24,192.168.48.0/24,172.18.0.0/16 # .48. is Docker
traefik.http.middlewares.registry-ipwhitelist.ipwhitelist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipwhitelist/#ipstrategydepth
com.centurylinklabs.watchtower.depends-on: oci-registry-redis
com.centurylinklabs.watchtower.enable: true
# clean up the registry: docker exec -it oci-registry /bin/registry garbage-collect --delete-untagged=true /etc/docker/registry/config.yml
@ -52,24 +51,21 @@ services:
# docker pull registry.mgrote.net/myfirstimage
oci-registry-redis:
image: "redis:7.2.4"
image: redis:7
container_name: oci-registry-redis
networks:
- intern
restart: always
environment:
REDIS_PASSWORD: "{{ lookup('keepass', 'oci-registry-redis-pw', 'password') }}"
REDIS_PASSWORD: {{ lookup('keepass', 'oci-registry-redis-pw', 'password') }}
MAXMEMORY POLICY: allkeys-lru
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
timeout: 10s
retries: 3
labels:
com.centurylinklabs.watchtower.enable: true
oci-registry-ui:
restart: always
# url: registry.mgrote.net/ui/index.html
image: "joxit/docker-registry-ui:2.5.7"
image: joxit/docker-registry-ui:latest
container_name: oci-registry-ui
environment:
DELETE_IMAGES: true
@ -78,19 +74,12 @@ services:
SHOW_CONTENT_DIGEST: true # https://github.com/Joxit/docker-registry-ui/issues/297
SHOW_CATALOG_NB_TAGS: true
PULL_URL: registry.mgrote.net
depends_on:
- oci-registry
networks:
- traefik
- intern
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://127.0.0.1"]
interval: 30s
timeout: 10s
retries: 3
labels:
traefik.http.routers.registry-ui.rule: Host(`registry.mgrote.net`)&&PathPrefix(`/ui`) # expose it under /ui; this adds the prefix to the path, but the application does not listen there
traefik.http.routers.registry-ui.middlewares: registry-ui-strip-prefix,registry-ui-ipallowlist # so the prefix is stripped again afterwards
traefik.http.routers.registry-ui.middlewares: registry-ui-strip-prefix,registry-ui-ipwhitelist # so the prefix is stripped again afterwards
traefik.http.middlewares.registry-ui-strip-prefix.stripprefix.prefixes: /ui # the middleware is defined here
traefik.enable: true
traefik.http.routers.registry-ui.tls: true
@ -98,8 +87,13 @@ services:
traefik.http.routers.registry-ui.entrypoints: entry_https
traefik.http.services.registry-ui.loadbalancer.server.port: 80
traefik.http.middlewares.registry-ui-ipallowlist.ipallowlist.sourcerange: 192.168.2.0/24,10.25.25.0/24 # .48. is Docker
traefik.http.middlewares.registry-ui-ipallowlist.ipallowlist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipallowlist/#ipstrategydepth
traefik.http.middlewares.registry-ui-ipwhitelist.ipwhitelist.sourcerange: 192.168.2.0/24,10.25.25.0/24 # .48. is Docker
traefik.http.middlewares.registry-ui-ipwhitelist.ipwhitelist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipwhitelist/#ipstrategydepth
com.centurylinklabs.watchtower.depends-on: oci-registry-redis,oci-registry
com.centurylinklabs.watchtower.enable: true
######## Networks ########
networks:
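
The label changes in this file (and again in the woodpecker file further down) swap Traefik's `ipwhitelist` middleware for `ipallowlist`, the name the middleware received in Traefik v3. For reference, a sketch of the same middleware in file-provider form; values are copied from the labels above:

```yaml
# equivalent dynamic-configuration (file provider) form of the
# ipallowlist label above; sketch only
http:
  middlewares:
    registry-ipallowlist:
      ipAllowList:
        sourceRange:
          - 192.168.2.0/24
          - 10.25.25.0/24
          - 192.168.48.0/24 # .48. is Docker
          - 172.18.0.0/16
```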

View File

@ -3,7 +3,7 @@ services:
routeros-config-export:
container_name: routeros-config-export
restart: always
image: "registry.mgrote.net/routeros-config-export:latest"
image: registry.mgrote.net/routeros-config-export:master
volumes:
- ./key_rb5009:/key_rb5009:ro
- ./key_hex:/key_hex:ro
@ -15,7 +15,7 @@ services:
hex.mgrote.net,routeros-config-backup,/key_hex
crs305.mgrote.net,routeros-config-backup,/key_crs305
GIT_REPO_BRANCH: "master"
GIT_REPO_URL: "ssh://gitea@forgejo.mgrote.net:2222/mg/routeros-configs.git"
GIT_REPO_URL: "ssh://gitea@gitea.mgrote.net:2222/mg/routeros-configs.git"
GIT_REPO_DEPLOY_KEY: "/deploy_token"
GIT_USERNAME: oxidized-selfmade
GIT_USER_MAIL: michael.grote@posteo.de

View File

@ -0,0 +1,27 @@
version: '2.3'
services:
statping:
container_name: statping
image: adamboutcher/statping-ng:latest
restart: always
volumes:
- statping_data:/app
environment:
DB_CONN: sqlite
ALLOW_REPORT: false
ADMIN_USER: statadmin
ADMIN_PASSWORD: {{ lookup('keepass', 'statping_admin_password', 'password') }}
SAMPLE_DATA: false
ports:
- 8083:8080
networks:
- mail-relay
labels:
com.centurylinklabs.watchtower.enable: true
volumes:
statping_data:
networks:
mail-relay:
external: true

View File

@ -3,7 +3,7 @@ services:
######## traefik ########
traefik:
container_name: traefik
image: "traefik:v3.0.0"
image: traefik:latest
restart: always
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
@ -19,19 +19,16 @@ services:
- "2222:2222" # SSH
environment:
TZ: Europe/Berlin
healthcheck:
test: ["CMD", "traefik", "healthcheck", "--ping"]
interval: 30s
timeout: 10s
retries: 3
labels:
com.centurylinklabs.watchtower.enable: true
######## nforwardauth ########
nforwardauth:
restart: always
image: "nosduco/nforwardauth:v1.4.0"
image: nosduco/nforwardauth:v1
container_name: traefik-nforwardauth
environment:
TOKEN_SECRET: "{{ lookup('keepass', 'nforwardauth_token_secret', 'password') }}"
TOKEN_SECRET: {{ lookup('keepass', 'nforwardauth_token_secret', 'password') }}
AUTH_HOST: auth.mgrote.net
labels:
traefik.enable: true
@ -43,15 +40,13 @@ services:
traefik.http.routers.nforwardauth.tls: true
traefik.http.routers.nforwardauth.tls.certresolver: resolver_letsencrypt
traefik.http.routers.nforwardauth.entrypoints: entry_https
com.centurylinklabs.watchtower.depends-on: traefik
com.centurylinklabs.watchtower.enable: true
volumes:
- "./passwd:/passwd:ro" # Mount local passwd file at /passwd as read only
networks:
- traefik
healthcheck:
test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://127.0.0.1:3000/login"]
interval: 30s
timeout: 10s
retries: 3
######## Networks ########
networks:

View File

@ -14,4 +14,4 @@ http:
service_gitea:
loadBalancer:
servers:
- url: "http://forgejo.mgrote.net:3000/"
- url: "http://gitea.mgrote.net:3000/"

View File

@ -37,8 +37,6 @@ api:
insecure: true
dashboard: true # reachable on port 8081
ping: {} # for the healthcheck
#experimental:
# plugins:
# ldapAuth:

View File

@ -2,14 +2,14 @@
version: "2.1"
services:
unifi-network-application:
image: "lscr.io/linuxserver/unifi-network-application:8.0.28-ls27"
image: lscr.io/linuxserver/unifi-network-application:latest
container_name: unifi-network-application
environment:
PUID: 1000
PGID: 1000
TZ: Etc/UTC
MONGO_USER: unifiuser
MONGO_PASS: "{{ lookup('keepass', 'unifi-mongodb-pass', 'password') }}"
MONGO_PASS: {{ lookup('keepass', 'unifi-mongodb-pass', 'password') }}
MONGO_HOST: unifi-db
MONGO_PORT: 27017
MONGO_DBNAME: unifidb
@ -28,37 +28,28 @@ services:
- 6789:6789 #optional
- 5514:5514/udp #optional
restart: always
labels:
com.centurylinklabs.watchtower.enable: true
com.centurylinklabs.watchtower.depends-on: unifi-db
networks:
- mail-relay
- unifi-internal
healthcheck:
test: ["CMD", "curl", "-f", "--insecure", "https://localhost:8443"]
interval: 30s
timeout: 10s
retries: 3
depends_on:
- unifi-db
unifi-db:
# start the container WITHOUT the init script
# inside the container:
# 1. mongosh
# inside the container:
# 1. mongo
# 2. db.getSiblingDB("unifidb").createUser({user: "unifiuser", pwd: "GEHEIM", roles: [{role: "dbOwner", db: "unifidb"}, {role: "dbOwner", db: "unifidb_stat"}]});
# https://discourse.linuxserver.io/t/cant-connect-to-mongodb-for-unifi-network-application/8166
image: "docker.io/mongo:7.0.9"
image: docker.io/mongo:4
container_name: unifi-db
volumes:
- db-data:/data/db
restart: always
environment:
MARIADB_AUTO_UPGRADE: "1"
labels:
com.centurylinklabs.watchtower.enable: true
networks:
- unifi-internal
healthcheck:
test: ["CMD", "mongosh", "--eval", "db.stats().ok"]
interval: 30s
timeout: 10s
retries: 3
######## Volumes ########
volumes:

View File

@ -0,0 +1,42 @@
version: "3"
services:
watchtower:
restart: always
container_name: watchtower
image: containrrr/watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
TZ: Europe/Berlin
WATCHTOWER_CLEANUP: true
WATCHTOWER_INCLUDE_RESTARTING: true
WATCHTOWER_INCLUDE_STOPPED: true
WATCHTOWER_REVIVE_STOPPED: false
WATCHTOWER_SCHEDULE: "0 20 3 * * *" # every day at 03:20
WATCHTOWER_LABEL_ENABLE: true
WATCHTOWER_NOTIFICATIONS: email
WATCHTOWER_NOTIFICATION_EMAIL_FROM: info@mgrote.net
WATCHTOWER_NOTIFICATION_EMAIL_TO: info@mgrote.net
WATCHTOWER_NOTIFICATION_EMAIL_SERVER: mail-relay # "container_name" of the relay
# WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PORT: 25 # not needed, left in as a reference
# WATCHTOWER_NOTIFICATION_EMAIL_SERVER_USER: "" # not needed, left in as a reference
# WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PASSWORD: "" # not needed, left in as a reference
WATCHTOWER_NOTIFICATION_EMAIL_DELAY: 2
WATCHTOWER_NO_STARTUP_MESSAGE: true
labels:
com.centurylinklabs.watchtower.enable: true
networks:
- mail-relay # attach the external network to the container
# only monitor this container
# labels:
# com.centurylinklabs.watchtower.monitor-only: true
# this container depends on x
# com.centurylinklabs.watchtower.depends-on: mf-db
# update the container
# com.centurylinklabs.watchtower.enable: true
######## Networks ########
networks:
mail-relay: # so the mail-relay can be reached from the other containers
external: true
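
The commented block above describes watchtower's per-container opt-in model; since `WATCHTOWER_LABEL_ENABLE` is set, only labelled containers are updated. A minimal sketch of a monitored service (service and image names are illustrative):

```yaml
services:
  example-app:
    image: registry.mgrote.net/example:latest
    labels:
      com.centurylinklabs.watchtower.enable: true           # update this container
      com.centurylinklabs.watchtower.depends-on: example-db # restart together with its dependency
```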

View File

@ -2,7 +2,7 @@ version: '3'
services:
wiki-webserver:
container_name: wiki-webserver
image: "registry.mgrote.net/httpd:latest"
image: httpd:2.4
restart: always
networks:
- traefik
@ -13,11 +13,6 @@ services:
# /docker/wiki/site is a local directory on docker10
# this directory is mounted directly in the wiki CI
# and the data is written into it
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
labels:
traefik.http.routers.wiki.rule: Host(`wiki.mgrote.net`)
traefik.enable: true
@ -28,6 +23,8 @@ services:
traefik.http.routers.wiki.middlewares: nforwardauth
com.centurylinklabs.watchtower.enable: true
######## Networks ########
networks:
traefik:

View File

@ -5,7 +5,7 @@ services:
woodpecker-server:
restart: always
container_name: woodpecker-server
image: "woodpeckerci/woodpecker-server:v2.4.1"
image: woodpeckerci/woodpecker-server:v2.2.2
ports:
- 8000:8000
volumes:
@ -16,9 +16,9 @@ services:
WOODPECKER_WEBHOOK_HOST: http://docker10.mgrote.net:8000
WOODPECKER_GITEA: true
WOODPECKER_GITEA_URL: https://git.mgrote.net
WOODPECKER_GITEA_CLIENT: "{{ lookup('keepass', 'woodpecker-oauth2-client-id', 'password') }}"
WOODPECKER_GITEA_SECRET: "{{ lookup('keepass', 'woodpecker-oauth2-client-secret', 'password') }}"
WOODPECKER_AGENT_SECRET: "{{ lookup('keepass', 'woodpecker-agent-secret', 'password') }}"
WOODPECKER_GITEA_CLIENT: {{ lookup('keepass', 'woodpecker-oauth2-client-id', 'password') }}
WOODPECKER_GITEA_SECRET: {{ lookup('keepass', 'woodpecker-oauth2-client-secret', 'password') }}
WOODPECKER_AGENT_SECRET: {{ lookup('keepass', 'woodpecker-agent-secret', 'password') }}
WOODPECKER_ADMIN: mg
WOODPECKER_LOG_LEVEL: info
WOODPECKER_DEBUG_PRETTY: true
@ -26,6 +26,8 @@ services:
- intern
- traefik
labels:
com.centurylinklabs.watchtower.enable: true
traefik.http.routers.woodpecker.rule: Host(`ci.mgrote.net`)
traefik.enable: true
traefik.http.routers.woodpecker.tls: true
@ -33,15 +35,15 @@ services:
traefik.http.routers.woodpecker.entrypoints: entry_https
traefik.http.services.woodpecker.loadbalancer.server.port: 8000
traefik.http.routers.woodpecker.middlewares: woodpecker-ipallowlist
traefik.http.routers.woodpecker.middlewares: woodpecker-ipwhitelist
traefik.http.middlewares.woodpecker-ipallowlist.ipallowlist.sourcerange: "192.168.2.0/24,10.25.25.0/24"
traefik.http.middlewares.woodpecker-ipallowlist.ipallowlist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipallowlist/#ipstrategydepth
traefik.http.middlewares.woodpecker-ipwhitelist.ipwhitelist.sourcerange: "192.168.2.0/24,10.25.25.0/24"
traefik.http.middlewares.woodpecker-ipwhitelist.ipwhitelist.ipstrategy.depth: 0 # https://doc.traefik.io/traefik/middlewares/http/ipwhitelist/#ipstrategydepth
woodpecker-agent:
container_name: woodpecker-agent
image: "woodpeckerci/woodpecker-agent:v2.4.1"
image: woodpeckerci/woodpecker-agent:v2.2.2
command: agent
restart: always
depends_on:
@ -53,12 +55,14 @@ services:
- /var/run/docker.sock:/var/run/docker.sock
environment:
WOODPECKER_SERVER: woodpecker-server:9000
WOODPECKER_AGENT_SECRET: "{{ lookup('keepass', 'woodpecker-agent-secret', 'password') }}"
WOODPECKER_MAX_WORKFLOWS: 20
WOODPECKER_AGENT_SECRET: {{ lookup('keepass', 'woodpecker-agent-secret', 'password') }}
WOODPECKER_MAX_WORKFLOWS: 4
WOODPECKER_DEBUG_PRETTY: true
WOODPECKER_LOG_LEVEL: info
WOODPECKER_HEALTHCHECK: true
WOODPECKER_BACKEND: docker
labels:
com.centurylinklabs.watchtower.enable: true
networks:
- intern
@ -68,8 +72,8 @@ volumes:
agent-config:
# git.mgrote.net -> Settings -> Applications -> woodpecker
# WOODPECKER_GITEA_CLIENT: "{{ lookup('keepass', 'woodpecker-oauth2-client-id', 'password') }}"
# WOODPECKER_GITEA_SECRET: "{{ lookup('keepass', 'woodpecker-oauth2-client-secret', 'password') }}"
# WOODPECKER_GITEA_CLIENT: {{ lookup('keepass', 'woodpecker-oauth2-client-id', 'password') }}
# WOODPECKER_GITEA_SECRET: {{ lookup('keepass', 'woodpecker-oauth2-client-secret', 'password') }}
# Redirect URL: https://ci.mgrote.net/authorize
######## Networks ########

View File

@ -42,7 +42,7 @@ services:
- com.centurylinklabs.watchtower.depends-on=lldap-db
######## DB ########
lldap-db:
image: mariadb:10.6.14
image: mariadb:10
container_name: lldap-db
restart: always
volumes:

View File

@ -2,7 +2,7 @@ version: '3'
services:
wiki-webserver:
container_name: wiki-webserver
image: httpd:2.4@sha256:ba846154ade27292d216cce2d21f1c7e589f3b66a4a643bff0cdd348efd17aa3
image: httpd:2.4
restart: always
networks:
- traefik

View File

@ -22,7 +22,7 @@ munin_plugin_dest_path: /etc/munin/plugins/
munin_plugin_conf_dest_path: /etc/munin/plugin-conf.d/
# munin_node_plugins: #plugins to install
# - name: docker_volumes # name
# src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_ #src
# src: https://git.mgrote.net/mg/mirror-munin-contrib/raw/branch/master/plugins/docker/docker_ #src
# config_file_name: /etc/munin/plugin-conf.d/docker # where to put plugin config
# content of config
# config: |

View File

@ -10,6 +10,6 @@
changed_when: "output_conf.rc != 0"
ignore_errors: true # ignore errors
- name: munin-node-configure --shell - 2 # noqa ignore-errors no-changed-when
- name: munin-node-configure --shell - 2 # noqa ignore-errors
ansible.builtin.command: munin-node-configure --shell --families=contrib,auto | sh -x
ignore_errors: true # ignore errors

View File

@ -0,0 +1,12 @@
## mgrote.munin-node
### Description
Installs munin-node plus plugins.
### Tested on
- [x] Ubuntu (>=18.04)
- [ ] Debian
- [x] Proxmox 6.1
### Variables + defaults
see [defaults](./defaults/main.yml)
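
A minimal playbook sketch for applying the role; the host group name is an assumption, the variables are defaults that appear elsewhere in this diff:

```yaml
# hypothetical playbook; host group name is an assumption
- hosts: munin_nodes
  become: true
  roles:
    - role: mgrote.munin-node
      vars:
        munin_node_bind_host: "0.0.0.0"
        munin_node_allowed_cidrs: [192.168.2.0/24]
```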

View File

@ -13,7 +13,7 @@
state: directory
owner: root
group: root
mode: "0755"
mode: "0644"
loop:
- /etc/munin
- /etc/munin/plugin-conf.d
@ -25,5 +25,5 @@
dest: /etc/munin/munin-node.conf
owner: root
group: root
mode: "0755"
mode: "0644"
notify: restart munin-node

View File

@ -1,7 +1,7 @@
---
- name: remove unwanted plugins
ansible.builtin.file:
path: "{{ munin_plugin_dest_path }}{{ item }}"
path: "{{ munin_plugin_dest_path }}{{ item.name }}"
state: absent
loop: "{{ munin_node_disabled_plugins }}"
notify: restart munin-node
@ -10,7 +10,7 @@
- name: remove additional plugin-config
ansible.builtin.file:
state: absent
dest: "{{ munin_plugin_conf_dest_path }}{{ item }}"
dest: "{{ munin_plugin_conf_dest_path }}{{ item.name }}"
notify: restart munin-node
loop: "{{ munin_node_disabled_plugins }}"
when: munin_node_disabled_plugins is defined
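
The change from `{{ item }}` to `{{ item.name }}` implies the loop items are mappings rather than bare strings; the matching structure, as it appears in the group_vars later in this diff, looks like:

```yaml
munin_node_disabled_plugins:
  - name: meminfo # one mapping per plugin, hence item.name
  - name: ntp
```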

View File

@ -20,6 +20,29 @@ dotfiles_vim_vundle_repo_url: https://git.mgrote.net/mirrors/Vundle.vim.git
### mgrote_netplan
netplan_configure: true
### mgrote_restic
restic_user: root
restic_group: restic
restic_conf_dir: /etc/restic
restic_exclude: |
._*
desktop.ini
.Trash-*
**/**cache***/**
**/**Cache***/**
**/**AppData***/**
# https://github.com/restic/restic/issues/1005
# https://forum.restic.net/t/exclude-syntax-confusion/1531/12
restic_mount_timeout: "10 min"
restic_failure_delay: "30 s"
restic_schedule: "0/6:00" # every 6 hours
restic_folders_to_backup: "/" # --one-file-system is set, so other file systems are not included unless they are listed here explicitly; https://restic.readthedocs.io/en/latest/040_backup.html#excluding-files
restic_repository: "//fileserver3.mgrote.net/restic"
restic_repository_password: "{{ lookup('keepass', 'restic_repository_password', 'password') }}"
restic_mount_user: restic
restic_mount_password: "{{ lookup('keepass', 'fileserver_smb_user_restic', 'password') }}"
restic_fail_mail: "{{ my_mail }}"
### mgrote_user
users:
- username: mg
@ -61,6 +84,11 @@ ntp_chrony_logging: false
postfix_smtp_server: docker10.mgrote.net
postfix_smtp_server_port: 1025
### mgrote_tmux
tmux_conf_destination: "/home/mg/.tmux.conf"
tmux_bashrc_destination: "/home/mg/.bashrc"
tmux_standardsession_name: "default"
### mgrote_fail2ban
f2b_bantime: 300
f2b_findtime: 300
@ -75,11 +103,6 @@ ufw_rules:
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
ufw_default_incoming_policy: deny
ufw_default_outgoing_policy: allow
@ -114,8 +137,6 @@ apt_packages_common:
- pwgen
- keychain
- fwupd
- bc
- jq
apt_packages_physical:
- s-tui
- smartmontools
@ -128,6 +149,8 @@ apt_packages_absent:
- nano
- snapd
- ubuntu-advantage-tools
apt_packages_internet:
- http://docker10.mgrote.net:3344/bash-helper-scripts-mgrote-latest.deb
### mgrote_zfs_sanoid
sanoid_templates:
@ -178,44 +201,7 @@ sanoid_templates:
autoprune: 'yes'
### mgrote_zfs_sanoid
sanoid_deb_url: http://docker10.mgrote.net:3344/sanoid_v2.2.0.deb
### mgrote_munin_node
munin_node_bind_host: "0.0.0.0"
munin_node_bind_port: "4949"
munin_node_allowed_cidrs: [192.168.2.0/24]
munin_node_disabled_plugins:
- name: meminfo # too much load
- name: hddtemp2 # replaced by hddtemp_smartctl
- name: ntp # causes too many DNS PTR requests
- name: hddtempd # replaced by hddtemp_smartctl
- name: squid_cache # proxmox
- name: squid_objectsize # proxmox
- name: squid_requests # proxmox
- name: squid_traffic # proxmox
- name: timesync
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: lvm_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/disk/lvm_
config: |
[lvm_*]
user root
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
sanoid_deb_url: http://docker10.mgrote.net:3344/sanoid_3.0.4.deb
# Ansible variables
### User

View File

@ -9,28 +9,19 @@ ufw_rules:
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
to_port: 53
comment: 'dns'
from_ip: 0.0.0.0/0
### mgrote.apt_manage_packages
apt_packages_extra:
- libnet-dns-perl # for munin: dnsresponse_
### mgrote_user_setup
dotfiles_vim_vundle_repo_url: http://192.168.2.42:3000/mirrors/Vundle.vim.git
dotfiles_vim_vundle_repo_url: http://192.168.2.44:3000/mirrors/Vundle.vim.git
dotfiles:
- user: mg
home: /home/mg
- user: root
home: /root
dotfiles_repo_url: http://192.168.2.42:3000/mg/dotfiles
dotfiles_repo_url: http://192.168.2.44:3000/mg/dotfiles
### mgrote_blocky
blocky_version: v0.23
@ -59,14 +50,14 @@ blocky_custom_lookups: # optional
ip: 192.168.2.43
- name: ci.mgrote.net
ip: 192.168.2.43
- name: git.mgrote.net
ip: 192.168.2.43
- name: miniflux.mgrote.net
ip: 192.168.2.43
- name: nextcloud.mgrote.net
ip: 192.168.2.43
- name: registry.mgrote.net
ip: 192.168.2.43
- name: git.mgrote.net
ip: 192.168.2.43
# internal
- name: ads2700w.mgrote.net
ip: 192.168.2.147
@ -80,46 +71,16 @@ blocky_custom_lookups: # optional
ip: 192.168.3.239
- name: pve5-test.mgrote.net
ip: 192.168.2.17
- name: pve5.mgrote.net # stays configured in the router as well, because if pve is down there is no blocky anymore ;-)
- name: pve5.mgrote.net # stays configured in the router as well, because if pve is down there is no blocky ;-)
ip: 192.168.2.16
- name: rb5009.mgrote.net
ip: 192.168.2.1
- name: fritz.box
ip: 192.168.5.1
- name: ldap.mgrote.net
ip: 192.168.2.47
### mgrote_munin_node
# cannot resolve git.mgrote.net, therefore IPs are used here
munin_node_plugins:
- name: chrony
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: lvm_
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/disk/lvm_
config: |
[lvm_*]
user root
- name: fail2ban
src: http://192.168.2.42:3000/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: dnsresponse_192.168.2.1
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/network/dns/dnsresponse_
- name: dnsresponse_192.168.2.37
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/network/dns/dnsresponse_
- name: dnsresponse_127.0.0.1
src: http://192.168.2.42:3000/mirrors/munin-contrib/raw/branch/master/plugins/network/dns/dnsresponse_
config: |
[dnsresponse_*]
env.site www.heise.de
env.times 20
### mgrote_apt_manage_packages
apt_packages_internet:
- http://192.168.2.43:3344/bash-helper-scripts-mgrote-latest.deb
### mgrote_restic
restic_repository: "//192.168.2.54/restic"

View File

@ -15,14 +15,8 @@ lvm_groups:
manage_lvm: true
pvresize_to_max: true
### geerlingguy.pip
pip_package: python3-pip
pip_install_packages:
- name: docker # for the munin plugin docker_
### mgrote.apt_manage_packages
apt_packages_extra:
- libnet-dns-perl # for munin: dnsresponse_*
### mgrote_restic
restic_folders_to_backup: "/ /var/lib/docker" # --one-file-system ist gesetzt, also werden weitere Dateisysteme nicht eingeschlossen, es sei denn sie werden hier explizit angegeben; https://restic.readthedocs.io/en/latest/040_backup.html#excluding-files
### mgrote_user
users:
@ -77,63 +71,3 @@ repos_override: # mit docker-repos
### mgrote_systemd_resolved
systemd_resolved_nameserver: 192.168.2.37
### mgrote_munin_node
munin_node_allowed_cidrs: [0.0.0.0/0] # because the munin server connects from a different subnet
munin_node_plugins:
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: lvm_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/disk/lvm_
config: |
[lvm_*]
user root
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: docker_containers
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
config: |
[docker_*]
user root
env.DOCKER_HOST unix://run/docker.sock
- name: docker_cpu
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_memory
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_network
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_volumes
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_
- name: docker_volumesize
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/docker/docker_volumesize
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
from_ip: 192.168.0.0/16
comment: 'docker networks'
- rule: allow
from_ip: 172.0.0.0/8
comment: 'docker networks'

View File

@ -9,11 +9,6 @@ ufw_rules:
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
to_port: 445
comment: 'smb'
@ -29,31 +24,3 @@ smb_min_protocol: "SMB2"
smb_client_min_protocol: "SMB2"
smb_client_max_protocol: "SMB3_11"
smb_enable_snapshots_dir: true
smb_enable_snapshots_shadow: true
### mgrote_munin_node
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: samba
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/samba
config: |
[samba]
user root
group root
env.smbstatus /usr/bin/smbstatus
env.ignoreipcshare 1

View File

@ -1,154 +0,0 @@
---
### mrlesmithjr.ansible-manage-lvm
lvm_groups:
- vgname: vg_data
disks:
- /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1
create: true
lvnames:
- lvname: lv_data
size: +100%FREE
create: true
filesystem: xfs
mount: true
mntp: /var/lib/gitea
manage_lvm: true
pvresize_to_max: true
### mgrote_apt_manage_packages
apt_packages_extra:
- fail2ban
### geerlingguy_postgres
postgresql_databases:
- name: "{{ gitea_db_name }}"
postgresql_users:
- name: "{{ gitea_db_user }}"
password: "{{ gitea_db_password }}"
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
to_port: "{{ gitea_http_port }}"
protocol: tcp
comment: 'gitea'
from_ip: 0.0.0.0/0
- rule: allow
to_port: "{{ gitea_ssh_port }}"
protocol: tcp
comment: 'gitea'
from_ip: 0.0.0.0/0
### ansible_role_gitea
# https://git.mgrote.net/ansible-roles-mirrors/ansible_role_gitea
gitea_fork: "forgejo"
# gitea update
gitea_version: "1.21.7-0" # alt zum renovate testen
gitea_version_check: true
gitea_backup_on_upgrade: false
# gitea in the linux world
gitea_group: "gitea"
gitea_user: "gitea"
gitea_home: "/var/lib/gitea"
gitea_user_home: "{{ gitea_home }}"
# the config lives in /etc/gitea/gitea.ini
gitea_configuration_path: "/etc/gitea" # adjust
gitea_app_name: "forgejo"
gitea_fqdn: "git.mgrote.net"
# ssh
gitea_ssh_port: 2222
gitea_start_ssh: true
gitea_shell: "/bin/false"
# Repository
gitea_default_branch: "master"
gitea_default_private: "public"
gitea_repository_root: "{{ gitea_home }}/repos"
# ui
gitea_show_user_email: false
# server
gitea_protocol: "http"
gitea_http_domain: "{{ gitea_fqdn }}"
gitea_http_port: "3000"
gitea_http_listen: "0.0.0.0"
gitea_root_url: "https://git.mgrote.net"
gitea_landing_page: "login"
# database
gitea_db_type: "postgres"
gitea_db_host: "localhost"
gitea_db_name: "gitea"
gitea_db_user: "gitea"
gitea_db_password: "{{ lookup('keepass', 'forgejo_db_password', 'password') }}"
# indexer
gitea_repo_indexer_enabled: true
# security
gitea_disable_webhooks: false
gitea_password_check_pwn: false
gitea_internal_token: "{{ lookup('keepass', 'forgejo_internal_token', 'password') }}"
gitea_secret_key: "{{ lookup('keepass', 'forgejo_secret_key', 'password') }}"
# service
gitea_disable_registration: true
gitea_register_email_confirm: true
gitea_require_signin: false
gitea_default_keep_mail_private: true
gitea_enable_captcha: false
gitea_show_registration_button: false
gitea_enable_notify_mail: true
gitea_default_user_visibility: "public"
gitea_show_milestones_dashboard_page: false
gitea_default_allow_create_organization: true
gitea_default_org_visibility: "public"
gitea_default_user_is_restricted: false
# Mailer
gitea_mailer_enabled: true
gitea_mailer_protocol: "smtp"
gitea_mailer_smtp_addr: "docker10.mgrote.net"
gitea_mailer_smtp_port: 1025
gitea_mailer_from: "gitea@mgrote.net"
gitea_subject_prefix: "git.mgrote.net - "
# log
gitea_log_systemd: true
gitea_log_level: "Info"
# Metrics
gitea_metrics_enabled: false
# Federation
gitea_federation_enabled: false
# Packages
gitea_packages_enabled: false
# actions
gitea_actions_enabled: false
gitea_extra_config: |
; webhook: required for drone, otherwise the webhook is not "sent"
[webhook]
ALLOWED_HOST_LIST = *.mgrote.net
; for imports/migrations from other git systems
[migrations]
ALLOWED_DOMAINS = *
; disabled; see: https://github.com/go-gitea/gitea/issues/25992
[repo-archive]
ENABLED = false
# oauth2
gitea_oauth2_jwt_secret: "{{ lookup('keepass', 'forgejo_oauth2_jwt_secret', 'password') }}"
# Fail2Ban configuration
gitea_fail2ban_enabled: true
gitea_fail2ban_jail_maxretry: "3"
gitea_fail2ban_jail_findtime: "300"
gitea_fail2ban_jail_bantime: "600"
gitea_fail2ban_jail_action: "iptables-allports"
### mgrote_gitea_setup
gitea_ldap_host: "ldap.mgrote.net"
gitea_ldap_base_path: "dc=mgrote,dc=net"
gitea_ldap_bind_user: "forgejo_bind_user"
gitea_ldap_bind_pass: "{{ lookup('keepass', 'lldap_forgejo_bind_user', 'password') }}"
gitea_admin_user: "fadmin"
gitea_admin_user_pass: "{{ lookup('keepass', 'forgejo_admin_user_pass', 'password') }}"

group_vars/gitea.yml Normal file (102 lines)
View File

@ -0,0 +1,102 @@
---
### mrlesmithjr.ansible-manage-lvm
lvm_groups:
- vgname: vg_gitea_data
disks:
- /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1
create: true
lvnames:
- lvname: lv_gitea_data
size: +100%FREE
create: true
filesystem: xfs
mount: true
mntp: /var/lib/gitea
manage_lvm: true
pvresize_to_max: true
### mgrote_restic
restic_folders_to_backup: "/ /var/lib/gitea" # --one-file-system ist gesetzt, also werden weitere Dateisysteme nicht eingeschlossen, es sei denn sie werden hier explizit angegeben; https://restic.readthedocs.io/en/latest/040_backup.html#excluding-files
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: "{{ gitea_http_port }}"
protocol: tcp
comment: 'gitea'
from_ip: 0.0.0.0/0
- rule: allow
to_port: "{{ gitea_ssh_port }}"
protocol: tcp
comment: 'gitea'
from_ip: 0.0.0.0/0
### l3d.gitea
# the config lives in /etc/gitea/gitea.ini
gitea_version: "1.21.0"
gitea_app_name: "Gitea"
gitea_user: "gitea"
gitea_home: "/var/lib/gitea"
gitea_repository_root: "{{ gitea_home }}"
gitea_user_repo_limit: 300
gitea_root_url: https://git.mgrote.net
gitea_offline_mode: true
gitea_lfs_server_enabled: false
gitea_secret_key: "{{ lookup('keepass', 'gitea_secret_key', 'password') }}"
gitea_internal_token: "{{ lookup('keepass', 'gitea_internal_token', 'password') }}"
gitea_disable_git_hooks: false
gitea_show_user_email: false
gitea_disable_gravatar: true
gitea_enable_captcha: true
gitea_only_allow_external_registration: false
gitea_enable_notify_mail: true
gitea_force_private: false
gitea_oauth2_enabled: true
gitea_repo_indexer_enabled: true
gitea_mailer_enabled: true
gitea_mailer_skip_verify: false
gitea_mailer_tls_enabled: true
gitea_mailer_host: smtp.strato.de:465
gitea_mailer_from: info@mgrote.net
gitea_mailer_user: "info@mgrote.net"
gitea_mailer_password: "{{ lookup('keepass', 'strato_smtp_password', 'password') }}"
gitea_mailer_type: smtp
gitea_default_branch: 'master'
gitea_db_type: sqlite3
gitea_db_path: "{{ gitea_home }}/data/gitea.db" # for sqlite3
gitea_ssh_listen: 0.0.0.0
gitea_ssh_domain: gitea.mgrote.net
gitea_ssh_port: 2222
gitea_start_ssh: true
gitea_http_domain: git.mgrote.net
gitea_http_listen: 0.0.0.0
gitea_http_port: 3000
gitea_disable_http_git: false
gitea_protocol: http
gitea_show_registration_button: false
gitea_require_signin: false
gitea_disable_registration: true
gitea_fail2ban_enabled: true
gitea_fail2ban_jail_maxretry: 3
gitea_fail2ban_jail_findtime: 300
gitea_fail2ban_jail_bantime: 600
# required for Drone, otherwise the webhook is not "sent"
gitea_extra_config: |
[webhook]
ALLOWED_HOST_LIST = *.mgrote.net
[actions]
ENABLED=true
gitea_backup_on_upgrade: false
gitea_backup_location: "{{ gitea_home }}/backups/"
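restic_folders_to_backup is handed to restic together with --one-file-system, so every additional mount point has to be listed explicitly. A hedged sketch of the resulting call, assuming the mgrote_restic role shells out to the restic CLI (the task is illustrative; repository and password come from the role's own configuration):

- name: Run restic backup (sketch)
  become: true
  ansible.builtin.command: >
    restic backup --one-file-system {{ restic_folders_to_backup }}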

View File

@ -1,58 +0,0 @@
---
### geerlingguy_postgres
postgresql_databases:
- name: "{{ lldap_db_name }}"
postgresql_users:
- name: "{{ lldap_db_user }}"
password: "{{ lldap_db_pass }}"
### oefenweb.ufw
ufw_rules:
- rule: allow
to_port: 22
protocol: tcp
comment: 'ssh'
from_ip: 0.0.0.0/0
- rule: allow
to_port: 4949
protocol: tcp
comment: 'munin'
from_ip: 192.168.2.0/24
- rule: allow
to_port: "{{ lldap_http_port }}"
protocol: tcp
comment: 'lldap'
from_ip: 192.168.2.0/24
- rule: allow
to_port: 3890
protocol: tcp
comment: 'lldap'
from_ip: 192.168.2.0/24
### mgrote_lldap
lldap_package_url: "https://download.opensuse.org/repositories/home:/Masgalor:/LLDAP/xUbuntu_22.04/amd64/lldap_0.5.0-1+3.1_amd64.deb"
lldap_logging_verbose: "true" # must be a string not a boolean
lldap_http_port: 17170
lldap_http_host: "0.0.0.0"
lldap_ldap_host: "0.0.0.0"
lldap_public_url: http://ldap.mgrote.net:17170
lldap_jwt_secret: "{{ lookup('keepass', 'lldap_jwt_secret', 'password') }}"
lldap_ldap_base_dn: "dc=mgrote,dc=net"
lldap_admin_username: ladmin # only used on setup
lldap_admin_password: "{{ lookup('keepass', 'lldap_ldap_user_pass', 'password') }}" # only used on setup; also bind-secret
lldap_admin_mailaddress: lldap-admin@mgrote.net # only used on setup
lldap_database_url: "postgres://{{ lldap_db_user }}:{{ lldap_db_pass }}@{{ lldap_db_host }}/{{ lldap_db_name }}"
lldap_key_seed: "{{ lookup('keepass', 'lldap_key_seed', 'password') }}"
#lldap_smtp_from: "lldap@mgrote.net" # unused in role
lldap_smtp_reply_to: "Do not reply <info@mgrote.net>"
lldap_smtp_server: "docker10.mgrote.net"
lldap_smtp_port: "1025"
lldap_smtp_smtp_encryption: "NONE"
#lldap_smtp_user: "info@mgrote.net" # unused in role
lldap_smtp_enable_password_reset: "true" # must be a string not a boolean
# "meta vars"; daraus werden die db-url und die postgres-db abgeleitet
lldap_db_name: "lldap"
lldap_db_user: "lldap"
lldap_db_pass: "{{ lookup('keepass', 'lldap_db_pass', 'password') }}"
lldap_db_host: "localhost"
...
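The "meta vars" also feed the geerlingguy_postgres lists at the top of the file, so database name, user and password are defined exactly once. With these values, lldap_database_url renders to postgres://lldap:<lldap_db_pass>@localhost/lldap; a throwaway debug task (hypothetical, and it would print the secret) makes that visible:

- name: Show the rendered database URL (sketch)
  ansible.builtin.debug:
    msg: "{{ lldap_database_url }}"  # -> postgres://lldap:<password>@localhost/lldap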

View File

@ -5,6 +5,9 @@ netplan_configure: false
### mgrote_postfix
postfix_erlaubte_netzwerke: "127.0.0.0/8 192.168.2.0/24 192.168.3.0/24"
### mgrote_restic
restic_folders_to_backup: "/ /etc/proxmox-backup"
### mgrote_user
users:
- username: root
@ -30,55 +33,3 @@ users:
public_ssh_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJcBwOjanQV6sFWaTetqpl20SVe3aRzGjKbsp7hKkDCE mg@irantu
allow_sudo: true
allow_passwordless_sudo: true
### mgrote_munin_node
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: zfs_arcstats
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_arcstats
- name: zfsonlinux_stats_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfsonlinux_stats_
- name: zpool_iostat
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_iostat
- name: zfs_list
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_list
config: |
[zfs_list]
env.ignore_datasets_pattern autodaily
- name: zfs_count
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_pool_dataset_count
- name: zpool_iostat
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_iostat
- name: zpool_capacity
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_capacity
munin_node_disabled_plugins:
- meminfo # too much load
- hddtemp2 # replaced by hddtemp_smartctl
- ntp # causes too many DNS PTR requests
- hddtempd # replaced by hddtemp_smartctl
- squid_cache # proxmox
- squid_objectsize # proxmox
- squid_requests # proxmox
- squid_traffic # proxmox
- lvm_
- timesync
- lxc_guests
munin_node_allowed_cidrs:
- 192.168.3.0/24
- 192.168.2.0/24
...

View File

@ -2,6 +2,9 @@
### mgrote_netplan
netplan_configure: false
### mgrote_restic
restic_folders_to_backup: "/ /etc/pve"
### mgrote_user
users:
- username: root
@ -28,13 +31,6 @@ users:
allow_sudo: true
allow_passwordless_sudo: true
### mgrote_cv4pve_autosnap
cv4pve_api_user: root@pam!cv4pve-autosnap
cv4pve_api_token: "{{ lookup('keepass', 'cv4pve_api_token', 'password') }}"
cv4pve_vmid: all,-115
cv4pve_keep_snapshots: 5
cv4pve_version: "v1.14.8"
### mgrote_apt_manage_packages
apt_packages_extra:
- ifupdown2
@ -43,73 +39,6 @@ apt_packages_extra:
- open-vm-tools
- systemd-boot
### mgrote_munin_node
munin_node_plugins:
- name: chrony
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/chrony/chrony
- name: systemd_status
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_status
- name: systemd_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/systemd/systemd_mem
config: |
[systemd_mem]
env.all_services true
- name: fail2ban
src: https://git.mgrote.net/mg/munin-plugins/raw/branch/master/extern/fail2ban
config: |
[fail2ban]
env.client /usr/bin/fail2ban-client
env.config_dir /etc/fail2ban
user root
- name: zfs_arcstats
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_arcstats
- name: zfsonlinux_stats_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfsonlinux_stats_
- name: zpool_iostat
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_iostat
- name: zfs_list
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_list
config: |
[zfs_list]
env.ignore_datasets_pattern autodaily
- name: zpool_capacity
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zpool_capacity
- name: kvm_mem
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/libvirt/kvm_mem
- name: kvm_net
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/libvirt/kvm_net
- name: kvm_io
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/libvirt/kvm_io
config: |
[kvm_io]
user root
- name: kvm_cpu
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/libvirt/kvm_cpu
- name: proxmox_count
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/proxmox/proxmox_vm_count
config: |
[proxmox_count]
user root
group root
- name: zfs_count
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/zfs/zfs_pool_dataset_count
- name: ksm_
src: https://git.mgrote.net/mirrors/munin-contrib/raw/branch/master/plugins/system/kernel_same_page_merging
munin_node_disabled_plugins:
- meminfo # too much load
- hddtemp2 # replaced by hddtemp_smartctl
- ntp # causes too many DNS PTR requests
- hddtempd # replaced by hddtemp_smartctl
- squid_cache # proxmox
- squid_objectsize # proxmox
- squid_requests # proxmox
- squid_traffic # proxmox
- lvm_
- slab
- timesync
- lxc_guests
# Ansible Variablen
### sudo
sudo: false
...

View File

@ -15,11 +15,11 @@ lvm_groups:
manage_lvm: true
pvresize_to_max: true
### mgrote_mount_cifs # remove
### mgrote_mount_cifs
cifs_mounts:
- name: bilder
type: cifs
state: absent
state: present
dest: /mnt/fileserver3_photoprism_bilder_ro
src: //fileserver3.mgrote.net/bilder
user: photoprism
@ -29,6 +29,9 @@ cifs_mounts:
gid: 5000
extra_opts: ",ro" # the leading comma is required because the option is appended to the end of the option string
### mgrote_restic
restic_folders_to_backup: "/ /var/lib/docker /mnt/oci-registry" # --one-file-system ist gesetzt, also werden weitere Dateisysteme nicht eingeschlossen, es sei denn sie werden hier explizit angegeben
### mgrote_docker-compose-inline
compose_owner: "docker-user"
compose_group: "docker-user"
@ -56,6 +59,8 @@ compose_files:
- name: navidrome
state: present
network: traefik
- name: watchtower
state: present
- name: routeros-config-export
state: present
- name: mail-relay
@ -64,10 +69,16 @@ compose_files:
- name: woodpecker
state: present
network: traefik
- name: photoprism
state: present
- name: wiki
state: present
network: traefik
- name: statping-ng
state: present
- name: gitea-act-runner
state: present
### oefenweb.ufw
ufw_rules:
- rule: allow

View File

@ -23,6 +23,7 @@ ytdl_video_urls:
- https://www.youtube.com/watch?v=TowKvEJcYDw&list=PLlQWnS27jXh9aEp7hl54xrk5CgiVbvMBy # arte - zu Tisch in...
- https://www.youtube.com/playlist?list=PLs4hTtftqnlAkiQNdWn6bbKUr-P1wuSm0 # jimmy kimmel mean tweets
- https://www.youtube.com/tomstantonengineering
- https://www.youtube.com/@liamcarps # England videos, ironic
ytdl_video_output: "/shares_videos/Youtube/%(uploader)s/%(title)s-%(id)s.%(ext)s" # videos are now ALWAYS written as "Uploader/Name.ext"
@ -55,6 +56,8 @@ smb_users:
password: "{{ lookup('keepass', 'fileserver_smb_user_pve', 'password') }}"
- name: 'brother_ads2700w'
password: "{{ lookup('keepass', 'fileserver_smb_user_brother_ads2700w', 'password') }}"
- name: 'photoprism'
password: "{{ lookup('keepass', 'fileserver_smb_user_photoprism', 'password') }}"
smb_shares:
- name: 'videos'
@ -87,7 +90,7 @@ smb_shares:
users_rw: 'kodi win10 michaelgrote'
- name: 'bilder'
path: '/shares_bilder'
users_ro: ''
users_ro: 'photoprism'
users_rw: ' michaelgrote win10'
- name: 'proxmox'
path: '/shares_pve_backup'
@ -96,7 +99,7 @@ smb_shares:
- name: 'restic'
path: '/shares_restic'
users_ro: ''
users_rw: 'restic win10 michaelgrote'
users_rw: ' restic win10 michaelgrote'
- name: 'buecher'
path: '/shares_buecher'
users_ro: ''

View File

@ -153,6 +153,13 @@ sanoid_datasets:
snapshots: true
template: '3tage'
### mgrote_cv4pve-autosnap
cv4pve_api_user: root@pam!cv4pve-autosnap
cv4pve_api_token: "{{ lookup('keepass', 'cv4pve_api_token', 'password') }}"
cv4pve_vmid: all
cv4pve_keep_snapshots: 5
cv4pve_dl_link: "https://github.com/Corsinvest/cv4pve-autosnap/releases/download/v1.10.0/cv4pve-autosnap-linux-x64.zip"
### mgrote_proxmox_bind_mounts
pve_bind_mounts:
- vmid: 100

View File

@ -170,6 +170,13 @@ sanoid_datasets:
snapshots: true
template: 'pve3tage'
### mgrote_cv4pve-autosnap
cv4pve_api_user: root@pam!cv4pve-autosnap
cv4pve_api_token: "{{ lookup('keepass', 'cv4pve_api_token', 'password') }}"
cv4pve_vmid: all,-115
cv4pve_keep_snapshots: 5
cv4pve_dl_link: "https://github.com/Corsinvest/cv4pve-autosnap/releases/download/v1.14.7/cv4pve-autosnap-linux-x64.zip"
### mgrote_proxmox_bind_mounts
pve_bind_mounts:
### fileserver3

View File

@ -6,9 +6,6 @@ all:
blocky:
hosts:
blocky.mgrote.net:
ldap:
hosts:
ldap.mgrote.net:
lxc:
hosts:
fileserver3.mgrote.net:
@ -35,20 +32,19 @@ all:
hosts:
pve5.mgrote.net:
pbs.mgrote.net:
git:
gitea:
hosts:
forgejo.mgrote.net:
gitea.mgrote.net:
production:
hosts:
fileserver3.mgrote.net:
ansible2.mgrote.net:
pve5.mgrote.net:
forgejo.mgrote.net:
gitea.mgrote.net:
docker10.mgrote.net:
pbs.mgrote.net:
blocky.mgrote.net:
ldap.mgrote.net:
test:
hosts:
vm-test-2204.mgrote.net:
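The plays later in this diff target the renamed group directly; a quick, hypothetical check that a host landed in the expected groups:

- hosts: gitea
  gather_facts: false
  tasks:
    - name: Confirm group membership (sketch)
      ansible.builtin.debug:
        var: group_names  # expected to contain 'gitea' and 'production'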

Binary file not shown.

View File

@ -2,7 +2,7 @@
- hosts: all
gather_facts: false
roles:
- role: ansible-role-bootstrap
- role: robertdebock-ansible-role-bootstrap
tags: "bootstrap"
become: true
- role: mgrote_apt_manage_sources

View File

@ -3,4 +3,3 @@
- ansible.builtin.import_playbook: base/system.yml
- ansible.builtin.import_playbook: base/users.yml
- ansible.builtin.import_playbook: base/ufw.yml
- ansible.builtin.import_playbook: base/monitoring.yml

View File

@ -1,6 +1,4 @@
---
- hosts: ansible
roles:
- role: ansible-role-pip
tags: "pip"
become: true
- { role: geerlingguy-ansible-role-pip, tags: "pip", become: true }

View File

@ -1,7 +1,5 @@
---
- hosts: blocky
roles:
- role: mgrote_systemd_resolved
tags: "resolved"
- role: mgrote_blocky
tags: "blocky"
- { role: mgrote_systemd_resolved, tags: "resolved" }
- { role: mgrote_blocky, tags: "blocky" }

View File

@ -1,21 +1,10 @@
---
- hosts: docker
roles:
- role: mgrote_systemd_resolved
tags: "dns"
become: true
- role: ansible-role-pip
tags: "pip"
become: true
- role: ansible-role-docker
tags: "docker"
become: true
- role: ansible_role_ctop
tags: "ctop"
become: true
- role: mgrote_set_permissions
tags: "perm"
become: true
- role: mgrote_docker_compose_inline
tags: "compose"
become: true
- { role: mgrote_systemd_resolved, tags: "dns", become: true }
- { role: mgrote_mount_cifs, tags: "cifs", become: true }
- { role: geerlingguy-ansible-role-pip, tags: "pip", become: true }
- { role: geerlingguy-ansible-role-docker, tags: "docker", become: true }
- { role: gantsign-ansible-role-ctop, tags: "ctop", become: true }
- { role: mgrote_set_permissions, tags: "perm", become: true }
- { role: mgrote_docker_compose_inline, tags: "compose", become: true }

View File

@ -6,9 +6,6 @@
---
- hosts: fileserver
roles:
- role: mgrote_fileserver_smb
tags: "smb"
- role: mgrote_youtubedl
tags: "youtubedl"
- role: mgrote_disable_oom_killer
tags: "oom"
- { role: mgrote_fileserver_smb, tags: "fileserver_smb" }
- { role: mgrote_youtubedl, tags: "youtubedl" }
- { role: mgrote_disable_oom_killer, tags: "oom" }

View File

@ -1,12 +0,0 @@
---
- hosts: git
roles:
- role: ansible-role-postgresql
tags: "db"
become: true
- role: ansible_role_gitea
tags: "gitea"
become: true
- role: mgrote_gitea_setup
tags: "setup"
become: true

View File

@ -0,0 +1,4 @@
---
- hosts: gitea
roles:
- { role: pyratlabs-ansible-role-gitea, tags: "gitea", become: true }

View File

@ -1,11 +0,0 @@
---
- hosts: ldap
roles:
- role: ansible-role-postgresql
tags: "db"
become: true
- role: mgrote_lldap
tags:
- lldap
- ldap
become: true

View File

@ -1,21 +1,12 @@
---
- hosts: pbs
roles:
- role: mgrote_zfs_packages
tags: "zfs_packages"
- role: mgrote_zfs_arc_mem
tags: "zfs_arc_mem"
- role: mgrote_zfs_manage_datasets
tags: "datasets"
- role: mgrote_zfs_scrub
tags: "zfs_scrub"
- role: mgrote_zfs_zed
tags: "zfs_zed"
- role: mgrote_zfs_sanoid
tags: "sanoid"
- role: mgrote_smart
tags: "smart"
- role: mgrote_pbs_users
tags: "pbs_users"
- role: mgrote_pbs_datastores
tags: "pbs_datastores"
- { role: mgrote_zfs_packages, tags: "zfs_packages" }
- { role: mgrote_zfs_arc_mem, tags: "zfs_arc_mem" }
- { role: mgrote_zfs_manage_datasets, tags: "datasets" }
- { role: mgrote_zfs_scrub, tags: "zfs_scrub" }
- { role: mgrote_zfs_zed, tags: "zfs_zed" }
- { role: mgrote_zfs_sanoid, tags: "sanoid" }
- { role: mgrote_smart, tags: "smart" }
- { role: mgrote_pbs_users, tags: "pbs_users" }
- { role: mgrote_pbs_datastores, tags: "pbs_datastores" }

View File

@ -1,26 +1,14 @@
---
- hosts: pve
roles:
- role: mgrote_zfs_packages
tags: "zfs_packages"
- role: mgrote_zfs_arc_mem
tags: "zfs_arc_mem"
- role: mgrote_zfs_manage_datasets
tags: "datasets"
- role: mgrote_zfs_scrub
tags: "zfs_scrub"
- role: mgrote_zfs_zed
tags: "zfs_zed"
- role: mgrote_zfs_sanoid
tags: "sanoid"
- role: mgrote_smart
tags: "smart"
- role: mgrote_cv4pve_autosnap
tags: cv4pve
become: true
- role: mgrote_proxmox_bind_mounts
tags: "bindmounts"
- role: mgrote_proxmox_lxc_profiles
tags: "lxc-profile"
- role: mgrote_pbs_pve_integration
tags: "pbs"
- { role: mgrote_zfs_packages, tags: "zfs_packages" }
- { role: mgrote_zfs_arc_mem, tags: "zfs_arc_mem" }
- { role: mgrote_zfs_manage_datasets, tags: "datasets" }
- { role: mgrote_zfs_scrub, tags: "zfs_scrub" }
- { role: mgrote_zfs_zed, tags: "zfs_zed" }
- { role: mgrote_zfs_sanoid, tags: "sanoid" }
- { role: mgrote_smart, tags: "smart" }
- { role: mgrote_cv4pve_autosnap, tags: "cv4pve" }
- { role: mgrote_proxmox_bind_mounts, tags: "bindmounts" }
- { role: mgrote_proxmox_lxc_profiles, tags: "lxc-profile" }
- { role: mgrote_pbs_pve_integration, tags: "pbs" }

View File

@ -1,11 +0,0 @@
---
- hosts: all
roles:
- role: mgrote_munin_node
become: true
tags: "munin"
when: "not 'laptop' in group_names"
### The hosts must also be added to the "munin-master" Docker container.
### Only runs on physical machines.
### If a plugin does not work: munin-node-configure --shell --families=contrib,auto | sh -x

View File

@ -5,12 +5,14 @@
tags: "apt_sources"
- role: mgrote_apt_manage_packages
tags: "install"
- role: mgrote_exa
tags: "exa"
- role: mgrote_remove_snapd
become: true
tags: "snapd"
- role: mgrote_apt_update_packages
tags: "updates"
- role: ansible-role-unattended-upgrades
- role: hifis-net-ansible-role-unattended-upgrades
become: true
tags: unattended
when: "ansible_facts['distribution'] == 'Ubuntu'"

View File

@ -3,21 +3,21 @@
roles:
- role: mgrote_ntp_chrony_client
tags: "ntp"
- role: mgrote_etckeeper
tags: "etckeeper"
- role: mgrote_postfix
tags: "postfix"
- role: mgrote_restic
tags: "restic"
- role: mgrote_fail2ban
tags: "f2b"
- role: mgrote_fwupd_settings
become: true
tags: fwupd
when: "ansible_facts['distribution'] == 'Ubuntu'"
- role: ansible-manage-lvm
- role: mrlesmithjr-ansible-manage-lvm
tags: "lvm"
become: true
when: manage_lvm is defined and manage_lvm == true
# $manage_lvm belongs to this role but is checked separately in order to "activate" the playbook
- role: mgrote_ssh
tags: "ssh"
- role: mgrote_netplan

View File

@ -1,6 +1,6 @@
---
- hosts: all:!pve:!pbs
roles:
- role: ansible-ufw # rules are set in the group/host vars
tags: ufw
become: true
- { role: oefenweb-ansible-ufw, # rules are set in the group/host vars
tags: "ufw",
become: true }

View File

@ -2,9 +2,9 @@
- hosts: all
roles:
- role: mgrote_users
tags: users
tags: "user"
become: true
- role: mgrote_user_setup
tags:
- user_setup
- "user_setup"
- dotfiles

View File

@ -1,22 +0,0 @@
---
- hosts: all
become: yes
tasks:
- name: Ensure packages are absent
become: yes
ansible.builtin.apt:
autoremove: yes
autoclean: yes
purge: yes
name:
- munin-node
state: absent
- name: Ensure directories are absent
become: yes
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /usr/share/munin
- /etc/munin

View File

@ -1,78 +0,0 @@
---
- hosts: all
tasks:
- name: ensure group exists
become: true
ansible.builtin.group:
name: restic
state: absent
- name: install restic-packages
become: true
ansible.builtin.package:
name:
- restic
state: absent
- name: create "/etc/restic"
become: true
ansible.builtin.file:
path: "/etc/restic"
state: absent
- name: systemctl start restic.timer
become: true
ansible.builtin.systemd:
name: restic.timer
state: stopped
enabled: false
- name: systemctl enable units
become: true
ansible.builtin.systemd:
name: "{{ item }}"
enabled: false
masked: true
with_items:
- media-restic.automount
- media-restic.mount
- restic.service
- restic.timer
- restic_mail.service
- name: template restic.mount
become: true
ansible.builtin.file:
state: absent
path: /etc/systemd/system/media-restic.mount # media-restic == /media/restic
- name: template restic.automount
become: true
ansible.builtin.file:
path: /etc/systemd/system/media-restic.automount
state: absent
- name: template restic.service
become: true
ansible.builtin.file:
path: /etc/systemd/system/restic.service
state: absent
- name: template restic.timer
become: true
ansible.builtin.file:
path: /etc/systemd/system/restic.timer
state: absent
- name: template restic_mail.service
become: true
ansible.builtin.file:
path: /etc/systemd/system/restic_mail.service
state: absent
- name: template media-restic.automount
become: true
ansible.builtin.file:
path: /etc/systemd/system/media-restic.automount
state: absent

View File

@ -1,5 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["config:recommended"],
"ignorePaths": ["**/friedhof/**"]
}

View File

@ -1,30 +0,0 @@
collections:
- name: community.general
version: "8.6.0"
- name: community.crypto
version: "2.19.1"
- name: ansible.posix
version: "1.5.4"
- name: community.docker
version: "3.9.0"
roles:
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-bootstrap
version: "6.2.5"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-ufw
version: "v4.1.13"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-manage-lvm
version: "v0.2.11"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-unattended-upgrades
version: "v4.1.0"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-pip
version: "3.0.3"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-nfs
version: "2.0.0"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-docker
version: "7.1.0"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible_role_ctop
version: "1.1.6"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible_role_gitea
version: "v3.4.2"
- src: https://git.mgrote.net/ansible-role-mirrors/ansible-role-postgresql
version: "3.5.1"

requirements.yml Normal file (+30)
View File

@ -0,0 +1,30 @@
collections:
- git+https://git.mgrote.net/ansible-collections-mirrors/community.general
- git+https://git.mgrote.net/ansible-collections-mirrors/community.crypto
- git+https://git.mgrote.net/ansible-collections-mirrors/ansible.posix
- git+https://git.mgrote.net/ansible-collections-mirrors/community.docker
roles:
- src: https://git.mgrote.net/ansible-roles-mirrors/pyratlabs-ansible-role-k3s
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/robertdebock-ansible-role-bootstrap
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/pandemonium1986-ansible-role-k9s
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/oefenweb-ansible-ufw
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/mrlesmithjr-ansible-manage-lvm
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/hifis-net-ansible-role-unattended-upgrades
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-pip
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-nfs
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-helm
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/geerlingguy-ansible-role-docker
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/gantsign-ansible-role-ctop
scm: git
- src: https://git.mgrote.net/ansible-roles-mirrors/pyratlabs-ansible-role-gitea
scm: git
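These entries install whatever the tip of each mirror currently carries. ansible-galaxy still honours a version key for git sources, so a role can be pinned again if needed; a sketch (the tag is hypothetical):

roles:
  - src: https://git.mgrote.net/ansible-roles-mirrors/robertdebock-ansible-role-bootstrap
    scm: git
    version: "6.2.5"  # hypothetical tag; use whatever the mirror actually provides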

View File

@ -29,9 +29,10 @@
- name: install packages from the internet
become: true
ansible.builtin.apt:
deb: "{{ apt_packages_internet }}"
deb: "{{ item }}"
state: present
when: apt_packages_internet is defined
loop: "{{ apt_packages_internet }}"
- name: remove packages
become: true
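The loop is needed because the deb parameter of ansible.builtin.apt accepts a single .deb path or URL per call, not a list. A sketch of the variable shape the task now expects (the URLs are illustrative):

apt_packages_internet:
  - "https://example.org/pool/foo_1.0_amd64.deb"  # hypothetical URL
  - "https://example.org/pool/bar_2.1_amd64.deb"  # hypothetical URL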

View File

@ -1,10 +1,5 @@
{{ file_header | default () }}
upstreams:
init:
# Configure startup behavior.
# accepted: blocking, failOnError, fast
# default: blocking
strategy: fast
groups:
default:
{% for item in blocky_dns_upstream %}
@ -13,6 +8,9 @@ upstreams:
strategy: parallel_best
timeout: 2s
# optional: If true, blocky will fail to start unless at least one upstream server per group is reachable. Default: false
startVerifyUpstream: true
# optional: Determines how blocky will create outgoing connections. This impacts both upstreams, and lists.
# accepted: dual, v4, v6
# default: dual
@ -49,18 +47,17 @@ blocking:
downloads:
# optional: timeout for list download (each url). Use large values for big lists or slow internet connections
# default: 5s
timeout: 60s
timeout: 5s
# optional: Maximum download attempts
# default: 3
attempts: 5
attempts: 3
# optional: Time between the download attempts
# default: 500ms
cooldown: 10s
cooldown: 500ms
# optional: Maximum number of lists to process in parallel.
# default: 4
concurrency: 16
# Configure startup behavior.
# accepted: blocking, failOnError, fast
concurrency: 4
# optional: if failOnError, application startup will fail if at least one list can't be downloaded/opened
# default: blocking
strategy: {{ blocky_blacklists_strategy | default ("blocking") }}
# Number of errors allowed in a list before it is considered invalid.
@ -122,7 +119,7 @@ caching:
prefetching: true
# prefetch track time window (in duration format)
# default: 120
prefetchExpires: 2h
prefetchExpires: 120
# name queries threshold for prefetch
# default: 5
prefetchThreshold: 5

View File

@ -0,0 +1,11 @@
## mgrote.cv4pve
### Description
Installs [cv4pve-autosnap](https://github.com/Corsinvest/cv4pve-autosnap).
Sets up a systemd timer for it.
### Tested on
- [x] ProxMox 7*
### Variables + defaults
- see [defaults](./defaults/main.yml)
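A minimal play wiring up the role, matching the pve playbook elsewhere in this diff:

- hosts: pve
  roles:
    - { role: mgrote_cv4pve_autosnap, tags: "cv4pve" }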

View File

@ -3,7 +3,7 @@
cv4pve_cron_minute: "39"
cv4pve_cron_hour: "5"
# proxmox api-token and user
cv4pve_api_token: "supersecret"
cv4pve_api_token: "XXXXXXXXXXXXXXXXXXXXXX"
cv4pve_api_user: "root@pam!test2"
# which vm to snapshot
cv4pve_vmid: all
@ -12,7 +12,3 @@ cv4pve_keep_snapshots: 3
# under which user the script is run
cv4pve_user_group: cv4pve
cv4pve_user: cv4pve
# url
cv4pve_dl_link: https://github.com/Corsinvest/cv4pve-autosnap/releases/download/{{ cv4pve_version }}/cv4pve-autosnap-linux-x64.zip
cv4pve_version: "v1.14.8"
cv4pve_base_path: /usr/local/bin/cv4pve

View File

@ -1,42 +0,0 @@
---
- name: Ensure needed directories exist
ansible.builtin.file:
path: "{{ cv4pve_base_path }}"
state: directory
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
mode: "0644"
- name: Download specified version
ansible.builtin.unarchive:
src: "{{ cv4pve_dl_link }}"
dest: "{{ cv4pve_base_path }}"
mode: '0755'
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
remote_src: true
creates: "{{ cv4pve_base_path }}/cv4pve-autosnap-{{ cv4pve_version }}"
list_files: true
register: download
- name: Rename binary # noqa no-changed-when no-handler
ansible.builtin.command: |
mv "{{ cv4pve_base_path }}/cv4pve-autosnap" "{{ cv4pve_base_path }}/cv4pve-autosnap-{{ cv4pve_version }}"
when: download.changed
# https://stackoverflow.com/questions/20252057/using-ansible-how-would-i-delete-all-items-except-for-a-specified-set-in-a-dire
- name: Find old versions
ansible.builtin.find:
paths: "{{ cv4pve_base_path }}"
file_type: file
use_regex: false
excludes:
- "cv4pve-autosnap-{{ cv4pve_version }}"
register: found_files
- name: Ensure old versions are absent
ansible.builtin.file:
path: "{{ item.path }}"
state: absent
with_items: "{{ found_files['files'] }}"
...

View File

@ -2,9 +2,74 @@
- name: include user tasks
ansible.builtin.include_tasks: user.yml
- name: include install tasks
ansible.builtin.include_tasks: install.yml
- name: include systemd tasks
ansible.builtin.include_tasks: systemd.yml
...
- name: create directories
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
mode: "0644"
loop:
- '/tmp/cv4pve'
- '/usr/local/bin/cv4pve'
- name: download archives
become: true
ansible.builtin.get_url:
url: "{{ cv4pve_dl_link }}"
dest: /tmp/cv4pve/cv4pve-autosnap-linux-x64.zip
mode: '0775'
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
- name: extract archives
become: true
ansible.builtin.unarchive:
src: /tmp/cv4pve/cv4pve-autosnap-linux-x64.zip
dest: /usr/local/bin/cv4pve
remote_src: true
mode: a+x
owner: "{{ cv4pve_user }}"
group: "{{ cv4pve_user_group }}"
- name: template cv4pve.service
become: true
ansible.builtin.template:
src: cv4pve.service.j2
dest: /etc/systemd/system/cv4pve.service
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: template cv4pve_mail.service
become: true
ansible.builtin.template:
src: cv4pve_mail.service.j2
dest: /etc/systemd/system/cv4pve_mail.service
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: template cv4pve.timer
become: true
ansible.builtin.template:
src: cv4pve.timer.j2
dest: /etc/systemd/system/cv4pve.timer
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: systemctl start cv4pve.timer
become: true
ansible.builtin.systemd:
name: cv4pve.timer
state: started
enabled: true
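The notify entries above assume a handler named "systemctl daemon-reload" somewhere in the role; a minimal sketch of what it plausibly looks like:

- name: systemctl daemon-reload
  become: true
  ansible.builtin.systemd:
    daemon_reload: true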

View File

@ -1,38 +0,0 @@
---
- name: Ensure service-unit (cv4pve) is templated
ansible.builtin.template:
src: cv4pve.service.j2
dest: /etc/systemd/system/cv4pve.service
owner: root
group: root
mode: "0644"
no_log: true
notify:
- systemctl daemon-reload
- name: Ensure service-unit (mail) is templated
ansible.builtin.template:
src: cv4pve_mail.service.j2
dest: /etc/systemd/system/cv4pve_mail.service
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: Ensure service-unit (timer) is templated
ansible.builtin.template:
src: cv4pve.timer.j2
dest: /etc/systemd/system/cv4pve.timer
owner: root
group: root
mode: "0644"
notify:
- systemctl daemon-reload
- name: Ensure timer is started and enabled
ansible.builtin.systemd:
name: cv4pve.timer
state: started
enabled: true
...

View File

@ -1,5 +1,5 @@
---
- name: Ensure group exists
- name: ensure group exists
become: true
ansible.builtin.group:
name: "{{ cv4pve_user_group }}"
@ -7,7 +7,7 @@
when:
- cv4pve_user_group is defined
- name: Ensure user exists
- name: ensure user exists
become: true
ansible.builtin.user:
name: "{{ cv4pve_user }}"
@ -17,4 +17,3 @@
when:
- cv4pve_user_group is defined
- cv4pve_user is defined
...

View File

@ -6,4 +6,4 @@ OnFailure=cv4pve_mail.service
[Service]
Type=simple
ExecStart={{ cv4pve_base_path }}/cv4pve-autosnap-{{ cv4pve_version }} --host=127.0.0.1 --api-token {{ cv4pve_api_user }}={{ cv4pve_api_token }} --vmid="{{ cv4pve_vmid }}" snap --label='daily' --keep="{{ cv4pve_keep_snapshots }}" --state
ExecStart=/usr/local/bin/cv4pve/cv4pve-autosnap --host=127.0.0.1 --api-token {{ cv4pve_api_user }}={{ cv4pve_api_token }} --vmid="{{ cv4pve_vmid }}" snap --label='daily' --keep="{{ cv4pve_keep_snapshots }}" --state

View File

@ -6,5 +6,6 @@ Description=Timer: Trigger VM-Snapshots in PVE with cv4pve.
OnCalendar=*-*-* {{ cv4pve_cron_hour }}:{{ cv4pve_cron_minute }}:00
RandomizedDelaySec=10 min
[Install]
WantedBy=timers.target multi-user.target
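With the role defaults (cv4pve_cron_hour: "5", cv4pve_cron_minute: "39") the OnCalendar line above renders to a daily trigger:

OnCalendar=*-*-* 5:39:00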

View File

@ -1,4 +1,5 @@
{{ file_header | default () }}
[Unit]
Description=Send a Mail in case of an error in cv4pve.service.

View File

@ -1,16 +0,0 @@
---
- name: ensure etckeeper is installed
become: true
ansible.builtin.package:
name:
- etckeeper
- git
state: present
install_recommends: false
- name: ensure repository is initialized
ansible.builtin.command: etckeeper init
args:
chdir: /etc/
creates: /etc/.etckeeper
...

View File

@ -83,16 +83,16 @@ smb_packages:
## ensures that folder names are displayed as "autosnap_2021-11-04_23÷59÷02_daily" instead of "A0KDC9~F"
## https://www.samba.org/samba/docs/current/man-html/vfs_catia.8.html
# active?
smb_enable_snapshots_dir: false
smb_enable_snapshots_dir: true
# which character replacements catia should perform
smb_catia_mappings: "0x3a:0xf7" # replaces ":" with "÷"
# expose the snapshots as Windows shadow copies
## https://www.samba.org/samba/docs/current/man-html/vfs_shadow_copy2.8.html
## BUG: Windows sees the shadow copies but cannot open the selected file if it was deleted since the snapshot, presumably because Windows does not use the full snapshot path
## the format matches sanoid snapshots
# active?
smb_enable_snapshots_shadow: true
smb_enable_snapshots_shadow: false
# where the snapshots live
smb_shadow_snapdir: ".zfs/snapshot"
# sort order
@ -104,4 +104,4 @@ smb_shadow_snapprefix: "^autosnap"
# Snapshot-"Trenner"
smb_shadow_delimiter: "_"
# time format of the snapshots
smb_shadow_localtime: "yes"
smb_shadow_localtime: "no"

View File

@ -11,21 +11,20 @@
##======================= catia =======================
vfs objects = catia
catia: mappings = {{ smb_catia_mappings }}
{% elif smb_enable_snapshots_dir is sameas false and smb_enable_snapshots_shadow is sameas true %}
{% elif smb_enable_snapshots_shadow is sameas true and smb_enable_snapshots_dir is sameas false %}
##======================= shadow_copy2 =======================
vfs objects = shadow_copy2
vfs objects = {{ smb_shadow_vfs_objects }}
shadow: snapdir = {{ smb_shadow_snapdir }}
shadow: sort = {{ smb_shadow_sort }}
shadow: format = {{ smb_shadow_format }}
shadow: snapprefix = {{ smb_shadow_snapprefix }}
shadow: delimiter = {{ smb_shadow_delimiter }}
shadow: localtime = {{ smb_shadow_localtime }}
shadow: snapdirseverywhere = yes
{% elif smb_enable_snapshots_shadow is sameas true and smb_enable_snapshots_dir is sameas true %}
#======================= vfs objects =======================
vfs objects = shadow_copy2, catia
##======================= catia =======================
catia:mappings = {{ smb_catia_mappings }}
catia: mappings = {{ smb_catia_mappings }}
##======================= shadow_copy2 =======================
shadow: snapdir = {{ smb_shadow_snapdir }}
shadow: sort = {{ smb_shadow_sort }}
@ -33,7 +32,6 @@ shadow: format = {{ smb_shadow_format }}
shadow: snapprefix = {{ smb_shadow_snapprefix }}
shadow: delimiter = {{ smb_shadow_delimiter }}
shadow: localtime = {{ smb_shadow_localtime }}
shadow: snapdirseverywhere = yes
{% endif %}

View File

@ -1,34 +0,0 @@
---
# the variables come from
# - https://docs.gitea.com/administration/command-line
# - https://github.com/lldap/lldap/blob/main/example_configs/gitea.md
# and
# the respective group/host vars!
- name: Check if Admin-User exists
no_log: true
become_user: gitea
become: true
ansible.builtin.command: |
forgejo admin user list \
--config "{{ gitea_configuration_path }}/gitea.ini"
register: check
changed_when: false
- name: Ensure Admin-User exists
#no_log: true
become_user: gitea
become: true
ansible.builtin.command: |
forgejo admin user create \
--config "{{ gitea_configuration_path }}/gitea.ini" \
--username "{{ gitea_admin_user }}" \
--password "{{ gitea_admin_user_pass }}" \
--email "{{ gitea_admin_user }}@mgrote.net" \
--admin
when: 'not "{{ gitea_admin_user }}@mgrote.net" in check.stdout'
- name: Show existing users
ansible.builtin.debug:
msg: "{{ check.stdout_lines }}"
...
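The "Ensure Admin-User exists" task above nests {{ }} templating inside its when condition, which Ansible warns about; an equivalent formulation without inline templating would be (sketch):

when: (gitea_admin_user ~ '@mgrote.net') not in check.stdout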

View File

@ -1,56 +0,0 @@
---
# the variables come from
# - https://docs.gitea.com/administration/command-line
# - https://github.com/lldap/lldap/blob/main/example_configs/gitea.md
# and
# the respective group/host vars!
- name: Ensure LDAP config is set up
no_log: true
become_user: gitea
become: true
ansible.builtin.command: |
forgejo admin auth add-ldap \
--config "{{ gitea_configuration_path }}/gitea.ini" \
--name "lldap" \
--security-protocol "unencrypted" \
--host "{{ gitea_ldap_host }}" \
--port "3890" \
--bind-dn "uid={{ gitea_ldap_bind_user }},ou=people,{{ gitea_ldap_base_path }}" \
--bind-password "{{ gitea_ldap_bind_pass }}" \
--user-search-base "ou=people,{{ gitea_ldap_base_path }}" \
--user-filter "(&(memberof=cn=gitea,ou=groups,{{ gitea_ldap_base_path }})(|(uid=%[1]s)(mail=%[1]s)))" \
--username-attribute "uid" \
--email-attribute "mail" \
--firstname-attribute "givenName" \
--surname-attribute "sn" \
--avatar-attribute "jpegPhoto" \
--synchronize-users
register: setup
ignore_errors: true
failed_when: 'not "Command error: login source already exists [name: lldap]" in setup.stderr' # do not fail the task when LDAP is already configured
changed_when: "setup.rc == 0" # chnaged nur wenn Task rc 0 hat, sollte nur beim ersten lauf vorkommen; ungetestet
- name: Modify LDAP config
no_log: true
become_user: gitea
become: true
ansible.builtin.command: |
forgejo admin auth update-ldap \
--config "{{ gitea_configuration_path }}/gitea.ini" \
--id "1" \
--security-protocol "unencrypted" \
--host "{{ gitea_ldap_host }}" \
--port "3890" \
--bind-dn "uid={{ gitea_ldap_bind_user }},ou=people,{{ gitea_ldap_base_path }}" \
--bind-password "{{ gitea_ldap_bind_pass }}" \
--user-search-base "ou=people,{{ gitea_ldap_base_path }}" \
--user-filter "(&(memberof=cn=gitea,ou=groups,{{ gitea_ldap_base_path }})(|(uid=%[1]s)(mail=%[1]s)))" \
--username-attribute "uid" \
--email-attribute "mail" \
--firstname-attribute "givenName" \
--surname-attribute "sn" \
--avatar-attribute "jpegPhoto" \
--synchronize-users
when: '"Command error: login source already exists [name: lldap]" in setup.stderr' # führe nur aus wenn erster Task fehlgeschlagen ist
changed_when: false # no idea how to reliably detect a change here
...

View File

@ -1,7 +0,0 @@
---
- name: Include LDAP tasks
ansible.builtin.include_tasks: ldap.yml
- name: Include User tasks
ansible.builtin.include_tasks: admin.yml
...

View File

@ -1,21 +0,0 @@
---
lldap_package_url: "https://download.opensuse.org/repositories/home:/Masgalor:/LLDAP/xUbuntu_22.04/amd64/lldap_0.5.0-1+3.1_amd64.deb"
lldap_logging_verbose: "false"
lldap_http_port: "17170"
lldap_http_host: "0.0.0.0"
lldap_ldap_host: "0.0.0.0"
lldap_public_url: http://localhost
lldap_jwt_secret: supersecret
lldap_ldap_base_dn: "dc=example,dc=com"
lldap_admin_username: ladmin # only used on setup
lldap_admin_password: supersecret # also bind-secret; only used on setup
lldap_admin_mailaddress: lldap-admin@mgrote.net # only used on setup
lldap_database_url: "postgres://postgres-user:password@postgres-server/my-database"
lldap_key_seed: supersecretseed
lldap_smtp_from: "LLDAP Admin <info@mgrote.net>"
lldap_smtp_reply_to: "Do not reply <info@mgrote.net>"
lldap_smtp_server: "mail.domain.net"
lldap_smtp_port: "25"
lldap_smtp_smtp_encryption: "NONE"
lldap_smtp_user: "info@mgrote.net"
lldap_smtp_enable_password_reset: "true"

View File

@ -1,15 +0,0 @@
---
- name: Ensure services are enabled and started
become: true
ansible.builtin.systemd:
name: lldap.service
masked: false
enabled: true
state: started
- name: Ensure service is restarted
become: true
ansible.builtin.systemd:
name: lldap.service
state: restarted
...

View File

@ -1,29 +0,0 @@
---
- name: Ensure package is installed
ansible.builtin.apt:
deb: "{{ lldap_package_url }}"
notify: Ensure services are enabled and started
- name: Ensure needed directories exist
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: lldap
group: lldap
mode: '0755'
loop:
- /usr/share/lldap/app/static/fonts
- /usr/share/lldap/app/static
- /usr/share/lldap/app/pkg
- name: Ensure config is templated
ansible.builtin.template:
src: lldap_config.toml.j2
dest: /etc/lldap/lldap_config.toml
owner: lldap
group: lldap
mode: "0644"
notify:
- Ensure services are enabled and started
- Ensure service is restarted
...

View File

@ -1,144 +0,0 @@
{{ file_header | default () }}
## Tune the logging to be more verbose by setting this to be true.
## You can set it with the LLDAP_VERBOSE environment variable.
verbose={{ lldap_logging_verbose }}
## The host address that the LDAP server will be bound to.
## To enable IPv6 support, simply switch "ldap_host" to "::":
## To only allow connections from localhost (if you want to restrict to local self-hosted services),
## change it to "127.0.0.1" ("::1" in case of IPv6)".
ldap_host = "{{ lldap_ldap_host }}"
## The port on which to have the LDAP server.
#ldap_port = 3890
## The host address that the HTTP server will be bound to.
## To enable IPv6 support, simply switch "http_host" to "::".
## To only allow connections from localhost (if you want to restrict to local self-hosted services),
## change it to "127.0.0.1" ("::1" in case of IPv6)".
http_host = "{{ lldap_http_host }}"
## The port on which to have the HTTP server, for user login and
## administration.
http_port = {{ lldap_http_port }}
## The public URL of the server, for password reset links.
http_url = "{{ lldap_public_url }}"
## Random secret for JWT signature.
## This secret should be random, and should be shared with application
## servers that need to consume the JWTs.
## Changing this secret will invalidate all user sessions and require
## them to re-login.
## You should probably set it through the LLDAP_JWT_SECRET environment
## variable from a secret ".env" file.
## This can also be set from a file's contents by specifying the file path
## in the LLDAP_JWT_SECRET_FILE environment variable
## You can generate it with (on linux):
## LC_ALL=C tr -dc 'A-Za-z0-9!#%&'\''()*+,-./:;<=>?@[\]^_{|}~' </dev/urandom | head -c 32; echo ''
jwt_secret = "{{ lldap_jwt_secret }}"
## Base DN for LDAP.
## This is usually your domain name, and is used as a
## namespace for your users. The choice is arbitrary, but will be needed
## to configure the LDAP integration with other services.
## The sample value is for "example.com", but you can extend it with as
## many "dc" as you want, and you don't actually need to own the domain
## name.
ldap_base_dn = "{{ lldap_ldap_base_dn }}"
## Admin username.
## For the LDAP interface, a value of "admin" here will create the LDAP
## user "cn=admin,ou=people,dc=example,dc=com" (with the base DN above).
## For the administration interface, this is the username.
ldap_user_dn = "{{ lldap_admin_username }}"
## Admin email.
## Email for the admin account. It is only used when initially creating
## the admin user, and can safely be omitted.
ldap_user_email = "{{ lldap_admin_mailaddress }}"
## Admin password.
## Password for the admin account, both for the LDAP bind and for the
## administration interface. It is only used when initially creating
## the admin user.
## It should be minimum 8 characters long.
## You can set it with the LLDAP_LDAP_USER_PASS environment variable.
## This can also be set from a file's contents by specifying the file path
## in the LLDAP_LDAP_USER_PASS_FILE environment variable
## Note: you can create another admin user for user administration, this
## is just the default one.
ldap_user_pass = "{{ lldap_admin_password }}"
## Database URL.
## This encodes the type of database (SQlite, MySQL, or PostgreSQL)
## , the path, the user, password, and sometimes the mode (when
## relevant).
## Note: SQlite should come with "?mode=rwc" to create the DB
## if not present.
## Example URLs:
## - "postgres://postgres-user:password@postgres-server/my-database"
## - "mysql://mysql-user:password@mysql-server/my-database"
##
## This can be overridden with the LLDAP_DATABASE_URL env variable.
database_url = "{{ lldap_database_url }}"
## Private key file.
## Contains the secret private key used to store the passwords safely.
## Note that even with a database dump and the private key, an attacker
## would still have to perform an (expensive) brute force attack to find
## each password.
## Randomly generated on first run if it doesn't exist.
## Alternatively, you can use key_seed to override this instead of relying on
## a file.
## Env variable: LLDAP_KEY_FILE
key_file = "/var/lib/lldap/private_key"
## Seed to generate the server private key, see key_file above.
## This can be any random string, the recommendation is that it's at least 12
## characters long.
## Env variable: LLDAP_KEY_SEED
key_seed = "{{ lldap_key_seed }}"
## Ignored attributes.
## Some services will request attributes that are not present in LLDAP. When it
## is the case, LLDAP will warn about the attribute being unknown. If you want
## to ignore the attribute and the service works without, you can add it to this
## list to silence the warning.
#ignored_user_attributes = [ "sAMAccountName" ]
#ignored_group_attributes = [ "mail", "userPrincipalName" ]
## Options to configure SMTP parameters, to send password reset emails.
## To set these options from environment variables, use the following format
## (example with "password"): LLDAP_SMTP_OPTIONS__PASSWORD
[smtp_options]
## Whether to enabled password reset via email, from LLDAP.
enable_password_reset={{ lldap_smtp_enable_password_reset }}
## The SMTP server.
server="{{ lldap_smtp_server }}"
## The SMTP port.
port={{ lldap_smtp_port }}
## How the connection is encrypted, either "NONE" (no encryption), "TLS" or "STARTTLS".
smtp_encryption = "{{ lldap_smtp_smtp_encryption }}"
## The SMTP user, usually your email address.
#user="{{ lldap_smtp_user }}"
## The SMTP password.
#password="password" #gitleaks:allow
## The header field, optional: how the sender appears in the email. The first
## is a free-form name, followed by an email between <>.
#from="{{ lldap_smtp_from }}"
## Same for reply-to, optional.
reply_to="{{ lldap_smtp_reply_to }}"
## Options to configure LDAPS.
## To set these options from environment variables, use the following format
## (example with "port"): LLDAP_LDAPS_OPTIONS__PORT
[ldaps_options]
## Whether to enable LDAPS.
#enabled=true
## Port on which to listen.
#port=6360
## Certificate file.
#cert_file="/data/cert.pem"
## Certificate key file.
#key_file="/data/key.pem"

View File

@ -4,4 +4,4 @@ network:
renderer: networkd
ethernets:
{{ ansible_default_ipv4.interface }}:
dhcp4: true
dhcp4: yes

Some files were not shown because too many files have changed in this diff.