# onyx - Container Server of nbit Informatik GmbH

onyx.nbit.ch is used to run rootless Podman containers, with Traefik as a reverse proxy.

Additionally, it acts as the secondary nameserver ns2.nbit.ch.

In this directory (/home/containers/onyx_pods) you will find all configuration files needed to run the containers (Podman with Kubernetes YAML files).

Specs:

  • Rocky Linux 9
  • Hetzner Cloud Server CX 31
    • 2 vCPUs
    • 8 GB RAM
    • 80 GB Disk

Persistent data is stored in /data

## Create Server

Name: onyx.nbit.ch

Set the root password (by hand).

```bash
dnf update

groupadd containers
useradd -m -g containers containers
passwd containers
```

For the containers user, add the following to ~/.bash_profile:

```bash
# User specific environment and startup programs
export XDG_RUNTIME_DIR=/run/user/$(id -u)
```

```bash
hostnamectl set-hostname onyx.nbit.ch
timedatectl set-timezone Europe/Zurich

dnf install glibc-langpack-de
localectl set-locale LANG=en_US.UTF-8
localectl set-locale LC_TIME=de_CH.UTF-8
```

Set journald to persistent storage in /etc/systemd/journald.conf: Storage=persistent (see the sketch below)

Enable SELinux in enforcing mode: change /etc/selinux/config (see the sketch below)

```bash
dnf install setroubleshoot
```

Enable the EPEL repo:

```bash
dnf install epel-release
```
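
A minimal sketch of the journald and SELinux settings mentioned above (the restart and verification commands are standard ones, not taken from this repo):

```bash
# /etc/systemd/journald.conf:
#   Storage=persistent
systemctl restart systemd-journald

# /etc/selinux/config:
#   SELINUX=enforcing
getenforce    # should report "Enforcing" (a reboot is needed if SELinux was previously disabled)
```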

## Firewall

# dnf install firewalld
# firewall-cmd --add-service={http,https} --permanent
# firewall-cmd --remove-service=cockpit --permanent
# firewall-cmd --add-port=1883/tcp --permanent   # only for MQTT
# firewall-cmd --add-port=25/tcp --permanent     # for Postfix
# firewall-cmd --reload

List Rules:
# firewall-cmd  --list-all

## fail2ban on Host for SSH

# dnf install fail2ban
# cp /etc/fail2ban/jail.conf /etc/fail2ban/jail.local
Edit /etc/fail2ban/jail.local and enable the sshd jail.
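
The relevant part of jail.local then looks roughly like this:

```
[sshd]
enabled = true
```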

# systemctl enable fail2ban
# systemctl restart fail2ban

command to check who is banned:
# fail2ban-client status sshd

## Install Software

# dnf install passt
# dnf install git
# dnf install podman
# dnf install jq
# dnf install sysstat
# dnf install lftp
# dnf install binutils

## Setup Mail

# dnf install s-nail procmail
# cp /usr/share/doc/esmtp/sample.esmtprc /etc/esmtprc 
# /usr/bin/esmtp-wrapper 

## Backup Server

# dnf install restic
# mkdir /backup
# mkdir /backup-restic
# restic init --repo /backup-restic/restic-repo-$(hostname --short)      # password is in KeePass

Create an SSH key pair for the root user and deposit the public key on the Hetzner Storage Box:

# ssh-keygen
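
One way to deposit the public key (an assumption, not from this repo: Hetzner Storage Boxes accept ssh-copy-id on port 23 when SSH support is enabled; the key can also be uploaded via the web interface):

```bash
ssh-copy-id -p 23 u152662@u152662.your-storagebox.de
```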

Restic Script:

/usr/local/bin/backup-to-disk.sh
#!/bin/bash
# Backup of the most important directories to a local directory
#
# Uses restic.
#
PATH=$PATH:/usr/local/bin
export RESTIC_PASSWORD="$(hostname --short)7355"
restic backup --quiet --repo /backup-restic/restic-repo-$(hostname --short) /home /etc /var /opt /data /usr/local/bin /backup --exclude=/var/log

if [ $? -eq 0 ]; then
  restic forget --quiet --repo /backup-restic/restic-repo-$(hostname --short) --keep-daily 7 --keep-weekly 5 --keep-monthly 12 --keep-yearly 20 --prune
else
  >&2 echo "Problem with restic Backup $(hostname --short)"
fi
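
To list snapshots or restore from the repository (standard restic commands, sketched here with the same repo path and password scheme as the script above):

```bash
export RESTIC_PASSWORD="$(hostname --short)7355"
restic snapshots --repo /backup-restic/restic-repo-$(hostname --short)
restic restore latest --repo /backup-restic/restic-repo-$(hostname --short) --target /tmp/restore
```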

/etc/cron.d/backup-to-disk:
#
# Backup important Files to Disk
#
55 6 * * * root /usr/local/bin/backup-to-disk.sh >/dev/null

Backup to the Storage Box:

# cat > /etc/cron.d/rsync-backup-to-other-host <<HERE
#
# Rsync /backup-restic to backup space
#
20 7 * * * root /usr/bin/rsync -avzH --delete --numeric-ids -e 'ssh -p23' /backup-restic u152662@u152662.your-storagebox.de:onyx-backup-restic-rsync >/dev/null
HERE
Backup MySQL-DBs:

# mkdir /backup/mysql-dbs
# chown containers:containers /backup/mysql-dbs/

/usr/local/bin/backup-mysql-dbs.sh (along these lines, one line per container):
[root@onyx bin]# more backup-mysql-dbs.sh
#!/bin/bash
# Backup of the MySQL DBs (Podman)
#
for container_name in $(podman ps --format "{{.Image}} {{.Names}}" |grep mysql |awk '{print $2}'); do
  if [ -f /usr/local/bin/${container_name}.pwd ]; then
    # the pwd file must set "PWD=XXXX" (MySQL root password)
    . /usr/local/bin/${container_name}.pwd
    podman exec ${container_name} /usr/bin/mysqldump -u root --password=${PWD} --all-databases > /backup/mysql-dbs/mysql-databases-${container_name}-$(date +%Y%m%W).sql 2>/dev/null
  else
    >&2 echo "Password must be set as PWD=XXXX in /usr/local/bin/${container_name}.pwd"
  fi
done

# Cleanup Old Backups
find /backup/mysql-dbs -type f -mtime +30 -exec rm {} \;

[root@onyx bin]# ls -l *pwd
-r--------. 1 containers containers 15 Dec 10 09:42 wordpressacmoag-pod-db.pwd
-r--------. 1 containers containers 15 Dec 10 09:38 wordpresscmoag-pod-db.pwd

/etc/cron.d/backup-mysql-dbs:
# Backup MySQL DBs
#
45 5 * * * containers /usr/local/bin/backup-mysql-dbs.sh >/dev/null


Restore (just in case):
cat backup.sql | podman exec -i CONTAINER /usr/bin/mysql -u root --password=root DATABASE


## Wordpress behind Traefik

The following needs to be inserted into wp-config.php (at the top of the PHP code):

```php
if (strpos($_SERVER['HTTP_X_FORWARDED_PROTO'], 'https') !== false)
   $_SERVER['HTTPS']='on';
```

## Setup Env for Podman

# mkdir /data
# chown containers:containers /data

Set Defaults:

containers$ cat ~/.config/containers/containers.conf
[network]
network_backend = "netavark"


Credentials for the Docker registry should still be available after a reboot, so copy them out of the runtime directory:

[containers@onyx-dev ~]$ podman login git.nbit.ch
Username: jlehmann
Password:
Login Succeeded!

[containers@onyx-dev ~]$ cp /run/user/1000/containers/auth.json ~/.config/containers/auth.json


## Setup Traefik

Traefik is started with podman play kube (from a YAML file) and attaches to the host network (hostNetwork: true in the YAML). Backends map their ports and are accessed on localhost.

We do not use dynamic configuration via the Docker provider, but rather the file provider (one file per backend).
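
The dynamic files referenced below assume a static Traefik configuration roughly like this (a sketch, not the file from this repo; the provider directory, e-mail address and challenge type are assumptions, only the entrypoint and resolver names are taken from the configs here):

```yaml
entryPoints:
  web:
    address: ":80"
  websecure:
    address: ":443"

providers:
  file:
    # one dynamic config file per backend, see the example below
    directory: /etc/traefik/configuration
    watch: true

certificatesResolvers:
  myresolver:
    acme:
      email: someone@example.com          # placeholder
      storage: /data/traefik/acme.json    # Let's Encrypt state (acme.json)
      httpChallenge:
        entryPoint: web
```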

Example Backend Service File:

[containers@onyx onyx_pods]$ cat traefik/configuration/nbitwebsite.yml
http:
  routers:
    nbitwebsite:
      entrypoints:
      - websecure
      tls:
        certresolver: "myresolver"
        domains:
          - main: "www.linux-freelancer.ch"
            sans: "linux-freelancer.ch"
      rule: "Host(`linux-freelancer.ch`,`www.linux-freelancer.ch`)"
      service: nbitwebsite

  services:
    nbitwebsite:
      loadBalancer:
        servers:
        - url: http://127.0.0.1:9000/
        passHostHeader: false

Allow rootless Podman to bind to port 80 and above, and keep the containers user's services running without an active login:

# echo 'net.ipv4.ip_unprivileged_port_start=80' >> /etc/sysctl.d/containers.conf
# loginctl enable-linger containers
# The following fixes "Failed to connect to bus: No medium found"
export XDG_RUNTIME_DIR=/run/user/$(id -u)

containers$ touch /data/traefik/acme.json
containers$ chmod 0600 /data/traefik/acme.json

!!!!!!!!!!!!!!
IMPORTANT: we changed the startup method to Quadlets, placed in
~containers/.config/containers/systemd/

See also https://www.redhat.com/sysadmin/quadlet-podman

Example of such a quadlet config (Network=pasta is not to be used for Traefik):

[containers@onyx-dev ~]$ more ~containers/.config/containers/systemd/onyx-dev-mqtt.kube
[Unit]
Description=MQTT
Before=local-fs.target

[Kube]
Yaml=/home/containers/onyx-dev_pods/mqtt/mqtt.yaml
LogDriver=journald
Network=pasta

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
!!!!!!!!!!!!!!
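
With Quadlets, the generated unit takes its name from the .kube file, so the example above is handled roughly like this:

```bash
systemctl --user daemon-reload
systemctl --user start onyx-dev-mqtt.service
systemctl --user status onyx-dev-mqtt.service
```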

containers$ mkdir -p ~/.config/systemd/user/
containers$ cd ~/.config/systemd/user/

containers$ escaped=$(systemd-escape ~/onyx_pods/traefik/traefik.yaml)
containers$ systemctl --user start podman-kube@$escaped.service
containers$ systemctl --user enable podman-kube@$escaped.service

## Setup Backend Services

Create Kubernetes YAML File:

backendservice=nbitwebsite
containers$ mkdir ~/onyx_pods/${backendservice}

Create the file ~/onyx_pods/${backendservice}/${backendservice}.yaml
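
A minimal sketch of such a file (image name and port are placeholders; the real YAML files live in the per-service directories of this repo):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nbitwebsite-pod
spec:
  containers:
  - name: nbitwebsite
    # placeholder image; the real one comes from the git.nbit.ch registry
    image: git.nbit.ch/nbit/nbitwebsite:latest
    ports:
    - containerPort: 80
      # published on the host, so Traefik can reach it on http://127.0.0.1:9000/
      hostPort: 9000
```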

containers$ escaped=$(systemd-escape ~/onyx_pods/${backendservice}/${backendservice}.yaml)
containers$ systemctl --user start podman-kube@$escaped.service
containers$ systemctl --user enable podman-kube@$escaped.service

## Postfix

# dnf install postfix

/etc/postfix/main.cf:
---
inet_interfaces = all

smtpd_tls_security_level = none

virtual_alias_domains = wo-bisch.ch mini-beieli.ch
virtual_alias_maps = hash:/etc/postfix/virtual
---

# tail -12 /etc/postfix/virtual
abuse@mini-beieli.ch nbitinf@nbit.ch
hostmaster@mini-beieli.ch nbitinf@nbit.ch
info@mini-beieli.ch nbitinf@nbit.ch
mail@mini-beieli.ch nbitinf@nbit.ch
postmaster@mini-beieli.ch nbitinf@nbit.ch
register@mini-beieli.ch nbitinf@nbit.ch
abuse@wo-bisch.ch nbitinf@nbit.ch
hostmaster@wo-bisch.ch nbitinf@nbit.ch
info@wo-bisch.ch nbitinf@nbit.ch
mail@wo-bisch.ch nbitinf@nbit.ch
postmaster@wo-bisch.ch nbitinf@nbit.ch
register@wo-bisch.ch nbitinf@nbit.ch

# postmap /etc/postfix/virtual

# systemctl enable postfix
# systemctl start postfix
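
To verify the setup (postmap -q and a test mail via s-nail; the address is just an example):

```bash
postmap -q info@wo-bisch.ch hash:/etc/postfix/virtual    # should print nbitinf@nbit.ch
echo "test" | mail -s "postfix test" info@wo-bisch.ch
```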

Create DNS records:
mx1.nbit.ch and mx2.nbit.ch (A/AAAA Records to the same IPs as onyx and onyx-dev)

Change SPF Records to include mx1 and mx2
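
In the zone files this looks roughly as follows (IP addresses are placeholders, and the exact SPF policy depends on the existing records):

```
mx1.nbit.ch.   IN A      <IPv4 of onyx>
mx1.nbit.ch.   IN AAAA   <IPv6 of onyx>
mx2.nbit.ch.   IN A      <IPv4 of onyx-dev>
mx2.nbit.ch.   IN AAAA   <IPv6 of onyx-dev>

; per sending domain, e.g. wo-bisch.ch and mini-beieli.ch
@              IN TXT    "v=spf1 mx a:mx1.nbit.ch a:mx2.nbit.ch ~all"
```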

## Secondary Nameserver

# dnf install bind bind-utils
# systemctl enable --now named
# firewall-cmd --add-service=dns --permanent
# firewall-cmd --reload

Adapt /etc/named.conf:

Important => recursion no

[root@onyx etc]# diff named.conf named.conf.9dec2022
11,12c11,12
< 	listen-on port 53 { any; };
< 	listen-on-v6 port 53 { any; };
---
> 	listen-on port 53 { 127.0.0.1; };
> 	listen-on-v6 port 53 { ::1; };
19,20c19
< 	allow-query     { any; };
<         version "You are too curious";
---
> 	allow-query     { localhost; };
32c31
< 	recursion no;
---
> 	recursion yes;
61,143d59
< zone "nbit.ch" IN {
<   type slave;
<   file "slaves/nbit.ch.zone";
<   allow-notify { 94.130.184.127; 2a01:4f8:c2c:12ed::1; };
<   masters {
<     94.130.184.127; 2a01:4f8:c2c:12ed::1;
<   };
<   allow-transfer {
<         127.0.0.1;
<   };
< };
<
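
To check that the slave zone is transferred correctly (standard bind tooling, shown as a sketch):

```bash
rndc retransfer nbit.ch                 # force a fresh transfer from the master
dig @127.0.0.1 nbit.ch SOA +short       # serial should match the master
journalctl -u named | grep -i transfer
```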

on master:

[root@onyx-dev named]# semanage fcontext -a -t named_cache_t "/var/named/master(/.*)?"
[root@onyx-dev named]# restorecon -r -v /var/named/master
...


Grant read rights for the Grafana Agent:

setfacl -R -m u:grafana-agent:rX /var/log