🚚 move archive

This commit is contained in:
auricom
2023-11-29 23:20:33 +01:00
parent 3015719b8e
commit c9e9788f6c
54 changed files with 3 additions and 1 deletion

View File

@@ -0,0 +1,136 @@
# Socket address to listen on
listen = "[::]:8080"
# Allowed `Host` headers
#
# This _must_ be configured for production use. If unconfigured or the
# list is empty, all `Host` headers are allowed.
allowed-hosts = []
# The canonical API endpoint of this server
#
# This is the endpoint exposed to clients in `cache-config` responses.
#
# This _must_ be configured for production use. If not configured, the
# API endpoint is synthesized from the client's `Host` header which may
# be insecure.
#
# The API endpoint _must_ end with a slash (e.g., `https://domain.tld/attic/`
# not `https://domain.tld/attic`).
api-endpoint = "https://attic.${SECRET_CLUSTER_DOMAIN}/"
# Whether to soft-delete caches
#
# If this is enabled, caches are soft-deleted instead of actually
# removed from the database. Note that soft-deleted caches cannot
# have their names reused as long as the original database records
# are there.
#soft-delete-caches = false
# Whether to require fully uploading a NAR if it exists in the global cache.
#
# If set to false, simply knowing the NAR hash is enough for
# an uploader to gain access to an existing NAR in the global
# cache.
#require-proof-of-possession = true
# JWT signing token
#
# Set this to the Base64 encoding of some random data.
# You can also set it via the `ATTIC_SERVER_TOKEN_HS256_SECRET_BASE64` environment
# variable.
# token-hs256-secret-base64 = ""
# Database connection
[database]
# Connection URL
#
# For production use it's recommended to use PostgreSQL.
# url = "postgresql://USERNAME:PASSWORD@YOUR_POSTGRESQL_URL:5432/DB_NAME"
# Whether to enable sending periodic heartbeat queries
#
# If enabled, a heartbeat query will be sent every minute
#heartbeat = false
# File storage configuration
[storage]
# Storage type
#
# Can be "local" or "s3".
type = "s3"
# ## Local storage
# The directory to store all files under
path = "/config/storage"
# ## S3 Storage (set type to "s3" and uncomment below)
# The AWS region
region = "us-east-1"
# The name of the bucket
bucket = "attic"
# Custom S3 endpoint
#
# Set this if you are using an S3-compatible object storage (e.g., Minio).
endpoint = "https://minio.${SECRET_DOMAIN}:9000"
# Credentials
#
# If unset, the credentials are read from the `AWS_ACCESS_KEY_ID` and
# `AWS_SECRET_ACCESS_KEY` environment variables.
#[storage.credentials]
# access_key_id = ""
# secret_access_key = ""
# Data chunking
#
# Warning: If you change any of the values here, it will be
# difficult to reuse existing chunks for newly-uploaded NARs
# since the cutpoints will be different. As a result, the
# deduplication ratio will suffer for a while after the change.
[chunking]
# The minimum NAR size to trigger chunking
#
# If 0, chunking is disabled entirely for newly-uploaded NARs.
# If 1, all NARs are chunked.
nar-size-threshold = 65536 # chunk files that are 64 KiB or larger
# The preferred minimum size of a chunk, in bytes
min-size = 16384 # 16 KiB
# The preferred average size of a chunk, in bytes
avg-size = 65536 # 64 KiB
# The preferred maximum size of a chunk, in bytes
max-size = 262144 # 256 KiB
# Compression
[compression]
# Compression type
#
# Can be "none", "brotli", "zstd", or "xz"
type = "zstd"
# Compression level
#level = 8
# Garbage collection
[garbage-collection]
# The frequency to run garbage collection at
#
# By default it's 12 hours. You can use natural language
# to specify the interval, like "1 day".
#
# If zero, automatic garbage collection is disabled, but
# it can still be run manually with `atticd --mode garbage-collector-once`.
interval = "12 hours"
# Default retention period
#
# Zero (default) means time-based garbage-collection is
# disabled by default. You can enable it on a per-cache basis.
default-retention-period = "3 months"
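
The commented `token-hs256-secret-base64` setting above expects the Base64 encoding of random data. A minimal way to generate such a value, assuming `openssl` is available (the result then feeds the `ATTIC_SERVER_TOKEN_HS256_SECRET_BASE64` secret used below):

```sh
# Generate 64 random bytes and Base64-encode them on a single line
openssl rand -base64 64 | tr -d '\n'
```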

View File

@@ -0,0 +1,33 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: attic
namespace: default
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: attic-secret
creationPolicy: Owner
template:
engineVersion: v2
data:
# App
ATTIC_SERVER_TOKEN_HS256_SECRET_BASE64: "{{ .ATTIC_SERVER_TOKEN_HS256_SECRET_BASE64 }}"
ATTIC_SERVER_DATABASE_URL: "postgresql://{{ .POSTGRES_USER }}:{{ .POSTGRES_PASS }}@postgres-rw.default.svc.cluster.local:5432/attic"
AWS_ACCESS_KEY_ID: "{{ .ATTIC_AWS_ACCESS_KEY_ID }}"
AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
# Postgres Init
INIT_POSTGRES_DBNAME: attic
INIT_POSTGRES_HOST: postgres-rw.default.svc.cluster.local
INIT_POSTGRES_USER: "{{ .POSTGRES_USER }}"
INIT_POSTGRES_PASS: "{{ .POSTGRES_PASS }}"
INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}"
dataFrom:
- extract:
key: attic
- extract:
key: cloudnative-pg

View File

@@ -0,0 +1,22 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: attic-gatus-ep
namespace: default
labels:
gatus.io/enabled: "true"
data:
config.yaml: |
endpoints:
- name: attic
group: external
url: https://nix-cache.${SECRET_CLUSTER_DOMAIN}
interval: 1m
client:
dns-resolver: tcp://1.1.1.1:53
insecure: true
conditions:
- "[STATUS] == 200"
alerts:
- type: pushover
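
The check Gatus runs here can be reproduced by hand — a sketch, substituting your real domain for the `${SECRET_CLUSTER_DOMAIN}` placeholder:

```sh
# Expect "200", matching the "[STATUS] == 200" condition above
curl -sk -o /dev/null -w '%{http_code}\n' "https://nix-cache.example.tld"
```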

View File

@@ -0,0 +1,103 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app attic-apiserver
namespace: default
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
initContainers:
01-init-db:
image: ghcr.io/auricom/postgres-init:15.4
imagePullPolicy: IfNotPresent
envFrom: &envFrom
- secretRef:
name: &secret attic-secret
controller:
replicas: 2
strategy: RollingUpdate
annotations:
configmap.reloader.stakater.com/reload: &configMap attic-configmap
secret.reloader.stakater.com/reload: *secret
image:
repository: ghcr.io/zhaofengli/attic
tag: latest@sha256:06d9ca943cfe38ef954cbe2dd453dac0788f55661f84c31254a3a8044aa3100f
args: ["-f", "/config/server.toml", "--mode", "api-server" ]
envFrom: *envFrom
service:
main:
ports:
http:
port: &port 8080
probes:
liveness: &probes
enabled: true
custom: true
spec:
httpGet:
path: /
port: *port
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
readiness: *probes
startup:
enabled: false
ingress:
main:
enabled: true
ingressClassName: nginx
annotations:
# external-dns.home.arpa/enabled: "true"
hajimari.io/enable: "false"
hosts:
- host: &host nix-cache.${SECRET_CLUSTER_DOMAIN}
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
persistence:
config:
enabled: true
type: configMap
name: *configMap
subPath: server.toml
mountPath: /config/server.toml
readOnly: false
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app.kubernetes.io/name: *app
resources:
requests:
cpu: 50m
memory: 200Mi
limits:
memory: 1Gi
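
Once the API server is reachable behind the `nix-cache` ingress above, clients talk to it with the `attic` CLI. A rough sketch — the server alias, cache name, token, and store path are placeholders:

```sh
attic login homelab https://nix-cache.example.tld/ <admin-token>
attic cache create main                  # create a binary cache on the server
attic use main                           # configure Nix to use it as a substituter
attic push main /nix/store/<store-path>  # upload a store path
```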

View File

@@ -0,0 +1,15 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- ./externalsecret.yaml
- ./gatus.yaml
- ./helmrelease.yaml
configMapGenerator:
- name: attic-configmap
files:
- ./config/server.toml
generatorOptions:
disableNameSuffixHash: true

View File

@@ -0,0 +1,65 @@
# Attic
## S3 Configuration
1. Create `~/.mc/config.json`
```json
{
"version": "10",
"aliases": {
"minio": {
"url": "https://s3.<domain>",
"accessKey": "<access-key>",
"secretKey": "<secret-key>",
"api": "S3v4",
"path": "auto"
}
}
}
```
2. Create the attic user and password
```sh
mc admin user add minio attic <super-secret-password>
```
3. Create the attic bucket
```sh
mc mb minio/attic
```
4. Create `attic-user-policy.json`
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:ListBucket",
"s3:PutObject",
"s3:GetObject",
"s3:DeleteObject"
],
"Effect": "Allow",
"Resource": ["arn:aws:s3:::attic/*", "arn:aws:s3:::attic"],
"Sid": ""
}
]
}
```
5. Create the `attic-private` policy
```sh
mc admin policy create minio attic-private attic-user-policy.json
```
6. Associate the policy with the `attic` user
```sh
mc admin policy set minio attic-private user=attic
```
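7. Verify the user, policy, and bucket access (a quick sanity check; output formats vary by `mc` version)
```sh
mc admin user info minio attic
mc ls minio/attic
```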

View File

@@ -0,0 +1,73 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app attic-garbage-collector
namespace: default
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
controller:
replicas: 1
strategy: Recreate
annotations:
configmap.reloader.stakater.com/reload: &configMap attic-configmap
secret.reloader.stakater.com/reload: &secret attic-secret
image:
repository: ghcr.io/zhaofengli/attic
tag: latest@sha256:06d9ca943cfe38ef954cbe2dd453dac0788f55661f84c31254a3a8044aa3100f
args: ["-f", "/config/server.toml", "--mode", "garbage-collector" ]
envFrom:
- secretRef:
name: *secret
service:
main:
ports:
http:
port: &port 8080
probes:
liveness: &probe
enabled: false
readiness: *probe
startup: *probe
persistence:
config:
enabled: true
type: configMap
name: *configMap
subPath: server.toml
mountPath: /config/server.toml
readOnly: false
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app.kubernetes.io/name: *app
resources:
requests:
cpu: 50m
memory: 200Mi
limits:
memory: 1Gi

View File

@@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- ./helmrelease.yaml

View File

@@ -0,0 +1,41 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-attic-apiserver
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
dependsOn:
- name: cluster-apps-cloudnative-pg-cluster
- name: cluster-apps-external-secrets-stores
path: ./kubernetes/apps/default/attic/apiserver
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
interval: 30m
retryInterval: 1m
timeout: 3m
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-attic-garbage-collector
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
dependsOn:
- name: cluster-apps-attic-apiserver
path: ./kubernetes/apps/default/attic/garbage-collector
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
interval: 30m
retryInterval: 1m
timeout: 3m
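
With the garbage collector depending on the API-server Kustomization above, reconciliation order can be confirmed from the CLI — a sketch assuming the `flux` CLI is installed:

```sh
# Both Kustomizations should eventually report Ready=True, apiserver first
flux get kustomizations -n flux-system | grep attic
```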

View File

@@ -0,0 +1,125 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: crowdsec
namespace: crowdsec
spec:
interval: 30m
chart:
spec:
chart: crowdsec
version: 0.9.7
sourceRef:
kind: HelmRepository
name: crowdsec
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
container_runtime: containerd
image:
repository: crowdsecurity/crowdsec
tag: v1.5.2
lapi:
env:
# By default, disable the agent for local API pods
- name: DISABLE_AGENT
value: "true"
- name: ENROLL_KEY
valueFrom:
secretKeyRef:
name: crowdsec-config
key: enroll_key
- name: ENROLL_INSTANCE_NAME
value: "talos@cluster-0"
dashboard:
enabled: false
ingress:
enabled: false
annotations:
ingressClassName: nginx
host: &host "{{ .Release.Name }}.${SECRET_CLUSTER_DOMAIN}"
tls:
- hosts:
- *host
resources:
requests:
cpu: 150m
memory: 100M
limits:
memory: 100M
# -- Enable persistent volumes
persistentVolume:
# -- Persistent volume for data folder. Stores e.g. registered bouncer api keys
data:
enabled: true
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-filesystem
size: 1Gi
# -- Persistent volume for config folder. Stores e.g. online api credentials
config:
enabled: true
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-filesystem
size: 100Mi
metrics:
enabled: false
serviceMonitor:
enabled: false
strategy:
type: Recreate
agent:
# Specify each pod whose logs should be processed (pods present on the node)
acquisition:
# The namespace where the pod is located
- namespace: ingress-nginx
# The pod name
podName: ingress-nginx-controller-*
# As in the CrowdSec configuration, we need to specify the program name so the parser can match and parse the logs
program: nginx
# These are environment variables
env:
# As it's a test, we don't want to share signals with CrowdSec, so disable the Online API.
- name: DISABLE_ONLINE_API
value: "true"
# As we are running Nginx, we want to install the Nginx collection
- name: COLLECTIONS
value: "crowdsecurity/nginx crowdsecurity/linux crowdsecurity/base-http-scenarios crowdsecurity/http-cve crowdsecurity/pgsql crowdsecurity/sshd"
- name: PARSERS
value: "crowdsecurity/cri-logs"
- name: TZ
value: "${TIMEZONE}"
- name: DISABLE_ONLINE_API
value: "false"
resources:
limits:
memory: 100Mi
requests:
cpu: 150m
memory: 100Mi
# -- Enable persistent volumes
persistentVolume:
# -- Persistent volume for config folder. Stores local config (parsers, scenarios etc.)
config:
enabled: true
accessModes:
- ReadWriteMany
storageClassName: rook-ceph-filesystem
size: 100Mi
metrics:
enabled: true
serviceMonitor:
enabled: true
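
For poking at the local API once deployed — a sketch assuming the chart's default `crowdsec-lapi` Deployment name:

```sh
# List active ban decisions, then parser/scenario metrics
kubectl -n crowdsec exec deploy/crowdsec-lapi -- cscli decisions list
kubectl -n crowdsec exec deploy/crowdsec-lapi -- cscli metrics
```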

View File

@@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./helmrelease.yaml
- ./secret.sops.yaml

View File

@@ -0,0 +1,30 @@
# yamllint disable
# yaml-language-server: $schema=https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/v1.26.1-standalone-strict/secret-v1.json
apiVersion: v1
kind: Secret
metadata:
name: crowdsec-config
namespace: crowdsec
type: Opaque
stringData:
enroll_key: ENC[AES256_GCM,data:ret34T4Bcdua76M8s19bLeNTUWweVqPg5Q==,iv:q9sXlIUAkRi4Gu1+uhVWW5WCDuUCn6ZAV+UjtK1hkAQ=,tag:zXCtO2dpokZ57/NTthItig==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBoN0VJaHVYcXNDZDlZUGRn
YUViZDU0TCtmbzkycUpiZUVDbkluSzdSM2hVClpMRDdKREJBZEpEYUIxUGlIem9Q
Z08rVUVLUFhWNGdncElCR2hFVFNJUEUKLS0tIDZzcDVyb0lMTzRrNStBRU1KN2wy
OU81anNCMk13bXNXRVM3ZWcxTjd6SUkKd5FvLfeXe4p7j5eryl9ZuVh6oT920yiy
hsaI1Cwm2WH55lR++P1jtIyTo+lOL5M+IZUeyC7LXBpMp2UBNbllcw==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2022-12-23T02:14:45Z"
mac: ENC[AES256_GCM,data:Y5ZzEfUbfy4hs6CpxZOW9/jSzp/lRaL28vB81BHFnUCDH9hHiCLMhb64SfJdCOgxP1HjKRbsQgSLdQD0W1Q7udtsXFVFg+LnND++ukWaXESj/USb25o9RT8Kn94RePLzeDdOkAR9hYS+YViKjdvdck2oKwr1cy8slcgHDXi83LI=,iv:/iBS+i43BaSOBZGUeNxUnqn4sgX12GozkQdUuLLsvMM=,tag:JLwY15QfNLWRJax2nKdcbw==,type:str]
pgp: []
encrypted_regex: ^(data|stringData)$
version: 3.7.3
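
The secret above is SOPS-encrypted with age, so editing it locally requires the matching private key. A sketch, assuming the key sits at the default path:

```sh
export SOPS_AGE_KEY_FILE="$HOME/.config/sops/age/keys.txt"
sops --decrypt secret.sops.yaml   # or `sops secret.sops.yaml` to edit in place
```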

View File

@@ -0,0 +1,87 @@
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: &app gitea-external-backup
namespace: default
spec:
schedule: "@daily"
jobTemplate:
spec:
template:
metadata:
name: *app
spec:
containers:
- name: *app
image: ghcr.io/auricom/kubectl:1.28.2@sha256:ac6ffc31b4632cd0c390d2e91a91916b66aee1363d406410b69bcf4f556a1038
imagePullPolicy: IfNotPresent
command:
- "/bin/bash"
- "-c"
- |
#!/bin/bash
set -o nounset
set -o errexit
mkdir -p ~/.ssh
cp /opt/id_rsa ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
ssh -o StrictHostKeyChecking=no homelab@${LOCAL_LAN_TRUENAS} << 'EOF'
set -o nounset
set -o errexit
WORK_DIR="/mnt/storage/backups/apps/gitea"
ORGANISATIONS=$(curl --silent --location --request GET "https://gitea.${SECRET_CLUSTER_DOMAIN}/api/v1/orgs" --header "Authorization: Bearer ${SECRET_GITEA_API_TOKEN}" | jq --raw-output .[].username)
ORGANISATIONS+=" auricom"
for org in $ORGANISATIONS
do
mkdir -p $WORK_DIR/$org
if [ $org == "auricom" ]; then
keyword="users"
else
keyword="orgs"
fi
REPOSITORIES=$(curl --silent --location --request GET "https://gitea.${SECRET_CLUSTER_DOMAIN}/api/v1/$keyword/$org/repos?limit=1000" --header "Authorization: Bearer ${SECRET_GITEA_API_TOKEN}" | jq --raw-output .[].name)
for repo in $REPOSITORIES
do
if [ -d "$WORK_DIR/$org/$repo" ]; then
echo "INFO: pull $org/$repo..."
cd $WORK_DIR/$org/$repo
git remote show origin -n | grep -c main &> /dev/null && MAIN_BRANCH="main" || MAIN_BRANCH="master"
git fetch --all
test $? -ne 0 && exit 1
git reset --hard origin/$MAIN_BRANCH
test $? -ne 0 && exit 1
git pull origin $MAIN_BRANCH
test $? -ne 0 && exit 1
echo "INFO: clean $org/$repo..."
git fetch --prune
for branch in $(git branch -vv | grep ': gone]' | awk '{print $1}')
do
git branch -D $branch
done
else
echo "INFO: clone $org/$repo..."
cd $WORK_DIR/$org
git clone git@gitea.${SECRET_DOMAIN}:$org/$repo.git
test $? -ne 0 && exit 1
fi
done
done
echo "INFO: Backup done"
EOF
volumeMounts:
- name: secret
mountPath: /opt/id_rsa
subPath: GITEA_DEPLOYMENT_PRIVATE_KEY
volumes:
- name: secret
secret:
secretName: gitea-secret
restartPolicy: Never

View File

@@ -0,0 +1,36 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: gitea
namespace: default
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: gitea-secret
creationPolicy: Owner
template:
engineVersion: v2
data:
# App
GITEA_ADMIN_EMAIL: "{{ .GITEA_ADMIN_EMAIL }}"
GITEA_ADMIN_PASSWORD: "{{ .GITEA_ADMIN_PASSWORD }}"
GITEA_AWS_S3_ACCESS_KEY: "{{ .GITEA_AWS_S3_ACCESS_KEY }}"
GITEA_AWS_S3_SECRET_KEY: "{{ .GITEA_AWS_S3_SECRET_KEY }}"
GITEA_DEPLOYMENT_PRIVATE_KEY: "{{ .GITEA_DEPLOYMENT_PRIVATE_KEY }}"
POSTGRES_USERNAME: &dbUser "{{ .POSTGRES_USERNAME }}"
POSTGRES_PASSWORD: &dbPass "{{ .POSTGRES_PASSWORD }}"
# Postgres Init
INIT_POSTGRES_DBNAME: gitea
INIT_POSTGRES_HOST: postgres-rw.default.svc.cluster.local
INIT_POSTGRES_USER: *dbUser
INIT_POSTGRES_PASS: *dbPass
INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}"
dataFrom:
- extract:
key: cloudnative-pg
- extract:
key: gitea

View File

@@ -0,0 +1,195 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: gitea
namespace: default
spec:
interval: 30m
chart:
spec:
chart: gitea
version: 9.2.0
sourceRef:
kind: HelmRepository
name: gitea
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
image:
repository: gitea/gitea
tag: 1.20.3
rootless: true
containerSecurityContext:
capabilities:
add: ["SYS_CHROOT"]
gitea:
admin:
username: auricom
config:
APP_NAME: "Gitea Homelab"
cron.resync_all_sshkeys:
ENABLED: true
RUN_AT_START: true
database:
DB_TYPE: postgres
HOST: postgres-rw.default.svc.cluster.local:5432
NAME: gitea
SCHEMA: public
SSL_MODE: disable
server:
SSH_PORT: 22
SSH_LISTEN_PORT: 30322
SSH_DOMAIN: gitea.${SECRET_DOMAIN}
ROOT_URL: https://gitea.${SECRET_CLUSTER_DOMAIN}
repository:
DEFAULT_BRANCH: main
DEFAULT_PRIVATE: true
admin:
DISABLE_REGULAR_ORG_CREATION: true
security:
PASSWORD_COMPLEXITY: "lower,upper"
MIN_PASSWORD_LENGTH: 12
service:
DISABLE_REGISTRATION: true
REQUIRE_SIGNIN_VIEW: true
cron:
ENABLED: true
attachment:
STORAGE_TYPE: minio
MINIO_ENDPOINT: truenas.${SECRET_DOMAIN}:51515
MINIO_BUCKET: gitea
MINIO_USE_SSL: true
storage:
STORAGE_TYPE: minio
MINIO_ENDPOINT: truenas.${SECRET_DOMAIN}:51515
MINIO_BUCKET: gitea
MINIO_USE_SSL: true
mailer:
ENABLED: true
MAILER_TYPE: smtp
SMTP_ADDR: smtp-relay.default
SMTP_PORT: 2525
FROM: "Gitea <gitea@${SECRET_DOMAIN}>"
webhook:
ALLOWED_HOST_LIST: drone.default.svc
cache:
ADAPTER: redis
HOST: redis://redis-master.default.svc.cluster.local:6379
session:
PROVIDER: redis
PROVIDER_CONFIG: redis://redis-master.default.svc.cluster.local:6379
# openid:
# ENABLE_OPENID_SIGNIN: false
# ENABLE_OPENID_SIGNUP: true
# WHITELISTED_URIS: "auth.${SECRET_CLUSTER_DOMAIN}"
# oauth:
# - name: authelia
# provider: openidConnect
# key: gitea
# secret: "${SECRET_GITEA_OAUTH_CLIENT_SECRET}"
# autoDiscoverUrl: "https://auth.${SECRET_CLUSTER_DOMAIN}/.well-known/openid-configuration"
# groupClaimName: groups
# adminGroup: admins
# restrictedGroup: people
metrics:
enabled: true
serviceMonitor:
enabled: true
postgresql:
enabled: false
postgresql-ha:
enabled: false
memcached:
enabled: false
redis-cluster:
enabled: false
persistence:
enabled: true
existingClaim: "gitea-config"
service:
ssh:
type: LoadBalancer
port: 22
loadBalancerIP: ${CLUSTER_LB_GITEA}
ingress:
enabled: true
className: nginx
annotations:
hajimari.io/icon: mdi:code-json
hosts:
- host: "gitea.${SECRET_CLUSTER_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- "gitea.${SECRET_CLUSTER_DOMAIN}"
resources:
requests:
cpu: 15m
memory: 226M
limits:
cpu: 500m
memory: 1Gi
valuesFrom:
- targetPath: gitea.admin.email
kind: Secret
name: gitea-secret
valuesKey: GITEA_ADMIN_EMAIL
- targetPath: gitea.admin.password
kind: Secret
name: gitea-secret
valuesKey: GITEA_ADMIN_PASSWORD
- targetPath: gitea.config.attachment.MINIO_ACCESS_KEY_ID
kind: Secret
name: gitea-secret
valuesKey: GITEA_AWS_S3_ACCESS_KEY
- targetPath: gitea.config.attachment.MINIO_SECRET_ACCESS_KEY
kind: Secret
name: gitea-secret
valuesKey: GITEA_AWS_S3_SECRET_KEY
- targetPath: gitea.config.database.PASSWD
kind: Secret
name: gitea-secret
valuesKey: POSTGRES_PASSWORD
- targetPath: gitea.config.database.USER
kind: Secret
name: gitea-secret
valuesKey: POSTGRES_USERNAME
- targetPath: gitea.config.storage.MINIO_ACCESS_KEY_ID
kind: Secret
name: gitea-secret
valuesKey: GITEA_AWS_S3_ACCESS_KEY
- targetPath: gitea.config.storage.MINIO_SECRET_ACCESS_KEY
kind: Secret
name: gitea-secret
valuesKey: GITEA_AWS_S3_SECRET_KEY
postRenderers:
- kustomize:
patches:
- kind: Deployment
apiVersion: apps/v1
metadata:
name: gitea
spec:
template:
spec:
initContainers:
- name: 01-init-db
image: ghcr.io/auricom/postgres-init:15.4
imagePullPolicy: IfNotPresent
envFrom:
- secretRef:
name: gitea-secret

View File

@@ -0,0 +1,10 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- ./externalbackup.yaml
- ./externalsecret.yaml
- ./helmrelease.yaml
- ./volsync.yaml
- ./volume.yaml

View File

@@ -0,0 +1,49 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: gitea-restic
namespace: default
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: gitea-restic-secret
creationPolicy: Owner
template:
engineVersion: v2
data:
RESTIC_REPOSITORY: '{{ .REPOSITORY_TEMPLATE }}/gitea'
RESTIC_PASSWORD: '{{ .RESTIC_PASSWORD }}'
AWS_ACCESS_KEY_ID: '{{ .AWS_ACCESS_KEY_ID }}'
AWS_SECRET_ACCESS_KEY: '{{ .AWS_SECRET_ACCESS_KEY }}'
dataFrom:
- extract:
key: volsync-restic-template
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/volsync.backube/replicationsource_v1alpha1.json
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: gitea
namespace: default
spec:
sourcePVC: gitea-config
trigger:
schedule: "0 7 * * *"
restic:
copyMethod: Snapshot
pruneIntervalDays: 7
repository: gitea-restic-secret
cacheCapacity: 10Gi
volumeSnapshotClassName: csi-ceph-blockpool
storageClassName: rook-ceph-block
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
retain:
daily: 7
within: 3d
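
To inspect what VolSync's restic mover has written, the same credentials from `gitea-restic-secret` can drive the `restic` CLI directly — a sketch with placeholder values:

```sh
export RESTIC_REPOSITORY="s3:https://s3.example.tld/volsync/gitea"
export RESTIC_PASSWORD="<restic-password>"
export AWS_ACCESS_KEY_ID="<access-key>" AWS_SECRET_ACCESS_KEY="<secret-key>"
restic snapshots   # lists the daily snapshots retained by the policy above
```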

View File

@@ -0,0 +1,17 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gitea-config
namespace: default
labels:
app.kubernetes.io/name: &name gitea
app.kubernetes.io/instance: *name
snapshot.home.arpa/enabled: "true"
spec:
accessModes:
- ReadWriteOnce
storageClassName: rook-ceph-block
resources:
requests:
storage: 10Gi

View File

@@ -0,0 +1,22 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-gitea
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
path: ./kubernetes/apps/default/gitea/app
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
dependsOn:
- name: cluster-apps-cloudnative-pg-cluster
- name: cluster-apps-external-secrets-stores
- name: cluster-apps-volsync-app
interval: 30m
retryInterval: 1m
timeout: 3m

View File

@@ -0,0 +1,65 @@
# Gitea
## S3 Configuration
1. Create `~/.mc/config.json`
```json
{
"version": "10",
"aliases": {
"minio": {
"url": "https://s3.<domain>",
"accessKey": "<access-key>",
"secretKey": "<secret-key>",
"api": "S3v4",
"path": "auto"
}
}
}
```
2. Create the gitea user and password
```sh
mc admin user add minio gitea <super-secret-password>
```
3. Create the gitea bucket
```sh
mc mb minio/gitea
```
4. Create `gitea-user-policy.json`
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:ListBucket",
"s3:PutObject",
"s3:GetObject",
"s3:DeleteObject"
],
"Effect": "Allow",
"Resource": ["arn:aws:s3:::gitea/*", "arn:aws:s3:::gitea"],
"Sid": ""
}
]
}
```
5. Create the `gitea-private` policy
```sh
mc admin policy add minio gitea-private gitea-user-policy.json
```
6. Associate the policy with the `gitea` user
```sh
mc admin policy set minio gitea-private user=gitea
```
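7. Verify the credentials work end to end by logging in as the `gitea` user (the alias name is arbitrary)
```sh
mc alias set gitea-user https://s3.<domain> <access-key> <secret-key>
mc ls gitea-user/gitea
```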

View File

@@ -0,0 +1,74 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app local-path-provisioner
namespace: kube-system
spec:
interval: 30m
chart:
spec:
chart: ./deploy/chart/local-path-provisioner
sourceRef:
kind: GitRepository
name: local-path-provisioner
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
replicaCount: 2
helperImage:
repository: public.ecr.aws/docker/library/busybox
tag: latest
storageClass:
defaultClass: false
nodePathMap:
- node: DEFAULT_PATH_FOR_NON_LISTED_NODES
paths: ["/var/lib/kubernetes/storage"]
# Note: Do not enable Flux variable substitution on this HelmRelease
configmap:
setup: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
chmod 701 ${absolutePath}/..
teardown: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
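
For reference, the provisioner's helper pod invokes these scripts with the flags parsed by `getopts` above — roughly like this (path and size are illustrative):

```sh
sh setup    -p /var/lib/kubernetes/storage/pvc-<uid>_default_<pvc-name> -s 10737418240 -m Filesystem
sh teardown -p /var/lib/kubernetes/storage/pvc-<uid>_default_<pvc-name> -s 10737418240 -m Filesystem
```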

View File

@@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kube-system
resources:
- ./helmrelease.yaml

View File

@@ -0,0 +1,16 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-local-path-provisioner
namespace: flux-system
spec:
path: ./kubernetes/apps/kube-system/local-path-provisioner/app
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
interval: 30m
retryInterval: 1m
timeout: 3m

View File

@@ -0,0 +1,5 @@
addresses:
- address: "0xd14a28667d263efda2033ceb3b466399723c9c9c"
memo: "@Defi_Maestro"
- address: "0xc880e1befe692db8b1c71357130f25630239e6fc"
memo: "@Defi_Maestro2"

View File

@@ -0,0 +1,100 @@
import requests
import psycopg2
import yaml
import os
import json
# Load configuration
with open("config.yaml", "r") as f:
config = yaml.safe_load(f)
# Pushover credentials
PUSHOVER_API_URL = "https://api.pushover.net/1/messages.json"
PUSHOVER_API_TOKEN = os.environ["PUSHOVER_API_TOKEN"]
PUSHOVER_USER_KEY = os.environ["PUSHOVER_USER_KEY"]
# PostgreSQL connection
connection = psycopg2.connect(
dbname=os.environ["POSTGRES_DB"],
user=os.environ["POSTGRES_USER"],
password=os.environ["POSTGRES_PASS"],
host=os.environ["POSTGRES_HOST"],
port=os.environ.get("POSTGRES_PORT", "5432"),
)
cursor = connection.cursor()
# Create the database structure
cursor.execute("""
CREATE TABLE IF NOT EXISTS ankr_queries_transactions (
id SERIAL PRIMARY KEY,
address VARCHAR NOT NULL,
tx_hash VARCHAR NOT NULL,
blockchain VARCHAR NOT NULL,
timestamp VARCHAR NOT NULL
);
""")
connection.commit()
# Send notification using Pushover
def send_pushover_notification(title, message):
payload = {
'token': PUSHOVER_API_TOKEN,
'user': PUSHOVER_USER_KEY,
'html': 1,
'title': title,
'message': message
}
response = requests.post(PUSHOVER_API_URL, data=payload)
response.raise_for_status()
# Process new transactions
def process_new_transactions(address, memo):
url = "https://rpc.ankr.com/multichain/?ankr_getTransactionsByAddress"
headers = {"Content-Type": "application/json"}
payload = {
"id": 1,
"jsonrpc": "2.0",
"method": "ankr_getTransactionsByAddress",
"params": {
"address": f"{address}",
"descOrder": True
}
}
response = requests.post(url, headers=headers, data=json.dumps(payload))
if response.status_code != 200:
print(f"Failed to fetch transactions: {response.text}")
return
for tx in response.json()["result"]["transactions"]:
tx_hash = tx['hash']
timestamp = tx['timestamp']
blockchain = tx['blockchain']
cursor.execute("""
SELECT COUNT(*) FROM ankr_queries_transactions WHERE address=%s AND tx_hash=%s AND blockchain=%s;
""", (address, tx_hash, blockchain))
exists = cursor.fetchone()[0]
if not exists:
cursor.execute("""
INSERT INTO ankr_queries_transactions (address, tx_hash, blockchain, timestamp)
VALUES (%s, %s, %s, %s);
""", (address, tx_hash, blockchain, timestamp))
connection.commit()
send_pushover_notification(
f"New Transaction: {memo}",
f"Transaction Hash: <a href=\"http://www.debank.com/profile/{address}/history\">{tx_hash}</a><br>Blockchain: {blockchain}<br>Timestamp: {timestamp}"
)
# Main function
def main():
for entry in config["addresses"]:
address = entry["address"]
memo = entry["memo"]
process_new_transactions(address, memo)
if __name__ == "__main__":
main()
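
The script can also be exercised outside the cluster — a sketch, run from a directory containing `config.yaml`, with placeholder credentials:

```sh
export POSTGRES_DB=pushover-notifier POSTGRES_HOST=localhost \
       POSTGRES_USER=postgres POSTGRES_PASS=<password>
export PUSHOVER_API_TOKEN=<token> PUSHOVER_USER_KEY=<key>
python3 script.py
```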

View File

@@ -0,0 +1,69 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app pushover-notifier-ankr-queries
namespace: default
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
controller:
type: cronjob
cronjob:
concurrencyPolicy: Forbid
schedule: "*/30 * * * *"
initContainers:
01-init-db:
image: ghcr.io/auricom/postgres-init:15.4
imagePullPolicy: IfNotPresent
envFrom: &envFrom
- secretRef:
name: pushover-notifier-secret
image:
repository: ghcr.io/auricom/python
tag: 1.0.0@sha256:d22581793a6803cabcb283ec1f224fe2bdd98efb5d837ad14c52b8d99c0d8c1e
command:
- python3
- /app/script.py
service:
main:
enabled: false
envFrom: *envFrom
resources:
requests:
cpu: 50m
memory: 250Mi
limits:
memory: 250Mi
persistence:
config:
enabled: true
type: configMap
name: pushover-notifier-ankr-queries-configmap
mountPath: /app/config.yaml
subPath: config.yaml
script:
enabled: true
type: configMap
name: pushover-notifier-ankr-queries-configmap
mountPath: /app/script.py
subPath: script.py

View File

@@ -0,0 +1,14 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- ./helmrelease.yaml
configMapGenerator:
- name: pushover-notifier-ankr-queries-configmap
files:
- ./config/config.yaml
- ./config/script.py
generatorOptions:
disableNameSuffixHash: true

View File

@@ -0,0 +1,33 @@
# yamllint disable
apiVersion: v1
kind: Secret
metadata:
name: pushover-notifier-ankr-queries-secret
namespace: default
type: Opaque
stringData:
POSTGRES_DB: ENC[AES256_GCM,data:QTTAnp99RU4DhC3mn9IUaTw=,iv:VP6oHP3N9mG9TboqQ9jbIUlK+CoVqxWXFIus692bw/I=,tag:Y0CAs3yH4OM+rZlmqYJTfg==,type:str]
POSTGRES_USER: ENC[AES256_GCM,data:wtl7bwSp2EMTwUsA8MzhTXQ=,iv:RccrE8s7XNtNwF2z59BD36GEPmbEw6n6xPVPuS+/6oE=,tag:2xaXDK1cR3KXkljdQtHVNQ==,type:str]
POSTGRES_PASS: ENC[AES256_GCM,data:HifiMzAawK0mls6hrE58j2c23lc=,iv:O59tbU+JN4LAfuhLo+4y+AJx7ZrTPWPxPX9QtGLFvYQ=,tag:xtdaVNj6D0Wr/Ven+p8tJg==,type:str]
PUSHOVER_API_TOKEN: ENC[AES256_GCM,data:waPntuH+JjGBr2t9I4U9D/llZC9KW/QyyMUu3EHH,iv:NU6/tbrYRoUSME5ecachU0LDNsz7W31DkEw1S8fSIqw=,tag:YbmZbOOn81+kkGb4Sf2Q2w==,type:str]
PUSHOVER_USER_KEY: ENC[AES256_GCM,data:zgoGVo8k7xjuT0+W5AyAkGtJpmTkplW3wmAWqZrY,iv:8ZYZT1I7EOK2mfvjSY+4RfRHQeczYmxihfDHcjRpUSI=,tag:Vkq+ny1eVmAOHmBiAutuNg==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBJaU16anJNV2pBZmxPR3h2
bWREUnpjcTFvd05ZQ2E4VVBDdm1FL2k4WEYwCkdQSStTNWtpdjNkUW51WS9MekdC
VkpTUUFjSjY2a1JMOUtqOVh5M0JRR2sKLS0tIDRmcWpJSEVvaUp4U1lsaTZYZGNw
OGVKWU0zNUZJSFh4aFJxQWFsYm1VeFkKaDeI/hl7z0Qh8t5W39Kxu9ert1dt4xo+
LX+MjpVqxiZNcfwROD4bkWeQSN+VsxoGOOyj4L15BlggNnlg+L7Hww==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2023-03-18T15:36:57Z"
mac: ENC[AES256_GCM,data:L1q6+ngZzlrpCreFyBaOCik7v3JoTrNJekv2gxsIynaMQuFTtHVGx8/+m2UvEmt3Upc8tbN6N3JYIxoske91EI2mEuv3DEJPBmHcWtuQ/eXyd5E0kowqobasdnTJHGSo7ym2I0BsbYM4v4ZJj83Zm9fUigjRP874N/QCbs829/A=,iv:xO/iVXiWzbATJNUvyOLkQMt++rK837n+iygS9aWBKrE=,tag:eLMaq/VvvKM65JRNlxtEng==,type:str]
pgp: []
encrypted_regex: ^(data|stringData)$
version: 3.7.3

View File

@@ -0,0 +1,37 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: pushover-notifier
namespace: default
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: pushover-notifier-secret
creationPolicy: Owner
template:
engineVersion: v2
data:
# App
POSTGRES_DB: &dbName pushover-notifier
POSTGRES_HOST: &dbHost postgres-rw.default.svc.cluster.local
POSTGRES_USER: &dbUser "{{ .POSTGRES_USER }}"
POSTGRES_PASS: &dbPass "{{ .POSTGRES_PASS }}"
PUSHOVER_API_TOKEN: "{{ .PUSHOVER_API_TOKEN }}"
PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}"
# Postgres Init
INIT_POSTGRES_DBNAME: *dbName
INIT_POSTGRES_HOST: *dbHost
INIT_POSTGRES_USER: *dbUser
INIT_POSTGRES_PASS: *dbPass
INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}"
dataFrom:
- extract:
key: cloudnative-pg
- extract:
key: pushover-notifier
- extract:
key: pushover

View File

@@ -0,0 +1,8 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- ./ankr-queries
- ./externalsecret.yaml

View File

@@ -0,0 +1,21 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-pushover-notifier
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
dependsOn:
- name: cluster-apps-cloudnative-pg-cluster
- name: cluster-apps-external-secrets-stores
path: ./kubernetes/apps/default/pushover-notifier/app
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
interval: 30m
retryInterval: 1m
timeout: 3m

View File

@@ -0,0 +1,37 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: semaphore
namespace: default
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: semaphore-secret
creationPolicy: Owner
template:
data:
# Ansible Semaphore
SEMAPHORE_DB_USER: &dbUser "{{ .POSTGRES_USER }}"
SEMAPHORE_DB_PASS: &dbPass "{{ .POSTGRES_PASS }}"
SEMAPHORE_DB_HOST: &dbHost postgres-rw.default.svc.cluster.local
SEMAPHORE_DB_PORT: "5432"
SEMAPHORE_DB: &dbName semaphore
SEMAPHORE_ADMIN: "{{ .username }}"
SEMAPHORE_ADMIN_PASSWORD: "{{ .password }}"
SEMAPHORE_ADMIN_NAME: "{{ .SEMAPHORE_ADMIN_NAME }}"
SEMAPHORE_ACCESS_KEY_ENCRYPTION: "{{ .SEMAPHORE_ACCESS_KEY_ENCRYPTION }}"
# Postgres Init
INIT_POSTGRES_DBNAME: *dbName
INIT_POSTGRES_HOST: *dbHost
INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}"
INIT_POSTGRES_USER: *dbUser
INIT_POSTGRES_PASS: *dbPass
dataFrom:
- extract:
key: cloudnative-pg
- extract:
key: semaphore

View File

@@ -0,0 +1,22 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: semaphore-gatus-ep
namespace: default
labels:
gatus.io/enabled: "true"
data:
config.yaml: |
endpoints:
- name: semaphore
group: internal
url: https://semaphore.${SECRET_CLUSTER_DOMAIN}/auth/login
interval: 1m
client:
insecure: true
conditions:
- "[STATUS] == 200"
alerts:
- type: pushover

View File

@@ -0,0 +1,73 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: semaphore
namespace: default
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
initContainers:
01-init-db:
image: ghcr.io/auricom/postgres-init:15.4
imagePullPolicy: IfNotPresent
envFrom: &envFrom
- secretRef:
name: semaphore-secret
controller:
annotations:
reloader.stakater.com/auto: "true"
image:
repository: ghcr.io/onedr0p/semaphore
tag: v2.9.4
env:
SEMAPHORE_DB_DIALECT: postgres
SEMAPHORE_LDAP_ACTIVATED: "no"
SEMAPHORE_PLAYBOOK_PATH: /tmp/semaphore/
SEMAPHORE_ADMIN_EMAIL: "${SECRET_CLUSTER_DOMAIN_EMAIL}"
envFrom: *envFrom
service:
main:
ports:
http:
port: 3000
ingress:
main:
enabled: true
ingressClassName: nginx
annotations:
hajimari.io/icon: mdi:ansible
hosts:
- host: &host "{{ .Release.Name }}.kube.${SECRET_DOMAIN}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
resources:
requests:
cpu: 10m
memory: 50Mi
limits:
memory: 1000Mi

View File

@@ -0,0 +1,9 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- ./externalsecret.yaml
- ./gatus.yaml
- ./helmrelease.yaml

View File

@@ -0,0 +1,21 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-semaphore
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
dependsOn:
- name: cluster-apps-external-secrets-stores
- name: cluster-apps-cloudnative-pg-app
path: ./kubernetes/apps/default/semaphore/app
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
interval: 30m
retryInterval: 1m
timeout: 3m

View File

@@ -0,0 +1,38 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app smartctl-exporter
namespace: default
spec:
interval: 30m
chart:
spec:
chart: prometheus-smartctl-exporter
version: 0.6.0
sourceRef:
kind: HelmRepository
name: prometheus-community
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
fullnameOverride: *app
config:
devices:
- /dev/sda
- /dev/nvme0n1
serviceMonitor:
enabled: true
prometheusRules:
enabled: false

View File

@@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
resources:
- ./helmrelease.yaml

View File

@@ -0,0 +1,18 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-smartctl-exporter
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
path: ./kubernetes/apps/monitoring/smartctl-exporter/app
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
interval: 30m
retryInterval: 1m
timeout: 3m

View File

@@ -0,0 +1,9 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- ./trivy-operator/ks.yaml

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: trivy-system
labels:
kustomize.toolkit.fluxcd.io/prune: disabled

View File

@@ -0,0 +1,41 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: trivy-operator
namespace: trivy-system
spec:
interval: 30m
chart:
spec:
chart: trivy-operator
version: 0.18.1
sourceRef:
kind: HelmRepository
name: aqua
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
crds: CreateReplace
remediation:
retries: 3
upgrade:
cleanupOnFail: true
crds: CreateReplace
remediation:
retries: 3
uninstall:
keepHistory: false
values:
excludeNamespaces: "{{ .Release.Namespace }}"
operator:
replicas: 3
scanJobsConcurrentLimit: 3
vulnerabilityScannerScanOnlyCurrentRevisions: true
configAuditScannerScanOnlyCurrentRevisions: true
trivy:
ignoreUnfixed: true
serviceMonitor:
enabled: true
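
Once the operator has scanned the current workload revisions, its findings surface as CRDs that can be listed directly:

```sh
kubectl get vulnerabilityreports -A
kubectl get configauditreports -A
```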

View File

@@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: trivy-system
resources:
- ./helmrelease.yaml

View File

@@ -0,0 +1,18 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-trivy-operator
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
path: ./kubernetes/apps/trivy-system/trivy-operator/app
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
interval: 30m
retryInterval: 1m
timeout: 3m

View File

@@ -0,0 +1,84 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app vector-agent
namespace: monitoring
spec:
interval: 30m
chart:
spec:
chart: vector
version: 0.26.0
sourceRef:
kind: HelmRepository
name: vector
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
image:
repository: timberio/vector
tag: 0.33.0-debian
role: "Agent"
podAnnotations:
configmap.reloader.stakater.com/reload: vector-agent
customConfig:
data_dir: /vector-data-dir
api:
enabled: false
# Sources
sources:
kubernetes_logs:
type: kubernetes_logs
talos_kernel_logs:
type: socket
mode: udp
address: 127.0.0.1:12000
talos_service_logs:
type: socket
mode: udp
address: 127.0.0.1:12001
# Sinks
sinks:
kubernetes_sink:
type: vector
inputs:
- kubernetes_logs
address: "vector-aggregator.monitoring:6000"
version: "2"
talos_kernel_sink:
type: vector
inputs:
- talos_kernel_logs
address: "vector-aggregator.monitoring:6050"
version: "2"
talos_service_sink:
type: vector
inputs:
- talos_service_logs
address: "vector-aggregator.monitoring:6051"
version: "2"
podMonitor:
enabled: true
resources:
requests:
cpu: 23m
memory: 249M
limits:
memory: 918M
service:
enabled: false
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule

View File

@@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
resources:
- ./helmrelease.yaml

View File

@@ -0,0 +1,160 @@
data_dir: /vector-data-dir
api:
enabled: true
address: 0.0.0.0:8686
enrichment_tables:
geoip_table:
type: geoip
path: /usr/share/GeoIP/GeoLite2-City.mmdb
# Sources
sources:
kubernetes_source:
address: 0.0.0.0:6000
type: vector
version: "2"
opnsense_logs:
address: 0.0.0.0:6001
type: vector
version: "2"
journald_source:
type: vector
address: 0.0.0.0:6002
version: "2"
vector_metrics:
type: internal_metrics
talos_kernel_logs:
address: 0.0.0.0:6050
type: socket
mode: udp
max_length: 102400
decoding:
codec: json
host_key: __host
talos_service_logs:
address: 0.0.0.0:6051
type: socket
mode: udp
max_length: 102400
decoding:
codec: json
host_key: __host
# Transformations
transforms:
talos_kernel_logs_xform:
type: remap
inputs:
- talos_kernel_logs
source: |-
.__host = replace!(.__host, "192.168.8.101", "talos-node-1")
.__host = replace(.__host, "192.168.8.102", "talos-node-2")
.__host = replace(.__host, "192.168.8.103", "talos-node-3")
.__host = replace(.__host, "192.168.8.104", "talos-node-4")
talos_service_logs_xform:
type: remap
inputs:
- talos_service_logs
source: |-
.__host = replace!(.__host, "192.168.8.101", "talos-node-1")
.__host = replace(.__host, "192.168.8.102", "talos-node-2")
.__host = replace(.__host, "192.168.8.103", "talos-node-3")
.__host = replace(.__host, "192.168.8.104", "talos-node-4")
kubernetes_remap:
type: remap
inputs:
- kubernetes_source
source: |
# Standardize 'app' index
.custom_app_name = .pod_labels."app.kubernetes.io/name" || .pod_labels.app || .pod_labels."k8s-app" || "unknown"
# Sinks
sinks:
loki_kubernetes:
type: loki
inputs:
- kubernetes_source
endpoint: http://loki-gateway.monitoring.svc.cluster.local:80
encoding:
codec: json
batch:
max_bytes: 2049000
out_of_order_action: rewrite_timestamp
remove_label_fields: true
remove_timestamp: true
labels:
k8s_app: '{{ custom_app_name }}'
k8s_container: '{{ kubernetes.container_name }}'
k8s_filename: '{{ kubernetes.file }}'
k8s_instance: '{{ kubernetes.pod_labels."app.kubernetes.io/instance" }}'
k8s_namespace: '{{ kubernetes.pod_namespace }}'
k8s_node: '{{ kubernetes.pod_node_name }}'
k8s_pod: '{{ kubernetes.pod_name }}'
loki_opnsense:
type: loki
inputs:
- opnsense_logs
endpoint: http://loki-gateway.monitoring.svc.cluster.local:80
encoding:
codec: json
batch:
max_bytes: 400000
out_of_order_action: rewrite_timestamp
labels:
hostname: '{{ host }}'
syslog_identifier: '{{ SYSLOG_IDENTIFIER }}'
loki_journal:
type: loki
inputs:
- journald_source
endpoint: http://loki-gateway.monitoring.svc.cluster.local:80
encoding:
codec: json
batch:
max_bytes: 2049000
out_of_order_action: accept
remove_label_fields: true
remove_timestamp: true
labels:
hostname: '{{ host }}'
talos_kernel:
type: loki
inputs:
- talos_kernel_logs_xform
endpoint: http://loki-gateway.monitoring.svc.cluster.local:80
encoding:
codec: json
except_fields:
- __host
batch:
max_bytes: 1048576
out_of_order_action: rewrite_timestamp
labels:
hostname: '{{ __host }}'
service: '{{ facility }}'
talos_service:
type: loki
inputs:
- talos_service_logs_xform
endpoint: http://loki-gateway.monitoring.svc.cluster.local:80
encoding:
codec: json
except_fields:
- __host
batch:
max_bytes: 524288
out_of_order_action: rewrite_timestamp
labels:
hostname: '{{ __host }}'
service: "talos-service"
namespace: "talos:service"
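
Changes to this file can be checked before the aggregator picks them up — a sketch assuming a local `vector` binary of the same version (the GeoIP database path must exist locally, or the enrichment table should be stubbed out, for validation to pass):

```sh
vector validate config/vector.yaml
```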

View File

@@ -0,0 +1,78 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app vector-aggregator
namespace: monitoring
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
controller:
strategy: RollingUpdate
annotations:
reloader.stakater.com/auto: "true"
image:
repository: docker.io/timberio/vector
tag: 0.33.0-debian
args: ["--config", "/etc/vector/vector.yaml"]
service:
main:
type: LoadBalancer
loadBalancerIP: "${CLUSTER_LB_VECTOR}"
externalTrafficPolicy: Local
ports:
http:
port: 8686
kubernetes-logs:
enabled: true
port: 6000
opnsense-logs:
enabled: true
port: 6001
journald-logs:
enabled: true
port: 6002
talos-kernel:
enabled: true
port: 6050
protocol: UDP
talos-service:
enabled: true
port: 6051
protocol: UDP
persistence:
config:
enabled: true
type: configMap
name: vector-aggregator-configmap
subPath: vector.yaml
mountPath: /etc/vector/vector.yaml
readOnly: true
data:
enabled: true
type: emptyDir
mountPath: /vector-data-dir
geoip:
enabled: true
type: emptyDir
mountPath: /usr/share/GeoIP

View File

@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
resources:
- ./secret.sops.yaml
- ./helmrelease.yaml
configMapGenerator:
- files:
- vector.yaml=./config/vector.yaml
name: vector-aggregator-configmap
generatorOptions:
disableNameSuffixHash: true
patches:
- path: ./patches/geoip.yaml

View File

@@ -0,0 +1,25 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: vector-aggregator
namespace: monitoring
spec:
values:
initContainers:
init-geoip:
image: docker.io/maxmindinc/geoipupdate:v6.0
env:
- name: GEOIPUPDATE_EDITION_IDS
value: GeoLite2-City
- name: GEOIPUPDATE_FREQUENCY
value: "0"
- name: GEOIPUPDATE_VERBOSE
value: "true"
envFrom:
- secretRef:
name: vector-aggregator-secret
volumeMounts:
- name: geoip
mountPath: /usr/share/GeoIP

View File

@@ -0,0 +1,30 @@
# yamllint disable
apiVersion: v1
kind: Secret
metadata:
name: vector-aggregator-secret
namespace: monitoring
type: Opaque
stringData:
GEOIPUPDATE_ACCOUNT_ID: ENC[AES256_GCM,data:vBU+Iwuv,iv:cK005QUa8iKK+2M2OsKvCXJAkUyhUgReDw8hBBhcNLQ=,tag:k3vrqqyMkp8cnGWfeLbu0A==,type:str]
GEOIPUPDATE_LICENSE_KEY: ENC[AES256_GCM,data:XuCipRddaBHI2umUb1+SPA==,iv:gwbTaK5KCmTF+8mQNjkmLkTdSqz2uFAINo6rJ6F2R4U=,tag:cvevnXWf7xFcdMkwKRF4pQ==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlUHFQcWJaRTlGT2RLK3R3
YlJDVTMvRThTR1dXdGN5a1RQd2FxTy84SFdNCnFEWEVpU1o3Y2hISkJrNzBMZFYr
emZyeW9ySnZEYnlvMWFQeXpYeHMzeUkKLS0tIEtPTm9JM0o0ZVBKN05oa0JSbHBL
b2pLSXUyS2lCbmZYYmk0WnVpRU9xRUUKAMUoEprOuR/xgtHZDBmDNTrLEyD9vbeb
dvQZ/7KrgRKVq4Eq3wI254CvajnNs3mACp175DhTsLyX0hBO77FZ2A==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2022-09-15T03:04:22Z"
mac: ENC[AES256_GCM,data:rDDMbtb8xSULRF6RUSNl+Pw4KIiCXJZ5kQ70U5Ap3oB3Ci6miw0EXAVCZC699iJ2YS8cqhUe6VwRCdVn+1bYxz4Dbjm1/dAvkXNbBruhe6KhwSpF/sx6viVH2238ReG+jHr7l/AXVDYyWCxH7hzHWn2f2hTqncpuvr1uyyhU0kg=,iv:JN6F4XDLypDyw9UX9WnhJu+UZzR/A9IW+8NtP4QXnWU=,tag:s+F3V/DNNlvTjFWgjxefoA==,type:str]
pgp: []
encrypted_regex: ^(data|stringData)$
version: 3.7.3

View File

@@ -0,0 +1,40 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-vector-aggregator
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
dependsOn:
- name: cluster-apps-loki-app
path: ./kubernetes/apps/monitoring/vector/aggregator
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
interval: 30m
retryInterval: 1m
timeout: 3m
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-vector-agent
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
dependsOn:
- name: cluster-apps-vector-aggregator
path: ./kubernetes/apps/monitoring/vector/agent
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
interval: 30m
retryInterval: 1m
timeout: 3m