⚰️ archive loki + vector + smartctl

auricom
2023-10-15 10:58:54 +02:00
parent ed1f67eb86
commit d5a1fa23da
17 changed files with 0 additions and 890 deletions

View File

@@ -9,7 +9,4 @@ resources:
  - ./gatus/ks.yaml
  - ./grafana/ks.yaml
  - ./kube-prometheus-stack/ks.yaml
  # - ./loki/ks.yaml
  # - ./smartctl-exporter/ks.yaml
  - ./thanos/ks.yaml
  # - ./vector/ks.yaml

View File

@@ -1,130 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: loki-alerting-rules
  namespace: monitoring
data:
  loki-alerting-rules.yaml: |-
    groups:
      #
      # SMART Failures
      #
      - name: smart-failure
        rules:
          - alert: SmartFailures
            expr: |
              sum by (hostname) (count_over_time({hostname=~".+"} | json | _SYSTEMD_UNIT = "smartmontools.service" !~ "(?i)previous self-test completed without error" !~ "(?i)Prefailure" |~ "(?i)(error|fail)"[2m])) > 0
            for: 2m
            labels:
              severity: critical
              category: logs
            annotations:
              summary: "SMART has reported failures on host {{ $labels.hostname }}"
      #
      # zigbee2mqtt
      #
      - name: zigbee2mqtt
        rules:
          - alert: ZigbeeUnableToReachMQTT
            expr: |
              sum(count_over_time({app="zigbee2mqtt"} |~ "(?i)not connected to mqtt server"[2m])) > 0
            for: 2m
            labels:
              severity: critical
              category: logs
            annotations:
              summary: "Zigbee2mqtt is unable to reach MQTT"
      #
      # zwavejs2mqtt
      #
      - name: zwavejs2mqtt
        rules:
          - alert: ZwaveUnableToReachMQTT
            expr: |
              sum(count_over_time({app="zwavejs2mqtt"} |~ "(?i)error while connecting mqtt"[2m])) > 0
            for: 2m
            labels:
              severity: critical
              category: logs
            annotations:
              summary: "Zwavejs2mqtt is unable to reach MQTT"
      #
      # frigate
      #
      - name: frigate
        rules:
          - alert: FrigateUnableToReachMQTT
            expr: |
              sum(count_over_time({app="frigate"} |~ "(?i)unable to connect to mqtt server"[2m])) > 0
            for: 2m
            labels:
              severity: critical
              category: logs
            annotations:
              summary: "Frigate is unable to reach MQTT"
      #
      # *arr
      #
      - name: arr
        rules:
          - alert: ArrDatabaseIsLocked
            expr: |
              sum by (app) (count_over_time({app=~".*arr"} |~ "(?i)database is locked"[2m])) > 0
            for: 2m
            labels:
              severity: critical
              category: logs
            annotations:
              summary: "{{ $labels.app }} is experiencing locked database issues"
          - alert: ArrDatabaseIsMalformed
            expr: |
              sum by (app) (count_over_time({app=~".*arr"} |~ "(?i)database disk image is malformed"[2m])) > 0
            for: 2m
            labels:
              severity: critical
              category: logs
            annotations:
              summary: "{{ $labels.app }} is experiencing malformed database disk image issues"
      #
      # home-assistant
      #
      - name: home-assistant
        rules:
          - alert: HomeAssistantUnableToReachPostgresql
            expr: |
              sum by (app) (count_over_time({app="home-assistant"} |~ "(?i)error in database connectivity"[2m])) > 0
            for: 2m
            labels:
              severity: critical
              category: logs
            annotations:
              summary: "Home Assistant is unable to connect to postgresql"
      #
      # valetudo
      #
      - name: valetudo
        rules:
          - alert: ValetudoUnableToReachMQTT
            expr: |
              sum by (hostname) (count_over_time({hostname="valetudo"} |~ "(?i).*error.*mqtt.*"[2m])) > 0
            for: 2m
            labels:
              severity: critical
              category: logs
            annotations:
              summary: "Valetudo is unable to connect to mqtt"
      #
      # node-red
      #
      - name: node-red
        rules:
          - alert: NodeRedUnableToReachHomeAssistant
            expr: |
              sum by (app) (count_over_time({app="node-red"} |~ "(?i)home assistant.*connecting to undefined"[2m])) > 0
            for: 2m
            labels:
              severity: critical
              category: logs
            annotations:
              summary: "Node-Red is unable to connect to Home Assistant"

View File

@@ -1,195 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: loki
  namespace: monitoring
spec:
  interval: 30m
  chart:
    spec:
      chart: loki
      version: 5.27.0
      sourceRef:
        kind: HelmRepository
        name: grafana
        namespace: flux-system
  maxHistory: 2
  install:
    createNamespace: true
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
  uninstall:
    keepHistory: false
  values:
    loki:
      structuredConfig:
        auth_enabled: false
        server:
          log_level: info
          http_listen_port: 3100
          grpc_listen_port: 9095
        memberlist:
          join_members: ["loki-memberlist"]
        limits_config:
          retention_period: 14d
          enforce_metric_name: false
          reject_old_samples: true
          reject_old_samples_max_age: 168h
          max_cache_freshness_per_query: 10m
          split_queries_by_interval: 15m
          ingestion_rate_mb: 8
          ingestion_burst_size_mb: 16
        schema_config:
          configs:
            - from: "2021-08-01"
              store: boltdb-shipper
              object_store: s3
              schema: v11
              index:
                prefix: loki_index_
                period: 24h
        common:
          path_prefix: /var/loki
          replication_factor: 3
          storage:
            s3:
              s3: null
              insecure: true
              s3forcepathstyle: true
          ring:
            kvstore:
              store: memberlist
        ruler:
          enable_api: true
          enable_alertmanager_v2: true
          alertmanager_url: http://kube-prometheus-stack-alertmanager:9093
          storage:
            type: local
            local:
              directory: /rules
          rule_path: /tmp/scratch
          ring:
            kvstore:
              store: memberlist
        distributor:
          ring:
            kvstore:
              store: memberlist
        compactor:
          working_directory: /var/loki/boltdb-shipper-compactor
          shared_store: s3
          compaction_interval: 10m
          retention_enabled: true
          retention_delete_delay: 2h
          retention_delete_worker_count: 150
        ingester:
          max_chunk_age: 1h
          lifecycler:
            ring:
              kvstore:
                store: memberlist
        analytics:
          reporting_enabled: false
    gateway:
      enabled: true
      replicas: 3
      affinity: |
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    {{- include "loki.gatewaySelectorLabels" . | nindent 12 }}
                topologyKey: kubernetes.io/hostname
      ingress:
        enabled: true
        ingressClassName: "nginx"
        annotations:
          hajimari.io/enable: "false"
        hosts:
          - host: &host "loki.${SECRET_CLUSTER_DOMAIN}"
            paths:
              - path: /
                pathType: Prefix
        tls:
          - hosts:
              - *host
    write:
      replicas: 3
      affinity: |
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    {{- include "loki.writeSelectorLabels" . | nindent 12 }}
                topologyKey: kubernetes.io/hostname
      persistence:
        size: 10Gi
        storageClass: rook-ceph-block
    read:
      replicas: 3
      affinity: |
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    {{- include "loki.readSelectorLabels" . | nindent 12 }}
                topologyKey: kubernetes.io/hostname
      extraVolumeMounts:
        - name: loki-rules
          mountPath: /rules/fake
        - name: loki-rules-tmp
          mountPath: /tmp/scratch
        - name: loki-tmp
          mountPath: /tmp/loki-tmp
      extraVolumes:
        - name: loki-rules
          emptyDir: {}
        - name: loki-rules-tmp
          emptyDir: {}
        - name: loki-tmp
          emptyDir: {}
      persistence:
        size: 10Gi
        storageClass: rook-ceph-block
    monitoring:
      serviceMonitor:
        enabled: false
      metricsInstance:
        enabled: false
      selfMonitoring:
        enabled: false
        grafanaAgent:
          installOperator: false
      lokiCanary:
        enabled: false
    test:
      enabled: false
  valuesFrom:
    - kind: ConfigMap
      name: loki-chunks-bucket
      valuesKey: BUCKET_NAME
      targetPath: loki.structuredConfig.common.storage.s3.bucketnames
    - kind: ConfigMap
      name: loki-chunks-bucket
      valuesKey: BUCKET_HOST
      targetPath: loki.structuredConfig.common.storage.s3.endpoint
    - kind: Secret
      name: loki-chunks-bucket
      valuesKey: AWS_ACCESS_KEY_ID
      targetPath: loki.structuredConfig.common.storage.s3.access_key_id
    - kind: Secret
      name: loki-chunks-bucket
      valuesKey: AWS_SECRET_ACCESS_KEY
      targetPath: loki.structuredConfig.common.storage.s3.secret_access_key
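
The valuesFrom entries splice the bucket name, endpoint and credentials generated by the loki-chunks ObjectBucketClaim (declared below) into the chart values at reconcile time, so nothing bucket-specific is hard-coded here. Roughly, the merged loki.structuredConfig.common.storage.s3 block ends up looking like this sketch, with placeholders standing in for the generated ConfigMap/Secret values:

    s3:
      s3: null
      insecure: true
      s3forcepathstyle: true
      bucketnames: loki-chunks                     # ConfigMap key BUCKET_NAME
      endpoint: <BUCKET_HOST>                      # ConfigMap key BUCKET_HOST
      access_key_id: <AWS_ACCESS_KEY_ID>           # Secret key AWS_ACCESS_KEY_ID
      secret_access_key: <AWS_SECRET_ACCESS_KEY>   # Secret key AWS_SECRET_ACCESS_KEY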

View File

@@ -1,9 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
resources:
  - ./object-bucket-claim.yaml
  - ./config-map.yaml
  - ./helmrelease.yaml

View File

@@ -1,11 +0,0 @@
---
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: loki-chunks-bucket
  namespace: monitoring
spec:
  bucketName: loki-chunks
  storageClassName: rook-ceph-bucket
  additionalConfig:
    maxSize: "50G"

View File

@@ -1,25 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: cluster-apps-loki-app
  namespace: flux-system
  labels:
    substitution.flux.home.arpa/enabled: "true"
spec:
  dependsOn:
    - name: cluster-apps-rook-ceph-cluster
  path: ./kubernetes/apps/monitoring/loki/app
  prune: true
  sourceRef:
    kind: GitRepository
    name: home-ops-kubernetes
  healthChecks:
    - apiVersion: helm.toolkit.fluxcd.io/v2beta1
      kind: HelmRelease
      name: loki
      namespace: monitoring
  interval: 30m
  retryInterval: 1m
  timeout: 5m

View File

@@ -1,38 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: &app smartctl-exporter
  namespace: default
spec:
  interval: 30m
  chart:
    spec:
      chart: prometheus-smartctl-exporter
      version: 0.6.0
      sourceRef:
        kind: HelmRepository
        name: prometheus-community
        namespace: flux-system
  maxHistory: 2
  install:
    createNamespace: true
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
  uninstall:
    keepHistory: false
  values:
    fullnameOverride: *app
    config:
      devices:
        - /dev/sda
        - /dev/nvme0n1
    serviceMonitor:
      enabled: true
    prometheusRules:
      enabled: false
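
With prometheusRules disabled in the chart, any metrics-based SMART alert has to live elsewhere (the Loki rule above covered this via logs instead). A minimal rule sketch, assuming the exporter's smartctl_device_smart_healthy gauge (1 = healthy) and its device/instance labels:

    - alert: SmartDeviceUnhealthy
      expr: smartctl_device_smart_healthy == 0
      for: 5m
      labels:
        severity: critical
      annotations:
        summary: "SMART reports device {{ $labels.device }} on {{ $labels.instance }} as unhealthy"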

View File

@@ -1,7 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
resources:
  - ./helmrelease.yaml

View File

@@ -1,23 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: cluster-apps-smartctl-exporter
  namespace: flux-system
  labels:
    substitution.flux.home.arpa/enabled: "true"
spec:
  path: ./kubernetes/apps/monitoring/smartctl-exporter/app
  prune: true
  sourceRef:
    kind: GitRepository
    name: home-ops-kubernetes
  healthChecks:
    - apiVersion: helm.toolkit.fluxcd.io/v2beta1
      kind: HelmRelease
      name: smartctl-exporter
      namespace: monitoring
  interval: 30m
  retryInterval: 1m
  timeout: 3m

View File

@@ -1,84 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: &app vector-agent
  namespace: monitoring
spec:
  interval: 30m
  chart:
    spec:
      chart: vector
      version: 0.26.0
      sourceRef:
        kind: HelmRepository
        name: vector
        namespace: flux-system
  maxHistory: 2
  install:
    createNamespace: true
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
  uninstall:
    keepHistory: false
  values:
    image:
      repository: timberio/vector
      tag: 0.33.0-debian
    role: "Agent"
    podAnnotations:
      configmap.reloader.stakater.com/reload: vector-agent
    customConfig:
      data_dir: /vector-data-dir
      api:
        enabled: false
      # Sources
      sources:
        kubernetes_logs:
          type: kubernetes_logs
        talos_kernel_logs:
          type: socket
          mode: udp
          address: 127.0.0.1:12000
        talos_service_logs:
          type: socket
          mode: udp
          address: 127.0.0.1:12001
      # Sinks
      sinks:
        kubernetes_sink:
          type: vector
          inputs:
            - kubernetes_logs
          address: "vector-aggregator.monitoring:6000"
          version: "2"
        talos_kernel_sink:
          type: vector
          inputs:
            - talos_kernel_logs
          address: "vector-aggregator.monitoring:6050"
          version: "2"
        talos_service_sink:
          type: vector
          inputs:
            - talos_service_logs
          address: "vector-aggregator.monitoring:6051"
          version: "2"
    podMonitor:
      enabled: true
    resources:
      requests:
        cpu: 23m
        memory: 249M
      limits:
        memory: 918M
    service:
      enabled: false
    tolerations:
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
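
The two socket sources above only receive data if each Talos node ships its kernel and service logs to those loopback ports, which assumes the agent DaemonSet shares the node's network namespace (not shown in this diff). A sketch of the Talos machine-config side of that wiring, based on the Talos logging options (exact placement may differ by Talos version):

    machine:
      logging:
        destinations:
          - endpoint: "udp://127.0.0.1:12001/"   # service logs -> talos_service_logs source
            format: json_lines
      install:
        extraKernelArgs:
          - talos.logging.kernel=udp://127.0.0.1:12000/   # kernel logs -> talos_kernel_logs source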

View File

@@ -1,7 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
resources:
  - ./helmrelease.yaml

View File

@@ -1,160 +0,0 @@
data_dir: /vector-data-dir
api:
  enabled: true
  address: 0.0.0.0:8686
enrichment_tables:
  geoip_table:
    type: geoip
    path: /usr/share/GeoIP/GeoLite2-City.mmdb
# Sources
sources:
  kubernetes_source:
    address: 0.0.0.0:6000
    type: vector
    version: "2"
  opnsense_logs:
    address: 0.0.0.0:6001
    type: vector
    version: "2"
  journald_source:
    type: vector
    address: 0.0.0.0:6002
    version: "2"
  vector_metrics:
    type: internal_metrics
  talos_kernel_logs:
    address: 0.0.0.0:6050
    type: socket
    mode: udp
    max_length: 102400
    decoding:
      codec: json
    host_key: __host
  talos_service_logs:
    address: 0.0.0.0:6051
    type: socket
    mode: udp
    max_length: 102400
    decoding:
      codec: json
    host_key: __host
# Transformations
transforms:
  talos_kernel_logs_xform:
    type: remap
    inputs:
      - talos_kernel_logs
    source: |-
      .__host = replace!(.__host, "192.168.8.101", "talos-node-1")
      .__host = replace(.__host, "192.168.8.102", "talos-node-2")
      .__host = replace(.__host, "192.168.8.103", "talos-node-3")
      .__host = replace(.__host, "192.168.8.104", "talos-node-4")
  talos_service_logs_xform:
    type: remap
    inputs:
      - talos_service_logs
    source: |-
      .__host = replace!(.__host, "192.168.8.101", "talos-node-1")
      .__host = replace(.__host, "192.168.8.102", "talos-node-2")
      .__host = replace(.__host, "192.168.8.103", "talos-node-3")
      .__host = replace(.__host, "192.168.8.104", "talos-node-4")
  kubernetes_remap:
    type: remap
    inputs:
      - kubernetes_source
    source: |
      # Standardize 'app' index
      .custom_app_name = .pod_labels."app.kubernetes.io/name" || .pod_labels.app || .pod_labels."k8s-app" || "unknown"
# Sinks
sinks:
  loki_kubernetes:
    type: loki
    inputs:
      - kubernetes_source
    endpoint: http://loki-gateway.monitoring.svc.cluster.local:80
    encoding:
      codec: json
    batch:
      max_bytes: 2049000
    out_of_order_action: rewrite_timestamp
    remove_label_fields: true
    remove_timestamp: true
    labels:
      k8s_app: '{{ custom_app_name }}'
      k8s_container: '{{ kubernetes.container_name }}'
      k8s_filename: '{{ kubernetes.file }}'
      k8s_instance: '{{ kubernetes.pod_labels."app.kubernetes.io/instance" }}'
      k8s_namespace: '{{ kubernetes.pod_namespace }}'
      k8s_node: '{{ kubernetes.pod_node_name }}'
      k8s_pod: '{{ kubernetes.pod_name }}'
  loki_opnsense:
    type: loki
    inputs:
      - opnsense_logs
    endpoint: http://loki-gateway.monitoring.svc.cluster.local:80
    encoding:
      codec: json
    batch:
      max_bytes: 400000
    out_of_order_action: rewrite_timestamp
    labels:
      hostname: '{{ host }}'
      syslog_identifier: '{{ SYSLOG_IDENTIFIER }}'
  loki_journal:
    type: loki
    inputs:
      - journald_source
    endpoint: http://loki-gateway.monitoring.svc.cluster.local:80
    encoding:
      codec: json
    batch:
      max_bytes: 2049000
    out_of_order_action: accept
    remove_label_fields: true
    remove_timestamp: true
    labels:
      hostname: '{{ host }}'
  talos_kernel:
    type: loki
    inputs:
      - talos_kernel_logs_xform
    endpoint: http://loki-gateway.monitoring.svc.cluster.local:80
    encoding:
      codec: json
      except_fields:
        - __host
    batch:
      max_bytes: 1048576
    out_of_order_action: rewrite_timestamp
    labels:
      hostname: '{{ __host }}'
      service: '{{ facility }}'
  talos_service:
    type: loki
    inputs:
      - talos_service_logs_xform
    endpoint: http://loki-gateway.monitoring.svc.cluster.local:80
    encoding:
      codec: json
      except_fields:
        - __host
    batch:
      max_bytes: 524288
    out_of_order_action: rewrite_timestamp
    labels:
      hostname: '{{ __host }}'
      service: "talos-service"
      namespace: "talos:service"

View File

@@ -1,78 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: &app vector-aggregator
  namespace: monitoring
spec:
  interval: 30m
  chart:
    spec:
      chart: app-template
      version: 1.5.1
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  maxHistory: 2
  install:
    createNamespace: true
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
  uninstall:
    keepHistory: false
  values:
    controller:
      strategy: RollingUpdate
      annotations:
        reloader.stakater.com/auto: "true"
    image:
      repository: docker.io/timberio/vector
      tag: 0.33.0-debian
    args: ["--config", "/etc/vector/vector.yaml"]
    service:
      main:
        type: LoadBalancer
        loadBalancerIP: "${CLUSTER_LB_VECTOR}"
        externalTrafficPolicy: Local
        ports:
          http:
            port: 8686
          kubernetes-logs:
            enabled: true
            port: 6000
          opnsense-logs:
            enabled: true
            port: 6001
          journald-logs:
            enabled: true
            port: 6002
          talos-kernel:
            enabled: true
            port: 6050
            protocol: UDP
          talos-service:
            enabled: true
            port: 6051
            protocol: UDP
    persistence:
      config:
        enabled: true
        type: configMap
        name: vector-aggregator-configmap
        subPath: vector.yaml
        mountPath: /etc/vector/vector.yaml
        readOnly: true
      data:
        enabled: true
        type: emptyDir
        mountPath: /vector-data-dir
      geoip:
        enabled: true
        type: emptyDir
        mountPath: /usr/share/GeoIP

View File

@@ -1,15 +0,0 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
resources:
  - ./secret.sops.yaml
  - ./helmrelease.yaml
configMapGenerator:
  - files:
      - vector.yaml=./config/vector.yaml
    name: vector-aggregator-configmap
generatorOptions:
  disableNameSuffixHash: true
patches:
  - path: ./patches/geoip.yaml

View File

@@ -1,25 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: vector-aggregator
  namespace: monitoring
spec:
  values:
    initContainers:
      init-geoip:
        image: docker.io/maxmindinc/geoipupdate:v6.0
        env:
          - name: GEOIPUPDATE_EDITION_IDS
            value: GeoLite2-City
          - name: GEOIPUPDATE_FREQUENCY
            value: "0"
          - name: GEOIPUPDATE_VERBOSE
            value: "true"
        envFrom:
          - secretRef:
              name: vector-aggregator-secret
        volumeMounts:
          - name: geoip
            mountPath: /usr/share/GeoIP
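
This init container populates the geoip emptyDir that backs the geoip_table enrichment table in the aggregator config (GEOIPUPDATE_FREQUENCY "0" makes geoipupdate run once at pod start rather than loop). A remap transform on the OPNsense feed could then look records up roughly like this — a sketch only, since the actual transform is not among the lines shown and src_ip is a hypothetical field name:

    .geoip = get_enrichment_table_record!("geoip_table", { "ip": .src_ip })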

View File

@@ -1,30 +0,0 @@
# yamllint disable
apiVersion: v1
kind: Secret
metadata:
  name: vector-aggregator-secret
  namespace: monitoring
type: Opaque
stringData:
  GEOIPUPDATE_ACCOUNT_ID: ENC[AES256_GCM,data:vBU+Iwuv,iv:cK005QUa8iKK+2M2OsKvCXJAkUyhUgReDw8hBBhcNLQ=,tag:k3vrqqyMkp8cnGWfeLbu0A==,type:str]
  GEOIPUPDATE_LICENSE_KEY: ENC[AES256_GCM,data:XuCipRddaBHI2umUb1+SPA==,iv:gwbTaK5KCmTF+8mQNjkmLkTdSqz2uFAINo6rJ6F2R4U=,tag:cvevnXWf7xFcdMkwKRF4pQ==,type:str]
sops:
  kms: []
  gcp_kms: []
  azure_kv: []
  hc_vault: []
  age:
    - recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
      enc: |
        -----BEGIN AGE ENCRYPTED FILE-----
        YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlUHFQcWJaRTlGT2RLK3R3
        YlJDVTMvRThTR1dXdGN5a1RQd2FxTy84SFdNCnFEWEVpU1o3Y2hISkJrNzBMZFYr
        emZyeW9ySnZEYnlvMWFQeXpYeHMzeUkKLS0tIEtPTm9JM0o0ZVBKN05oa0JSbHBL
        b2pLSXUyS2lCbmZYYmk0WnVpRU9xRUUKAMUoEprOuR/xgtHZDBmDNTrLEyD9vbeb
        dvQZ/7KrgRKVq4Eq3wI254CvajnNs3mACp175DhTsLyX0hBO77FZ2A==
        -----END AGE ENCRYPTED FILE-----
  lastmodified: "2022-09-15T03:04:22Z"
  mac: ENC[AES256_GCM,data:rDDMbtb8xSULRF6RUSNl+Pw4KIiCXJZ5kQ70U5Ap3oB3Ci6miw0EXAVCZC699iJ2YS8cqhUe6VwRCdVn+1bYxz4Dbjm1/dAvkXNbBruhe6KhwSpF/sx6viVH2238ReG+jHr7l/AXVDYyWCxH7hzHWn2f2hTqncpuvr1uyyhU0kg=,iv:JN6F4XDLypDyw9UX9WnhJu+UZzR/A9IW+8NtP4QXnWU=,tag:s+F3V/DNNlvTjFWgjxefoA==,type:str]
  pgp: []
  encrypted_regex: ^(data|stringData)$
  version: 3.7.3
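
Only the two stringData keys are encrypted here (per encrypted_regex); the surrounding sops metadata stays in the clear so the file can be decrypted and delivered to the init container via envFrom. With the matching age private key available locally (for example via SOPS_AGE_KEY_FILE), the plaintext can be recovered with:

    sops -d secret.sops.yaml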

View File

@@ -1,50 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: cluster-apps-vector-aggregator
  namespace: flux-system
  labels:
    substitution.flux.home.arpa/enabled: "true"
spec:
  dependsOn:
    - name: cluster-apps-loki-app
  path: ./kubernetes/apps/monitoring/vector/aggregator
  prune: true
  sourceRef:
    kind: GitRepository
    name: home-ops-kubernetes
  healthChecks:
    - apiVersion: helm.toolkit.fluxcd.io/v2beta1
      kind: HelmRelease
      name: vector-aggregator
      namespace: monitoring
  interval: 30m
  retryInterval: 1m
  timeout: 3m
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: cluster-apps-vector-agent
  namespace: flux-system
  labels:
    substitution.flux.home.arpa/enabled: "true"
spec:
  dependsOn:
    - name: cluster-apps-vector-aggregator
  path: ./kubernetes/apps/monitoring/vector/agent
  prune: true
  sourceRef:
    kind: GitRepository
    name: home-ops-kubernetes
  healthChecks:
    - apiVersion: helm.toolkit.fluxcd.io/v2beta1
      kind: HelmRelease
      name: vector-agent
      namespace: monitoring
  interval: 30m
  retryInterval: 1m
  timeout: 3m