New Talos cluster

This commit is contained in:
auricom
2022-11-19 04:47:32 +01:00
parent 42346bd99b
commit 4ac38f95e9
548 changed files with 1642 additions and 2331 deletions

View File

@@ -0,0 +1,203 @@
---
# HelmRelease for the Rook Ceph cluster chart (deploys the CephCluster CR and
# associated pools/filesystems/object stores). Depends on the operator release
# being reconciled first in the same namespace.
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
spec:
  interval: 15m
  chart:
    spec:
      chart: rook-ceph-cluster
      version: v1.10.6
      sourceRef:
        kind: HelmRepository
        name: rook-ceph
        namespace: flux-system
  install:
    createNamespace: true
    remediation:
      retries: 5
  upgrade:
    remediation:
      retries: 5
  dependsOn:
    # The operator must install the CRDs before this release can reconcile.
    - name: rook-ceph-operator
      namespace: rook-ceph
  values:
    monitoring:
      enabled: true
      createPrometheusRules: true
    ingress:
      dashboard:
        ingressClassName: "nginx"
        host:
          name: "rook.${SECRET_CLUSTER_DOMAIN}"
          path: "/"
        tls:
          - hosts:
              - "rook.${SECRET_CLUSTER_DOMAIN}"
    # Injected into ceph.conf; enables TRIM/discard on the NVMe-backed OSDs.
    configOverride: |
      [global]
      bdev_enable_discard = true
      bdev_async_discard = true
    cephClusterSpec:
      mgr:
        count: 1
      dashboard:
        enabled: true
        urlPrefix: /
        ssl: false
      storage:
        # Explicit node/device selection — do not auto-consume disks.
        useAllNodes: false
        useAllDevices: false
        config:
          osdsPerDevice: "1"
        nodes:
          - name: "talos-node-2"
            devices:
              - name: "nvme0n1"
          - name: "talos-node-3"
            devices:
              - name: "nvme0n1"
          - name: "talos-node-4"
            devices:
              - name: "nvme0n1"
      resources:
        mgr:
          requests:
            cpu: "125m"
            memory: "512Mi"
          limits:
            memory: "1Gi"
        mon:
          requests:
            cpu: "50m"
            memory: "512Mi"
          limits:
            memory: "1Gi"
        osd:
          requests:
            cpu: "300m"
            memory: "512Mi"
          limits:
            memory: "6Gi"
        mgr-sidecar:
          requests:
            cpu: "50m"
            memory: "100Mi"
          limits:
            memory: "200Mi"
        crashcollector:
          requests:
            cpu: "15m"
            memory: "64Mi"
          limits:
            memory: "128Mi"
        logcollector:
          requests:
            cpu: "100m"
            memory: "100Mi"
          limits:
            memory: "1Gi"
        prepareosd:
          requests:
            cpu: "250m"
            memory: "50Mi"
          limits:
            memory: "2Gi"
        cleanup:
          requests:
            cpu: "250m"
            memory: "100Mi"
          limits:
            memory: "1Gi"
    cephBlockPoolsVolumeSnapshotClass:
      enabled: false
    cephBlockPools:
      - name: replicapool
        spec:
          failureDomain: host
          replicated:
            size: 3
        storageClass:
          enabled: true
          name: rook-ceph-block
          isDefault: true
          reclaimPolicy: Delete
          allowVolumeExpansion: true
          parameters:
            # "2" quoted: must reach the CSI driver as a string, not an int.
            imageFormat: "2"
            imageFeatures: layering
            csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
            csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
            csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
            csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
            csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
            csi.storage.k8s.io/fstype: ext4
    cephFileSystems:
      - name: rook-ceph-filesystem
        spec:
          metadataPool:
            replicated:
              size: 3
          dataPools:
            - failureDomain: host
              replicated:
                size: 3
          metadataServer:
            activeCount: 1
            activeStandby: true
            resources:
              requests:
                cpu: "35m"
                memory: "64M"
              limits:
                memory: "600M"
        storageClass:
          enabled: true
          isDefault: false
          name: rook-ceph-filesystem
          reclaimPolicy: Delete
          allowVolumeExpansion: true
          mountOptions: []
          parameters:
            csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
            csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
            csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
            csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
            csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
            csi.storage.k8s.io/fstype: ext4
    cephObjectStores:
      - name: rook-ceph-objectstore
        spec:
          metadataPool:
            failureDomain: host
            replicated:
              size: 3
          dataPool:
            failureDomain: host
            # 2+1 erasure coding for object data (metadata stays replicated).
            erasureCoded:
              dataChunks: 2
              codingChunks: 1
          preservePoolsOnDelete: true
          gateway:
            port: 80
            resources:
              requests:
                cpu: "100m"
                memory: "128M"
              limits:
                memory: "2Gi"
            instances: 1
          healthCheck:
            bucket:
              interval: 60s
        storageClass:
          enabled: true
          name: rook-ceph-bucket
          reclaimPolicy: Delete
          parameters:
            region: us-east-1

View File

@@ -0,0 +1,4 @@
---
# Kustomization aggregating the HelmRelease manifest in this directory.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helm-release.yaml