Mirror of https://github.com/auricom/home-cluster.git (synced 2025-10-03 01:00:54 +02:00)
rook-ceph
cephblockpool.yaml
@@ -0,0 +1,11 @@
---
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool
  namespace: rook-ceph
spec:
  failureDomain: host
  replicated:
    size: 3
    requireSafeReplicaSize: true
cephcluster.yaml
@@ -0,0 +1,71 @@
---
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  cephVersion:
    image: ceph/ceph:v15.2.10
    allowUnsupported: false
  dataDirHostPath: /var/lib/rook
  skipUpgradeChecks: false
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  removeOSDsIfOutAndSafeToRemove: false
  mon:
    count: 3
    allowMultiplePerNode: false
  monitoring:
    enabled: true
    rulesNamespace: rook-ceph
  network:
  crashCollector:
    disable: false
  cleanupPolicy:
    confirmation: ""
    sanitizeDisks:
      method: quick
      dataSource: zero
      iteration: 1
  mgr:
    modules:
      - name: pg_autoscaler
        enabled: true
  dashboard:
    enabled: true
    port: 7000
    ssl: false
  disruptionManagement:
    managePodBudgets: false
    osdMaintenanceTimeout: 30
    manageMachineDisruptionBudgets: false
    machineDisruptionBudgetNamespace: openshift-machine-api
  resources:
    mon:
      requests:
        cpu: 35m
        memory: 800Mi
      limits:
        memory: 1024Mi
    osd:
      requests:
        cpu: 35m
        memory: 2048Mi
      limits:
        memory: 4096Mi
  storage:
    useAllNodes: false
    useAllDevices: false
    config:
      metadataDevice:
      osdsPerDevice: "1"
    nodes:
      - name: "k3s-worker1"
        devices:
          - name: "nvme0n1"
      - name: "k3s-worker2"
        devices:
          - name: "nvme0n1"
      - name: "k3s-worker3"
        devices:
          - name: "nvme0n1"
kustomization.yaml
@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - cephblockpool.yaml
  - cephcluster.yaml
  - storageclass.yaml
  - volumesnapshotclass.yaml
storageclass.yaml
@@ -0,0 +1,22 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  clusterID: rook-ceph
  pool: replicapool
  imageFormat: "2"
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/fstype: ext4
reclaimPolicy: Delete
allowVolumeExpansion: true
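For reference, a minimal PersistentVolumeClaim that provisions an RBD-backed volume through this StorageClass might look like the sketch below; the claim name, namespace, and requested size are illustrative and not part of this commit. Because rook-ceph-block is annotated as the default class, storageClassName could also be omitted.

# Illustrative PVC sketch (not part of this commit)
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data        # hypothetical claim name
  namespace: default        # hypothetical namespace
spec:
  accessModes:
    - ReadWriteOnce         # RBD block volumes are typically mounted by a single node
  storageClassName: rook-ceph-block
  resources:
    requests:
      storage: 10Gi         # hypothetical size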
volumesnapshotclass.yaml
@@ -0,0 +1,16 @@
---
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshotClass
metadata:
  name: csi-rbdplugin-snapclass
  annotations:
    k10.kasten.io/is-snapshot-class: "true"
driver: rook-ceph.rbd.csi.ceph.com
parameters:
  # Specify a string that identifies your cluster. Ceph CSI supports any
  # unique string. When Ceph CSI is deployed by Rook use the Rook namespace,
  # for example "rook-ceph".
  clusterID: rook-ceph
  csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
deletionPolicy: Delete
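As a usage sketch, a VolumeSnapshot that uses this class could look like the following; the snapshot name and the source PVC name are illustrative and not part of this commit.

# Illustrative VolumeSnapshot sketch (not part of this commit)
---
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
  name: example-data-snapshot            # hypothetical snapshot name
  namespace: default                     # must match the namespace of the source PVC
spec:
  volumeSnapshotClassName: csi-rbdplugin-snapclass
  source:
    persistentVolumeClaimName: example-data   # hypothetical source claim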