Create Ceph cluster
commit 3b10ad2abd (parent 0534e973de)
app-of-apps/templates/apps.yaml
@@ -448,28 +448,32 @@ spec:
       prune: true
     syncOptions:
     - CreateNamespace=true
-# ---
-# apiVersion: argoproj.io/v1alpha1
-# kind: Application
-# metadata:
-#   name: ceph
-#   namespace: argo
-#   finalizers:
-#   - resources-finalizer.argocd.argoproj.io
-# spec:
-#   project: default
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: ceph
+  namespace: argo
+  finalizers:
+  - resources-finalizer.argocd.argoproj.io
+spec:
+  project: default
 
-#   source:
-#     repoURL: https://charts.rook.io/release
-#     targetRevision: "1.12.0"
-#     chart: rook-ceph-cluster
+  source:
+    repoURL: https://gitea.scubbo.org/scubbo/helm-charts.git
+    targetRevision: HEAD
+    path: charts/ceph
 
-#   destination:
-#     server: "https://kubernetes.default.svc"
-#     namespace: rook-ceph
+    helm:
+      valueFiles:
+      - values.yaml
 
-#   syncPolicy:
-#     automated:
-#       prune: true
-#     syncOptions:
-#     - CreateNamespace=true
+  destination:
+    server: "https://kubernetes.default.svc"
+    namespace: rook-ceph
+
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+    - CreateNamespace=true
charts/ceph/Chart.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
apiVersion: v2
name: rook-ceph-cluster
description: A Ceph cluster managed by Rook

type: application
version: 0.0.1
appVersion: 0.0.1

dependencies:
  # https://github.com/k8s-at-home/charts/tree/master/charts/stable/pod-gateway
  # https://github.com/k8s-at-home/charts/commit/bc8aee9648feb02fbe03246026e799cd1bd50ae5
  - name: rook-ceph-cluster
    version: "1.12.0"
    repository: https://charts.rook.io/release
charts/ceph/README.md (new file, 1 line)
@@ -0,0 +1 @@
This is created as a separate chart directory, rather than being defined directly in `app-of-apps/templates/apps.yaml`, because this approach allows a dedicated `values.yaml` (which I've adapted from [here](https://github.com/rook/rook/blob/cb44c7b88fb346f3b120e8bda769ac808ec60880/deploy/examples/cluster-on-pvc.yaml) based on advice [here](https://rook.io/docs/rook/latest/Helm-Charts/ceph-cluster-chart/#ceph-cluster-spec)).
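For context: Helm only forwards values to a chart dependency when they are nested under a top-level key matching the dependency's `name`, which is why the overrides in `values.yaml` below sit under `rook-ceph-cluster:`. A minimal sketch of that layout (the `toolbox.enabled` flag is an assumed upstream chart value, shown purely for illustration and not set by this commit):

```yaml
# Wrapper-chart values sketch: keys under "rook-ceph-cluster" are passed to the
# dependency declared in Chart.yaml; anything else stays with the wrapper chart.
rook-ceph-cluster:
  toolbox:
    enabled: true              # assumed upstream value, for illustration only
  cephClusterSpec:
    dataDirHostPath: /mnt/HDD/ceph   # the value this commit actually overrides
```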
charts/ceph/values.yaml (new file, 209 lines)
@@ -0,0 +1,209 @@
# Adapted from https://github.com/rook/rook/blob/cb44c7b88fb346f3b120e8bda769ac808ec60880/deploy/examples/cluster-on-pvc.yaml
# to be used in Helm installation
# (Initially, only change is to `dataDirHostPath`)
rook-ceph-cluster:
  cephClusterSpec:
    dataDirHostPath: /mnt/HDD/ceph
    mon:
      # Set the number of mons to be started. Generally recommended to be 3.
      # For highest availability, an odd number of mons should be specified.
      count: 3
      # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
      # Mons should only be allowed on the same node for test environments where data loss is acceptable.
      allowMultiplePerNode: false
      # A volume claim template can be specified in which case new monitors (and
      # monitors created during fail over) will construct a PVC based on the
      # template for the monitor's primary storage. Changes to the template do not
      # affect existing monitors. Log data is stored on the HostPath under
      # dataDirHostPath. If no storage requirement is specified, a default storage
      # size appropriate for monitor data will be used.
      volumeClaimTemplate:
        spec:
          storageClassName: gp2
          resources:
            requests:
              storage: 10Gi
    cephVersion:
      image: quay.io/ceph/ceph:v17.2.6
      allowUnsupported: false
    skipUpgradeChecks: false
    continueUpgradeAfterChecksEvenIfNotHealthy: false
    mgr:
      count: 1
      modules:
        - name: pg_autoscaler
          enabled: true
    dashboard:
      enabled: true
      ssl: true
    crashCollector:
      disable: false
    logCollector:
      enabled: true
      periodicity: daily # one of: hourly, daily, weekly, monthly
      maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
    storage:
      storageClassDeviceSets:
        - name: set1
          # The number of OSDs to create from this device set
          count: 3
          # IMPORTANT: If volumes specified by the storageClassName are not portable across nodes
          # this needs to be set to false. For example, if using the local storage provisioner
          # this should be false.
          portable: true
          # Certain storage class in the Cloud are slow
          # Rook can configure the OSD running on PVC to accommodate that by tuning some of the Ceph internal
          # Currently, "gp2" has been identified as such
          tuneDeviceClass: true
          # Certain storage class in the Cloud are fast
          # Rook can configure the OSD running on PVC to accommodate that by tuning some of the Ceph internal
          # Currently, "managed-premium" has been identified as such
          tuneFastDeviceClass: false
          # whether to encrypt the deviceSet or not
          encrypted: false
          # Since the OSDs could end up on any node, an effort needs to be made to spread the OSDs
          # across nodes as much as possible. Unfortunately the pod anti-affinity breaks down
          # as soon as you have more than one OSD per node. The topology spread constraints will
          # give us an even spread on K8s 1.18 or newer.
          placement:
            topologySpreadConstraints:
              - maxSkew: 1
                topologyKey: kubernetes.io/hostname
                whenUnsatisfiable: ScheduleAnyway
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - rook-ceph-osd
          preparePlacement:
            podAntiAffinity:
              preferredDuringSchedulingIgnoredDuringExecution:
                - weight: 100
                  podAffinityTerm:
                    labelSelector:
                      matchExpressions:
                        - key: app
                          operator: In
                          values:
                            - rook-ceph-osd
                        - key: app
                          operator: In
                          values:
                            - rook-ceph-osd-prepare
                    topologyKey: kubernetes.io/hostname
            topologySpreadConstraints:
              - maxSkew: 1
                # IMPORTANT: If you don't have zone labels, change this to another key such as kubernetes.io/hostname
                topologyKey: topology.kubernetes.io/zone
                whenUnsatisfiable: DoNotSchedule
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - rook-ceph-osd-prepare
          resources:
          # These are the OSD daemon limits. For OSD prepare limits, see the separate section below for "prepareosd" resources
          #   limits:
          #     cpu: "500m"
          #     memory: "4Gi"
          #   requests:
          #     cpu: "500m"
          #     memory: "4Gi"
          volumeClaimTemplates:
            - metadata:
                name: data
                # if you are looking at giving your OSD a different CRUSH device class than the one detected by Ceph
                # annotations:
                #   crushDeviceClass: hybrid
              spec:
                resources:
                  requests:
                    storage: 10Gi
                # IMPORTANT: Change the storage class depending on your environment
                storageClassName: gp2
                volumeMode: Block
                accessModes:
                  - ReadWriteOnce
            # dedicated block device to store bluestore database (block.db)
            # - metadata:
            #     name: metadata
            #   spec:
            #     resources:
            #       requests:
            #         # Find the right size https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#sizing
            #         storage: 5Gi
            #     # IMPORTANT: Change the storage class depending on your environment
            #     storageClassName: io1
            #     volumeMode: Block
            #     accessModes:
            #       - ReadWriteOnce
            # dedicated block device to store bluestore wal (block.wal)
            # - metadata:
            #     name: wal
            #   spec:
            #     resources:
            #       requests:
            #         # Find the right size https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#sizing
            #         storage: 5Gi
            #     # IMPORTANT: Change the storage class depending on your environment
            #     storageClassName: io1
            #     volumeMode: Block
            #     accessModes:
            #       - ReadWriteOnce
          # Scheduler name for OSD pod placement
          # schedulerName: osd-scheduler
      # when onlyApplyOSDPlacement is false, will merge both placement.All() and storageClassDeviceSets.Placement.
      onlyApplyOSDPlacement: false
    resources:
    # prepareosd:
    #   limits:
    #     cpu: "200m"
    #     memory: "200Mi"
    #   requests:
    #     cpu: "200m"
    #     memory: "200Mi"
    priorityClassNames:
      # If there are multiple nodes available in a failure domain (e.g. zones), the
      # mons and osds can be portable and set the system-cluster-critical priority class.
      mon: system-node-critical
      osd: system-node-critical
      mgr: system-cluster-critical
    disruptionManagement:
      managePodBudgets: true
      osdMaintenanceTimeout: 30
      pgHealthCheckTimeout: 0
    # security oriented settings
    # security:
      # Settings to enable key rotation for KEK(Key Encryption Key).
      # Currently, this is supported only for the default encryption type,
      # using kubernetes secrets.
      # keyRotation:
      #   enabled: true
      #   # The schedule, written in [cron format](https://en.wikipedia.org/wiki/Cron),
      #   # with which key rotation [CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/)
      #   # is created. The default value is `"@weekly"`.
      #   schedule: "@monthly"
      # To enable the KMS configuration properly don't forget to uncomment the Secret at the end of the file
      # kms:
      #   # name of the config map containing all the kms connection details
      #   connectionDetails:
      #     KMS_PROVIDER: "vault"
      #     VAULT_ADDR: VAULT_ADDR_CHANGE_ME # e,g: https://vault.my-domain.com:8200
      #     VAULT_BACKEND_PATH: "rook"
      #     VAULT_SECRET_ENGINE: "kv"
      #   # name of the secret containing the kms authentication token
      #   tokenSecretName: rook-vault-token
# UNCOMMENT THIS TO ENABLE A KMS CONNECTION
# Also, do not forget to replace both:
#   * ROOK_TOKEN_CHANGE_ME: with a base64 encoded value of the token to use
#   * VAULT_ADDR_CHANGE_ME: with the Vault address
# ---
# apiVersion: v1
# kind: Secret
# metadata:
#   name: rook-vault-token
#   namespace: rook-ceph # namespace:cluster
# data:
#   token: ROOK_TOKEN_CHANGE_ME
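As a usage sketch, not defined anywhere in this commit: once the cluster reports healthy, workloads can consume it through the StorageClasses the `rook-ceph-cluster` chart creates by default; the block-pool class is named `ceph-block` unless overridden, which is an assumption here. A minimal PVC to smoke-test dynamic provisioning:

```yaml
# Hypothetical test PVC; "ceph-block" is assumed to be the StorageClass created
# by the rook-ceph-cluster chart's default cephBlockPools settings.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-block-smoke-test
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-block
  resources:
    requests:
      storage: 1Gi
```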