Jack Jackson 496c2f13b0 Expand (and explicitly specify storageclass of) Vault storage
Due to a currently-unknown fault, my Vault storage filled up (I
_suspect_ it's due to not setting a default TTL on Tokens, and so they
all hung around. Surprised they were created at such a rate, but w/e). I
wasn't able to directly expand the volume - and, anyway, it's on
Longhorn which is a Storage Provisioner that I'm moving away from - so
the solution was to:
* Create a temporary PV (on FreeNas, though that doesn't actually
  matter) and copy data onto it (by mounting both it and the existing
  Volume onto a debug pod, using a variant of [this
  script](https://blog.scubbo.org/posts/pvc-debug-pod/))
* Delete the existing PVC and PV
* Make this update, and sync
  * A new _empty_ PV will be created (and probably populated with some
    stuff)
* Scale-down the StatefulSet, do the double-mount-to-debug-pod trick
  again, and copy data from the temporary PV onto this one
* Delete Debug Pod, re-scale-up StatefulSet...and hope that there's
  nothing stateful in the data which means that copying it from one
  volume to another makes it invalid (e.g. if encrypted with an
  encryption key which would change on a new spin-up of the pod - which
  _seems_ unlikely, but 🤷)
2024-06-04 14:07:45 -07:00

400 lines
9.1 KiB
YAML

apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: cert-manager
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    chart: cert-manager
    repoURL: https://charts.jetstack.io
    # Quoted so the version is never re-typed by YAML tooling.
    targetRevision: "1.11.0"
    helm:
      values: |
        installCRDs: true
  destination:
    server: "https://kubernetes.default.svc"
    namespace: security
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
# https://github.com/prometheus-community/helm-charts/issues/1500#issuecomment-1030201685
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: prom-crds
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://github.com/prometheus-community/helm-charts.git
    path: charts/kube-prometheus-stack/crds/
    targetRevision: kube-prometheus-stack-45.7.1
    # NOTE(review): this source also sets `directory:` below; Argo CD treats a
    # source as either helm OR directory, so these helm values are likely
    # ignored — confirm whether the tolerations are still needed here.
    helm:
      values: |
        tolerations:
          - key: architecture
            operator: Equal
            value: x86
    directory:
      recurse: true
  destination:
    server: "https://kubernetes.default.svc"
    namespace: prometheus
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
      # Replace=true because the prometheus CRDs exceed the annotation size
      # limit hit by client-side apply (see issue link above).
      - Replace=true
---
# https://github.com/prometheus-community/helm-charts/issues/1500#issuecomment-1030201685
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: prometheus-community
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    chart: kube-prometheus-stack
    repoURL: https://prometheus-community.github.io/helm-charts
    # Quoted so the chart version is never re-typed by YAML tooling.
    targetRevision: "45.7.1"
    helm:
      parameters:
        - name: grafana.enabled
          value: "false"
        # https://cogarius.medium.com/3-3-complete-guide-to-ci-cd-pipelines-with-drone-io-on-kubernetes-drone-metrics-with-prometheus-c2668e42b03f
        # "Kubernetes configuration"
        - name: prometheus.ingress.enabled
          value: "true"
        - name: prometheus.ingress.ingressClassName
          value: traefik
        - name: prometheus.ingress.hosts[0]
          value: prometheus.avril
        # https://github.com/prometheus-operator/prometheus-operator/issues/5197
        # Updating CRDs to matching version didn't work
        - name: prometheus.prometheusSpec.scrapeInterval
          value: 30s
        - name: prometheus.prometheusSpec.evaluationInterval
          value: 30s
        - name: prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName
          value: longhorn
        - name: prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0]
          value: ReadWriteOnce
        - name: prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage
          value: 50Gi
        - name: prometheus-node-exporter.prometheus.monitor.relabelings[0].sourceLabels[0]
          value: "__meta_kubernetes_pod_node_name"
        - name: prometheus-node-exporter.prometheus.monitor.relabelings[0].targetLabel
          value: node_name
      # CRDs are managed separately by the prom-crds Application above.
      skipCrds: true
  destination:
    server: "https://kubernetes.default.svc"
    namespace: prometheus
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: grafana
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    chart: grafana
    repoURL: https://grafana.github.io/helm-charts
    targetRevision: "6.49.0"
    helm:
      values: |
        image:
          tag: "10.1.0"
        tolerations:
          - key: architecture
            operator: Equal
            value: x86
        ingress:
          enabled: true
          hosts:
            - grafana.avril
        persistence:
          enabled: true
          storageClassName: longhorn
          accessModes:
            - ReadWriteMany
        sidecar:
          dashboards:
            enabled: true
            defaultFolderName: General
            label: grafana_dashboard
            labelValue: "1"
            folderAnnotation: grafana_folder
            searchNamespace: ALL
            provider:
              foldersFromFilesStructure: "true"
        datasources:
          datasources.yaml:
            apiVersion: 1
            datasources:
              - name: Prometheus
                type: prometheus
                url: http://prometheus.avril
  destination:
    server: "https://kubernetes.default.svc"
    namespace: grafana
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
# https://github.com/dotdc/grafana-dashboards-kubernetes/blob/master/argocd-app.yml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: grafana-dashboards-kubernetes
  namespace: argo
  labels:
    app.kubernetes.io/name: grafana-dashboards-kubernetes
    app.kubernetes.io/version: HEAD
    app.kubernetes.io/managed-by: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default # You may need to change this!
  source:
    path: ./
    repoURL: https://github.com/dotdc/grafana-dashboards-kubernetes
    targetRevision: HEAD
  destination:
    server: https://kubernetes.default.svc
    namespace: monitoring
  syncPolicy:
    ## https://argo-cd.readthedocs.io/en/stable/user-guide/auto_sync
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - Replace=true
---
# TODO - use Jsonnet or similar to automate building this from all the directories
# (and pull out the common config)
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: jellyfin
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://gitea.scubbo.org/scubbo/helm-charts.git
    targetRevision: HEAD
    path: charts/jellyfin
    helm:
      valueFiles:
        - values.yaml
  destination:
    server: "https://kubernetes.default.svc"
    namespace: jellyfin
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: proton-vpn
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://gitea.scubbo.org/scubbo/helm-charts.git
    targetRevision: HEAD
    path: charts/proton-vpn
    helm:
      valueFiles:
        - values.yaml
  destination:
    server: "https://kubernetes.default.svc"
    namespace: proton-vpn
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: ombi
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://gitea.scubbo.org/scubbo/helm-charts.git
    targetRevision: HEAD
    path: charts/ombi
    helm:
      valueFiles:
        - values.yaml
  destination:
    server: "https://kubernetes.default.svc"
    namespace: ombi
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: jackjack-app-of-apps-private
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://gitea.scubbo.org/scubbo/private-apps.git
    targetRevision: HEAD
    path: app-of-apps
  destination:
    server: "https://kubernetes.default.svc"
    namespace: default
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: vault
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    chart: vault
    repoURL: https://helm.releases.hashicorp.com
    targetRevision: "0.25.0"
    helm:
      values: |
        global:
          namespace: "vault"
        ui:
          enabled: true
        serverTelemetry:
          serviceMonitor:
            enabled: true
        server:
          ingress:
            enabled: true
            ingressClassName: traefik
            hosts:
              - host: vault.avril
                paths: []
          # Expanded from the Longhorn-provisioned default after the volume
          # filled up; storageClass pinned explicitly (see commit message).
          dataStorage:
            size: 20Gi
            storageClass: freenas-iscsi-csi
  destination:
    server: "https://kubernetes.default.svc"
    namespace: vault
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: blog
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://gitea.scubbo.org/scubbo/blog-infrastructure.git
    targetRevision: HEAD
    path: helm
    helm:
      valueFiles:
        - values.yaml
      parameters:
        - name: targetEnv
          value: prod
  destination:
    server: "https://kubernetes.default.svc"
    namespace: blog
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true