apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: cert-manager
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    chart: cert-manager
    repoURL: https://charts.jetstack.io
    targetRevision: 1.11.0
    helm:
      values: |
        installCRDs: true
  destination:
    server: "https://kubernetes.default.svc"
    namespace: security
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
# https://github.com/prometheus-community/helm-charts/issues/1500#issuecomment-1030201685
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: prom-crds
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://github.com/prometheus-community/helm-charts.git
    path: charts/kube-prometheus-stack/crds/
    targetRevision: kube-prometheus-stack-45.7.1
    directory:
      recurse: true
  destination:
    server: "https://kubernetes.default.svc"
    namespace: prometheus
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
      - Replace=true
---
# https://github.com/prometheus-community/helm-charts/issues/1500#issuecomment-1030201685
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: prometheus-community
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    chart: kube-prometheus-stack
    repoURL: https://prometheus-community.github.io/helm-charts
    targetRevision: 45.7.1
    helm:
      parameters:
        - name: grafana.enabled
          value: "false"
        # https://cogarius.medium.com/3-3-complete-guide-to-ci-cd-pipelines-with-drone-io-on-kubernetes-drone-metrics-with-prometheus-c2668e42b03f
        # "Kubernetes configuration"
        - name: prometheus.ingress.enabled
          value: "true"
        - name: prometheus.ingress.ingressClassName
          value: traefik
        - name: prometheus.ingress.hosts[0]
          value: prometheus.avril
        # https://github.com/prometheus-operator/prometheus-operator/issues/5197
        # Updating CRDs to matching version didn't work
        - name: prometheus.prometheusSpec.scrapeInterval
          value: 30s
        - name: prometheus.prometheusSpec.evaluationInterval
          value: 30s
        - name: prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName
          value: longhorn
        - name: prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0]
          value: ReadWriteOnce
        - name: prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage
          value: 50Gi
        - name: prometheus-node-exporter.prometheus.monitor.relabelings[0].sourceLabels[0]
          value: "__meta_kubernetes_pod_node_name"
        - name: prometheus-node-exporter.prometheus.monitor.relabelings[0].targetLabel
          value: node_name
      skipCrds: true
  destination:
    server: "https://kubernetes.default.svc"
    namespace: prometheus
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: grafana
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    chart: grafana
    repoURL: https://grafana.github.io/helm-charts
    targetRevision: "6.49.0"
    helm:
      values: |
        image:
          tag: "9.3.2"
        ingress:
          enabled: true
          hosts:
            - grafana.avril
        persistence:
          enabled: true
          storageClassName: longhorn
          accessModes:
            - ReadWriteMany
        sidecar:
          dashboards:
            enabled: true
            defaultFolderName: General
            label: grafana_dashboard
            labelValue: "1"
            folderAnnotation: grafana_folder
            searchNamespace: ALL
            provider:
              foldersFromFilesStructure: "true"
        datasources:
          datasources.yaml:
            apiVersion: 1
            datasources:
              - name: Prometheus
                type: prometheus
                url: http://prometheus.avril
  destination:
    server: "https://kubernetes.default.svc"
    namespace: grafana
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
# https://github.com/dotdc/grafana-dashboards-kubernetes/blob/master/argocd-app.yml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: grafana-dashboards-kubernetes
  namespace: argo
  labels:
    app.kubernetes.io/name: grafana-dashboards-kubernetes
    app.kubernetes.io/version: HEAD
    app.kubernetes.io/managed-by: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default # You may need to change this!
  source:
    path: ./
    repoURL: https://github.com/dotdc/grafana-dashboards-kubernetes
    targetRevision: HEAD
  destination:
    server: https://kubernetes.default.svc
    namespace: monitoring
  syncPolicy:
    ## https://argo-cd.readthedocs.io/en/stable/user-guide/auto_sync
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - Replace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: grafana-oncall
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    chart: oncall
    repoURL: https://grafana.github.io/helm-charts
    targetRevision: "1.3.1"
    helm:
      values: |
        global:
          storageClass: longhorn
        oncall:
          base_url: oncall.grafana.avril
        externalGrafana:
          url: http://grafana.avril
        # Without this override there are clashes between similarly-named resources
        # (ServiceAccounts, Secrets) from OnCall itself and the underlying Grafana
        # templates. It also needs to be short, otherwise the batch jobs that get
        # created have names that are too long.
        fullnameOverride: "oncall-ovrd"
        env:
          - name: FEATURE_TELEGRAM_INTEGRATION_ENABLED
            value: "true"
          - name: TELEGRAM_WEBHOOK_HOST
            valueFrom:
              secretKeyRef:
                name: telegram-webhook-host-secret
                key: url
        image:
          tag: "v1.3.1-arm64-linux"
        cert-manager:
          enabled: false
        grafana:
          enabled: false
        ingress:
          enabled: false
        ingress-nginx:
          enabled: false
        rabbitmq:
          enabled: true
          image:
            repository: rabbitmq
            tag: 3.10.10
          auth:
            username: user
            password: user
          extraEnvVars:
            - name: RABBITMQ_DEFAULT_USER
              value: user
            - name: RABBITMQ_DEFAULT_PASS
              value: user
        redis:
          image:
            repository: arm64v8/redis
            tag: 7.0
        # Have to use Postgres because the MySQL setup doesn't permit using existingSecret
        mariadb:
          enabled: false
        database:
          type: postgresql
        postgresql:
          enabled: true
          image:
            repository: arm64v8/postgres
            tag: 15.1
        telegram:
          enabled: true
          existingSecret: telegram-auth-secret
          tokenKey: token
          webhookUrl: https://oncall-grafana.scubbo.org
  destination:
    server: https://kubernetes.default.svc
    namespace: grafana
  syncPolicy:
    ## https://argo-cd.readthedocs.io/en/stable/user-guide/auto_sync
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - Replace=true
---
# TODO - use Jsonnet or similar to automate building this from all the directories
# (and pull out the common config). See the commented-out ApplicationSet sketch at
# the end of this file for one possible approach.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: jellyfin
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://gitea.scubbo.org/scubbo/helm-charts.git
    targetRevision: HEAD
    path: charts/jellyfin
    helm:
      valueFiles:
        - values.yaml
  destination:
    server: "https://kubernetes.default.svc"
    namespace: jellyfin
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: proton-vpn
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://gitea.scubbo.org/scubbo/helm-charts.git
    targetRevision: HEAD
    path: charts/proton-vpn
    helm:
      valueFiles:
        - values.yaml
  destination:
    server: "https://kubernetes.default.svc"
    namespace: proton-vpn
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: ombi
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://gitea.scubbo.org/scubbo/helm-charts.git
    targetRevision: HEAD
    path: charts/ombi
    helm:
      valueFiles:
        - values.yaml
  destination:
    server: "https://kubernetes.default.svc"
    namespace: ombi
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
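# One possible answer to the TODO above, sketched here and left commented out so that
# applying this file is unchanged: instead of Jsonnet, an Argo CD ApplicationSet with a
# git directory generator could stamp out one Application per chart directory in the
# helm-charts repo. The name "helm-charts-apps" is illustrative (not part of the
# existing config), and per-app differences such as value overrides would still need
# to be handled separately.
#
# apiVersion: argoproj.io/v1alpha1
# kind: ApplicationSet
# metadata:
#   name: helm-charts-apps
#   namespace: argo
# spec:
#   generators:
#     - git:
#         repoURL: https://gitea.scubbo.org/scubbo/helm-charts.git
#         revision: HEAD
#         directories:
#           - path: charts/*
#   template:
#     metadata:
#       name: '{{path.basename}}'
#       namespace: argo
#       finalizers:
#         - resources-finalizer.argocd.argoproj.io
#     spec:
#       project: default
#       source:
#         repoURL: https://gitea.scubbo.org/scubbo/helm-charts.git
#         targetRevision: HEAD
#         path: '{{path}}'
#         helm:
#           valueFiles:
#             - values.yaml
#       destination:
#         server: "https://kubernetes.default.svc"
#         namespace: '{{path.basename}}'
#       syncPolicy:
#         automated:
#           prune: true
#         syncOptions:
#           - CreateNamespace=true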