Compare commits: testing-ci...main (170 commits)
Commits:
1e767ec1eb, 6aba9bf11b, f49906b12f, 9c504e5145, a225b0130a, 5e1bf66aeb, d379cafc7b, b6ce1b3a24, 4c29e3f62e, 441b4c5e3c,
4d743a87bd, 0946871712, a90cc33d1c, d8cad832ba, fb7e8cd98e, 5e08c653a3, 6925418684, ddd9be2280, 4710c36228, dcb62c838d,
cbc77be2a3, a5f24642ae, 668e1c01bb, 6dbc94cec0, 37704b2433, 19c0577655, 1dd75693cb, e9145df641, 807785daca, 60417775be,
1b617368b8, b5eee54ac3, 46d6ee105f, 42e40bf23e, 73accc5b7b, ab2d7a3c30, 78302757cd, f5cbefc00e, 843252d917, 0244e53970,
144c55c2b1, 489ad4b726, 34e6f91ba0, a64a420e94, facac2a99f, 2fd086fa34, 0671898319, 492bf8e10d, f71cbf8c50, 8ee06464a7,
cb8d11ec1a, d204131de3, 378046ac62, 6004858c85, 19089def9b, 1ae48be3ea, 46c20001ca, 322db77194, 7e6c394929, be10ebe8a4,
93dd5c424f, e879b0ba05, 89511e3747, 864b8189e3, 2ff2c4224c, 8d70bbe78b, 4cc1c531e2, 2d1fd9ef0c, 496c2f13b0, e798564692,
bcb2bd28d7, 4c82c014f8, 1926560274, b856fd2bc5, 3140ea8b0d, 185af7901a, b4c9947e4c, 6d338157fa, abc71fd7f1, 40427c0426,
a98d915658, 68f83a23b3, de944bac48, b107f1e839, d1e000dc10, 7c3364fef9, 3dfc818f5f, a3b154adf8, 5548684b7a, 657942071a,
feee5d6979, ab1bc63f84, 7eb215f7fa, 69b15c1ad6, a3e807c406, 499d3acaf5, b183c2bf6b, 58bc49412e, 0bc8d9b219, 7373ba6346,
9689cbc52e, 1dd97e7338, 6f73b57afe, 98ae54614b, 311c15b4a8, 22bc25bc1d, f73941fb8c, a0957a85ea, f22892e482, f2cd112341,
9fdb389814, ed039061bd, b13c2a3c50, 8d2b346490, 9c84e93e65, dd63fb1d2c, 766998c026, a01a1a68f4, 2d622ee971, 56ef7ddcc4,
d9d4031ab7, 3b58d942ae, 6ab568964c, f693819cb6, 82f7405d4e, db60c3ba9c, 1f46cad533, bdf2c5dc65, 4c257cdf15, 6cd7779aae,
e9c311d837, 2b1e5e7f5b, 808a64b3d4, 3e3dddeaec, 67cf86bf60, 670f32b424, 36c5c3a41d, 91d7b2cc72, 3b10ad2abd, 0534e973de,
f7de513633, 324479a769, 6c4f138bac, 4fab765f0b, 9a808e31ea, be0dc53e2b, 9c4fdc923d, 5ba0766dad, 780114f87e, 84d5759cda,
ceba50d6f7, c7a24e0847, 57b22e6cdb, 8a65baafa8, 9e28dd26de, d04f1bc8f5, 30dccb06fa, c06acb6b74, 86b2b339a8, 1f455c9e34,
a2d2e9cdc4, e0536fd808, b9325384f1, 7041bc3757, a66af40b62, dec37388b8, e42bda91b0, b40081eec7, 5e37beb9fb, 160a204a28
.drone.yml (deleted, 26 lines)
@@ -1,26 +0,0 @@
kind: pipeline
name: publish
type: docker

platform:
  os: linux
  arch: arm64

trigger:
  branch:
  - main

steps:
- name: "Upload New Versions"
  image: alpine
  commands:
  - ./build-tools/upload-new-versions.sh
  environment:
    GITEA_PASSWORD:
      from_secret: gitea_password
    ARGO_TOKEN:
      from_secret: argo_token
# TODO - step to apply manifests

image_pull_secrets:
- dockerconfigjson
NOTES.md (new file, 86 lines)
@@ -0,0 +1,86 @@
# Device exposure

For [Jellyfin Hardware Acceleration](https://jellyfin.org/docs/general/administration/hardware-acceleration/), following instructions [here](https://github.com/kubernetes/kubernetes/issues/7890#issuecomment-766088805) (originally from [here](https://old.reddit.com/r/jellyfin/comments/i2r4h9/how_to_enable_hardware_acceleration_with_docker/)), I used [smarter-device-manager](https://gitlab.com/arm-research/smarter/smarter-device-manager) to expose devices from the host node (`epsilon`) into containers.

This was installed via a manual `kubectl apply`, though it should be migrated into GitOps-managed definitions. Note that I had to make some alterations to get the ConfigMap to be read.

```yaml
# smarter-management-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: smarter-device-manager
  namespace: smarter-device-management
data:
  conf.yaml: |
    - devicematch: ^fb0$
      nummaxdevices: 2

# smarter-management-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: smarter-device-management
  namespace: smarter-device-management
spec:
  # Mark this pod as a critical add-on; when enabled, the critical add-on
  # scheduler reserves resources for critical add-on pods so that they can
  # be rescheduled after a failure.
  # See https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
  priorityClassName: "system-node-critical"
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
  hostname: smarter-device-management
  nodeName: epsilon
  containers:
  - name: smarter-device-manager
    image: registry.gitlab.com/arm-research/smarter/smarter-device-manager:v1.20.11
    imagePullPolicy: IfNotPresent
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop: ["ALL"]
    resources:
      limits:
        cpu: 100m
        memory: 10Mi
      requests:
        cpu: 10m
        memory: 10Mi
    volumeMounts:
      - name: device-plugin
        mountPath: /var/lib/kubelet/device-plugins
      - name: dev-dir
        mountPath: /dev
      - name: sys-dir
        mountPath: /sys
      - name: config
        mountPath: /root/config
  volumes:
    - name: device-plugin
      hostPath:
        path: /var/lib/kubelet/device-plugins
    - name: dev-dir
      hostPath:
        path: /dev
    - name: sys-dir
      hostPath:
        path: /sys
    - name: config
      configMap:
        name: smarter-device-manager
  terminationGracePeriodSeconds: 30
```

Re: `device-plugin` path, that apparently changed (from `/var/lib/rancher/k3s/agent/kubelet/device-plugins`, which was the provided value) [some time ago](https://github.com/k3s-io/k3s/issues/2664#issuecomment-742013918)

This also required the [Device Plugin Feature Gate](https://github.com/k3s-io/k3s/discussions/4596) to be enabled.

Further useful links:
* [Reddit thread](https://old.reddit.com/r/jellyfin/comments/y7i3uc/trouble_with_quicksync_trancoding_on_new_11th_gen/)
* [Enabling iGPU](https://community.hetzner.com/tutorials/howto-enable-igpu)

---

I spent a couple hours going down the rabbit-hole above, before noting that my server doesn't have an integrated graphics card, and so that was all for naught :) luckily, that is a problem that can be entirely solved with money (those are rare!) - a suitable card should arrive over the weekend and the hacking can continue.
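For reference, a workload would then request the exposed device through the extended resource that smarter-device-manager advertises. A minimal sketch, assuming the device is published as `smarter-devices/fb0` (matching the `^fb0$` devicematch above); the pod name and image are placeholders, not taken from this repo:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: device-consumer   # hypothetical consumer pod
spec:
  containers:
  - name: app
    image: alpine
    command: ["sleep", "infinity"]
    resources:
      # The smarter-devices/<name> resource naming is an assumption based on
      # smarter-device-manager's documentation.
      limits:
        smarter-devices/fb0: 1
      requests:
        smarter-devices/fb0: 1
```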
README.md (modified)
@@ -14,18 +14,21 @@ $ curl --user <username>:<password> -X POST --upload-file ./<package>.tgz https:
 
 ### Installation
 
-```bash
-$ helm repo add --username <username> --password <password> <repo-alias> https://hostname.of.gitea/api/packages/<user>/helm
-$ helm install <release-name> <repo-alias>/<name>
-```
-
-and/or
-
-```bash
-$ kubectl apply -f application-manifests.yaml
-```
-
-TODO: [App-of-apps](https://argo-cd.readthedocs.io/en/stable/operator-manual/cluster-bootstrapping/#app-of-apps-pattern) to manage whole-cluster configuration.
+Bootstrap with `kubectl apply -f main-manifest.yaml`
+
+TODO: [App-of-apps](https://argo-cd.readthedocs.io/en/stable/operator-manual/cluster-bootstrapping/#app-of-apps-pattern) to manage whole-cluster configuration in a more programmatic way.
+
+## Initial bootstrap
+
+Note that you need to have manually connected the source Repository _in_ ArgoCD before installing the App-of-apps.
+
+TODO - when we have a better secrets management system, export Gitea user password so that it can be used by ArgoCD to initialize that repository directly (https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#repositories)
+
+## Jsonnet
+
+As of 2024, I started using Jsonnet to define apps in a less repetitious way.
+
+To check the output before submitting, use `jsonnet -J app-of-apps app-of-apps/<filename>.jsonnet`
 
 ## Other links
 
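The repository-connection TODO above could eventually be handled declaratively as well; a rough sketch of ArgoCD's declarative repository registration (the Secret name and credential values are placeholders, and the namespace assumes the `argo` install used elsewhere in this repo):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: gitea-helm-charts-repo   # hypothetical name
  namespace: argo
  labels:
    # ArgoCD treats Secrets carrying this label as repository definitions
    argocd.argoproj.io/secret-type: repository
stringData:
  type: git
  url: https://gitea.scubbo.org/scubbo/helm-charts.git
  username: scubbo
  password: <gitea-password>   # placeholder; would come from the future secrets-management system
```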
app-of-apps/app-definitions.libsonnet (new file, 162 lines)
@@ -0,0 +1,162 @@
{
  helmApplication(
    name,
    sourceRepoUrl,
    sourceChart,
    sourceTargetRevision,
    namespace="",
    helmValues={}) ::
    {
      apiVersion: "argoproj.io/v1alpha1",
      kind: "Application",
      metadata: {
        name: name,
        namespace: "argo",
        finalizers: ["resources-finalizer.argocd.argoproj.io"]
      },
      spec: {
        project: "default",
        source: {
          chart: sourceChart,
          repoURL: sourceRepoUrl,
          targetRevision: sourceTargetRevision,
          [if helmValues != {} then "helm"]: {
            valuesObject: helmValues
          }
        },
        destination: {
          server: "https://kubernetes.default.svc",
          namespace: if namespace == "" then name else namespace
        },
        syncPolicy: {
          automated: {
            prune: true
          },
          syncOptions: ["CreateNamespace=true"]
        }
      }
    },

  localApplication(
    name,
    path="",
    namespace="",
    nonHelmApp=false) ::
    {
      apiVersion: "argoproj.io/v1alpha1",
      kind: "Application",
      metadata: {
        name: name,
        namespace: "argo",
        finalizers: ["resources-finalizer.argocd.argoproj.io"]
      },
      spec: {
        project: "default",
        source: {
          repoURL: "https://gitea.scubbo.org/scubbo/helm-charts.git",
          targetRevision: "HEAD",
          path: if path == "" then std.join('/', ['charts', name]) else path,
          // I _think_ every locally-defined chart is going to have a `values.yaml`, but we can make this
          // parameterized if desired
          [if nonHelmApp != true then "helm"]: {
            valueFiles: ['values.yaml']
          }
        },
        destination: {
          server: 'https://kubernetes.default.svc',
          namespace: if namespace == "" then name else namespace
        },
        syncPolicy: {
          automated: {
            prune: true
          },
          syncOptions: ["CreateNamespace=true"]
        }
      }
    },

  kustomizeApplication(
    name,
    repoUrl="",
    namespace="",
    path="") ::
    {
      apiVersion: "argoproj.io/v1alpha1",
      kind: "Application",
      metadata: {
        name: name,
        namespace: "argo",
        finalizers: ["resources-finalizer.argocd.argoproj.io"]
      },
      spec: {
        project: "default",
        source: {
          repoURL: if repoUrl=="" then std.join('', ['https://gitea.scubbo.org/scubbo/', name, '-deployment']) else repoUrl,
          targetRevision: "HEAD",
          path: if path == "" then "." else path
        },
        destination: {
          server: 'https://kubernetes.default.svc',
          namespace: if namespace == "" then name else namespace
        },
        syncPolicy: {
          automated: {
            prune: true
          },
          syncOptions: ["CreateNamespace=true"]
        }
      }
    },

  # Sometimes we want to use an existing remote Helm chart
  # but add some locally-defined resources into the Application
  helmRemotePlusLocalApplication(
    name,
    sourceRepoUrl,
    sourceChart,
    sourceTargetRevision,
    pathToLocal="",
    namespace="",
    helmValues={},
    nonHelmApp=false) ::
    {
      apiVersion: "argoproj.io/v1alpha1",
      kind: "Application",
      metadata: {
        name: name,
        namespace: "argo",
        finalizers: ["resources-finalizer.argocd.argoproj.io"]
      },
      spec: {
        project: "default",
        sources: [
          {
            chart: sourceChart,
            repoURL: sourceRepoUrl,
            targetRevision: sourceTargetRevision,
            [if helmValues != {} then "helm"]: {
              valuesObject: helmValues
            }
          },
          {
            repoURL: "https://gitea.scubbo.org/scubbo/helm-charts.git",
            targetRevision: "HEAD",
            path: if pathToLocal == "" then std.join('/', ['charts', name]) else pathToLocal,
            // I _think_ every locally-defined chart is going to have a `values.yaml`, but we can make this
            // parameterized if desired
            [if nonHelmApp != true then "helm"]: {
              valueFiles: ['values.yaml']
            }
          }
        ],
        destination: {
          server: "https://kubernetes.default.svc",
          namespace: if namespace == "" then name else namespace
        },
        syncPolicy: {
          automated: {
            prune: true
          },
          syncOptions: ["CreateNamespace=true"]
        }
      }
    }

}
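Any of the app definitions below that import this library can be rendered locally for inspection using the command described in the README, for example:

```bash
# Render one app definition to JSON before committing
jsonnet -J app-of-apps app-of-apps/blog.jsonnet
```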
@@ -3,6 +3,8 @@ kind: Application
 metadata:
   name: cert-manager
   namespace: argo
+  finalizers:
+  - resources-finalizer.argocd.argoproj.io
 spec:
   project: default
 
@@ -30,6 +32,8 @@ kind: Application
 metadata:
   name: prom-crds
   namespace: argo
+  finalizers:
+  - resources-finalizer.argocd.argoproj.io
 spec:
   project: default
 
@@ -37,6 +41,12 @@ spec:
     repoURL: https://github.com/prometheus-community/helm-charts.git
     path: charts/kube-prometheus-stack/crds/
     targetRevision: kube-prometheus-stack-45.7.1
+    helm:
+      values: |
+        tolerations:
+        - key: architecture
+          operator: Equal
+          value: x86
     directory:
       recurse: true
 
@@ -57,6 +67,8 @@ kind: Application
 metadata:
   name: prometheus-community
   namespace: argo
+  finalizers:
+  - resources-finalizer.argocd.argoproj.io
 spec:
   project: default
 
@@ -109,6 +121,8 @@ kind: Application
 metadata:
   name: grafana
   namespace: argo
+  finalizers:
+  - resources-finalizer.argocd.argoproj.io
 spec:
   project: default
 
@@ -118,17 +132,39 @@ spec:
     targetRevision: "6.49.0"
 
     helm:
-      parameters:
-        - name: image.tag
-          value: "9.3.2"
-        - name: ingress.enabled
-          value: true
-        - name: ingress.hosts[0]
-          value: grafana.avril
-        - name: persistence.enabled
-          value: true
-        - name: persistence.storageClassName
-          value: longhorn
+      values: |
+        image:
+          tag: "10.1.0"
+        tolerations:
+          - key: architecture
+            operator: Equal
+            value: x86
+        ingress:
+          enabled: true
+          hosts:
+            - grafana.avril
+        persistence:
+          enabled: true
+          storageClassName: longhorn
+          accessModes:
+            - ReadWriteMany
+        sidecar:
+          dashboards:
+            enabled: true
+            defaultFolderName: General
+            label: grafana_dashboard
+            labelValue: "1"
+            folderAnnotation: grafana_folder
+            searchNamespace: ALL
+            provider:
+              foldersFromFilesStructure: "true"
+        datasources:
+          datasources.yaml:
+            apiVersion: 1
+            datasources:
+              - name: Prometheus
+                type: prometheus
+                url: http://prometheus.avril
 
   destination:
     server: "https://kubernetes.default.svc"
@@ -140,6 +176,37 @@ spec:
     syncOptions:
     - CreateNamespace=true
 ---
+# https://github.com/dotdc/grafana-dashboards-kubernetes/blob/master/argocd-app.yml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: grafana-dashboards-kubernetes
+  namespace: argo
+  labels:
+    app.kubernetes.io/name: grafana-dashboards-kubernetes
+    app.kubernetes.io/version: HEAD
+    app.kubernetes.io/managed-by: argocd
+  finalizers:
+  - resources-finalizer.argocd.argoproj.io
+spec:
+  project: default # You may need to change this!
+  source:
+    path: ./
+    repoURL: https://github.com/dotdc/grafana-dashboards-kubernetes
+    targetRevision: HEAD
+
+  destination:
+    server: https://kubernetes.default.svc
+    namespace: monitoring
+  syncPolicy:
+    ## https://argo-cd.readthedocs.io/en/stable/user-guide/auto_sync
+    automated:
+      prune: true
+      selfHeal: true
+    syncOptions:
+      - CreateNamespace=true
+      - Replace=true
+---
 # TODO - use Jsonnet or similar to automate building this from all the directories
 # (and pull out the common config)
 apiVersion: argoproj.io/v1alpha1
@@ -147,6 +214,8 @@ kind: Application
 metadata:
   name: jellyfin
   namespace: argo
+  finalizers:
+  - resources-finalizer.argocd.argoproj.io
 spec:
   project: default
 
@@ -168,3 +237,86 @@ spec:
       prune: true
     syncOptions:
     - CreateNamespace=true
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: proton-vpn
+  namespace: argo
+  finalizers:
+  - resources-finalizer.argocd.argoproj.io
+spec:
+  project: default
+
+  source:
+    repoURL: https://gitea.scubbo.org/scubbo/helm-charts.git
+    targetRevision: HEAD
+    path: charts/proton-vpn
+
+    helm:
+      valueFiles:
+      - values.yaml
+
+  destination:
+    server: "https://kubernetes.default.svc"
+    namespace: proton-vpn
+
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+    - CreateNamespace=true
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: ombi
+  namespace: argo
+  finalizers:
+  - resources-finalizer.argocd.argoproj.io
+spec:
+  project: default
+
+  source:
+    repoURL: https://gitea.scubbo.org/scubbo/helm-charts.git
+    targetRevision: HEAD
+    path: charts/ombi
+
+    helm:
+      valueFiles:
+      - values.yaml
+
+  destination:
+    server: "https://kubernetes.default.svc"
+    namespace: ombi
+
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+    - CreateNamespace=true
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: jackjack-app-of-apps-private
+  namespace: argo
+  finalizers:
+  - resources-finalizer.argocd.argoproj.io
+spec:
+  project: default
+
+  source:
+    repoURL: https://gitea.scubbo.org/scubbo/private-apps.git
+    targetRevision: HEAD
+    path: app-of-apps
+
+  destination:
+    server: "https://kubernetes.default.svc"
+    namespace: default
+
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+    - CreateNamespace=true
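With the sidecar settings added above, dashboards can be delivered as labelled ConfigMaps from any namespace; a minimal sketch (the ConfigMap name and dashboard JSON are placeholders, not taken from this repo):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-service-dashboard   # hypothetical
  namespace: monitoring
  labels:
    grafana_dashboard: "1"     # matches label/labelValue in the sidecar config
  annotations:
    grafana_folder: General    # folderAnnotation determines the Grafana folder
data:
  my-service-dashboard.json: |
    { "title": "My Service", "panels": [] }
```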
app-of-apps/blog.jsonnet (new file, 5 lines)
@@ -0,0 +1,5 @@
local appDef = import './app-definitions.libsonnet';

[
  appDef.kustomizeApplication(name="blog")
]
app-of-apps/crossplane.jsonnet (new file, 53 lines)
@@ -0,0 +1,53 @@
// https://docs.crossplane.io/v1.15/software/install/#installed-deployments
local appDef = import './app-definitions.libsonnet';

// Installation of the Vault Provider is left as a manual step, since it relies on secret creation:
// https://github.com/upbound/provider-vault
//
// It also required creating a role to bind to the ServiceAccount:
//
// apiVersion: rbac.authorization.k8s.io/v1
// kind: ClusterRoleBinding
// metadata:
//   name: vault-provider-role-binding
//   namespace: crossplane-system
// roleRef:
//   apiGroup: rbac.authorization.k8s.io
//   kind: ClusterRole
//   name: vault-provider-role
// subjects:
// - kind: ServiceAccount
//   name: provider-vault-b61923ede364
//   namespace: crossplane-system
// ---
// apiVersion: rbac.authorization.k8s.io/v1
// kind: ClusterRole
// metadata:
//   name: vault-provider-role
//   namespace: crossplane-system
// rules:
// - apiGroups:
//   - identity.vault.upbound.io
//   resources:
//   - mfaoktas
//   - groupmembergroupidsidses
//   - groupmemberentityidsidses
//   verbs:
//   - get
//   - list
//   - watch
// - apiGroups:
//   - mfa.vault.upbound.io
//   resources:
//   - oktas
//   verbs:
//   - get
//   - list
//   - watch
appDef.helmApplication(
  name="crossplane",
  sourceRepoUrl="https://charts.crossplane.io/stable",
  sourceChart="crossplane",
  sourceTargetRevision="1.15.0",
  namespace="crossplane-system"
)
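For context, the manual Vault-provider installation referred to in the comments would look roughly like the following Crossplane `Provider` object; the package version is a placeholder, and the credentials Secret and ProviderConfig still have to be created by hand as noted above:

```yaml
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
  name: provider-vault
spec:
  # Version tag is a placeholder; see https://github.com/upbound/provider-vault
  package: xpkg.upbound.io/upbound/provider-vault:v0.5.0
```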
app-of-apps/drone.jsonnet (new file, 65 lines)
@@ -0,0 +1,65 @@
local appDef = import './app-definitions.libsonnet';

[
  appDef.localApplication(name="drone"),

  // TODO - maybe extract this, too?
  {
    apiVersion: "secrets.hashicorp.com/v1beta1",
    kind: "VaultAuth",
    metadata: {
      name: "static-auth",
      namespace: "drone"
    },
    spec: {
      method: "kubernetes",
      mount: "kubernetes",
      kubernetes: {
        role: "vault-secrets-operator",
        serviceAccount: "default",
        audiences: ["vault"]
      }
    }
  },

  // Note that currently this secret is created manually and statically. It'd be really cool for cold-start setup if OAuth
  // App creation could be triggered at Gitea startup, and a secret automatically created!
  {
    apiVersion: "secrets.hashicorp.com/v1beta1",
    kind: "VaultStaticSecret",
    metadata: {
      name: "gitea-oauth-creds",
      namespace: "drone"
    },
    spec: {
      type: "kv-v2",
      mount: "shared-secrets",
      path: "gitea/oauth-creds",
      destination: {
        name: "gitea-oauth-creds",
        create: true
      },
      refreshAfter: "30s",
      vaultAuthRef: "static-auth"
    }
  },
  {
    apiVersion: "secrets.hashicorp.com/v1beta1",
    kind: "VaultStaticSecret",
    metadata: {
      name: "mastodon-creds",
      namespace: "drone"
    },
    spec: {
      type: "kv-v2",
      mount: "shared-secrets",
      path: "mastodon/creds",
      destination: {
        name: "mastodon-creds",
        create: true
      },
      refreshAfter: "30s",
      vaultAuthRef: "static-auth"
    }
  }
]
app-of-apps/edh-elo.jsonnet (new file, 5 lines)
@@ -0,0 +1,5 @@
local appDef = import './app-definitions.libsonnet';

[
  appDef.localApplication(name="edh-elo")
]
app-of-apps/keycloak-backup.yaml (new file, 159 lines)
@@ -0,0 +1,159 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: keycloak-backup
  namespace: keycloak
spec:
  # Arbitrary non-midnight time.
  schedule: "10 2 * * *"
  jobTemplate:
    spec:
      template:
        spec:
          initContainers:
            - args:
                - -ec
                - |
                  #!/bin/bash
                  cp -r /opt/bitnami/keycloak/lib/quarkus/* /quarkus
              command:
                - /bin/bash
              image: docker.io/bitnami/keycloak:24.0.2
              imagePullPolicy: IfNotPresent
              name: init-quarkus-directories
              resources: {}
              securityContext:
                allowPrivilegeEscalation: false
                capabilities:
                  drop:
                    - ALL
                privileged: false
                readOnlyRootFilesystem: false
                runAsGroup: 0
                runAsNonRoot: true
                runAsUser: 1001
                seccompProfile:
                  type: RuntimeDefault
              volumeMounts:
                - mountPath: /tmp
                  name: empty-dir
                  subPath: tmp-dir
                - mountPath: /quarkus
                  name: empty-dir
                  subPath: app-quarkus-dir
          containers:
            - args:
                - /script/backup_keycloak.sh
              env:
                - name: KUBERNETES_NAMESPACE
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: metadata.namespace
                - name: BITNAMI_DEBUG
                  value: "false"
                - name: KEYCLOAK_ADMIN_PASSWORD
                  valueFrom:
                    secretKeyRef:
                      key: admin-password
                      name: keycloak
                - name: KEYCLOAK_DATABASE_PASSWORD
                  valueFrom:
                    secretKeyRef:
                      key: password
                      name: keycloak-postgresql
                - name: KEYCLOAK_HTTP_RELATIVE_PATH
                  value: /
                - name: KEYCLOAK_CACHE_TYPE
                  value: local
              envFrom:
                - configMapRef:
                    name: keycloak-env-vars
              image: docker.io/bitnami/keycloak:24.0.2
              imagePullPolicy: IfNotPresent
              name: backup-container
              ports:
                - containerPort: 8080
                  name: http
                  protocol: TCP
                - containerPort: 7800
                  name: infinispan
                  protocol: TCP
              volumeMounts:
                - mountPath: /tmp
                  name: empty-dir
                  subPath: tmp-dir
                - mountPath: /opt/bitnami/keycloak/conf
                  name: empty-dir
                  subPath: app-conf-dir
                - mountPath: /opt/bitnami/keycloak/lib/quarkus
                  name: empty-dir
                  subPath: app-quarkus-dir
                - mountPath: /backup
                  name: backup-dir
                - mountPath: /script
                  name: script-volume
          restartPolicy: Never
          securityContext:
            # https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied
            runAsUser: 501
            fsGroup: 501
          volumes:
            - emptyDir: {}
              name: empty-dir
            - name: backup-dir
              persistentVolumeClaim:
                claimName: backup-dir-pvc
            - name: script-volume
              configMap:
                name: keycloak-backup-script
                defaultMode: 0777
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: backup-dir-pv
  namespace: keycloak
spec:
  capacity:
    storage: 2M
  accessModes:
    - ReadWriteMany
  nfs:
    server: galactus.avril
    path: /mnt/high-resiliency/manual-nfs/backups/keycloak/
  mountOptions:
    - nfsvers=4.2
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: backup-dir-pvc
  namespace: keycloak
spec:
  storageClassName: ""
  volumeName: backup-dir-pv
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 2M
---
apiVersion: v1
kind: ConfigMap
metadata:
  creationTimestamp: "2024-04-20T04:14:45Z"
  name: keycloak-backup-script
  namespace: keycloak
data:
  backup_keycloak.sh: |+
    env
    echo 'That was the env, now running export'
    /opt/bitnami/keycloak/bin/kc.sh export \
      --file "/backup/realm-export-$(date '+%Y-%m-%d').json" \
      --realm avril \
      --db postgres \
      --db-url jdbc:postgresql://keycloak-postgresql-hl/bitnami_keycloak \
      --db-password "$KEYCLOAK_DATABASE_PASSWORD" \
      --db-username bn_keycloak
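Restoring from one of these exports would use Keycloak's matching import command. A hedged sketch, reusing the same database flags as the export above; the file name is a placeholder and the exact flag set has not been verified against this setup:

```bash
# Run inside a Keycloak container with the same DB environment variables as the backup job.
/opt/bitnami/keycloak/bin/kc.sh import \
  --file /backup/realm-export-2024-04-20.json \
  --db postgres \
  --db-url jdbc:postgresql://keycloak-postgresql-hl/bitnami_keycloak \
  --db-password "$KEYCLOAK_DATABASE_PASSWORD" \
  --db-username bn_keycloak
```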
app-of-apps/keycloak.jsonnet (new file, 24 lines)
@@ -0,0 +1,24 @@
local appDef = import './app-definitions.libsonnet';

appDef.helmApplication(
  name="keycloak",
  sourceRepoUrl="https://charts.bitnami.com/bitnami",
  sourceChart="keycloak",
  sourceTargetRevision="19.3.4",
  helmValues={
    ingress: {
      enabled: true,
      hostname: "keycloak.avril"
    },
    image: {
      tag: "24.0.2"
    },
    extraEnvVars: [
      {
        // https://github.com/keycloak/keycloak/issues/28384
        name: "KEYCLOAK_CACHE_TYPE",
        value: "local"
      }
    ]
  }
)
app-of-apps/miniflux.jsonnet (new file, 5 lines)
@@ -0,0 +1,5 @@
local appDef = import './app-definitions.libsonnet';

[
  appDef.localApplication(name="miniflux")
]
app-of-apps/open-project.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: open-project
  namespace: argo
  finalizers:
  - resources-finalizer.argocd.argoproj.io
spec:
  project: default

  source:
    chart: openproject
    repoURL: https://charts.openproject.org
    targetRevision: 4.3.0

    helm:
      values: |
        ingress:
          host: openproject.avril
        persistence:
          storageClassName: freenas-nfs-csi
        postgresql:
          auth:
            existingSecret: postgres-auth
        global:
          storageClass: freenas-iscsi-csi

  destination:
    server: "https://kubernetes.default.svc"
    namespace: open-project

  syncPolicy:
    automated:
      prune: true
    syncOptions:
    - CreateNamespace=true
app-of-apps/openwebui.jsonnet (new file, 8 lines)
@@ -0,0 +1,8 @@
local appDef = import './app-definitions.libsonnet';

appDef.helmApplication(
  name="openwebui",
  sourceRepoUrl="https://open-webui.github.io/helm-charts",
  sourceChart="open-webui",
  sourceTargetRevision="5.10.0"
)
app-of-apps/vault-crossplane-integration.jsonnet (new file, 3 lines)
@@ -0,0 +1,3 @@
local appDef = import './app-definitions.libsonnet';

appDef.localApplication(name="vault-crossplane-integration", nonHelmApp=true)
app-of-apps/vault-secrets-operator.jsonnet (new file, 38 lines)
@@ -0,0 +1,38 @@
// https://developer.hashicorp.com/vault/tutorials/kubernetes/vault-secrets-operator
//
// Note that this has a prerequisite that the Vault system has been configured with appropriate
// authentication first. In particular, the specification of the set of namespaces that secrets can be synced to is set
// in `bound_service_account_namespaces` in the Vault role.
local appDef = import './app-definitions.libsonnet';

appDef.helmApplication(
  name="vault-secrets-operator",
  sourceRepoUrl="https://helm.releases.hashicorp.com",
  sourceChart="vault-secrets-operator",
  sourceTargetRevision="0.5.2",
  namespace="vault-secrets-operator-system",
  helmValues={
    defaultVaultConnection: {
      enabled: true,
      address: "http://vault.vault.svc.cluster.local:8200",
      skipTLSVerify: false
    },
    controller: {
      manager: {
        clientCache: {
          persistenceModel: "direct-encrypted",
          storageEncryption: {
            enabled: true,
            mount: "demo-auth-mount",
            keyName: "vso-client-cache",
            transitMount: "demo-transit",
            kubernetes: {
              role: "auth-role-operator",
              serviceAccount: "demo-operator"
            }
          }
        }
      }
    }
  }
)
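The Vault-side prerequisite mentioned in the comments is a Kubernetes auth role whose `bound_service_account_namespaces` lists the namespaces that may receive synced secrets; the role name below matches the `VaultAuth` objects in `app-of-apps/drone.jsonnet`, but the mount path, policy name, and namespace list are placeholders, not taken from this repo:

```bash
vault write auth/kubernetes/role/vault-secrets-operator \
  bound_service_account_names=default \
  bound_service_account_namespaces="drone,keycloak" \
  policies=shared-secrets-read \
  ttl=24h
```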
app-of-apps/vault.jsonnet (new file, 69 lines)
@@ -0,0 +1,69 @@
local appDef = import './app-definitions.libsonnet';

appDef.helmRemotePlusLocalApplication(
  name="vault",
  sourceRepoUrl="https://helm.releases.hashicorp.com",
  sourceChart="vault",
  sourceTargetRevision="0.25.0",
  helmValues={
    global: {
      namespace: "vault"
    },
    ui: {
      enabled: true
    },
    serverTelemetry: {
      serviceMonitor: {
        enabled: true
      }
    },
    server: {
      ingress: {
        enabled: true,
        ingressClassName: "traefik",
        hosts: [
          {
            host: "vault.avril",
            paths: []
          }
        ]
      },
      dataStorage: {
        size: "20Gi",
        storageClass: "freenas-iscsi-csi"
      },
      standalone: {
        config: |||
          ui = true
          listener "tcp" {
            tls_disable = 1
            address = "[::]:8200"
            cluster_address = "[::]:8201"

          }
          storage "file" {
            path = "/vault/data"
          }
          # Everything above this line is the default.
          #
          # Enable Plugins (originally for GitHub Secrets Plugin)
          plugin_directory = "/etc/vault/plugins"
        |||
      },
      volumes: [
        {
          name: "plugins",
          persistentVolumeClaim: {
            claimName: "vault-plugin-claim"
          }
        }
      ],
      volumeMounts: [
        {
          name: "plugins",
          mountPath: "/etc/vault/plugins"
        }
      ]
    }
  }
)
charts/drone/Chart.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
apiVersion: v2
name: drone-scubbo
description: A personalized Helm chart to deploy Gitea to Kubernetes

type: application
version: 0.1.0
appVersion: "1.16.0"

dependencies:
  - name: drone
    repository: https://charts.drone.io
    version: "0.6.4"
    alias: drone-server
  - name: drone-runner-docker
    repository: https://charts.drone.io
    version: "0.6.1"
    alias: drone-runner
  - name: drone-kubernetes-secrets
    repository: https://charts.drone.io
    version: "0.1.4"
charts/drone/README.md
Normal file
13
charts/drone/README.md
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
TODO:
|
||||||
|
* Create the following in an initContainer if they don't exist:
|
||||||
|
* The Gitea OAuth application at startup
|
||||||
|
* The Prometheus user (https://cogarius.medium.com/3-3-complete-guide-to-ci-cd-pipelines-with-drone-io-on-kubernetes-drone-metrics-with-prometheus-c2668e42b03f) - probably by mounting the volume, using sqlite3 to parse out admin password, then using that to make API call
|
||||||
|
* Create `gitea_password` Organization Secret at init.
|
||||||
|
|
||||||
|
Ensure that Vault has a secret at `shared-secrets/gitea/oauth-creds` with keys `DRONE_GITEA_CLIENT_ID` and `DRONE_GITEA_CLIENT_SECRET` (see the application definition in `app-of-apps/drone.jsonnet` to see how the secret is injected from Vault into k8s). Remember also to create an Organization Secret named `gitea_password` for pulling.
|
||||||
|
|
||||||
|
For MTU problem diagnosis:
|
||||||
|
|
||||||
|
https://github.com/gliderlabs/docker-alpine/issues/307#issuecomment-634852419
|
||||||
|
|
||||||
|
https://liejuntao001.medium.com/fix-docker-in-docker-network-issue-in-kubernetes-cc18c229d9e5
|
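A minimal sketch of seeding that Vault secret, assuming the `shared-secrets` KV v2 mount referenced in `app-of-apps/drone.jsonnet`; the credential values are placeholders taken from the OAuth application you create in Gitea:

```bash
vault kv put shared-secrets/gitea/oauth-creds \
  DRONE_GITEA_CLIENT_ID=<client-id-from-gitea> \
  DRONE_GITEA_CLIENT_SECRET=<client-secret-from-gitea>
```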
charts/drone/charts/drone-0.6.4.tgz (new binary file, not shown)
charts/drone/charts/drone-runner-docker-0.6.1.tgz (new binary file, not shown)
charts/drone/templates/_helpers.tpl (new file, 62 lines)
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "drone-scubbo.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "drone-scubbo.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "drone-scubbo.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "drone-scubbo.labels" -}}
helm.sh/chart: {{ include "drone-scubbo.chart" . }}
{{ include "drone-scubbo.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "drone-scubbo.selectorLabels" -}}
app.kubernetes.io/name: {{ include "drone-scubbo.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "drone-scubbo.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "drone-scubbo.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
charts/drone/templates/kubernetes_secrets_secret.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
{{- /*
https://itnext.io/manage-auto-generated-secrets-in-your-helm-charts-5aee48ba6918
*/}}
apiVersion: v1
kind: Secret
metadata:
  name: "kubernetes-secrets-secret"
  annotations:
    "helm.sh/resource-policy": "keep"
type: Opaque
data:
  # retrieve the secret data using lookup function and when not exists, return an empty dictionary / map as result
  {{- $existing_secret := (lookup "v1" "Secret" .Release.Namespace "kubernetes-secrets-secret") | default dict }}
  {{- $secretData := (get $existing_secret "data") | default dict }}
  # set $secret to existing secret data or generate a random one when not exists
  {{- $secret := (get $secretData "secret") | default (randAlphaNum 32 | b64enc) }}
  # generate 32 chars long random string, base64 encode it and then double-quote the result string.
  SECRET_KEY: {{ $secret | quote }}
  # Duplicate the secret-value with a different key so that it can be mounted into the environment of a pod which
  # required a different name (to the best of my knowledge, there's no way to mount a secret as an env variable but
  # transform the key)
  DRONE_SECRET_PLUGIN_TOKEN: {{ $secret | quote }}
charts/drone/templates/rpc_secret.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
{{- /*
https://itnext.io/manage-auto-generated-secrets-in-your-helm-charts-5aee48ba6918
*/}}
{{- if empty .Values.manualRPCSecretName }}
apiVersion: v1
kind: Secret
metadata:
  name: "rpc-secret"
  annotations:
    "helm.sh/resource-policy": "keep"
type: Opaque
data:
  # retrieve the secret data using lookup function and when not exists, return an empty dictionary / map as result
  {{- $existing_secret := (lookup "v1" "Secret" .Release.Namespace "rpc-secret") | default dict }}
  {{- $secretData := (get $existing_secret "data") | default dict }}
  # set $secret to existing secret data or generate a random one when not exists
  {{- $secret := (get $secretData "secret") | default (randAlphaNum 32 | b64enc) }}
  # generate 32 chars long random string, base64 encode it and then double-quote the result string.
  secret: {{ $secret | quote }}
{{- end }}
charts/drone/values.yaml (new file, 74 lines)
@@ -0,0 +1,74 @@
drone-server:
  env:
    DRONE_SERVER_HOST: drone.scubbo.org
    DRONE_SERVER_PROTO: https
    DRONE_RPC_SECRET: rpc-secret
    DRONE_GITEA_SERVER: https://gitea.scubbo.org
    DRONE_USER_CREATE: username:scubbo,admin:true
  extraSecretNamesForEnvFrom:
    - gitea-oauth-creds
  service:
    port: 3500
  persistentVolume:
    storageClass: longhorn
  # Keep the Runner untolerant for now, until I progress to intentionally building dual-architecture images.
  tolerations:
    - key: architecture
      operator: Equal
      value: x86

drone-runner:
  env:
    DRONE_RPC_SECRET: rpc-secret
    DRONE_RPC_HOST: drone-drone-server:3500 # This is the name of the service for the runner
    DRONE_RUNNER_NETWORK_OPTS: "com.docker.network.driver.mtu:1450"
    DRONE_SECRET_PLUGIN_ENDPOINT: "http://drone-drone-kubernetes-secrets:3000"
  extraSecretNamesForEnvFrom:
    - kubernetes-secrets-secret
  dind:
    commandArgs:
      - "--host"
      - "tcp://localhost:2375"
      - "--mtu=1450"
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: kubernetes.io/arch
                operator: In
                values:
                  - arm64
              # Avoid the cursed node!
              - key: kubernetes.io/hostname
                operator: NotIn
                values:
                  - rasnu2

drone-kubernetes-secrets:
  rbac:
    secretNamespace: drone
  env:
    KUBERNETES_NAMESPACE: drone
  extraSecretNamesForEnvFrom:
    - kubernetes-secrets-secret

drone:
  server: "drone.scubbo.org"

volume:
  nfsServer: rassigma.avril
  nfsPath: /mnt/BERTHA/drone

service:
  type: ClusterIP
  port: 3500

gitea:
  server: https://gitea.scubbo.org
  # Secret with keys `clientId` and `clientSecret`
  oauthSecretName: gitea-oauth-creds

# Set this if you want to use an existing secret for the RPC
# secret (otherwise, a fresh one will be created if necessary)
manualRPCSecretName: ""
charts/edh-elo/Chart.lock (new file, 6 lines)
@@ -0,0 +1,6 @@
dependencies:
- name: postgresql
  repository: https://charts.bitnami.com/bitnami
  version: 15.5.9
digest: sha256:7f365bc259a1e72293bc76edb00334d277a58f6db69aa0f2021c09c1bab5a089
generated: "2024-06-23T15:37:12.419204-07:00"
charts/edh-elo/Chart.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: v2
name: edh-elo
description: A personalized Helm chart to deploy Gitea to Kubernetes

type: application
version: 0.1.0
appVersion: "1.0.0"

dependencies:
  - name: postgresql
    version: "15.5.9"
    repository: https://charts.bitnami.com/bitnami
    condition: postgresql.enabled
    tags:
      - services
      - db
      - write
charts/edh-elo/charts/postgresql-15.5.9.tgz (new binary file, not shown)
charts/edh-elo/templates/_helpers.tpl (new file, 62 lines)
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "edh-elo.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "edh-elo.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "edh-elo.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "edh-elo.labels" -}}
helm.sh/chart: {{ include "edh-elo.chart" . }}
{{ include "edh-elo.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "edh-elo.selectorLabels" -}}
app.kubernetes.io/name: {{ include "edh-elo.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "edh-elo.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "edh-elo.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
charts/edh-elo/templates/deployment.yaml (new file, 47 lines)
@@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "edh-elo.fullname" . }}
  labels:
    {{- include "edh-elo.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      {{- include "edh-elo.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "edh-elo.selectorLabels" . | nindent 8 }}
    spec:
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          {{- if .Values.extraEnv }}
          {{- with .Values.extraEnv }}
          env:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- end }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
charts/edh-elo/templates/service.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "edh-elo.fullname" . }}
  labels:
    {{- include "edh-elo.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: 8000
      protocol: TCP
  sessionAffinity: ClientIP
  selector:
    {{- include "edh-elo.selectorLabels" . | nindent 4 }}
104
charts/edh-elo/values.yaml
Normal file
104
charts/edh-elo/values.yaml
Normal file
@ -0,0 +1,104 @@
image:
  repository: gitea.scubbo.org/scubbo/edh-elo
  tag: "9b4e6c3b4d852883a372332461253ef9eae6d014"
  pullPolicy: IfNotPresent

extraEnv:
  - name: DATABASE_URL
    value: postgresql://db_user:pass@edh-elo-postgresql/postgres
  - name: SPREADSHEET_ID
    value: 1ITgXXfq7KaNP8JTQMvoZJSbu7zPpCcfNio_aooULRfc
  - name: PATH_TO_GOOGLE_SHEETS_CREDENTIALS
    value: /vault/secrets/google-credentials.json

postgresql:
  auth:
    existing-secret: edh-elo-postgresql
  primary:
    persistence:
      enabled: true
    initdb:
      # TODO - switch to using a secret (and update `extraEnv`, above)
      scripts:
        psql.sql: |
          CREATE USER db_user WITH PASSWORD 'pass';
          GRANT ALL PRIVILEGES ON DATABASE postgres TO db_user;
          GRANT ALL ON SCHEMA public TO db_user;

############
# Defaults #
############

replicaCount: 1

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations:
  vault.hashicorp.com/agent-inject: "true"
  vault.hashicorp.com/agent-inject-status: update
  vault.hashicorp.com/role: "edh-elo"
  vault.hashicorp.com/agent-inject-secret-google-credentials.json: "edh-elo/data/google-credentials"
  vault.hashicorp.com/agent-inject-template-google-credentials.json: |
    {{- with secret "edh-elo/data/google-credentials" -}}
    {{- .Data.data | toJSON -}}
    {{- end -}}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: LoadBalancer
  port: 8000

ingress:
  enabled: false
  className: "traefik"
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  # hosts:
  #   - host: edh-elo.avril
  #     paths:
  #       - path: /
  #         pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector: {}
  # architecture: x86

tolerations: {}
  # - key: architecture
  #   operator: Equal
  #   value: x86

affinity: {}
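The `postgresql.auth.existing-secret` value and the initdb TODO above both assume the database credentials are provisioned out-of-band. A minimal sketch of creating such a secret; the key names follow the Bitnami PostgreSQL chart's usual convention and the literal values are placeholders, so treat both as assumptions rather than what this repo actually does:

```
# Hypothetical sketch: pre-create the secret referenced by `existing-secret: edh-elo-postgresql`.
# Key names assume the Bitnami PostgreSQL subchart; adjust if the chart in use expects others.
kubectl create secret generic edh-elo-postgresql \
  --from-literal=postgres-password='<admin-password>' \
  --from-literal=password='<db_user-password>'
```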
charts/jellyfin/NOTES.md (new file, 91 lines)
@@ -0,0 +1,91 @@
For external availability - use the following CloudFormation template:

```
AWSTemplateFormatVersion: 2010-09-09
Resources:
  SecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupName: TailnetProxySecurityGroup
      GroupDescription: Tailnet Proxy Security Group
      SecurityGroupEgress:
        - CidrIp: 0.0.0.0/0
          FromPort: 443
          ToPort: 443
          IpProtocol: -1
        - CidrIp: 0.0.0.0/0
          FromPort: 80
          ToPort: 80
          IpProtocol: -1
      SecurityGroupIngress:
        - CidrIp: 0.0.0.0/0
          FromPort: 22
          ToPort: 22
          IpProtocol: -1
      VpcId: vpc-952036f0
  LaunchTemplate:
    Type: AWS::EC2::LaunchTemplate
    Properties:
      LaunchTemplateName: TailnetLaunchTemplate
      LaunchTemplateData:
        UserData:
          Fn::Base64: |
            #!/bin/bash

            # https://docs.docker.com/engine/install/ubuntu/
            sudo apt-get update
            sudo apt-get install -y ca-certificates curl
            sudo install -m 0755 -d /etc/apt/keyrings
            sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
            sudo chmod a+r /etc/apt/keyrings/docker.asc
            echo \
              "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
              $(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \
              sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
            sudo apt-get update

            sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
            cat <<EOF | sudo docker compose -f - up -d
            services:
              app:
                image: 'jc21/nginx-proxy-manager:latest'
                restart: unless-stopped
                ports:
                  - "80:80"
                  - "81:81"
                  - "443:443"
                volumes:
                  - data:/data
                  - letsencrypt:/etc/letsencrypt

            volumes:
              data:
              letsencrypt:
            EOF


            curl -fsSL https://tailscale.com/install.sh | sh
            # Manual setup:
            # * Access `<public>:81`, log in with `admin@example.com // changeme` - prompted to create new account
            # * Create "New Proxy Host" from Domain Name to jellyfin.avril
            # * Set DNS to forward jellyfin.scubbo.org -> <public IP>
            # * `sudo tailscale up` and follow the resultant URL to connect to the TailNet
            #
            # TODO - provide a secret in an AWS Secret so `sudo tailscale up` can be autonomous (then don't need to open port 81)
  JellyfinProxyInstance:
    Type: AWS::EC2::Instance
    DependsOn: "LaunchTemplate"
    Properties:
      # ImageId: ami-00beae93a2d981137
      ImageId: ami-04b4f1a9cf54c11d0
      InstanceType: t2.micro
      LaunchTemplate:
        LaunchTemplateName: TailnetLaunchTemplate
        Version: "1"
      NetworkInterfaces:
        - AssociatePublicIpAddress: "true"
          DeviceIndex: "0"
          GroupSet:
            - Ref: "SecurityGroup"
          SubnetId: "subnet-535f3d78"
```
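One way the TODO in the UserData above could be closed out, sketched under the assumption that a Tailscale auth key has already been stored in AWS Secrets Manager under a hypothetical name like `tailscale-auth-key` and that the instance profile is allowed to read it:

```
# Hypothetical sketch: fetch a pre-provisioned Tailscale auth key and join the tailnet
# non-interactively, so port 81 never needs to be opened for manual setup.
AUTH_KEY="$(aws secretsmanager get-secret-value \
  --secret-id tailscale-auth-key \
  --query SecretString --output text)"
sudo tailscale up --authkey "${AUTH_KEY}"
```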
@@ -26,18 +26,26 @@ spec:
       {{- end }}
       securityContext:
         {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      {{- if .Values.runtimeClassName }}
+      runtimeClassName: {{ .Values.runtimeClassName }}
+      {{- end }}
       containers:
         - name: {{ .Chart.Name }}
           securityContext:
             {{- toYaml .Values.securityContext | nindent 12 }}
           image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
           imagePullPolicy: {{ .Values.image.pullPolicy }}
+          env:
+            - name: NVIDIA_DRIVER_CAPABILITIES
+              value: all
+            - name: NVIDIA_VISIBLE_DEVICES
+              value: all
           resources:
             {{- toYaml .Values.resources | nindent 12 }}
           volumeMounts:
-            - mountPath: /media
-              name: jf-media
-              readOnly: True
+            - mountPath: /truenas-media
+              name: jf-truenas-media
+              # readOnly: True
             - mountPath: /config
               name: jf-config
             - mountPath: /cache
@@ -48,9 +56,9 @@ spec:
           value: bad
           effect: NoSchedule
       volumes:
-        - name: jf-media
+        - name: jf-truenas-media
           persistentVolumeClaim:
-            claimName: jf-media-pvc
+            claimName: jf-truenas-media-pvc
        - name: jf-config
          persistentVolumeClaim:
            claimName: jf-config-pvc
@@ -25,4 +25,8 @@ spec:
                 secretKeyRef:
                   name: jellyfin-metrics-secret
                   key: api-key
+      {{- with .Values.metrics.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}

|
|||||||
requests:
|
requests:
|
||||||
storage: {{ .config.size | quote }}
|
storage: {{ .config.size | quote }}
|
||||||
{{- end}}
|
{{- end}}
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: PersistentVolumeClaim
|
||||||
|
metadata:
|
||||||
|
name: jf-truenas-media-pvc
|
||||||
|
spec:
|
||||||
|
storageClassName: ""
|
||||||
|
volumeName: jf-truenas-media-pv
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteMany
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 20T
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: PersistentVolume
|
||||||
|
metadata:
|
||||||
|
name: jf-truenas-media-pv
|
||||||
|
spec:
|
||||||
|
capacity:
|
||||||
|
storage: 20T
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteMany
|
||||||
|
nfs:
|
||||||
|
server: galactus.avril
|
||||||
|
path: /mnt/low-resiliency-with-read-cache/ombi-data/
|
||||||
# ---
|
# ---
|
||||||
# # https://forum.jellyfin.org/t-could-not-apply-migration-migrateactivitylogdatabase
|
# # https://forum.jellyfin.org/t-could-not-apply-migration-migrateactivitylogdatabase
|
||||||
# apiVersion: v1
|
# apiVersion: v1
|
||||||
|
@@ -28,8 +28,14 @@ podAnnotations: {}
 podSecurityContext: {}
   # fsGroup: 2000

-securityContext: {}
-  # capabilities:
+securityContext:
+  runAsUser: 1000
+  fsGroup: 1000
+  supplementalGroups:
+    - 44  # `getent group video | cut -d: -f3` - https://jellyfin.org/docs/general/administration/hardware-acceleration/intel#kubernetes
+  capabilities:
+    add:
+      - "SYS_ADMIN"
   # drop:
   # - ALL
   # readOnlyRootFilesystem: true
@@ -51,22 +57,21 @@ ingress:
       paths:
         - path: /
           pathType: ImplementationSpecific
+    - host: jellyfin.scubbo.org
+      paths:
+        - path: /
+          pathType: ImplementationSpecific
   tls: []
   #  - secretName: chart-example-tls
   #    hosts:
   #      - chart-example.local

-resources: {}
-  # We usually recommend not to specify default resources and to leave this as a conscious
-  # choice for the user. This also increases chances charts run on environments with little
-  # resources, such as Minikube. If you do want to specify resources, uncomment the following
-  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  # limits:
-  #   cpu: 100m
-  #   memory: 128Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 128Mi
+# https://github.com/NVIDIA/k8s-device-plugin?tab=readme-ov-file#running-gpu-jobs
+resources:
+  requests:
+    nvidia.com/gpu: 1
+  limits:
+    nvidia.com/gpu: 1

 autoscaling:
   enabled: false
@@ -75,9 +80,13 @@ autoscaling:
   targetCPUUtilizationPercentage: 80
   # targetMemoryUtilizationPercentage: 80

-nodeSelector: {}
+nodeSelector:
+  architecture: x86

-tolerations: []
+tolerations:
+  - key: architecture
+    operator: Equal
+    value: x86

 affinity: {}

@@ -104,17 +113,21 @@ volumes:
     nfs:
       server: rassigma.avril
       path: "/mnt/BERTHA/etc/jellyfin/config"
-  - name: media
-    config:
-      size: 3T
-      accessMode: ReadOnlyMany
-    nfs:
-      server: rasnu2.avril
-      path: "/mnt/NEW_BERTHA/ombi-data/media"

 metricsImage:
   repository: gitea.scubbo.org/scubbo/jellyfin-library-count-prometheus-exporter
   tag: latest

+runtimeClassName: nvidia
+
 metrics:
   apiUrl: "http://jellyfin.avril"
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+          - matchExpressions:
+              - key: kubernetes.io/arch
+                operator: In
+                values:
+                  - arm64
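A quick way to confirm that the GPU request and `runtimeClassName: nvidia` introduced in the hunks above actually take effect, sketched with an assumed namespace (`jellyfin`) and the standard Helm selector labels; both are assumptions to adjust for the real release, and `nvidia-smi` is only present if the NVIDIA runtime injects it:

```
# Hypothetical check: exec into the running Jellyfin container and list the visible GPU.
# Namespace and label selector are assumptions; the container name comes from .Chart.Name.
POD="$(kubectl -n jellyfin get pods -l app.kubernetes.io/name=jellyfin -o name | head -n1)"
kubectl -n jellyfin exec "${POD}" -c jellyfin -- nvidia-smi
```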
charts/miniflux/Chart.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
apiVersion: v2
name: miniflux-scubbo
description: A personalized Helm chart deploying Miniflux

type: application
version: 0.1.0
appVersion: "1.0.0"
charts/miniflux/templates/NOTES.txt (new file, 22 lines)
@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "miniflux.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "miniflux.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "miniflux.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "miniflux.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
62
charts/miniflux/templates/_helpers.tpl
Normal file
62
charts/miniflux/templates/_helpers.tpl
Normal file
@ -0,0 +1,62 @@
|
|||||||
|
{{/*
|
||||||
|
Expand the name of the chart.
|
||||||
|
*/}}
|
||||||
|
{{- define "miniflux.name" -}}
|
||||||
|
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Create a default fully qualified app name.
|
||||||
|
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||||
|
If release name contains chart name it will be used as a full name.
|
||||||
|
*/}}
|
||||||
|
{{- define "miniflux.fullname" -}}
|
||||||
|
{{- if .Values.fullnameOverride }}
|
||||||
|
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||||
|
{{- else }}
|
||||||
|
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||||
|
{{- if contains $name .Release.Name }}
|
||||||
|
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||||
|
{{- else }}
|
||||||
|
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Create chart name and version as used by the chart label.
|
||||||
|
*/}}
|
||||||
|
{{- define "miniflux.chart" -}}
|
||||||
|
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Common labels
|
||||||
|
*/}}
|
||||||
|
{{- define "miniflux.labels" -}}
|
||||||
|
helm.sh/chart: {{ include "miniflux.chart" . }}
|
||||||
|
{{ include "miniflux.selectorLabels" . }}
|
||||||
|
{{- if .Chart.AppVersion }}
|
||||||
|
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||||
|
{{- end }}
|
||||||
|
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Selector labels
|
||||||
|
*/}}
|
||||||
|
{{- define "miniflux.selectorLabels" -}}
|
||||||
|
app.kubernetes.io/name: {{ include "miniflux.name" . }}
|
||||||
|
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Create the name of the service account to use
|
||||||
|
*/}}
|
||||||
|
{{- define "miniflux.serviceAccountName" -}}
|
||||||
|
{{- if .Values.serviceAccount.create }}
|
||||||
|
{{- default (include "miniflux.fullname" .) .Values.serviceAccount.name }}
|
||||||
|
{{- else }}
|
||||||
|
{{- default "default" .Values.serviceAccount.name }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
98
charts/miniflux/templates/deployment.yaml
Normal file
98
charts/miniflux/templates/deployment.yaml
Normal file
@ -0,0 +1,98 @@
|
|||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: {{ include "miniflux.fullname" . }}
|
||||||
|
labels:
|
||||||
|
{{- include "miniflux.labels" . | nindent 4 }}
|
||||||
|
spec:
|
||||||
|
{{- if not .Values.autoscaling.enabled }}
|
||||||
|
replicas: {{ .Values.replicaCount }}
|
||||||
|
{{- end }}
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
{{- include "miniflux.selectorLabels" . | nindent 6 }}
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
{{- with .Values.podAnnotations }}
|
||||||
|
annotations:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
labels:
|
||||||
|
{{- include "miniflux.labels" . | nindent 8 }}
|
||||||
|
{{- with .Values.podLabels }}
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
spec:
|
||||||
|
{{- with .Values.imagePullSecrets }}
|
||||||
|
imagePullSecrets:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
serviceAccountName: {{ include "miniflux.serviceAccountName" . }}
|
||||||
|
securityContext:
|
||||||
|
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||||
|
containers:
|
||||||
|
- name: {{ .Chart.Name }}
|
||||||
|
env:
|
||||||
|
- name: DATABASE_URL
|
||||||
|
value: postgres://miniflux:secret@localhost:5432/miniflux?sslmode=disable
|
||||||
|
- name: RUN_MIGRATIONS
|
||||||
|
value: "1"
|
||||||
|
- name: CREATE_ADMIN
|
||||||
|
value: "1"
|
||||||
|
- name: ADMIN_USERNAME
|
||||||
|
value: "admin"
|
||||||
|
- name: ADMIN_PASSWORD
|
||||||
|
value: "test123"
|
||||||
|
# Note - values above are only used for initialization. After first installation, they're changed (manually.
|
||||||
|
# It'd be super-cool to have a Job as part of the deployment that makes that change, but :shrug:)
|
||||||
|
securityContext:
|
||||||
|
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||||
|
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||||
|
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||||
|
ports:
|
||||||
|
- port: {{ .Values.service.port }}
|
||||||
|
containerPort: 8080
|
||||||
|
name: http
|
||||||
|
protocol: TCP
|
||||||
|
# livenessProbe:
|
||||||
|
# httpGet:
|
||||||
|
# path: /
|
||||||
|
# port: http
|
||||||
|
# readinessProbe:
|
||||||
|
# httpGet:
|
||||||
|
# path: /
|
||||||
|
# port: http
|
||||||
|
resources:
|
||||||
|
{{- toYaml .Values.resources | nindent 12 }}
|
||||||
|
{{- with .Values.volumeMounts }}
|
||||||
|
volumeMounts:
|
||||||
|
{{- toYaml . | nindent 12 }}
|
||||||
|
{{- end }}
|
||||||
|
- name: postgres
|
||||||
|
image: "postgres:17-alpine"
|
||||||
|
env:
|
||||||
|
- name: POSTGRES_USER
|
||||||
|
value: miniflux
|
||||||
|
- name: POSTGRES_PASSWORD
|
||||||
|
value: secret
|
||||||
|
- name: POSTGRES_DB
|
||||||
|
value: miniflux
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /var/lib/postgresql/data
|
||||||
|
name: postgres-data
|
||||||
|
{{- with .Values.volumes }}
|
||||||
|
volumes:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- with .Values.nodeSelector }}
|
||||||
|
nodeSelector:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- with .Values.affinity }}
|
||||||
|
affinity:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- with .Values.tolerations }}
|
||||||
|
tolerations:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
61
charts/miniflux/templates/ingress.yaml
Normal file
61
charts/miniflux/templates/ingress.yaml
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
{{- if .Values.ingress.enabled -}}
|
||||||
|
{{- $fullName := include "miniflux.fullname" . -}}
|
||||||
|
{{- $svcPort := .Values.service.port -}}
|
||||||
|
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
|
||||||
|
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
|
||||||
|
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||||
|
apiVersion: networking.k8s.io/v1
|
||||||
|
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||||
|
apiVersion: networking.k8s.io/v1beta1
|
||||||
|
{{- else -}}
|
||||||
|
apiVersion: extensions/v1beta1
|
||||||
|
{{- end }}
|
||||||
|
kind: Ingress
|
||||||
|
metadata:
|
||||||
|
name: {{ $fullName }}
|
||||||
|
labels:
|
||||||
|
{{- include "miniflux.labels" . | nindent 4 }}
|
||||||
|
{{- with .Values.ingress.annotations }}
|
||||||
|
annotations:
|
||||||
|
{{- toYaml . | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
spec:
|
||||||
|
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
|
||||||
|
ingressClassName: {{ .Values.ingress.className }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.ingress.tls }}
|
||||||
|
tls:
|
||||||
|
{{- range .Values.ingress.tls }}
|
||||||
|
- hosts:
|
||||||
|
{{- range .hosts }}
|
||||||
|
- {{ . | quote }}
|
||||||
|
{{- end }}
|
||||||
|
secretName: {{ .secretName }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
rules:
|
||||||
|
{{- range .Values.ingress.hosts }}
|
||||||
|
- host: {{ .host | quote }}
|
||||||
|
http:
|
||||||
|
paths:
|
||||||
|
{{- range .paths }}
|
||||||
|
- path: {{ .path }}
|
||||||
|
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
|
||||||
|
pathType: {{ .pathType }}
|
||||||
|
{{- end }}
|
||||||
|
backend:
|
||||||
|
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
|
||||||
|
service:
|
||||||
|
name: {{ $fullName }}
|
||||||
|
port:
|
||||||
|
number: {{ $svcPort }}
|
||||||
|
{{- else }}
|
||||||
|
serviceName: {{ $fullName }}
|
||||||
|
servicePort: {{ $svcPort }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
charts/miniflux/templates/service.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "miniflux.fullname" . }}
  labels:
    {{- include "miniflux.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "miniflux.selectorLabels" . | nindent 4 }}
charts/miniflux/templates/serviceaccount.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "miniflux.serviceAccountName" . }}
  labels:
    {{- include "miniflux.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}
charts/miniflux/templates/tests/test-connection.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "miniflux.fullname" . }}-test-connection"
  labels:
    {{- include "miniflux.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "miniflux.fullname" . }}:{{ .Values.service.port }}']
  restartPolicy: Never
charts/miniflux/templates/volumes.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: postgres-data-pvc
spec:
  storageClassName: "freenas-iscsi-csi"
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Mi
charts/miniflux/values.yaml (new file, 96 lines)
@@ -0,0 +1,96 @@
# Default values for miniflux.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: docker.io/miniflux/miniflux
  pullPolicy: IfNotPresent
  tag: "2.2.7"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Automatically mount a ServiceAccount's API credentials?
  automount: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: {}
podLabels: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: LoadBalancer
  port: 8597

ingress:
  enabled: true
  className: "traefik"
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: miniflux.avril
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

# Additional volumes on the output Deployment definition.
volumes:
  - name: postgres-data
    persistentVolumeClaim:
      claimName: postgres-data-pvc

# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
  # - name: foo
  #   mountPath: "/etc/foo"
  #   readOnly: true

nodeSelector: {}

tolerations: []

affinity: {}
charts/ombi/Chart.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
apiVersion: v2
name: ombi
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"
charts/ombi/README.md (new file, 36 lines)
@@ -0,0 +1,36 @@
Expects a secret named `nzbget-creds`, with key `password` (a minimal creation command is sketched below).
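
For example (the password value is a placeholder; run it in whichever namespace the chart is deployed into):

```
kubectl create secret generic nzbget-creds --from-literal=password='<nzbget-password>'
```
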
# Supporting services

Ombi, Sonarr, Radarr, and NzbGet do nothing in isolation - you need to hook them up to supporting services to access any data.

## Indexers

These are the services that translate search requests into sets of Usenet post addresses to be downloaded and collated.

I currently use:

* NzbPlanet

And have been advised to try:

* DrunkenSlug
* Nzb.su
* NZBFinder
* NZBGeek

## Providers

These are the services that host the actual data

I use:

* Usenetserver

And have been advised to try:

* usenet.farm

# See also

The helm chart under `proton-vpn`
charts/ombi/templates/NOTES.txt (new file, 22 lines)
@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "ombi.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "ombi.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "ombi.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "ombi.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
103
charts/ombi/templates/_helpers.tpl
Normal file
103
charts/ombi/templates/_helpers.tpl
Normal file
@ -0,0 +1,103 @@
|
|||||||
|
{{/*
|
||||||
|
Expand the name of the chart.
|
||||||
|
*/}}
|
||||||
|
{{- define "ombi.name" -}}
|
||||||
|
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Create a default fully qualified app name.
|
||||||
|
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||||
|
If release name contains chart name it will be used as a full name.
|
||||||
|
*/}}
|
||||||
|
{{- define "ombi.fullname" -}}
|
||||||
|
{{- if .Values.fullnameOverride }}
|
||||||
|
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||||
|
{{- else }}
|
||||||
|
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||||
|
{{- if contains $name .Release.Name }}
|
||||||
|
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||||
|
{{- else }}
|
||||||
|
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Create chart name and version as used by the chart label.
|
||||||
|
*/}}
|
||||||
|
{{- define "ombi.chart" -}}
|
||||||
|
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Common labels
|
||||||
|
*/}}
|
||||||
|
{{- define "ombi.labels" -}}
|
||||||
|
helm.sh/chart: {{ include "ombi.chart" . }}
|
||||||
|
{{ include "ombi.selectorLabels" . }}
|
||||||
|
{{- if .Chart.AppVersion }}
|
||||||
|
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||||
|
{{- end }}
|
||||||
|
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Selector labels
|
||||||
|
*/}}
|
||||||
|
{{- define "ombi.selectorLabels" -}}
|
||||||
|
app.kubernetes.io/name: {{ include "ombi.name" . }}
|
||||||
|
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Create the name of the service account to use
|
||||||
|
*/}}
|
||||||
|
{{- define "ombi.serviceAccountName" -}}
|
||||||
|
{{- if .Values.serviceAccount.create }}
|
||||||
|
{{- default (include "ombi.fullname" .) .Values.serviceAccount.name }}
|
||||||
|
{{- else }}
|
||||||
|
{{- default "default" .Values.serviceAccount.name }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
Templatify creation of standard config PV-and-PVCs
|
||||||
|
Accepts `service` as a parameter, which should be a mapping containing:
|
||||||
|
* name - a string (like `sonarr` or `qbit`)
|
||||||
|
* size - a string (with the standard Kubernetes restrictions on size-strings)
|
||||||
|
* path - a string (defining the path in the NFS server where this config dir lives)
|
||||||
|
|
||||||
|
Note that this assumes NFS as the storage type. A more extensible definition would permit arbitrary storage types. But hey, this is just for me :P
|
||||||
|
|
||||||
|
Not currently working, but I'm keeping it checked-in for future inspiration!
|
||||||
|
|
||||||
|
*/}}
|
||||||
|
{{- define "ombi.configvolumedefinition" -}}
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: PersistentVolume
|
||||||
|
metadata:
|
||||||
|
name: ( include "ombi.fullname" . )-( .name )-config-pv
|
||||||
|
spec:
|
||||||
|
capacity:
|
||||||
|
storage: {{ .size }}
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteMany
|
||||||
|
nfs:
|
||||||
|
server: {{ $.Values.volume.nfsServer }}
|
||||||
|
path: {{ .path }}
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: PersistentVolumeClaim
|
||||||
|
metadata:
|
||||||
|
name: ( include "ombi.fullname" . )-{{ .name }}-config-pvc
|
||||||
|
spec:
|
||||||
|
storageClassName: ""
|
||||||
|
volumeName: ( include "ombi.fullname" . )-{{ .name }}-config-pv
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteMany
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: {{ .size }}
|
||||||
|
{{- end }}
|
216
charts/ombi/templates/deployment.yaml
Normal file
216
charts/ombi/templates/deployment.yaml
Normal file
@ -0,0 +1,216 @@
|
|||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: {{ include "ombi.fullname" . }}
|
||||||
|
labels:
|
||||||
|
{{- include "ombi.labels" . | nindent 4 }}
|
||||||
|
spec:
|
||||||
|
{{- if not .Values.autoscaling.enabled }}
|
||||||
|
replicas: {{ .Values.replicaCount }}
|
||||||
|
{{- end }}
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
{{- include "ombi.selectorLabels" . | nindent 6 }}
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
{{- with .Values.podAnnotations }}
|
||||||
|
annotations:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
labels:
|
||||||
|
{{- include "ombi.selectorLabels" . | nindent 8 }}
|
||||||
|
spec:
|
||||||
|
# Necessary for Pod to have a static hostname in order to expose ports:
|
||||||
|
# https://docs.k8s-at-home.com/guides/pod-gateway/#exposing-routed-pod-ports-from-the-gateway
|
||||||
|
hostname: omni
|
||||||
|
{{- with .Values.imagePullSecrets }}
|
||||||
|
imagePullSecrets:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
securityContext:
|
||||||
|
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||||
|
containers:
|
||||||
|
{{ if .Values.ombi.enabled }}
|
||||||
|
- name: ombi
|
||||||
|
securityContext:
|
||||||
|
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||||
|
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||||
|
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||||
|
env:
|
||||||
|
- name: PUID
|
||||||
|
value: "1000"
|
||||||
|
- name: GUID
|
||||||
|
value: "1000"
|
||||||
|
- name: TZ
|
||||||
|
value: "America/Los_Angeles"
|
||||||
|
resources:
|
||||||
|
{{- toYaml .Values.resources | nindent 12 }}
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /config
|
||||||
|
name: ombi-config
|
||||||
|
{{- end -}}
|
||||||
|
{{ if .Values.sonarr.enabled }}
|
||||||
|
- name: sonarr
|
||||||
|
securityContext:
|
||||||
|
{{- toYaml .Values.securityContext | nindent 12}}
|
||||||
|
image: "lscr.io/linuxserver/sonarr:{{ .Values.sonarr.tag | default "latest" }}"
|
||||||
|
env:
|
||||||
|
- name: PUID
|
||||||
|
value: "1000"
|
||||||
|
- name: PGID
|
||||||
|
value: "1000"
|
||||||
|
resources:
|
||||||
|
{{- toYaml .Values.resources | nindent 12 }}
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /config
|
||||||
|
name: sonarr-config
|
||||||
|
- mountPath: /data
|
||||||
|
name: ombi-truenas-data
|
||||||
|
{{- end -}}
|
||||||
|
{{ if .Values.radarr.enabled }}
|
||||||
|
- name: radarr
|
||||||
|
securityContext:
|
||||||
|
{{- toYaml .Values.securityContext | nindent 12}}
|
||||||
|
image: "lscr.io/linuxserver/radarr:latest"
|
||||||
|
env:
|
||||||
|
- name: PUID
|
||||||
|
value: "1000"
|
||||||
|
- name: PGID
|
||||||
|
value: "1000"
|
||||||
|
resources:
|
||||||
|
{{- toYaml .Values.resources | nindent 12 }}
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /config
|
||||||
|
name: radarr-config
|
||||||
|
- mountPath: /data
|
||||||
|
name: ombi-truenas-data
|
||||||
|
{{- end -}}
|
||||||
|
{{ if .Values.readarr.enabled }}
|
||||||
|
- name: readarr
|
||||||
|
securityContext:
|
||||||
|
{{- toYaml .Values.securityContext | nindent 12}}
|
||||||
|
image: "lscr.io/linuxserver/readarr:develop"
|
||||||
|
env:
|
||||||
|
- name: PUID
|
||||||
|
value: "1000"
|
||||||
|
- name: PGID
|
||||||
|
value: "1000"
|
||||||
|
resources:
|
||||||
|
{{- toYaml .Values.resources | nindent 12 }}
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /config
|
||||||
|
name: readarr-config
|
||||||
|
- mountPath: /data
|
||||||
|
name: ombi-truenas-data
|
||||||
|
{{- end -}}
|
||||||
|
{{if .Values.prowlarr.enabled}}
|
||||||
|
- name: prowlarr
|
||||||
|
securityContext:
|
||||||
|
{{- toYaml .Values.securityContext | nindent 12}}
|
||||||
|
image: "lscr.io/linuxserver/prowlarr:latest"
|
||||||
|
env:
|
||||||
|
- name: PUID
|
||||||
|
value: "1000"
|
||||||
|
- name: PGID
|
||||||
|
value: "1000"
|
||||||
|
resources:
|
||||||
|
{{- toYaml .Values.resources | nindent 12}}
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /config
|
||||||
|
name: prowlarr-config
|
||||||
|
- mountPath: /data
|
||||||
|
name: ombi-truenas-data
|
||||||
|
{{- end -}}
|
||||||
|
{{ if .Values.nzbget.enabled }}
|
||||||
|
- name: nzbget
|
||||||
|
securityContext:
|
||||||
|
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||||
|
image: "lscr.io/linuxserver/nzbget:latest"
|
||||||
|
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||||
|
env:
|
||||||
|
- name: PUID
|
||||||
|
value: "1000"
|
||||||
|
- name: PGID
|
||||||
|
value: "1000"
|
||||||
|
- name: TZ
|
||||||
|
value: "America/Los_Angeles"
|
||||||
|
- name: NZBGET_USER
|
||||||
|
value: nzbget
|
||||||
|
- name: NZBGET_PASS
|
||||||
|
valueFrom:
|
||||||
|
secretKeyRef:
|
||||||
|
name: nzbget-creds
|
||||||
|
key: password
|
||||||
|
optional: false
|
||||||
|
resources:
|
||||||
|
{{- toYaml .Values.resources | nindent 12 }}
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /config
|
||||||
|
name: nzbget-config
|
||||||
|
- mountPath: /data/usenet
|
||||||
|
name: usenet-truenas-downloads
|
||||||
|
{{ end }}
|
||||||
|
{{ if .Values.sabnzbd.enabled }}
|
||||||
|
- name: sabnzbd
|
||||||
|
securityContext:
|
||||||
|
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||||
|
image: "lscr.io/linuxserver/sabnzbd:latest"
|
||||||
|
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||||
|
env:
|
||||||
|
- name: PUID
|
||||||
|
value: "1000"
|
||||||
|
- name: PGID
|
||||||
|
value: "1000"
|
||||||
|
- name: TZ
|
||||||
|
value: "America/Los_Angeles"
|
||||||
|
resources:
|
||||||
|
{{- toYaml .Values.resources | nindent 12 }}
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /config
|
||||||
|
name: sabnzbd-config
|
||||||
|
- mountPath: /data/usenet
|
||||||
|
name: usenet-truenas-downloads
|
||||||
|
{{ end }}
|
||||||
|
volumes:
|
||||||
|
- name: ombi-config
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: {{ include "ombi.fullname" . }}-ombi-config-pvc
|
||||||
|
- name: ombi-data
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: {{ include "ombi.fullname" . }}-data-pvc
|
||||||
|
- name: ombi-truenas-data
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: {{ include "ombi.fullname" . }}-truenas-data-pvc
|
||||||
|
- name: sonarr-config
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: {{ include "ombi.fullname" . }}-sonarr-config-pvc
|
||||||
|
- name: radarr-config
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: {{ include "ombi.fullname" . }}-radarr-config-pvc
|
||||||
|
- name: readarr-config
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: {{ include "ombi.fullname" . }}-readarr-config-pvc
|
||||||
|
- name: prowlarr-config
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: {{ include "ombi.fullname" . }}-prowlarr-config-pvc
|
||||||
|
- name: nzbget-config
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: {{include "ombi.fullname" .}}-nzbget-config-pvc
|
||||||
|
- name: sabnzbd-config
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: {{include "ombi.fullname" .}}-sabnzbd-config-pvc
|
||||||
|
- name: usenet-truenas-downloads
|
||||||
|
persistentVolumeClaim:
|
||||||
|
claimName: {{ include "ombi.fullname" . }}-truenas-usenet-downloads-pvc
|
||||||
|
{{- with .Values.nodeSelector }}
|
||||||
|
nodeSelector:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- with .Values.affinity }}
|
||||||
|
affinity:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- with .Values.tolerations }}
|
||||||
|
tolerations:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
121
charts/ombi/templates/ingress.yaml
Normal file
121
charts/ombi/templates/ingress.yaml
Normal file
@ -0,0 +1,121 @@
|
|||||||
|
{{- if .Values.ingress.enabled -}}
|
||||||
|
{{- $fullName := include "ombi.fullname" . -}}
|
||||||
|
{{- $svcPort := .Values.service.port -}}
|
||||||
|
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
|
||||||
|
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
|
||||||
|
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||||
|
apiVersion: networking.k8s.io/v1
|
||||||
|
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||||
|
apiVersion: networking.k8s.io/v1beta1
|
||||||
|
{{- else -}}
|
||||||
|
apiVersion: extensions/v1beta1
|
||||||
|
{{- end }}
|
||||||
|
kind: Ingress
|
||||||
|
metadata:
|
||||||
|
name: {{ $fullName }}
|
||||||
|
labels:
|
||||||
|
{{- include "ombi.labels" . | nindent 4 }}
|
||||||
|
{{- with .Values.ingress.annotations }}
|
||||||
|
annotations:
|
||||||
|
{{- toYaml . | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
spec:
|
||||||
|
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
|
||||||
|
ingressClassName: {{ .Values.ingress.className }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.ingress.tls }}
|
||||||
|
tls:
|
||||||
|
{{- range .Values.ingress.tls }}
|
||||||
|
- hosts:
|
||||||
|
{{- range .hosts }}
|
||||||
|
- {{ . | quote }}
|
||||||
|
{{- end }}
|
||||||
|
secretName: {{ .secretName }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
rules:
|
||||||
|
{{- range .Values.ingress.hosts }}
|
||||||
|
- host: {{ .host | quote }}
|
||||||
|
http:
|
||||||
|
paths:
|
||||||
|
{{- range .paths }}
|
||||||
|
- path: {{ .path }}
|
||||||
|
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
|
||||||
|
pathType: {{ .pathType }}
|
||||||
|
{{- end }}
|
||||||
|
backend:
|
||||||
|
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
|
||||||
|
service:
|
||||||
|
name: {{ $fullName }}
|
||||||
|
port:
|
||||||
|
number: {{ $svcPort }}
|
||||||
|
{{- else }}
|
||||||
|
serviceName: {{ $fullName }}
|
||||||
|
servicePort: {{ $svcPort }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
- host: sonarr.avril
|
||||||
|
http:
|
||||||
|
paths:
|
||||||
|
- path: /
|
||||||
|
pathType: ImplementationSpecific
|
||||||
|
backend:
|
||||||
|
service:
|
||||||
|
name: {{ include "ombi.fullname" . }}-sonarr
|
||||||
|
port:
|
||||||
|
number: {{ .Values.service.sonarrPort }}
|
||||||
|
- host: radarr.avril
|
||||||
|
http:
|
||||||
|
paths:
|
||||||
|
- path: /
|
||||||
|
pathType: ImplementationSpecific
|
||||||
|
backend:
|
||||||
|
service:
|
||||||
|
name: {{ include "ombi.fullname" . }}-radarr
|
||||||
|
port:
|
||||||
|
number: {{ .Values.service.radarrPort }}
|
||||||
|
- host: readarr.avril
|
||||||
|
http:
|
||||||
|
paths:
|
||||||
|
- path: /
|
||||||
|
pathType: ImplementationSpecific
|
||||||
|
backend:
|
||||||
|
service:
|
||||||
|
name: {{ include "ombi.fullname" . }}-readarr
|
||||||
|
port:
|
||||||
|
number: {{ .Values.service.readarrPort }}
|
||||||
|
- host: prowlarr.avril
|
||||||
|
http:
|
||||||
|
paths:
|
||||||
|
- path: /
|
||||||
|
pathType: ImplementationSpecific
|
||||||
|
backend:
|
||||||
|
service:
|
||||||
|
name: {{include "ombi.fullname" .}}-prowlarr
|
||||||
|
port:
|
||||||
|
number: {{.Values.service.prowlarrPort}}
|
||||||
|
- host: nzbget.avril
|
||||||
|
http:
|
||||||
|
paths:
|
||||||
|
- path: /
|
||||||
|
pathType: ImplementationSpecific
|
||||||
|
backend:
|
||||||
|
service:
|
||||||
|
name: {{ include "ombi.fullname" . }}-nzbget
|
||||||
|
port:
|
||||||
|
number: {{ .Values.service.nzbgetWebPort }}
|
||||||
|
- host: sabnzbd.avril
|
||||||
|
http:
|
||||||
|
paths:
|
||||||
|
- path: /
|
||||||
|
pathType: ImplementationSpecific
|
||||||
|
backend:
|
||||||
|
service:
|
||||||
|
name: {{ include "ombi.fullname" . }}-sabnzbd
|
||||||
|
port:
|
||||||
|
number: {{ .Values.service.sabnzbdWebPort }}
|
||||||
|
{{- end }}
|
104
charts/ombi/templates/service.yaml
Normal file
104
charts/ombi/templates/service.yaml
Normal file
@ -0,0 +1,104 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: {{ include "ombi.fullname" . }}
|
||||||
|
labels:
|
||||||
|
{{- include "ombi.labels" . | nindent 4 }}
|
||||||
|
spec:
|
||||||
|
type: {{ .Values.service.type }}
|
||||||
|
ports:
|
||||||
|
- port: {{ .Values.service.port }}
|
||||||
|
targetPort: 3579
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
{{- include "ombi.selectorLabels" . | nindent 4 }}
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: {{ include "ombi.fullname" . }}-sonarr
|
||||||
|
labels:
|
||||||
|
{{- include "ombi.labels" . | nindent 4 }}
|
||||||
|
spec:
|
||||||
|
type: {{ .Values.service.type }}
|
||||||
|
ports:
|
||||||
|
- port: {{ .Values.service.sonarrPort }}
|
||||||
|
targetPort: 8989
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
{{- include "ombi.selectorLabels" . | nindent 4 }}
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: {{ include "ombi.fullname" . }}-radarr
|
||||||
|
labels:
|
||||||
|
{{- include "ombi.labels" . | nindent 4 }}
|
||||||
|
spec:
|
||||||
|
type: {{ .Values.service.type }}
|
||||||
|
ports:
|
||||||
|
- port: {{ .Values.service.radarrPort }}
|
||||||
|
targetPort: 7878
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
{{- include "ombi.selectorLabels" . | nindent 4 }}
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: {{ include "ombi.fullname" . }}-readarr
|
||||||
|
labels:
|
||||||
|
{{- include "ombi.labels" . | nindent 4 }}
|
||||||
|
spec:
|
||||||
|
type: {{ .Values.service.type }}
|
||||||
|
ports:
|
||||||
|
- port: {{ .Values.service.readarrPort }}
|
||||||
|
targetPort: 8787
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
{{- include "ombi.selectorLabels" . | nindent 4 }}
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: {{ include "ombi.fullname" . }}-prowlarr
|
||||||
|
labels:
|
||||||
|
{{- include "ombi.labels" . | nindent 4 }}
|
||||||
|
spec:
|
||||||
|
type: {{ .Values.service.type }}
|
||||||
|
ports:
|
||||||
|
- port: {{ .Values.service.prowlarrPort }}
|
||||||
|
targetPort: 9696
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
{{- include "ombi.selectorLabels" . | nindent 4 }}
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: {{ include "ombi.fullname" . }}-nzbget
|
||||||
|
labels:
|
||||||
|
{{- include "ombi.labels" . | nindent 4 }}
|
||||||
|
spec:
|
||||||
|
type: {{ .Values.service.type }}
|
||||||
|
ports:
|
||||||
|
- port: {{ .Values.service.nzbgetWebPort }}
|
||||||
|
targetPort: 6789
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
{{- include "ombi.selectorLabels" . | nindent 4 }}
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: {{ include "ombi.fullname" . }}-sabnzbd
|
||||||
|
labels:
|
||||||
|
{{- include "ombi.labels" . | nindent 4 }}
|
||||||
|
spec:
|
||||||
|
type: {{ .Values.service.type }}
|
||||||
|
ports:
|
||||||
|
- port: {{ .Values.service.sabnzbdWebPort }}
|
||||||
|
targetPort: 8080
|
||||||
|
protocol: TCP
|
||||||
|
selector:
|
||||||
|
{{- include "ombi.selectorLabels" . | nindent 4 }}
|
265
charts/ombi/templates/volumes.yaml
Normal file
265
charts/ombi/templates/volumes.yaml
Normal file
@ -0,0 +1,265 @@
|
|||||||
|
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ include "ombi.fullname" . }}-data-pv
  namespace: {{ .Release.Namespace }}
spec:
  capacity:
    storage: 5T
  accessModes:
    - ReadWriteMany
  nfs:
    server: {{ .Values.volume.dataNFSServer }}
    path: {{ .Values.volume.dataNFSPath }}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "ombi.fullname" . }}-data-pvc
  namespace: {{ .Release.Namespace }}
spec:
  storageClassName: ""
  volumeName: {{ include "ombi.fullname" . }}-data-pv
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5T
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ include "ombi.fullname" . }}-truenas-data-pv
  namespace: {{ .Release.Namespace }}
spec:
  capacity:
    storage: 20T
  accessModes:
    - ReadWriteMany
  nfs:
    server: galactus.avril
    path: /mnt/low-resiliency-with-read-cache/ombi-data/
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "ombi.fullname" . }}-truenas-data-pvc
  namespace: {{ .Release.Namespace }}
spec:
  storageClassName: ""
  volumeName: {{ include "ombi.fullname" . }}-truenas-data-pv
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20T

# TODO - templatize these similar definitions
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ include "ombi.fullname" . }}-ombi-config-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  nfs:
    server: {{ $.Values.volume.configNFSServer }}
    path: /mnt/BERTHA/etc/ombi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "ombi.fullname" . }}-ombi-config-pvc
spec:
  storageClassName: ""
  volumeName: {{ include "ombi.fullname" . }}-ombi-config-pv
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ include "ombi.fullname" . }}-sonarr-config-pv
spec:
  capacity:
    storage: 10M
  accessModes:
    - ReadWriteMany
  nfs:
    server: {{ $.Values.volume.configNFSServer }}
    path: /mnt/BERTHA/etc/sonarr
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "ombi.fullname" . }}-sonarr-config-pvc
spec:
  storageClassName: ""
  volumeName: {{ include "ombi.fullname" . }}-sonarr-config-pv
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10M
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ include "ombi.fullname" . }}-radarr-config-pv
spec:
  capacity:
    storage: 10M
  accessModes:
    - ReadWriteMany
  nfs:
    server: {{ $.Values.volume.configNFSServer }}
    path: /mnt/BERTHA/etc/radarr
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "ombi.fullname" . }}-radarr-config-pvc
spec:
  storageClassName: ""
  volumeName: {{ include "ombi.fullname" . }}-radarr-config-pv
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10M
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ include "ombi.fullname" . }}-readarr-config-pv
spec:
  capacity:
    storage: 10M
  accessModes:
    - ReadWriteMany
  nfs:
    server: {{ $.Values.volume.configNFSServer }}
    path: /mnt/BERTHA/etc/readarr
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "ombi.fullname" . }}-readarr-config-pvc
spec:
  storageClassName: ""
  volumeName: {{ include "ombi.fullname" . }}-readarr-config-pv
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10M
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ include "ombi.fullname" . }}-prowlarr-config-pv
spec:
  capacity:
    storage: 10M
  accessModes:
    - ReadWriteMany
  nfs:
    server: {{ $.Values.volume.configNFSServer }}
    path: /mnt/BERTHA/etc/prowlarr
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "ombi.fullname" . }}-prowlarr-config-pvc
spec:
  storageClassName: ""
  volumeName: {{ include "ombi.fullname" . }}-prowlarr-config-pv
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10M
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ include "ombi.fullname" . }}-nzbget-config-pv
spec:
  capacity:
    storage: 10M
  accessModes:
    - ReadWriteMany
  nfs:
    server: {{ $.Values.volume.configNFSServer }}
    path: /mnt/BERTHA/etc/nzbget
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "ombi.fullname" . }}-nzbget-config-pvc
spec:
  storageClassName: ""
  volumeName: {{ include "ombi.fullname" . }}-nzbget-config-pv
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10M
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ include "ombi.fullname" . }}-sabnzbd-config-pv
spec:
  capacity:
    storage: 10M
  accessModes:
    - ReadWriteMany
  nfs:
    server: {{ $.Values.volume.configNFSServer }}
    path: /mnt/BERTHA/etc/sabnzbd
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "ombi.fullname" . }}-sabnzbd-config-pvc
spec:
  storageClassName: ""
  volumeName: {{ include "ombi.fullname" . }}-sabnzbd-config-pv
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10M
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ include "ombi.fullname" . }}-truenas-usenet-downloads-pv
spec:
  capacity:
    storage: 1T
  accessModes:
    - ReadWriteMany
  nfs:
    server: galactus.avril
    path: /mnt/low-resiliency-with-read-cache/ombi-data/usenet
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "ombi.fullname" . }}-truenas-usenet-downloads-pvc
spec:
  storageClassName: ""
  volumeName: {{ include "ombi.fullname" . }}-truenas-usenet-downloads-pv
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1T
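The `# TODO - templatize these similar definitions` note above could plausibly be addressed with a Helm `range` loop. A rough, untested sketch - the app list and the shared `10M` / `ReadWriteMany` settings are just lifted from the per-app config PV/PVC definitions above:

```
{{- range $app := list "sonarr" "radarr" "readarr" "prowlarr" "nzbget" "sabnzbd" }}
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ include "ombi.fullname" $ }}-{{ $app }}-config-pv
spec:
  capacity:
    storage: 10M
  accessModes:
    - ReadWriteMany
  nfs:
    server: {{ $.Values.volume.configNFSServer }}
    path: /mnt/BERTHA/etc/{{ $app }}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "ombi.fullname" $ }}-{{ $app }}-config-pvc
spec:
  storageClassName: ""
  volumeName: {{ include "ombi.fullname" $ }}-{{ $app }}-config-pv
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10M
{{- end }}
```

Inside the `range`, `$` is used instead of `.` so that `include` and `.Values` still refer to the root context.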
123 charts/ombi/values.yaml Normal file
@@ -0,0 +1,123 @@
# Default values for ombi.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: linuxserver/ombi
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: "latest"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 3579
  sonarrPort: 8989
  radarrPort: 7878
  readarrPort: 8787
  prowlarrPort: 9696
  nzbgetWebPort: 6789
  sabnzbdWebPort: 8080

ingress:
  enabled: true
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: ombi.avril
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector:
  kubernetes.io/hostname: epsilon

tolerations:
  - key: architecture
    operator: "Equal"
    value: x86

affinity: {}

# Custom values below here
ombi:
  enabled: true

sonarr:
  enabled: true
  # Hard-coded to address https://forums.sonarr.tv/t/unraid-binhex-sonarr-crashes-constantly-epic-fail/33175/
  # https://github.com/Sonarr/Sonarr/issues/5929 / https://old.reddit.com/r/sonarr/comments/15p160j/v4_consoleapp_epic_fail_error/
  # tag: "develop-version-4.0.0.613"
  tag: "4.0.7"

radarr:
  enabled: true

readarr:
  enabled: true

prowlarr:
  enabled: true

nzbget:
  enabled: true

sabnzbd:
  enabled: true

volume:
  configNFSServer: rassigma.avril
  dataNFSServer: rasnu2.avril
  dataNFSPath: /mnt/NEW_BERTHA/ombi-data
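A usage sketch for installing the chart with these defaults (the release name, namespace, and the single override shown are illustrative, not taken from this repo - any value above can be overridden the same way):

```
helm upgrade --install ombi charts/ombi --namespace ombi --create-namespace \
  --set service.type=NodePort
```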
31 charts/proton-vpn/Chart.yaml Normal file
@@ -0,0 +1,31 @@
apiVersion: v2
name: proton-vpn
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

dependencies:
  # https://github.com/k8s-at-home/charts/tree/master/charts/stable/pod-gateway
  # https://github.com/k8s-at-home/charts/commit/bc8aee9648feb02fbe03246026e799cd1bd50ae5
  - name: pod-gateway
    version: "2.0.0"
    repository: https://k8s-at-home.com/charts/
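Since `pod-gateway` is declared as a chart dependency, the vendored archive under `charts/proton-vpn/charts/` (shown further below) can be refreshed with standard Helm tooling, e.g.:

```
helm dependency update charts/proton-vpn
```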
73 charts/proton-vpn/README.md Normal file
@@ -0,0 +1,73 @@
Implements [this guide](https://docs.k8s-at-home.com/guides/pod-gateway/). Note that I only tested this with OpenVPN, not WireGuard.

## Dependencies

### Cert-manager

Depends on the CRDs installed as part of `cert-manager`, which apparently will not be installed if that chart is a dependency of this one - so it's installed manually in its own directory.

If you need to install it manually, run `helm repo add jetstack https://charts.jetstack.io; helm repo update; helm install cert-manager jetstack/cert-manager --create-namespace -n security --set installCRDs=true`

## Secrets

Note that the names of both of these secrets are arbitrary (though the keys within them are not) - the expected names are set in `values.yaml`.

### Config file

Depends on the existence of a secret called `openvpn-config`, with a key `vpnConfigfile` that contains the appropriate config file. Download it from [here](https://account.protonvpn.com/downloads) and upload it with:

```
kubectl -n proton-vpn create secret generic openvpn-config --from-file=vpnConfigfile=<path_to_config_file>
```

### OpenVPN creds

Fetch from [here](https://account.protonvpn.com/account) (note - these are different from your ProtonVPN account credentials!), then upload with:

```
kubectl -n proton-vpn create secret generic openvpn-creds --from-literal="VPN_AUTH=<username>;<password>"
```

Note that you can (apparently!) append various suffixes to the OpenVPN username to enable extra features if you are a paying member:

* `<username>+f1` as username to enable anti-malware filtering
* `<username>+f2` as username to additionally enable ad-blocking filtering
* `<username>+nr` as username to enable Moderate NAT

I haven't tested these - use at your own risk! Probably best to get a functioning connection working before messing around with extra features.
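If you do want to try them, the suffix presumably just becomes part of the username in the same secret as above, e.g. for ad-blocking:

```
kubectl -n proton-vpn create secret generic openvpn-creds --from-literal="VPN_AUTH=<username>+f2;<password>"
```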

### update-resolv-conf

TODO: (Not sure if this is required for all servers...) This is required by the ProtonVPN OpenVPN configuration (line 124).

## Debugging

### `GATEWAY_IP=';; connection timed out; no servers could be reached'`

As per [here](https://docs.k8s-at-home.com/guides/pod-gateway/#routed-pod-fails-to-init), "_try setting the_ `NOT_ROUTED_TO_GATEWAY_CIDRS:` _with your cluster cidr and service cidrs_". The way to find those values is described [here](https://stackoverflow.com/questions/44190607/how-do-you-find-the-cluster-service-cidr-of-a-kubernetes-cluster).
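One way to look those up (based on the answer linked above - treat it as a starting point, since the exact flag names vary by Kubernetes distribution):

```
kubectl cluster-info dump | grep -m 1 cluster-cidr
kubectl cluster-info dump | grep -m 1 service-cluster-ip-range
```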

## More info

Some OpenVPN server configurations rely on a script at `/etc/openvpn/update-resolv-conf.sh`, which isn't provided by default. It [looks like](https://github.com/dperson/openvpn-client/issues/90) it's been replaced with `/etc/openvpn/up.sh` and `.../down.sh` - you should be able to manually edit the `.ovpn` file to reference those scripts instead.

If you really need the original file - get it from [here](https://github.com/alfredopalhares/openvpn-update-resolv-conf) and provide it in a ConfigMap:

```
curl -s https://raw.githubusercontent.com/alfredopalhares/openvpn-update-resolv-conf/master/update-resolv-conf.sh -o /tmp/update-resolv-conf
```
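A sketch of wrapping that file in a ConfigMap (the ConfigMap name and key here are arbitrary - whatever you pick, it still needs to be mounted into the VPN container at the path the `.ovpn` file references):

```
kubectl -n proton-vpn create configmap openvpn-update-resolv-conf \
  --from-file=update-resolv-conf=/tmp/update-resolv-conf
```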

### Debugging image

Useful tools to install:

```
apt update -y
apt install -y traceroute net-tools iputils-ping dnsutils
```

## References

* [Values definition for VPN](https://github.com/k8s-at-home/library-charts/blob/2b4e0aa1ef5f8c6ef4ac14c2335fc9a008394ed6/charts/stable/common/values.yaml#L479)
* [Charts for VPN](https://github.com/k8s-at-home/library-charts/tree/2b4e0aa1ef5f8c6ef4ac14c2335fc9a008394ed6/charts/stable/common/templates/addons/vpn)
* [Pod Gateway templates](https://github.com/k8s-at-home/charts/tree/master/charts/stable/pod-gateway/templates)
BIN charts/proton-vpn/charts/pod-gateway-2.0.0.tgz Normal file
Binary file not shown.
62 charts/proton-vpn/templates/_helpers.tpl Normal file
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "proton-vpn.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "proton-vpn.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "proton-vpn.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "proton-vpn.labels" -}}
helm.sh/chart: {{ include "proton-vpn.chart" . }}
{{ include "proton-vpn.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "proton-vpn.selectorLabels" -}}
app.kubernetes.io/name: {{ include "proton-vpn.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "proton-vpn.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "proton-vpn.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
11 charts/proton-vpn/templates/namespace.yaml Normal file
@@ -0,0 +1,11 @@
# Note: these are _not_ the namespace for the items created by this chart, but rather the namespaces of pods that will
# be routed _through_ this VPN
{{- range (index .Values "pod-gateway" "routed_namespaces") }}
---
apiVersion: v1
kind: Namespace
metadata:
  name: {{ . }}
  labels:
    routed-gateway: "true"
{{- end }}
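For illustration, with the default `routed_namespaces` from `values.yaml` below this template renders to:

```
---
apiVersion: v1
kind: Namespace
metadata:
  name: vpn
  labels:
    routed-gateway: "true"
---
apiVersion: v1
kind: Namespace
metadata:
  name: ombi
  labels:
    routed-gateway: "true"
```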
59 charts/proton-vpn/values.yaml Normal file
@@ -0,0 +1,59 @@
pod-gateway:
  routed_namespaces:
    - "vpn"
    - "ombi"

  settings:
    NOT_ROUTED_TO_GATEWAY_CIDRS: "10.42.0.0/16 10.43.0.0/16 192.168.0.0/16"
    VPN_BLOCK_OTHER_TRAFFIC: true
    # https://github.com/k8s-at-home/charts/tree/master/charts/stable/pod-gateway
    VPN_INTERFACE: tun0 # For OpenVPN. For WireGuard, use `wg0`
    VPN_TRAFFIC_PORT: 1194 # UDP port - which is generally preferred over TCP. If you use TCP, 443 is probably correct

  publicPorts:
    - hostname: ombi
      IP: 9
      ports:
        - type: udp
          port: 6789
        - type: tcp
          port: 6789

  addons:
    # https://github.com/k8s-at-home/library-charts/blob/2b4e0aa1ef5f8c6ef4ac14c2335fc9a008394ed6/charts/stable/common/templates/addons/vpn/openvpn/_container.tpl
    # https://github.com/k8s-at-home/library-charts/blob/2b4e0aa1ef5f8c6ef4ac14c2335fc9a008394ed6/charts/stable/common/values.yaml#L477
    vpn:
      enabled: true

      type: openvpn
      openvpn:
        authSecret: openvpn-creds
        configFileSecret: openvpn-config

      livenessProbe:
        exec:
          # Change "CA" to whatever country your VPN connects to
          command:
            - sh
            - -c
            - if [ $(curl -s https://ipinfo.io/country) == 'CA' ]; then exit 0; else exit $?; fi
        initialDelaySeconds: 30
        periodSeconds: 60
        failureThreshold: 1

      networkPolicy:
        enabled: true
        egress:
          - ports:
              - protocol: UDP # Setting settings.VPN_TRAFFIC_PORT is insufficient
                port: 1194
            to:
              - ipBlock:
                  cidr: 0.0.0.0/0
          - to:
              - ipBlock:
                  cidr: 10.0.0.0/8

      scripts:
        up: true
        down: true
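A quick way to sanity-check that a routed pod really egresses via the VPN is to reuse the same check the liveness probe makes - the deployment name below is a placeholder for any workload in a routed namespace whose container has `curl`:

```
kubectl -n ombi exec deploy/<some-deployment> -- curl -s https://ipinfo.io/country
```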
178 charts/vault-crossplane-integration/base-app-infra.yaml Normal file
@@ -0,0 +1,178 @@
apiVersion: apiextensions.crossplane.io/v1
kind: CompositeResourceDefinition
metadata:
  name: xbaseapplicationinfrastructures.scubbo.org
spec:
  group: scubbo.org
  names:
    kind: xBaseApplicationInfrastructure
    plural: xbaseapplicationinfrastructures
  claimNames:
    kind: BaseAppInfra
    plural: baseappinfras
  versions:
    - name: v1alpha1
      served: true
      referenceable: true
      schema:
        openAPIV3Schema:
          type: object
          properties:
            spec:
              type: object
              properties:
                appName:
                  type: string
---
# Sources for the Vault resources are here:
# https://developer.hashicorp.com/vault/tutorials/kubernetes/vault-secrets-operator#configure-vault
apiVersion: apiextensions.crossplane.io/v1
kind: Composition
metadata:
  name: base-application-infrastructure
spec:
  compositeTypeRef:
    apiVersion: scubbo.org/v1alpha1
    kind: xBaseApplicationInfrastructure
  resources:
    - name: vault-role
      base:
        apiVersion: kubernetes.vault.upbound.io/v1alpha1
        kind: AuthBackendRole
        spec:
          providerConfigRef:
            name: vault-provider-config
          forProvider:
            audience: vault
            boundServiceAccountNames:
              - default
            tokenMaxTtl: 86400
            tokenTtl: 86400
      patches:
        - type: FromCompositeFieldPath
          # https://docs.crossplane.io/latest/concepts/composite-resources/#claim-namespace-label
          fromFieldPath: metadata.labels["crossplane.io/claim-namespace"]
          toFieldPath: spec.forProvider.boundServiceAccountNamespaces
          transforms:
            - type: string
              string:
                type: Format
                fmt: "[\"%s\"]"
            - type: convert
              convert:
                toType: array
                format: json
        - type: FromCompositeFieldPath
          fromFieldPath: spec.appName
          toFieldPath: spec.forProvider.roleName
          transforms:
            - type: string
              string:
                type: Format
                fmt: "vault-secrets-operator-%s-role"
        - type: FromCompositeFieldPath
          fromFieldPath: spec.appName
          toFieldPath: spec.forProvider.tokenPolicies
          transforms:
            - type: string
              string:
                type: Format
                fmt: "[\"vault-secrets-operator-%s-policy\"]"
            - type: convert
              convert:
                toType: array
                format: json

    - name: vault-secrets-mount
      base:
        apiVersion: vault.vault.upbound.io/v1alpha1
        kind: Mount
        spec:
          providerConfigRef:
            name: vault-provider-config
          forProvider:
            type: kv-v2
      patches:
        - type: FromCompositeFieldPath
          fromFieldPath: spec.appName
          toFieldPath: spec.forProvider.path
          transforms:
            - type: string
              string:
                type: Format
                fmt: "app-%s-kv"
        - type: FromCompositeFieldPath
          fromFieldPath: spec.appName
          toFieldPath: spec.forProvider.description
          transforms:
            - type: string
              string:
                type: Format
                fmt: "KV storage for app %s"

    - name: vault-policy
      base:
        apiVersion: vault.vault.upbound.io/v1alpha1
        kind: Policy
        spec:
          providerConfigRef:
            name: vault-provider-config
          forProvider: {}
      patches:
        - type: FromCompositeFieldPath
          fromFieldPath: spec.appName
          toFieldPath: spec.forProvider.name
          transforms:
            - type: string
              string:
                type: Format
                fmt: "vault-secrets-operator-%s-policy"
        - type: FromCompositeFieldPath
          fromFieldPath: spec.appName
          toFieldPath: spec.forProvider.policy
          transforms:
            - type: string
              string:
                type: Format
                fmt: "path \"app-%s-kv/*\" {capabilities=[\"read\"]}"

    # Note that this is an `Object` created by provider-kubernetes, not by provider-vault
    - name: vault-auth
      base:
        apiVersion: kubernetes.crossplane.io/v1alpha2
        kind: Object
        spec:
          providerConfigRef:
            name: kubernetes-provider
          forProvider:
            manifest:
              apiVersion: secrets.hashicorp.com/v1beta1
              kind: VaultAuth
              spec:
                method: kubernetes
                mount: kubernetes # Hard-coded - this is what I used in my setup, but this could be customizable
                kubernetes:
                  serviceAccount: default
                  audiences:
                    - vault
      patches:
        # The Vault Role created earlier in this Composition
        - type: FromCompositeFieldPath
          fromFieldPath: spec.appName
          toFieldPath: spec.forProvider.manifest.spec.kubernetes.role
          transforms:
            - type: string
              string:
                type: Format
                fmt: "vault-secrets-operator-%s-role"
        - type: FromCompositeFieldPath
          fromFieldPath: spec.appName
          toFieldPath: spec.forProvider.manifest.metadata.name
          transforms:
            - type: string
              string:
                type: Format
                fmt: "vault-auth-%s"
        - type: FromCompositeFieldPath
          fromFieldPath: metadata.labels["crossplane.io/claim-namespace"]
          toFieldPath: spec.forProvider.manifest.metadata.namespace
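For reference, a claim against this XRD would look something like the following - the names are illustrative, and only `spec.appName` is defined by the schema above:

```
apiVersion: scubbo.org/v1alpha1
kind: BaseAppInfra
metadata:
  name: my-app-base-infra
  namespace: my-app # the claim namespace feeds the boundServiceAccountNamespaces patch
spec:
  appName: my-app
```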
7 charts/vault/Chart.yaml Normal file
@@ -0,0 +1,7 @@
apiVersion: v2
name: vault-extra-resources
description: Extra resources in support of Vault official Helm Chart

type: application
version: 0.1.0
appVersion: "1.0.0"
11 charts/vault/templates/pvc.yaml Normal file
@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: vault-plugin-claim
spec:
  accessModes:
    - "ReadWriteOnce"
  storageClassName: "freenas-iscsi-csi"
  resources:
    requests:
      storage: "1Gi"
1 charts/vault/values.yaml Normal file
@@ -0,0 +1 @@
# No configuration required
24 main-manifest.yaml Normal file
@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: jackjack-app-of-apps
  namespace: argo
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default

  source:
    repoURL: https://gitea.scubbo.org/scubbo/helm-charts.git
    targetRevision: HEAD
    path: app-of-apps

  destination:
    server: "https://kubernetes.default.svc"
    namespace: default

  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
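Assuming Argo CD is already running and the `argo` namespace exists (as the manifest's `metadata.namespace` implies), bootstrapping the app-of-apps is just:

```
kubectl apply -f main-manifest.yaml
```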