#!/bin/bash

# Note that this is unfinished! I got bored at about the point of making
# Kubernetes trust Gitea's certificates so that we could automatically push
# images to its registry - I realized that a fully-automated demo isn't really
# worth the effort to set it up, since 99% of folks who are interested in this
# will already have their own Drone+SourceControl setups working.

# https://stackoverflow.com/a/677212/1040915
if ! command -v multipass &> /dev/null
then
  brew install --cask multipass
fi
###
# Generate SSL/TLS certs because they are a requirement for self-hosted OCI registries
###
#
# https://medium.com/@ifeanyiigili/how-to-setup-a-private-docker-registry-with-a-self-sign-certificate-43a7407a1613
#mkdir -p /tmp/gitea-certs
#openssl req -newkey rsa:4096 -nodes -sha256 \
#  -keyout /tmp/gitea-certs/domain.key -x509 -days 365 \
#  -out /tmp/gitea-certs/domain.crt \
#  -subj '/C=US/ST=CA/L=FakeTown/O=FakeCorp/CN=Fake Gitea Ltd./' \
#  -addext 'subjectAltName=DNS:fakegiteatls.local'
# TODO - update K3s to use the created certificate to trust Gitea's registry:
# https://docs.k3s.io/installation/private-registry
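#
# A rough sketch of what that config might look like (untested here; the port is an
# assumption - Gitea serves its container registry on the same port as its web UI).
# It would be written to /etc/rancher/k3s/registries.yaml inside the VM before
# (re)starting k3s:
#
#   mirrors:
#     "fakegiteatls.local":
#       endpoint:
#         - "https://fakegiteatls.local:3000"
#   configs:
#     "fakegiteatls.local":
#       tls:
#         ca_file: /etc/rancher/k3s/gitea-ca.crt  # a copy of /tmp/gitea-certs/domain.crt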
multipass launch --name k3s --memory 4G --disk 40G
multipass transfer demo/remote-script.sh k3s:/tmp/remote-script.sh
multipass exec k3s -- sh /tmp/remote-script.sh
## Transfer certificates to multipass VM
#multipass transfer /tmp/gitea-certs/domain.crt k3s:/tmp/gitea-certs
#multipass transfer /tmp/gitea-certs/domain.key k3s:/tmp/gitea-certs
# Fetch the k3s-on-multipass configuration to your laptop to enable interaction
mkdir -p $HOME/.kube
multipass transfer k3s:/tmp/k3s.yaml $HOME/.kube/multipass-k3s.yaml
# Following line depends on `jq`, a super-handy utility you should really have installed already!
# If you don't have or want it for some reason, you can approximate this with
# `multipass info k3s | grep 'IPv4' | awk '{print $2}'`
VM_IP=$(multipass info k3s --format json | jq '.info.k3s.ipv4[0]' -r)
# I wish `sed` were consistent between installations so we didn't have to rely on perl for this...
perl -i -pe 's/127.0.0.1/'"$VM_IP"'/' $HOME/.kube/multipass-k3s.yaml
# Rename all the "default" entities - cluster, user, etc. - to "k3s" so that they
# will remain distinct when merging with existing config
perl -i -pe 's/: default/: k3s/' $HOME/.kube/multipass-k3s.yaml
if [[ -f $HOME/.kube/config ]]; then
  cp $HOME/.kube/config $HOME/.kube/config.BAK
  # Merge kubernetes config file into existing file:
  # https://medium.com/@jacobtomlinson/how-to-merge-kubernetes-kubectl-config-files-737b61bd517d
  KUBECONFIG=$HOME/.kube/config:$HOME/.kube/multipass-k3s.yaml kubectl config view --flatten > /tmp/config && mv /tmp/config ~/.kube/config
else
  mv $HOME/.kube/multipass-k3s.yaml $HOME/.kube/config
fi

# This next line relies on `kubectx`. Install like so: https://github.com/ahmetb/kubectx - or manually set your `kubectl` context for these commands
kubectl ctx k3s
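# (If you don't have `kubectx`, the manual equivalent of the line above is:
# `kubectl config use-context k3s`)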
# Install dashboard separately from the main release, so we can use it to debug if things go wrong
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm install -n kubernetes-dashboard --create-namespace kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard
# I don't know why, but the Helm chart doesn't seem to grant the created ServiceAccount appropriate permissions
kubectl patch clusterrolebinding cluster-admin --type='json' -p='[{"op": "add", "path": "/subjects/1", "value": {"kind": "ServiceAccount", "name": "kubernetes-dashboard", "namespace":"kubernetes-dashboard"}}]'
echo "Run 'kubectl proxy' in a separate terminal, then navigate to http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:443/proxy/"
echo "Use the following token to log in to the Kubernetes dashboard: $(kubectl -n kubernetes-dashboard create token kubernetes-dashboard)"
echo "[Press Enter to continue]"
read
helm dependency build helm/
helm install -n demo --create-namespace demo helm/
# Drone UI available on http://localhost:8001/api/v1/namespaces/demo/services/http:demo-drone:8080/proxy
# Prometheus UI available on http://localhost:8001/api/v1/namespaces/demo/services/http:demo-kube-prometheus-stack-prometheus:9090/proxy/graph
# Gitea UI available on http://localhost:8001/api/v1/namespaces/demo/services/http:drone-build-monitor-demo-gitea-service:3000/proxy/
# This is a hack - it looks like the `configure-gitea` script can't run properly on first execution,
# since the installation hasn't completed (and won't, until the Web UI is loaded and interacted with) -
# so, by deleting the Gitea pod (which will then be recreated because of the deployment), the `initContainer` is re-run,
# creating the admin user with provided credentials.
#
# There's probably a way to skip the Web UI requirement for installation, but this is just a proof-of-concept
# so I'm not going to spend much time trying to find it. Please do let me know if you know how to do it, though!
#
# The below is commented out because I _think_ I've found how to get around that (by setting `GITEA__security__INSTALL_LOCK=true`,
# then calling `gitea migrate` before creating the user) - but keeping it around in case I need it
#kubectl delete pod $(kubectl get pods | grep 'gitea' | awk '{print $1}')
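#
# For reference, a rough sketch of that workaround (the exact invocation is an assumption -
# it's not what the chart's initContainer actually runs - and the credential variables are placeholders):
#   export GITEA__security__INSTALL_LOCK=true
#   gitea migrate
#   gitea admin user create --admin --username "$GITEA_ADMIN_USER" --password "$GITEA_ADMIN_PASSWORD" --email admin@example.com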
# Do this manually because I don't want to risk messing with sudo privileges!
echo "Add the following line to your /etc/hosts:"
echo "${VM_IP} fakegiteatls.local"
read
# Ditto - we probably _could_ do this with automated /etc/docker/daemon.json editing (or
# whatever the equivalent is for Mac), but probably not worth it
echo "Now add 'fakegiteatls.local' as an insecure Docker Registry, in whatever way"
echo "is appropriate for your system"
|
|
|
|
# TODO - docker login (and, also, save to Kubernetes cluster)
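# Something like the following would probably cover it (untested - the username, password
# variable, and secret name below are placeholders, not values this demo defines):
#   docker login fakegiteatls.local -u root -p "$GITEA_ADMIN_PASSWORD"
#   kubectl -n demo create secret docker-registry gitea-registry-creds \
#     --docker-server=fakegiteatls.local \
#     --docker-username=root \
#     --docker-password="$GITEA_ADMIN_PASSWORD"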
docker build -t drone-build-metrics-init -f ../Dockerfile-initImage ..
docker tag drone-build-metrics-init fakegiteatls.local/root/drone-build-metrics-init

docker build -t drone-build-metrics-demo ..
docker tag drone-build-metrics-demo fakegiteatls.local/root/drone-build-metrics-demo
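# Once the registry is trusted and we've logged in, the tagged images would then be pushed
# so that the cluster can pull them (left manual here, for the same reasons as above):
#   docker push fakegiteatls.local/root/drone-build-metrics-init
#   docker push fakegiteatls.local/root/drone-build-metrics-demo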
# ...and then we would set up a Drone build by creating a repository and making an automated
# push to it, and then observe metrics.
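#
# Roughly, that would look something like the following (a sketch only - the Gitea URL, API
# token, and repo name are assumptions, not values defined anywhere above):
#   curl -s -X POST "http://fakegiteatls.local:3000/api/v1/user/repos" \
#     -H "Authorization: token $GITEA_API_TOKEN" \
#     -H "Content-Type: application/json" \
#     -d '{"name": "drone-build-metrics-demo"}'
#   git remote add demo http://fakegiteatls.local:3000/root/drone-build-metrics-demo.git
#   git push demo main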