Init Image (to create user), plus some (not all) automation of test

Jack Jackson 2022-12-05 21:50:47 -08:00
parent c2dd6143e6
commit 8e5fdff0cd
28 changed files with 759 additions and 8 deletions

View File

@ -7,11 +7,20 @@ platform:
   arch: arm64
 
 steps:
-- name: push-built-image
+- name: push-built-init-image
   image: plugins/docker
   settings:
     registry: gitea.scubbo.org
-    repo: gitea.scubbo.org/scubbo/drone-build-status-monitor
+    repo: gitea.scubbo.org/scubbo/drone-build-status-monitor-init
     username: scubbo
     password:
       from_secret: gitea_password
+- name: push-built-main-image
+  image: plugins/docker
+  settings:
+    dockerfile: Dockerfile-initImage
+    registry: gitea.scubbo.org
+    repo: gitea.scubbo.org/scubbo/drone-build-status-monitor-main
+    username: scubbo
+    password:
+      from_secret: gitea_password

View File

@ -4,6 +4,6 @@ COPY requirements.txt requirements.txt
 RUN pip3 install -r requirements.txt
 RUN rm requirements.txt
 WORKDIR /app
-COPY src/ src
+COPY src/main/ src
 RUN chmod +x src/app.py
 CMD src/app.py

Dockerfile-initImage (new file, +12 lines)
View File

@ -0,0 +1,12 @@
FROM alpine
RUN apk update
RUN apk upgrade
RUN apk add curl
RUN curl -L https://github.com/harness/drone-cli/releases/latest/download/drone_linux_amd64.tar.gz | tar zx
RUN install -t /usr/local/bin drone
WORKDIR /app
COPY src/init/ src
RUN chmod +x src/init.sh
CMD src/init.sh
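For local testing, the init image can be built and run directly from the repository root. This is a minimal sketch: the tag and server URL are illustrative, while the environment variable names are the ones src/init/init.sh actually reads.

    docker build -f Dockerfile-initImage -t drone-monitor-init .
    docker run --rm \
      -e DRONE_DOMAIN=https://drone.example.org \
      -e PRIMARY_DRONE_USER_TOKEN=<admin-machine-user-token> \
      -e METRICS_DRONE_USER_TOKEN=<token-to-assign> \
      drone-monitor-init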

View File

@ -1,7 +1,8 @@
-TODO - flesh this out more! AFAICT, Drone's [metrics]()
 Exposes Prometheus metrics on port 8000. Listens on port 8015 for Webhook update events from Drone.
 
 Environment variables:
 * `ACCESS_TOKEN`
 * `DRONE_DOMAIN`
+
+## Demo
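A quick way to sanity-check a running instance of the monitor is sketched below. The `/metrics` path is assumed from the usual Prometheus convention, and the POST is only a connectivity check (in real use the payload on port 8015 comes from Drone's webhook):

    curl -s http://localhost:8000/metrics | head
    curl -s -X POST -H 'Content-Type: application/json' -d '{}' http://localhost:8015/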

demo/helm/Chart.lock (new file, +12 lines)
View File

@ -0,0 +1,12 @@
dependencies:
- name: drone
repository: https://charts.drone.io
version: 0.6.4
- name: kube-prometheus-stack
repository: https://prometheus-community.github.io/helm-charts
version: 42.0.0
- name: kubernetes-dashboard
repository: https://kubernetes.github.io/dashboard/
version: 6.0.0
digest: sha256:44b28e02441df32fdf8a6a039d03ae3196b4aeddb6690b9fd2601232f90ff8d1
generated: "2022-11-30T12:58:27.135466-08:00"

demo/helm/Chart.yaml (new file, +38 lines)
View File

@ -0,0 +1,38 @@
apiVersion: v2
name: drone-build-status-monitor
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"
dependencies:
# - name: gitea
# version: "6.0.3"
# repository: https://dl.gitea.io/charts/
- name: drone
version: "0.6.4"
repository: https://charts.drone.io
- name: kube-prometheus-stack
version: "42.0.0"
repository: "https://prometheus-community.github.io/helm-charts"
- name: kubernetes-dashboard
version: "6.0.0"
repository: https://kubernetes.github.io/dashboard/

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "drone-build-status-monitor.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "drone-build-status-monitor.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "drone-build-status-monitor.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "drone-build-status-monitor.labels" -}}
helm.sh/chart: {{ include "drone-build-status-monitor.chart" . }}
{{ include "drone-build-status-monitor.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "drone-build-status-monitor.selectorLabels" -}}
app.kubernetes.io/name: {{ include "drone-build-status-monitor.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "drone-build-status-monitor.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "drone-build-status-monitor.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,72 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone-build-monitor-demo-gitea
spec:
selector:
matchLabels:
app: gitea
template:
metadata:
labels:
app: gitea
spec:
initContainers:
- name: init
image: gitea/gitea:latest
imagePullPolicy: IfNotPresent
command: ["/usr/sbin/configure_gitea.sh"]
securityContext:
runAsUser: 1000
env:
- name: GITEA_ADMIN_USERNAME
valueFrom:
secretKeyRef:
key: username
name: drone-build-monitor-demo-gitea-admin-creds
- name: GITEA_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: drone-build-monitor-demo-gitea-admin-creds
volumeMounts:
- name: init
mountPath: /usr/sbin
- mountPath: /data
name: drone-build-monitor-demo-gitea-persistent-volume
containers:
- name: gitea
image: gitea/gitea:latest
imagePullPolicy: IfNotPresent
env:
- name: USER_UID
value: "1000"
- name: USER_GID
value: "1000"
- name: GITEA__security__INSTALL_LOCK
value: "true"
volumeMounts:
- mountPath: /etc/timezone
name: timezone
readOnly: true
- mountPath: /etc/localtime
name: localtime
readOnly: true
- mountPath: /data
name: drone-build-monitor-demo-gitea-persistent-volume
volumes:
- name: drone-build-monitor-demo-gitea-persistent-volume
persistentVolumeClaim:
claimName: drone-build-monitor-demo-gitea-persistent-volume-claim
- name: timezone
hostPath:
path: /etc/timezone
type: File
- name: localtime
hostPath:
path: /etc/localtime
type: File
- name: init
secret:
secretName: drone-build-monitor-demo-gitea-init
defaultMode: 110

View File

@ -0,0 +1,38 @@
apiVersion: v1
kind: Secret
metadata:
name: drone-build-monitor-demo-gitea-init
type: Opaque
stringData:
configure_gitea.sh: |-
#!/usr/bin/env bash
set -uo pipefail
set -x
echo '==== BEGIN GITEA CONFIGURATION ===='
function configure_admin_user() {
echo "Printing a lot of debugging"
gitea admin user list --admin
echo "Admin Username"
echo $GITEA_ADMIN_USERNAME
echo "Grepped admin list:"
gitea admin user list --admin | grep "$GITEA_ADMIN_USERNAME"
echo "Grepped awked admin list:"
gitea admin user list --admin | grep -e "\s\+${GITEA_ADMIN_USERNAME}\s\+" | awk -F " " "{printf \$1}"
local ACCOUNT_ID=$(gitea admin user list --admin | grep -e "\s\+${GITEA_ADMIN_USERNAME}\s\+" | awk -F " " "{printf \$1}")
echo "DEBUG - accountId is:";
echo $ACCOUNT_ID;
if [[ -z "${ACCOUNT_ID}" ]]; then
echo "No admin user '${GITEA_ADMIN_USERNAME}' found. Creating now..."
gitea admin user create --admin --username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}" --email "admin@example.org" --must-change-password=false
echo '...created.'
else
echo "Admin account '${GITEA_ADMIN_USERNAME}' already exist. Running update to sync password..."
gitea admin user change-password --username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}"
echo '...password sync done.'
fi
}
configure_admin_user

View File

@ -0,0 +1,25 @@
apiVersion: v1
kind: Service
metadata:
name: drone-build-monitor-demo-gitea-service
spec:
ports:
- port: 3000
targetPort: 3000
protocol: TCP
name: http
selector:
app: gitea
---
apiVersion: v1
kind: Service
metadata:
name: drone-build-monitor-demo-gitea-ssh-service
spec:
ports:
- port: 22
targetPort: 22
protocol: TCP
name: ssh
selector:
app: gitea

View File

@ -0,0 +1,35 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: drone-build-monitor-demo-gitea-persistent-volume
spec:
capacity:
storage: 1Gi
volumeMode: Filesystem
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Delete
storageClassName: local-storage
local:
path: /tmp/gitea-storage
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: absent-label
operator: DoesNotExist
values:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: drone-build-monitor-demo-gitea-persistent-volume-claim
spec:
volumeName: drone-build-monitor-demo-gitea-persistent-volume
accessModes:
- ReadWriteMany
storageClassName: local-storage
resources:
requests:
storage: 1Gi

View File

@ -0,0 +1,47 @@
apiVersion: v1
kind: Secret
metadata:
name: nginx-conf-secret
type: Opaque
stringData:
nginx.conf: |-
events {
worker_connections 1024; ## Default
}
http {
# Without this, docker image interaction might give a `413 Request Entity Too Large` (default 1M)
client_max_body_size 500M;
server {
listen 80;
server_name fakegitea.local;
location / {
# Use this if running locally with docker, rather than on k8s
# proxy_pass http://host.docker.internal:3000;
proxy_pass http://drone-build-monitor-demo-gitea-service.demo:3000;
}
}
server {
listen 443 ssl;
server_name fakegiteatls.local;
ssl_certificate /certs/domain.crt;
ssl_certificate_key /certs/domain.key;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;
location / {
# Use this if running locally with docker, rather than on k8s
# proxy_pass http://host.docker.internal:3000;
proxy_pass http://drone-build-monitor-demo-gitea-service.demo:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
}

View File

@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone-build-monitor-demo-nginx
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
volumeMounts:
- mountPath: /etc/nginx/
name: nginx-conf
readOnly: true
- mountPath: /certs
name: certs
volumes:
- name: nginx-conf
secret:
secretName: nginx-conf-secret
- name: certs
hostPath:
path: /tmp/gitea-certs
type: Directory

View File

@ -0,0 +1,17 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: gitea-ingress
spec:
ingressClassName: traefik
rules:
- host: fakegiteatls.local
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: drone-build-monitor-demo-gitea-service
port:
number: 3000

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: nginx
spec:
ports:
- port: 443
targetPort: 443
protocol: TCP
name: https
selector:
app: nginx

View File

@ -0,0 +1 @@
For ease of setup, some of these contain hard-coded values. Use greater care in production deployments!

View File

@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: drone-build-monitor-demo-gitea-admin-creds
stringData:
username: admin-username
password: admin-password

View File

@ -0,0 +1,26 @@
{{- /*
This is a Kubernetes secret that holds the token for the Machine user used to poll the Drone API
This pattern was taken from:
https://itnext.io/manage-auto-generated-secrets-in-your-helm-charts-5aee48ba6918
This secret provides two values:
* `DRONE_USER_CREATE`, an environment variable which will prompt Drone to create a user with the given configuration
* `token`, the bare token of the created user, that other services can use in order to act as the user
*/}}
{{- if empty .Values.primaryDroneMachineUserSecret }}
apiVersion: v1
kind: Secret
metadata:
name: "primary-drone-machine-user-secret"
annotations:
"helm.sh/resource-policy": "keep"
type: Opaque
data:
# retrieve the secret data using lookup function and when not exists, return an empty dictionary / map as result
{{- $existing_secret := (lookup "v1" "Secret" .Release.Namespace "primary-drone-machine-user-secret") | default dict }}
{{- $secretData := (get $existing_secret "data") | default dict }}
# set $secret to existing secret data or generate a random one (32 chars long) when not exists
{{- $secret := (get $secretData "token") | default (randAlphaNum 32) }}
token: {{ $secret | b64enc | quote }}
DRONE_USER_CREATE: {{ printf "%s%s" "username:root,admin:true,machine:true,token:" $secret | b64enc | quote }}
{{- end }}
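Once the chart is installed, the generated token can be read back out of the cluster (for example, to call the Drone API by hand):

    kubectl -n demo get secret primary-drone-machine-user-secret \
      -o jsonpath='{.data.token}' | base64 -d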

demo/helm/values.yaml (new file, +109 lines)
View File

@ -0,0 +1,109 @@
# Default values for drone-build-status-monitor.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: nginx
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
# Everything below here was hand-created
# Set these if you want to use an existing secret for the Drone
# tokens (otherwise, fresh ones will be created if necessary)
#
# Note the required format of the secret pointed to by
# `primaryDroneMachineUserSecret` - it must have two entries (see the example command after this file):
# * `DRONE_USER_CREATE=username:root,admin:true,machine:true,token:<token>`
# * `token=<token>`
#
# For explanation why, see https://community.harness.io/t/is-it-possible-to-create-a-new-user-from-a-command-on-the-image-itself/12899/4
primaryDroneMachineUserSecret: ""
# Subchart values
kube-prometheus-stack:
grafana:
enabled: false
# TODO - fill out appropriate values here
drone:
extraSecretNamesForEnvFrom:
- primary-drone-machine-user-secret
env:
DRONE_SERVER_HOST: drone-monitoring-demo.local
DRONE_RPC_SECRET: hard-coding-is-very-bad-do-not-do-this-in-production
DRONE_GITEA_SERVER: drone-build-monitor-demo-gitea-service.demo
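If you would rather supply the machine-user secret yourself (per the comment above), one way to create it with the two required entries is sketched below. The token value is illustrative; set `primaryDroneMachineUserSecret` to a non-empty value so the template above skips generating its own.

    TOKEN=$(openssl rand -hex 16)
    kubectl -n demo create secret generic primary-drone-machine-user-secret \
      --from-literal=token="$TOKEN" \
      --from-literal=DRONE_USER_CREATE="username:root,admin:true,machine:true,token:${TOKEN}"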

demo/main-script.sh (new file, +103 lines)
View File

@ -0,0 +1,103 @@
#!/bin/bash
# Note that this is unfinished! I got bored at about the point of making
# Kubernetes and Docker trust Gitea's certificates so that we could automatically push
# images to it - I realized that a fully-automated demo isn't really worth the
# effort to set it up, since 99% of folks who are interested in this will
# already have their own Drone+SourceControl setups working.
# https://stackoverflow.com/a/677212/1040915
if ! command -v multipass &> /dev/null
then
brew install --cask multipass
fi
###
# Generate SSL/TLS certs because they are a requirement for self-hosted OCI registries
###
#
# https://medium.com/@ifeanyiigili/how-to-setup-a-private-docker-registry-with-a-self-sign-certificate-43a7407a1613
#mkdir -p /tmp/gitea-certs
#openssl req -newkey rsa:4096 -nodes -sha256 \
# -keyout /tmp/gitea-certs/domain.key -x509 -days 365 \
# -out /tmp/gitea-certs/domain.crt \
# -subj '/C=US/ST=CA/L=FakeTown/O=FakeCorp/CN=Fake Gitea Ltd./' \
# -addext 'subjectAltName=DNS:fakegiteatls.local'
# TODO - update K3s to use the created certificate to trust Gitea's repo:
# https://docs.k3s.io/installation/private-registry
multipass launch --name k3s --mem 4G --disk 40G
multipass transfer demo/remote-script.sh k3s:/tmp/remote-script.sh
multipass exec k3s -- sh /tmp/remote-script.sh
## Transfer certificates to multipass VM
#multipass transfer /tmp/gitea-certs/domain.crt k3s:/tmp/gitea-certs
#multipass transfer /tmp/gitea-certs/domain.key k3s:/tmp/gitea-certs
# Fetch the k3s-on-multipass configuration to your laptop to enable interaction
mkdir -p $HOME/.kube
multipass transfer k3s:/tmp/k3s.yaml $HOME/.kube/multipass-k3s.yaml
# Following line depends on `jq`, a super-handy utility you should really have installed already! If you don't have or want it for some reason, you can approximate this with `multipass info k3s | grep 'IPv4' | awk '{print $2}'`
VM_IP=$(multipass info k3s --format json | jq '.info.k3s.ipv4[0]' -r)
# I wish `sed` were consistent between installations so we didn't have to rely on perl for this...
perl -i -pe 's/127.0.0.1/'"$VM_IP"'/' $HOME/.kube/multipass-k3s.yaml
# Rename all the "default" entities - cluster, user, etc. - to "k3s" so that they will remain distinct when merging with existing config
perl -i -pe 's/: default/: k3s/' $HOME/.kube/multipass-k3s.yaml
if [[ -f $HOME/.kube/config ]]; then
cp $HOME/.kube/config $HOME/.kube/config.BAK
# Merge kubernetes config file into existing file: https://medium.com/@jacobtomlinson/how-to-merge-kubernetes-kubectl-config-files-737b61bd517d
KUBECONFIG=$HOME/.kube/config:$HOME/.kube/multipass-k3s.yaml kubectl config view --flatten > /tmp/config && mv /tmp/config ~/.kube/config
else
mv $HOME/.kube/multipass-k3s.yaml $HOME/.kube/config
fi
# This next line relies on `kubectx`. Install like so: https://github.com/ahmetb/kubectx - or manually set your `kubectl` context for these commands
kubectl ctx k3s
# Install dashboard separately from the main release, so we can use it to debug if things go wrong
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm install -n kubernetes-dashboard --create-namespace kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard
# I don't know why, but the Helm chart doesn't seem to grant the created ServiceAccount appropriate permissions
kubectl patch clusterrolebinding cluster-admin --type='json' -p='[{"op": "add", "path": "/subjects/1", "value": {"kind": "ServiceAccount", "name": "kubernetes-dashboard", "namespace":"kubernetes-dashboard"}}]'
echo "Run 'kubectl proxy' in a separate terminal, then navigate to http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:443/proxy/"
echo "Use the following token to log in to the Kubernetes dashboard: $(kubectl -n kubernetes-dashboard create token kubernetes-dashboard)"
echo "[Press Enter to continue]"
read
helm dependency build helm/
helm install -n demo --create-namespace demo helm/
# Drone UI available on http://localhost:8001/api/v1/namespaces/demo/services/http:demo-drone:8080/proxy
# Prometheus UI available on http://localhost:8001/api/v1/namespaces/demo/services/http:demo-kube-prometheus-stack-prometheus:9090/proxy/graph
# Gitea UI available on http://localhost:8001/api/v1/namespaces/demo/services/http:drone-build-monitor-demo-gitea-service:3000/proxy/
# This is a hack - it looks like the `configure-gitea` script can't run properly on first execution,
# since the installation hasn't completed (and won't, until the Web UI is loaded and interacted with) -
# so, by deleting the Gitea pod (which will then be recreated because of the deployment), the `initContainer` is re-run,
# creating the admin user with provided credentials.
#
# There's probably a way to skip the Web UI requirement for installation, but this is just a proof-of-concept
# so I'm not going to spend much time trying to find it. Please do let me know if you know how to do it, though!
#
# The below is commented out because I _think_ I've found how to get around that (by setting `GITEA__security__INSTALL_LOCK=true`,
# then calling `gitea migrate` before creating the user) - but keeping it around in case I need it
#kubectl delete pod $(kubectl get pods | grep 'gitea' | awk '{print $1}')
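# For reference, a rough sketch of that alternative (untested as written here): with
# GITEA__security__INSTALL_LOCK=true already set on the Gitea container, the init script can run
#   gitea migrate
# to initialise the database first, after which the existing admin-user commands in
# configure_gitea.sh (`gitea admin user create --admin ...`) should work without the Web UI step.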
# Do this manually because I don't want to risk messing with sudo privileges!
echo "Add the following line to your /etc/hosts:"
echo "${VM_IP} fakegiteatls.local"
read
# Ditto - we probably _could_ do this with automated /etc/docker/daemon.json editing (or
# whatever the equivalent is for Mac), but probably not worth it
echo "Now add 'fakegiteatls.local' as an insecure Docker Registry, in whatever way"
echo "is appropriate for your system"
# TODO - docker login (and, also, save to Kubernetes cluster)
docker build -t drone-build-metrics-init -f ../Dockerfile-initImage ..
docker tag drone-build-metrics-init fakegiteatls.local/root/drone-build-metrics-init
docker build -t drone-build-metrics-demo ..
docker tag drone-build-metrics-demo fakegiteatls.local/root/drone-build-metrics-demo
# ...and then we would set up a Drone build by creating a repository and making an automated
# push to it, and then observe metrics.

View File

@ -0,0 +1,39 @@
# This deployment is applied separately from the main Helm chart - which
# sets up Drone, Gitea as a Drone source, and Prometheus and Grafana for
# monitoring and visualization - because it depends on Gitea being set up
# so that we can build and push an image to the Gitea registry.
#
# This could _probably_ be carried out fully automatically by having
# the deployment wait on the presence of the image in the Gitea registry
# (with an initContainer that waits on availability), but that seems like
# more complexity than is worth it for this proof-of-concept
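# For reference, such an initContainer could simply poll the registry's manifest endpoint until
# the image exists - roughly (untested; the tag and any auth requirements are assumptions):
#   until curl -sf http://drone-build-monitor-demo-gitea-service.demo:3000/v2/root/drone-monitor-init/manifests/latest > /dev/null; do sleep 5; done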
apiVersion: apps/v1
kind: Deployment
metadata:
name: drone-build-monitor-demo-monitor
namespace: demo
spec:
selector:
matchLabels:
app: monitoring
template:
metadata:
labels:
app: monitoring
spec:
initContainers:
- name: init
image: "drone-build-monitor-demo-gitea-service.demo:3000/root/drone-monitor-init"
imagePullPolicy: IfNotPresent
command: ["/app/init.sh"]
env:
- name: PRIMARY_DRONE_USER_TOKEN
valueFrom:
secretKeyRef:
key: token
name: primary-drone-machine-user-secret
- name: DRONE_DOMAIN
value: "demo-drone.demo:8080"
containers:
- name: placeholder
image: ubuntu

demo/remote-script.sh (new executable file, +24 lines)
View File

@ -0,0 +1,24 @@
#!/bin/sh
# DO NOT run this script directly - this will be copied to the Multipass cluster to set up k3s
sudo apt update
sudo NEEDRESTART_MODE=a apt upgrade -y
# Install k3s, and make config file available
curl -sfL https://get.k3s.io | sh -
sudo cp /etc/rancher/k3s/k3s.yaml /tmp/k3s.yaml
sudo chmod +r /tmp/k3s.yaml
# Create necessary directories for local-storage from k3s nodes...
sudo mkdir -p /tmp/gitea-storage
sudo chmod 777 /tmp/gitea-storage
# ...and for certificates...
sudo mkdir -p /tmp/gitea-certs
sudo chmod 777 /tmp/gitea-certs
# ...and for the nginx configuration
sudo mkdir -p /tmp/nginx
sudo chmod 777 /tmp/nginx
exit

src/init/init.sh (new file, +28 lines)
View File

@ -0,0 +1,28 @@
#!/bin/sh
metrics_user_exists() {
[ $(drone -s "$DRONE_DOMAIN" -t "$PRIMARY_DRONE_USER_TOKEN" user ls 2>/dev/null | \
grep -c '^build-metrics$') -ne 0 ]
# `return` on its own will return the return code of the previous statement.
# Bash is a very sensible and normal programming language
return
}
if metrics_user_exists; then
echo "Drone Build Metrics user exists - exiting"
exit 0
else
# I'm assuming that it needs to be admin in order to see and report on every build,
# but if you wanted to get really finicky with permissions you could create different
# metrics users with different permissions.
drone -s "$DRONE_DOMAIN" -t "$PRIMARY_DRONE_USER_TOKEN" \
user add --machine --admin --token "$METRICS_DRONE_USER_TOKEN" build-metrics 2>/dev/null
# Double-check!
if ! metrics_user_exists; then
echo "Tried creating the user, but it still doesn't exist - something's gone wrong"
echo "Drone Domain: $DRONE_DOMAIN"
echo "Primary Drone User Token: $PRIMARY_DRONE_USER_TOKEN"
exit 1
fi
exit 0
fi
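The same script can be exercised outside the cluster, assuming the drone CLI is installed locally and the server is reachable; the values below are illustrative:

    DRONE_DOMAIN=https://drone.example.org \
    PRIMARY_DRONE_USER_TOKEN=<admin-machine-user-token> \
    METRICS_DRONE_USER_TOKEN=$(openssl rand -hex 16) \
      sh src/init/init.sh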

src/main/__init__.py (new empty file)
View File

View File

@ -54,7 +54,11 @@ def get_repos(access_token: str, drone_domain: str) -> Iterable[Repo]:
     return repo_list
 
-def get_latest_build_status(access_token: str, drone_domain: str, owner: str, repo_name: str) -> BuildStatus:
+def get_latest_build_status(
+        access_token: str,
+        drone_domain: str,
+        owner: str,
+        repo_name: str) -> BuildStatus:
     builds = requests.get(f'{drone_domain}/api/repos/{owner}/{repo_name}/builds').json()
     if len(builds) == 0:
         return BuildStatus('unknown')