#!/usr/bin/env bash
set -x

# upgrade go to 1.19.1
go version | grep 1.19.1 || curl -L https://dl.google.com/go/go1.19.1.linux-amd64.tar.gz | sudo tar --directory /usr/local --extract --ungzip
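# note: this assumes /usr/local/go/bin is already on PATH; Go's install docs also
# recommend removing any previous /usr/local/go before extracting a new toolchain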
# shfmt is needed for make
which shfmt || sudo apt-get install -y shfmt
# for converting k8s yaml to HCL
go install github.com/jrhouston/tfk8s@latest
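# go install places the tfk8s binary in $(go env GOPATH)/bin (or $GOBIN), which needs to be on PATH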
# TODO: make is still failing; some dependencies are possibly still missing

# install coder binary until we can build from src
which coder || (
  curl -OL https://github.com/coder/coder/releases/download/v0.9.1/coder_0.9.1_linux_amd64.deb
  sudo dpkg -i coder_0.9.1_linux_amd64.deb
  # Add completion
  echo '. <(coder completion bash)' >>~/.bashrc
)
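# completion only takes effect in new shells; the current one would need to source ~/.bashrc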

# Deploying coder (from helm for now)
kubectl create namespace coder
# ensure ingress works / cert secrets get copied
kubectl label ns coder cert-manager-tls=sync
# needs a postgres db
helm repo add bitnami https://charts.bitnami.com/bitnami
helm install postgres bitnami/postgresql \
  --namespace coder \
  --set auth.username=coder \
  --set auth.password=coder \
  --set auth.database=coder \
  --set persistence.size=10Gi
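# the chart exposes postgres in-cluster at postgres-postgresql.coder.svc.cluster.local:5432;
# presumably that connection URL is wired into coder via .sharing.io/values.template.yaml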
# render helm values from the template (envsubst fills in env vars), then deploy via helm for now
envsubst <.sharing.io/values.template.yaml >.sharing.io/values.yaml
helm install coder ./helm/ \
  --namespace coder \
  --values .sharing.io/values.yaml
# set up the ingress
envsubst <.sharing.io/ingress.template.yaml >.sharing.io/ingress.yaml
kubectl apply -f .sharing.io/ingress.yaml
# Wait for coder to deploy
kubectl rollout status deployment coder -n coder
kubectl wait -n coder --for=condition=ready pod -l app.kubernetes.io/name=coder
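# once the pod is ready, the coder API should be reachable for the login step below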

# create the initial user
# populate ii or pair as an admin user without logging in
CODER_EMAIL=ii@ii.coop
CODER_PASSWORD=ii
CODER_USERNAME=ii
CODER_URL=https://coder.${SHARINGIO_PAIR_BASE_DNS_NAME}
# export vars so we can emulate a tty with a short expect script
export CODER_EMAIL CODER_PASSWORD CODER_USERNAME
coder login "$CODER_URL" -u "$CODER_USERNAME" -p "$CODER_PASSWORD" -e "$CODER_EMAIL"
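# HELM_VALUES is presumably consumed by the Cluster API vcluster provider's cluster template;
# note that bash keeps the \n escapes literal inside double quotes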
export HELM_VALUES="service:\n type: NodePort\nsyncer:\n extraArgs:\n - --tls-san=${SHARINGIO_PAIR_BASE_DNS_NAME}"
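# enable the experimental ClusterResourceSet feature gate for the clusterctl init calls below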
export EXP_CLUSTER_RESOURCE_SET=true
# Install kubevirt
RELEASE=$(curl -s https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt)
export RELEASE
# Deploy the KubeVirt operator
kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-operator.yaml
# Create the KubeVirt CR (instance deployment request), which triggers the actual installation
kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-cr.yaml
# wait until all KubeVirt components are up
kubectl -n kubevirt wait kv kubevirt --for=condition=Available

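# initialize Cluster API with the vcluster, kubevirt, and packet infrastructure providers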
clusterctl init --infrastructure vcluster
clusterctl init --infrastructure kubevirt
clusterctl init --infrastructure packet

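# namespace that workspaces are presumably provisioned into (referenced by the workspace template)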
kubectl create ns coder-workspaces

# TODO: upload / update the kubernetes template
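# a sketch of what that could look like (assumes the upstream example template path and CLI flags):
#   coder templates create kubernetes --directory ./examples/templates/kubernetes --yes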