diff --git a/.sharing.io/destroy b/.sharing.io/destroy
new file mode 100755
index 0000000000000..14bcb987174a0
--- /dev/null
+++ b/.sharing.io/destroy
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+set -x
+kubectl delete -f .sharing.io/ingress.yaml
+helm delete coder --namespace coder
+helm delete postgres --namespace coder
+kubectl delete namespace coder
+# TODO: populate ii or pair as an admin user without logging in
+# TODO: upload / update the kubernetes template
diff --git a/.sharing.io/ingress.template.yaml b/.sharing.io/ingress.template.yaml
new file mode 100644
index 0000000000000..c97860bef9598
--- /dev/null
+++ b/.sharing.io/ingress.template.yaml
@@ -0,0 +1,25 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: contour-external
+  name: coder
+  namespace: coder
+spec:
+  rules:
+    - host: coder.${SHARINGIO_PAIR_BASE_DNS_NAME}
+      http:
+        paths:
+          - backend:
+              service:
+                name: coder
+                port:
+                  number: 80
+            path: /
+            pathType: ImplementationSpecific
+  tls:
+    - hosts:
+        - coder.${SHARINGIO_PAIR_BASE_DNS_NAME}
+      secretName: letsencrypt-prod
+status:
+  loadBalancer: {}
diff --git a/.sharing.io/init b/.sharing.io/init
new file mode 100755
index 0000000000000..14a9cc27802b2
--- /dev/null
+++ b/.sharing.io/init
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+set -x
+
+# shfmt is needed for make
+which shfmt || sudo apt-get install -y shfmt
+# for converting k8s yaml to HCL
+go install github.com/jrhouston/tfk8s@latest
+# TODO: make is still failing; some dependencies may still be missing.
+
+# install the coder binary until we can build from src
+which coder || (
+  curl -L -o /tmp/coder.deb https://github.com/coder/coder/releases/download/v0.9.8/coder_0.9.8_linux_amd64.deb
+  sudo apt install -y /tmp/coder.deb
+  # Add completion
+  echo '. <(coder completion bash)' >>~/.bashrc
+)
+
+# Deploy coder (from helm for now)
+kubectl create namespace coder
+# ensure ingress works / cert secrets get copied
+kubectl label ns coder cert-manager-tls=sync
+# coder needs a postgres db
+helm repo add bitnami https://charts.bitnami.com/bitnami
+helm install postgres bitnami/postgresql \
+  --namespace coder \
+  --set auth.username=coder \
+  --set auth.password=coder \
+  --set auth.database=coder \
+  --set persistence.size=10Gi
+# deploy via helm for now
+envsubst <.sharing.io/values.template.yaml >.sharing.io/values.yaml
+helm install coder ./helm/ \
+  --namespace coder \
+  --values .sharing.io/values.yaml
+# # Wait for coder to deploy
+# kubectl rollout status deployment coder -n coder
+# ...so we can create the initial user
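+# (optional sketch) sanity-check that postgres is accepting connections before
+# waiting on coder; the DSN matches the chart values above, and the pod name
+# used here is arbitrary:
+# kubectl run pg-check --rm -i --restart=Never -n coder \
+#   --image=bitnami/postgresql --command -- \
+#   psql "postgres://coder:coder@postgres-postgresql.coder.svc.cluster.local:5432/coder" -c 'SELECT 1;'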
+kubectl wait -n coder --for=condition=ready pod -l app.kubernetes.io/name=coder
+
+# create the initial user
+# populate ii or pair as an admin user without logging in
+CODER_EMAIL=ii@ii.coop
+CODER_PASSWORD=ii
+CODER_USERNAME=ii
+CODER_URL=https://coder.${SHARINGIO_PAIR_BASE_DNS_NAME}
+# export vars so we can emulate a tty with a short expect script
+export CODER_EMAIL CODER_PASSWORD CODER_USERNAME
+coder login $CODER_URL -u $CODER_USERNAME -p $CODER_PASSWORD -e $CODER_EMAIL
+export HELM_VALUES="service:\n type: NodePort\nsyncer:\n extraArgs:\n - --tls-san=${SHARINGIO_PAIR_BASE_DNS_NAME}"
+export EXP_CLUSTER_RESOURCE_SET=true
+
+# Install kubevirt
+export RELEASE=$(curl https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt)
+# Deploy the KubeVirt operator
+kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-operator.yaml
+# Create the KubeVirt CR (instance deployment request), which triggers the actual installation
+kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-cr.yaml
+# wait until all KubeVirt components are up
+kubectl -n kubevirt wait kv kubevirt --for condition=Available
+
+# install CDI support for KubeVirt
+export TAG=$(curl -s -w %{redirect_url} https://github.com/kubevirt/containerized-data-importer/releases/latest)
+export VERSION=${TAG##*/}
+kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-operator.yaml
+kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-cr.yaml
+
+# cluster-api bootstrapping
+clusterctl init --infrastructure vcluster
+clusterctl init --infrastructure kubevirt
+clusterctl init --infrastructure packet
+clusterctl init --bootstrap talos --control-plane talos
+
+# we'll need these extra rolebindings for the coder service account for our template to work;
+# they must be applied after the coder helm chart is installed and after
+# `clusterctl init --bootstrap talos --control-plane talos` has run
+kubectl apply -f ./examples/templates/kubevirt-talos/role+binding.yaml
+
+kubectl create ns coder-workspaces
+
+# TODO: upload / update the kubernetes template
diff --git a/.sharing.io/values.template.yaml b/.sharing.io/values.template.yaml
new file mode 100644
index 0000000000000..bf9209306f9a8
--- /dev/null
+++ b/.sharing.io/values.template.yaml
@@ -0,0 +1,41 @@
+coder:
+  # You can specify any environment variables you'd like to pass to Coder
+  # here. Coder consumes environment variables listed in
+  # `coder server --help`, and these environment variables are also passed
+  # to the workspace provisioner (so you can consume them in your Terraform
+  # templates for auth keys etc.).
+  #
+  # Please keep in mind that you should not set `CODER_ADDRESS`,
+  # `CODER_TLS_ENABLE`, `CODER_TLS_CERT_FILE` or `CODER_TLS_KEY_FILE` as
+  # they are already set by the Helm chart and will cause conflicts.
+  image:
+    tag: "v0.9.8"
+  env:
+    - name: CODER_ACCESS_URL
+      value: "https://coder.${SHARINGIO_PAIR_BASE_DNS_NAME}"
+    - name: CODER_PG_CONNECTION_URL
+      value: "postgres://coder:coder@postgres-postgresql.coder.svc.cluster.local:5432/coder?sslmode=disable"
+  # This env variable controls whether to auto-import the "kubernetes"
+  # template on first startup. It will not work unless
+  # coder.serviceAccount.workspacePerms is true.
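+  # For example, enabling that chart value would look like this (sketch,
+  # shown only as a comment since this block is the `coder:` section):
+  #   serviceAccount:
+  #     workspacePerms: true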
+ - name: CODER_TEMPLATE_AUTOIMPORT + value: "kubernetes" + - name: CODER_VERBOSE + value: "true" + - name: CODER_AUDIT_LOGGING + value: "false" + - name: CODER_TELEMETRY + value: "false" + - name: CODER_TELEMETRY_TRACE + value: "false" + - name: CODER_WILDCARD_ACCESS_URL + value: "*.coder.${SHARINGIO_PAIR_BASE_DNS_NAME}" + ingress: + enable: true + className: $SHARINGIO_PAIR_INSTANCE_INGRESS_CLASS_NAME + host: "coder.${SHARINGIO_PAIR_BASE_DNS_NAME}" + wildcardHost: "*.${SHARINGIO_PAIR_BASE_DNS_NAME}" + tls: + enable: true + secretName: letsencrypt-prod + wildcardSecretName: "letsencrypt-prod" diff --git a/.sharing.io/vcluster/cluster.tf b/.sharing.io/vcluster/cluster.tf new file mode 100644 index 0000000000000..4efbd7f151aa2 --- /dev/null +++ b/.sharing.io/vcluster/cluster.tf @@ -0,0 +1,89 @@ +resource "kubernetes_namespace" "work-namespace" { + metadata { + annotations = { + name = "ii-annotation" + } + + labels = { + cert-manager-tls = "sync" + } + + name = var.namespace + } +} +resource "kubernetes_manifest" "cluster_vclusters_vcluster1" { + manifest = { + "apiVersion" = "cluster.x-k8s.io/v1beta1" + "kind" = "Cluster" + "metadata" = { + "name" = "vcluster1" + "namespace" = var.namespace + } + "spec" = { + "controlPlaneRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "VCluster" + "name" = "vcluster1" + } + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "VCluster" + "name" = "vcluster1" + } + } + } +} + +resource "kubernetes_manifest" "vcluster_vclusters_vcluster1" { + provisioner "local-exec" { + command = "kubectl wait --for=condition=Ready --timeout=30s -n ${var.namespace} cluster vcluster1" + } + provisioner "local-exec" { + command = "kubectl get secrets -n ${var.namespace} vcluster1-kubeconfig -o jsonpath={.data.value} | base64 -d > kubeconfig" + } + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "VCluster" + "metadata" = { + "name" = "vcluster1" + "namespace" = var.namespace + } + "spec" = { + "controlPlaneEndpoint" = { + "host" = "" + "port" = 0 + } + "helmRelease" = { + "chart" = { + "name" = null + "repo" = null + "version" = null + } + "values" = <<-EOT + service: + type: NodePort + syncer: + extraArgs: + - --tls-san=${var.tls-san} + EOT + } + "kubernetesVersion" = var.k8s-version + } + } +} + +# This is generated from the vcluster... 
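+# One hedged workaround (sketch; assumes the hashicorp/time provider) is to
+# gate the read on a fixed delay, giving CAPI time to write the secret:
+# resource "time_sleep" "wait_for_kubeconfig" {
+#   depends_on      = [kubernetes_manifest.vcluster_vclusters_vcluster1]
+#   create_duration = "60s"
+# }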
+# Need a proper way for it to wait before running, so that the secret exists
+# (the sleep above would only be a stopgap).
+data "kubernetes_resource" "kubeconfig" {
+  api_version = "v1"
+  kind        = "Secret"
+  depends_on = [
+    kubernetes_manifest.vcluster_vclusters_vcluster1
+  ]
+  metadata {
+    name      = "vcluster1-kubeconfig"
+    namespace = var.namespace
+  }
+}
+
+# We'll need to use the kubeconfig from above to provision the coder/pair environment
diff --git a/.sharing.io/vcluster/input.tf b/.sharing.io/vcluster/input.tf
new file mode 100644
index 0000000000000..caae793f39011
--- /dev/null
+++ b/.sharing.io/vcluster/input.tf
@@ -0,0 +1,17 @@
+variable "namespace" {
+  description = "namespace that will contain the workspace"
+  type        = string
+  default     = "coder-ws"
+}
+
+variable "k8s-version" {
+  description = "version of Kubernetes to deploy as a cluster"
+  type        = string
+  default     = "1.23.4"
+}
+
+variable "tls-san" {
+  description = "extra arg passed to the vcluster syncer helm chart as --tls-san=X"
+  type        = string
+  default     = "sanskar.pair.sharing.io"
+}
diff --git a/.sharing.io/vcluster/output.tf b/.sharing.io/vcluster/output.tf
new file mode 100644
index 0000000000000..df82962fa33c6
--- /dev/null
+++ b/.sharing.io/vcluster/output.tf
@@ -0,0 +1,3 @@
+# output "kubeconfig" {
+#   value = base64decode(data.kubernetes_resource.kubeconfig.object.data.value)
+# }
diff --git a/examples/templates/kubevirt-talos/README.org b/examples/templates/kubevirt-talos/README.org
new file mode 100644
index 0000000000000..cdaed46778e3d
--- /dev/null
+++ b/examples/templates/kubevirt-talos/README.org
@@ -0,0 +1,225 @@
+#+title: Kubevirt-talos
+
+* Initialise providers
+#+begin_src tmate
+clusterctl init -b talos -c talos
+#+end_src
+
+* Install CDI support for KubeVirt
+#+begin_src shell
+export TAG=$(curl -s -w %{redirect_url} https://github.com/kubevirt/containerized-data-importer/releases/latest)
+export VERSION=${TAG##*/}
+kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-operator.yaml
+kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-cr.yaml
+#+end_src
+
+#+RESULTS:
+#+begin_example
+namespace/cdi created
+customresourcedefinition.apiextensions.k8s.io/cdis.cdi.kubevirt.io created
+clusterrole.rbac.authorization.k8s.io/cdi-operator-cluster created
+clusterrolebinding.rbac.authorization.k8s.io/cdi-operator created
+serviceaccount/cdi-operator created
+role.rbac.authorization.k8s.io/cdi-operator created
+rolebinding.rbac.authorization.k8s.io/cdi-operator created
+deployment.apps/cdi-operator created
+configmap/cdi-operator-leader-election-helper created
+cdi.cdi.kubevirt.io/cdi created
+#+end_example
+
+* Coder Iteration Loop
+** Start Coder
+#+begin_src tmate :window coder :dir "../../.."
+
+cd ~/sharingio/coder
+rm -rf ~/.config/coderv2/ # delete database
+coder server --address=0.0.0.0:7080 --access-url=https://coder.$SHARINGIO_PAIR_BASE_DNS_NAME \
+  2>&1 | tee coder-server.log
+#+end_src
+#+begin_src shell
+coder login `cat ~/.config/coderv2/url` -u ii -p ii -e ii@ii.nz
+#+end_src

+#+RESULTS:
+#+begin_example
+> Your Coder deployment hasn't been set up!
+
+  Welcome to Coder, ii! You're authenticated.
+
+  Get started by creating a template: coder templates init
+#+end_example
+** coder url
+#+begin_src shell :dir "../../.."
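+# the access URL is printed once at startup; it's the 4th space-delimited
+# field of the "coder login https://..." hint line in the log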
+grep "coder login https://" coder-server.log | cut -d\ -f 4 +#+end_src + +#+RESULTS: +#+begin_example +https://coder.bobymcbobs.pair.sharing.io +#+end_example + + +* kubevirt workspace +** create template and cluster + +#+begin_src tmate :dir "../../.." :window kubevirt +cd ~/sharingio/coder +export CRI_PATH=/var/run/containerd/containerd.sock +export IMAGE_REPO=k8s.gcr.io +export NODE_VM_IMAGE_TEMPLATE=quay.io/capk/ubuntu-2004-container-disk:v1.22.0 +coder template create kubevirt-talos \ + -d examples/templates/kubevirt-talos \ + --yes +coder create kv1 --template kubevirt-talos +#+end_src + +** update template and new cluster + +#+begin_src tmate :dir "../../.." :window kubevirt +export WORKSPACE=kv1 +coder template push kubevirt-talos -d examples/templates/kubevirt-talos --yes +coder create $WORKSPACE --template kubevirt-talos --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +#+end_src + +** grab new cluster kubeconfig + +#+begin_src tmate :dir "../../.." :window kubectl +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get ns +#+end_src + +** inner cluster +#+begin_src shell +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get all -A +#+end_src + +#+RESULTS: +#+begin_example +NAMESPACE NAME READY STATUS RESTARTS AGE +default pod/code-server-0 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-mwwff 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-ppw92 0/1 Pending 0 81s +kube-system pod/etcd-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-apiserver-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-controller-manager-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-proxy-48s9l 1/1 Running 0 81s +kube-system pod/kube-scheduler-kv1-97525 1/1 Running 0 90s + +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default service/kubernetes ClusterIP 10.95.0.1 443/TCP 97s +kube-system service/kube-dns ClusterIP 10.95.0.10 53/UDP,53/TCP,9153/TCP 96s + +NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 96s + +NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE +kube-system deployment.apps/coredns 0/2 2 0 96s + +NAMESPACE NAME DESIRED CURRENT READY AGE +kube-system replicaset.apps/coredns-749558f7dd 2 2 0 82s + +NAMESPACE NAME READY AGE +default statefulset.apps/code-server 0/1 88s +#+end_example + +** cni not yet working +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl describe nodes | grep -B6 KubeletNotReady +#+end_src + +#+RESULTS: +#+begin_example +Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasNoDiskPressure kubelet has no disk pressure + 
PIDPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientPID kubelet has sufficient PID available + Ready False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized +#+end_example + +** list all relevant resources +#+begin_src shell +kubectl get $(kubectl api-resources | grep -E 'x-k8s|sidero' | awk '{print $1}' | xargs | tr ' ' ','),secrets,configmaps,pods,svc,pvc,vmis -o wide -n kv10 +#+end_src + +#+RESULTS: +#+begin_example +NAME AGE +clusterresourceset.addons.cluster.x-k8s.io/kv10 25s + +NAME AGE +talosconfig.bootstrap.cluster.x-k8s.io/kv10-nz842 5s + +NAME AGE +talosconfigtemplate.bootstrap.cluster.x-k8s.io/kv10 27s + +NAME PHASE AGE VERSION +cluster.cluster.x-k8s.io/kv10 Provisioned 24s + +NAME CLUSTER DESIRED REPLICAS READY UPDATED UNAVAILABLE PHASE AGE VERSION +machinedeployment.cluster.x-k8s.io/kv10 kv10 0 Running 23s v1.23.5 + +NAME CLUSTER NODENAME PROVIDERID PHASE AGE VERSION +machine.cluster.x-k8s.io/kv10-mknmr kv10 Provisioning 5s v1.25.2 + +NAME CLUSTER DESIRED REPLICAS READY AVAILABLE AGE VERSION +machineset.cluster.x-k8s.io/kv10-7c6b4779d9 kv10 0 22s v1.23.5 + +NAME READY INITIALIZED REPLICAS READY REPLICAS UNAVAILABLE REPLICAS +taloscontrolplane.controlplane.cluster.x-k8s.io/kv10 1 1 + +NAME AGE +kubevirtcluster.infrastructure.cluster.x-k8s.io/kv10 26s + +NAME AGE +kubevirtmachine.infrastructure.cluster.x-k8s.io/kv10-cp-9klxl 5s + +NAME AGE +kubevirtmachinetemplate.infrastructure.cluster.x-k8s.io/kv10 22s +kubevirtmachinetemplate.infrastructure.cluster.x-k8s.io/kv10-cp 22s + +NAME TYPE DATA AGE +secret/default-token-5f5r8 kubernetes.io/service-account-token 3 30s +secret/kv10-ca Opaque 2 5s +secret/kv10-kubeconfig cluster.x-k8s.io/secret 1 4s +secret/kv10-mknmr-bootstrap-data Opaque 1 5s +secret/kv10-mknmr-bootstrap-data-userdata cluster.x-k8s.io/secret 1 5s +secret/kv10-ssh-keys cluster.x-k8s.io/secret 2 24s +secret/kv10-talos Opaque 1 5s +secret/kv10-talosconfig Opaque 1 5s +secret/letsencrypt-prod kubernetes.io/tls 2 30s + +NAME DATA AGE +configmap/capi-init 1 24s +configmap/kube-root-ca.crt 1 30s + +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +pod/importer-kv10-cp-9klxl-vmdisk-dv 0/1 Pending 0 5s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR +service/kv10-lb ClusterIP 10.97.239.4 6443/TCP 24s cluster.x-k8s.io/cluster-name=kv10,cluster.x-k8s.io/role=control-plane + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE +persistentvolumeclaim/kv10-cp-9klxl-vmdisk-dv Pending local-path 5s Filesystem +#+end_example + +* Tear down + +#+begin_src tmate :window kubevirt +coder delete kv9 +#+end_src diff --git a/examples/templates/kubevirt-talos/cluster.tf b/examples/templates/kubevirt-talos/cluster.tf new file mode 100644 index 0000000000000..94a59cf317c7f --- /dev/null +++ b/examples/templates/kubevirt-talos/cluster.tf @@ -0,0 +1,512 @@ +terraform { + required_providers { + coder = { + source = "coder/coder" + version = "0.4.15" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.12.1" + } + } +} + +# https://www.terraform.io/language/providers/configuration#provider-configuration-1 +# > You can use expressions in the values of these configuration arguments, +# but can only reference values that are known before the configuration is applied. 
+# This means you can safely reference input variables, but not attributes
+# exported by resources (with an exception for resource arguments that
+# are specified directly in the configuration).
+#### no data.X :(
+# provider "kubernetes" {
+#   alias                  = "vcluster"
+#   host                   = yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["server"]
+#   client_certificate     = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-certificate-data"])
+#   client_key             = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-key-data"])
+#   cluster_ca_certificate = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["certificate-authority-data"])
+# }
+
+variable "base_domain" {
+  type    = string
+  default = "sanskar.pair.sharing.io"
+}
+
+data "coder_workspace" "me" {}
+
+resource "coder_agent" "main" {
+  os             = "linux"
+  arch           = "amd64"
+  startup_script = <<-EOT
+    #!/bin/bash
+    # make the kubectl we download below available on PATH
+    echo 'export PATH="$PATH:$HOME/bin"' >> $HOME/.bashrc
+    mkdir -p bin
+    curl -o bin/kubectl -L https://dl.k8s.io/v1.25.2/bin/linux/amd64/kubectl
+    chmod +x bin/*
+
+    # install and start code-server
+    curl -fsSL https://code-server.dev/install.sh | sh | tee code-server-install.log
+    code-server --auth none --port 13337 | tee code-server.log &
+  EOT
+}
+
+# code-server
+resource "coder_app" "code-server" {
+  agent_id      = coder_agent.main.id
+  name          = "code-server"
+  icon          = "/icon/code.svg"
+  url           = "http://localhost:13337?folder=/home/coder"
+  relative_path = true
+
+  healthcheck {
+    url       = "http://localhost:13337/healthz"
+    interval  = 3
+    threshold = 10
+  }
+}
+
+resource "kubernetes_namespace" "workspace" {
+  metadata {
+    name = data.coder_workspace.me.name
+    labels = {
+      cert-manager-tls = "sync"
+    }
+  }
+}
+
+resource "kubernetes_manifest" "cluster" {
+  manifest = {
+    "apiVersion" = "cluster.x-k8s.io/v1beta1"
+    "kind"       = "Cluster"
+    "metadata" = {
+      "name"      = data.coder_workspace.me.name
+      "namespace" = data.coder_workspace.me.name
+      "labels" = {
+        "cluster-name" = data.coder_workspace.me.name
+      }
+    }
+    "spec" = {
+      "controlPlaneRef" = {
+        "apiVersion" = "controlplane.cluster.x-k8s.io/v1beta1"
+        "kind"       = "TalosControlPlane"
+        "name"       = data.coder_workspace.me.name
+        "namespace"  = data.coder_workspace.me.name
+      }
+      "infrastructureRef" = {
+        "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1"
+        "kind"       = "KubevirtCluster"
+        "name"       = data.coder_workspace.me.name
+        "namespace"  = data.coder_workspace.me.name
+      }
+      "clusterNetwork" = {
+        "pods" = {
+          "cidrBlocks" = [
+            "192.168.0.0/16",
+          ]
+        }
+        "services" = {
+          "cidrBlocks" = [
+            "172.26.0.0/16",
+          ]
+        }
+      }
+    }
+  }
+}
+
+resource "kubernetes_manifest" "kvcluster" {
+  manifest = {
+    "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1"
+    "kind"       = "KubevirtCluster"
+    "metadata" = {
+      "name"      = data.coder_workspace.me.name
+      "namespace" = data.coder_workspace.me.name
+    }
+    "spec" = {
+      "controlPlaneServiceTemplate" = {
+        "spec" = {
+          "type" = "ClusterIP"
+        }
+      }
+    }
+  }
+}
+
+resource "kubernetes_manifest" "kubevirtmachinetemplate_control_plane" {
+  manifest = {
+    "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1"
+    "kind"       = "KubevirtMachineTemplate"
+    "metadata" = {
+      "name"      = "${data.coder_workspace.me.name}-cp"
+      "namespace" = data.coder_workspace.me.name
+    }
+    "spec" = {
+      "template" = {
+        "spec" = {
+          "virtualMachineTemplate" = {
+            "metadata" = {
+              "namespace" = data.coder_workspace.me.name
+            }
+            "spec" = {
+              "runStrategy" = "Always"
+              "dataVolumeTemplates" = [
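+                # Each entry becomes a CDI DataVolume: the Talos nocloud image
+                # is imported over HTTP into a PVC that backs the VM's scsi
+                # disk (hence the CDI operator install earlier in the README).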
+ { + "metadata" = { + "name" = "vmdisk-dv" + } + "spec" = { + "pvc" = { + "accessModes" = ["ReadWriteOnce"] + "resources" = { + "requests" = { + "storage" = "50Gi" + } + } + } + "source" = { + "http" = { + "url" = "https://github.com/siderolabs/talos/releases/download/v1.2.5/nocloud-amd64.raw.xz" + } + } + } + }, + ] + "template" = { + "spec" = { + "domain" = { + "cpu" = { + "cores" = 2 + } + "devices" = { + "disks" = [ + { + "disk" = { + "bus" = "scsi" + } + "name" = "vmdisk" + }, + ] + } + "memory" = { + "guest" = "4Gi" + } + } + "evictionStrategy" = "External" + "volumes" = [ + { + "dataVolume" = { + "name" = "vmdisk-dv" + } + "name" = "vmdisk" + }, + ] + } + } + } + } + } + } + } + } +} + +resource "kubernetes_manifest" "taloscontrolplane_talos_em_control_plane" { + manifest = { + "apiVersion" = "controlplane.cluster.x-k8s.io/v1alpha3" + "kind" = "TalosControlPlane" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "controlPlaneConfig" = { + "controlplane" = { + "generateType" = "controlplane" + "configPatches" = [ + { + "op" = "replace" + "path" = "/machine/install" + "value" = { + "bootloader" = true + "wipe" = false + "disk" = "/dev/sda" + "image" = "ghcr.io/siderolabs/installer:v1.2.5" + "extraKernelArgs" = ["console=ttyS0"] + } + }, + # { + # "op" = "add" + # "path" = "/machine/kubelet/extraArgs" + # "value" = { + # "cloud-provider" = "external" + # } + # }, + # { + # "op" = "add" + # "path" = "/cluster/apiServer/extraArgs" + # "value" = { + # "cloud-provider" = "external" + # } + # }, + # { + # "op" = "add" + # "path" = "/cluster/controllerManager/extraArgs" + # "value" = { + # "cloud-provider" = "external" + # } + # }, + { + "op" = "add" + "path" = "/cluster/allowSchedulingOnControlPlanes" + "value" = true + }, + ] + } + "init" = { + "configPatches" = [ + { + "op" = "replace" + "path" = "/machine/install" + "value" = { + "bootloader" = true + "wipe" = false + "disk" = "/dev/sda" + "image" = "ghcr.io/siderolabs/installer:v1.2.5" + "extraKernelArgs" = ["console=ttyS0"] + } + }, + # { + # "op" = "add" + # "path" = "/machine/kubelet/extraArgs" + # "value" = { + # "cloud-provider" = "external" + # } + # }, + # { + # "op" = "add" + # "path" = "/cluster/apiServer/extraArgs" + # "value" = { + # "cloud-provider" = "external" + # } + # }, + # { + # "op" = "add" + # "path" = "/cluster/controllerManager/extraArgs" + # "value" = { + # "cloud-provider" = "external" + # } + # }, + { + "op" = "add" + "path" = "/cluster/allowSchedulingOnControlPlanes" + "value" = true + }, + ] + "generateType" = "init" + } + } + "infrastructureTemplate" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "name" = "${data.coder_workspace.me.name}-cp" + } + "replicas" = 1 + "version" = "v1.25.2" + } + } +} + +resource "kubernetes_manifest" "kubevirtmachinetemplate_md_0" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "template" = { + "spec" = { + "virtualMachineTemplate" = { + "metadata" = { + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "runStrategy" = "Always" + "dataVolumeTemplates" = [ + { + "metadata" = { + "name" = "vmdisk-dv" + } + "spec" = { + "pvc" = { + "accessModes" = [ + "ReadWriteOnce" + ] + "resources" = { + "requests" = { + "storage" = "50Gi" + } + } + } + "source" = { + "http" 
= {
+                      "url" = "https://github.com/siderolabs/talos/releases/download/v1.2.5/nocloud-amd64.raw.xz"
+                    }
+                  }
+                }
+              },
+            ]
+            "template" = {
+              "spec" = {
+                "domain" = {
+                  "cpu" = {
+                    "cores" = 2
+                  }
+                  "devices" = {
+                    "disks" = [
+                      {
+                        "disk" = {
+                          "bus" = "scsi"
+                        }
+                        "name" = "vmdisk"
+                      },
+                    ]
+                  }
+                  "memory" = {
+                    "guest" = "4Gi"
+                  }
+                }
+                "evictionStrategy" = "External"
+                "volumes" = [
+                  {
+                    "dataVolume" = {
+                      "name" = "vmdisk-dv"
+                    }
+                    "name" = "vmdisk"
+                  },
+                ]
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+resource "kubernetes_manifest" "talosconfigtemplate_talos_em_worker_a" {
+  manifest = {
+    "apiVersion" = "bootstrap.cluster.x-k8s.io/v1alpha3"
+    "kind"       = "TalosConfigTemplate"
+    "metadata" = {
+      "labels" = {
+        "cluster.x-k8s.io/cluster-name" = data.coder_workspace.me.name
+      }
+      "name"      = data.coder_workspace.me.name
+      "namespace" = data.coder_workspace.me.name
+    }
+    "spec" = {
+      "template" = {
+        "spec" = {
+          "generateType" = "join"
+          "talosVersion" = "v1.2.5"
+        }
+      }
+    }
+  }
+}
+
+resource "kubernetes_manifest" "machinedeployment_md_0" {
+  manifest = {
+    "apiVersion" = "cluster.x-k8s.io/v1beta1"
+    "kind"       = "MachineDeployment"
+    "metadata" = {
+      "name"      = data.coder_workspace.me.name
+      "namespace" = data.coder_workspace.me.name
+    }
+    "spec" = {
+      "clusterName" = data.coder_workspace.me.name
+      "replicas"    = 0
+      "selector" = {
+        "matchLabels" = null
+      }
+      "template" = {
+        "spec" = {
+          "bootstrap" = {
+            "configRef" = {
+              "apiVersion" = "bootstrap.cluster.x-k8s.io/v1beta1"
+              "kind"       = "TalosConfigTemplate"
+              "name"       = data.coder_workspace.me.name
+              "namespace"  = data.coder_workspace.me.name
+            }
+          }
+          "clusterName" = data.coder_workspace.me.name
+          "infrastructureRef" = {
+            "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1"
+            "kind"       = "KubevirtMachineTemplate"
+            "name"       = data.coder_workspace.me.name
+            "namespace"  = data.coder_workspace.me.name
+          }
+          "version" = "v1.23.5"
+        }
+      }
+    }
+  }
+}
+
+resource "kubernetes_manifest" "configmap_capi_init" {
+  manifest = {
+    "kind" = "ConfigMap"
+    "metadata" = {
+      "name"      = "capi-init"
+      "namespace" = data.coder_workspace.me.name
+    }
+    "apiVersion" = "v1"
+    "data" = {
+      "cool.yaml" = templatefile("cool.template.yaml",
+        {
+          coder_command = jsonencode(["sh", "-c", coder_agent.main.init_script]),
+          coder_token   = coder_agent.main.token
+          instance_name = data.coder_workspace.me.name
+      })
+    }
+  }
+}
+
+resource "kubernetes_manifest" "clusterresourceset_capi_init" {
+  manifest = {
+    "apiVersion" = "addons.cluster.x-k8s.io/v1beta1"
+    "kind"       = "ClusterResourceSet"
+    "metadata" = {
+      "name"      = data.coder_workspace.me.name
+      "namespace" = data.coder_workspace.me.name
+    }
+    "spec" = {
+      "clusterSelector" = {
+        "matchLabels" = {
+          "cluster-name" = data.coder_workspace.me.name
+        }
+      }
+      "resources" = [
+        {
+          "kind" = "ConfigMap"
+          "name" = "capi-init"
+        },
+        # {
+        #   "kind" = "Secret"
+        #   "name" = "vcluster-kubeconfig"
+        # },
+      ]
+      "strategy" = "ApplyOnce"
+    }
+  }
+}
diff --git a/examples/templates/kubevirt-talos/cool.template.yaml b/examples/templates/kubevirt-talos/cool.template.yaml
new file mode 100644
index 0000000000000..e4693053eec44
--- /dev/null
+++ b/examples/templates/kubevirt-talos/cool.template.yaml
@@ -0,0 +1,47 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: coder
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: coder
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+  - kind: ServiceAccount
+    name: coder
+    namespace: default
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: code-server
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: code-server
+  serviceName: code-server
+  template:
+    metadata:
+      labels:
+        app: code-server
+    spec:
+      serviceAccountName: coder
+      securityContext:
+        runAsUser: 1000
+        fsGroup: 1000
+      containers:
+        - name: code-server
+          image: codercom/enterprise-base:ubuntu
+          command: ${coder_command}
+          securityContext:
+            runAsUser: 1000
+          env:
+            - name: CODER_AGENT_TOKEN
+              value: ${coder_token}
diff --git a/examples/templates/kubevirt-talos/manifest/kvtalos.yaml b/examples/templates/kubevirt-talos/manifest/kvtalos.yaml
new file mode 100644
index 0000000000000..a609441e8b5ad
--- /dev/null
+++ b/examples/templates/kubevirt-talos/manifest/kvtalos.yaml
@@ -0,0 +1,213 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  labels:
+    cluster-name: talos
+  name: talos
+spec:
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    kind: TalosControlPlane
+    name: talos
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: KubevirtCluster
+    name: talos
+  clusterNetwork:
+    pods:
+      cidrBlocks:
+        - 192.168.0.0/16
+    services:
+      cidrBlocks:
+        - 172.26.0.0/16
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: KubevirtCluster
+metadata:
+  name: talos
+spec:
+  controlPlaneServiceTemplate:
+    spec:
+      type: ClusterIP
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+  name: talos
+spec:
+  clusterName: talos
+  replicas: 0
+  selector:
+    matchLabels: null
+  template:
+    spec:
+      clusterName: talos
+      version: v1.23.5
+      bootstrap:
+        configRef:
+          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+          kind: TalosConfigTemplate
+          name: talos
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+        kind: KubevirtMachineTemplate
+        name: talos
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+kind: TalosControlPlane
+metadata:
+  name: talos
+spec:
+  replicas: 1
+  version: "v1.25.2"
+  infrastructureTemplate:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+    kind: KubevirtMachineTemplate
+    name: talos-cp
+  controlPlaneConfig:
+    init:
+      generateType: init
+      configPatches:
+        - path: /machine/install
+          op: replace
+          value:
+            bootloader: true
+            disk: /dev/vda
+            image: ghcr.io/siderolabs/installer:v1.2.5
+            wipe: false
+            extraKernelArgs:
+              - console=ttyS0
+        - path: /machine/kubelet/extraArgs
+          op: add
+          value:
+            cloud-provider: external
+        - path: /cluster/apiServer/extraArgs
+          op: add
+          value:
+            cloud-provider: external
+        - path: /cluster/controllerManager/extraArgs
+          op: add
+          value:
+            cloud-provider: external
+        - path: /cluster/allowSchedulingOnMasters
+          op: add
+          value: true
+    controlplane:
+      generateType: controlplane
+      configPatches:
+        - path: /machine/install
+          op: replace
+          value:
+            bootloader: true
+            disk: /dev/vda
+            image: ghcr.io/siderolabs/installer:v1.2.5
+            wipe: false
+            extraKernelArgs:
+              - console=ttyS0
+        - path: /machine/kubelet/extraArgs
+          op: add
+          value:
+            cloud-provider: external
+        - path: /cluster/apiServer/extraArgs
+          op: add
+          value:
+            cloud-provider: external
+        - path: /cluster/controllerManager/extraArgs
+          op: add
+          value:
+            cloud-provider: external
+        - path: /cluster/allowSchedulingOnMasters
+          op: add
+          value: true
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: KubevirtMachineTemplate
+metadata:
+  name: talos-cp
+spec:
+  template:
+    spec:
+      virtualMachineTemplate:
+        spec:
+          runStrategy: Always
+          template:
+            spec:
+              evictionStrategy: External
+              volumes:
+                - name: vmdisk
dataVolume: + name: vmdisk-dv + domain: + cpu: + cores: 2 + devices: + disks: + - name: vmdisk + disk: + bus: scsi + memory: + guest: "4Gi" + dataVolumeTemplates: + - metadata: + name: vmdisk-dv + spec: + pvc: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "15Gi" + source: + http: + url: "https://github.com/siderolabs/talos/releases/download/v1.2.5/nocloud-amd64.raw.xz" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: KubevirtMachineTemplate +metadata: + name: talos +spec: + template: + spec: + virtualMachineTemplate: + spec: + runStrategy: Always + template: + spec: + evictionStrategy: External + volumes: + - name: vmdisk + dataVolume: + name: vmdisk-dv + domain: + cpu: + cores: 2 + devices: + disks: + - name: vmdisk + disk: + bus: scsi + memory: + guest: "4Gi" + dataVolumeTemplates: + - metadata: + name: vmdisk-dv + spec: + pvc: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "15Gi" + source: + http: + url: "https://github.com/siderolabs/talos/releases/download/v1.2.5/nocloud-amd64.raw.xz" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: TalosConfigTemplate +metadata: + name: talos +spec: + template: + spec: + generateType: join + talosVersion: v1.2.5 diff --git a/examples/templates/kubevirt-talos/role+binding.yaml b/examples/templates/kubevirt-talos/role+binding.yaml new file mode 100644 index 0000000000000..89c0594b09f41 --- /dev/null +++ b/examples/templates/kubevirt-talos/role+binding.yaml @@ -0,0 +1,94 @@ +# Requires: +# clusterctl init --infrastructure kubevirt +# clusterctl init --bootstrap talos --control-plane talos +# Some are at Cluster Level, some are at the coder namespace level +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: coder-clusterapi-perms +rules: + - apiGroups: + - "apiextensions.k8s.io" + resources: + - "customresourcedefinitions" + verbs: + - "list" + - "get" + - apiGroups: + - "" + - "cluster.x-k8s.io" + - "bootstrap.cluster.x-k8s.io" + - "controlplane.cluster.x-k8s.io" + - "infrastructure.cluster.x-k8s.io" + - "addons.cluster.x-k8s.io" + resources: + - "namespaces" + - "configmaps" + - "clusters" + - "machinedeployments" + - "talosconfigtemplates" + - "taloscontrolplanes" + - "kubevirtclusters" + - "kubevirtmachinetemplates" + - "clusterresourcesets" + verbs: + - "list" + - "get" + - "patch" + - "create" + - "delete" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: coder-clusterapi + namespace: coder +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coder-clusterapi-perms +subjects: + - kind: ServiceAccount + name: coder + namespace: coder +# --- +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: Role +# metadata: +# name: coder-clusterapi-perms +# namespace: coder +# rules: +# - apiGroups: +# - "" +# resources: +# - "configmaps" +# verbs: +# - "list" +# - "get" +# --- +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: RoleBinding +# metadata: +# name: coder-clusterapi +# namespace: coder +# roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: Role +# name: coder-clusterapi-perms +# subjects: +# - kind: ServiceAccount +# name: coder +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: coder-clusterapi-cluster + namespace: coder +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coder-clusterapi-perms +subjects: + - kind: ServiceAccount + name: coder + namespace: coder diff --git a/examples/templates/kubevirt/Readme.md 
b/examples/templates/kubevirt/Readme.md new file mode 100644 index 0000000000000..596ff83c38e4b --- /dev/null +++ b/examples/templates/kubevirt/Readme.md @@ -0,0 +1,163 @@ +- [Coder Iteration Loop](#org6df9caf) + - [Start Coder](#org8a0efd5) + - [coder url](#org11688e9) +- [kubevirt workspace](#org369d0e6) + - [create template and cluster](#org59bbab0) + - [update template and new cluster](#org939dfe1) + - [grab new cluster kubeconfig](#org0e8b078) + - [inner cluster](#orge2b4dcd) + - [cni not yet working](#org204e816) + + + + + +# Coder Iteration Loop + + + + +## Start Coder + +```tmate + +cd ~/sharingio/coder +rm -rf ~/.config/coderv2/ # delete database +coder server --address=0.0.0.0:7080 --access-url=http://localhost:7080 --tunnel \ + 2>&1 | tee coder-server.log +``` + +```shell +coder login `cat ~/.config/coderv2/url` -u ii -p ii -e ii@ii.nz +``` + +``` +> Your Coder deployment hasn't been set up! + + Welcome to Coder, ii! You're authenticated. + + Get started by creating a template: coder templates init +``` + + + + +## coder url + +```shell +grep "coder login https://" coder-server.log | cut -d\ -f 4 +``` + +``` +https://fcca6c2cae4534be6d63b1e72f9a5371.pit-1.try.coder.app +``` + + + + +# kubevirt workspace + + + + +## create template and cluster + +```tmate +cd ~/sharingio/coder +export CRI_PATH=/var/run/containerd/containerd.sock +export IMAGE_REPO=k8s.gcr.io +export NODE_VM_IMAGE_TEMPLATE=quay.io/capk/ubuntu-2004-container-disk:v1.22.0 +coder template create kubevirt -d examples/templates/kubevirt --yes --parameter-file examples/templates/kubevirt/kubevirt.param.yaml +coder create kv1 --template kubevirt --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +``` + + + + +## update template and new cluster + +```tmate +export WORKSPACE=kv1 +coder template push kubevirt -d examples/templates/kubevirt --yes --parameter-file examples/templates/kubevirt/kubevirt.param.yaml +coder create $WORKSPACE --template kubevirt --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +``` + + + + +## grab new cluster kubeconfig + +```tmate +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get ns +``` + + + + +## inner cluster + +```shell +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get all -A +``` + +``` +NAMESPACE NAME READY STATUS RESTARTS AGE +default pod/code-server-0 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-mwwff 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-ppw92 0/1 Pending 0 81s +kube-system pod/etcd-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-apiserver-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-controller-manager-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-proxy-48s9l 1/1 Running 0 81s +kube-system pod/kube-scheduler-kv1-97525 1/1 Running 0 90s + +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default service/kubernetes ClusterIP 10.95.0.1 443/TCP 97s +kube-system service/kube-dns ClusterIP 10.95.0.10 53/UDP,53/TCP,9153/TCP 96s + +NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 96s + +NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE +kube-system 
deployment.apps/coredns 0/2 2 0 96s + +NAMESPACE NAME DESIRED CURRENT READY AGE +kube-system replicaset.apps/coredns-749558f7dd 2 2 0 82s + +NAMESPACE NAME READY AGE +default statefulset.apps/code-server 0/1 88s +``` + + + + +## cni not yet working + +```shell +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl describe nodes | grep -B6 KubeletNotReady +``` + +``` +Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasNoDiskPressure kubelet has no disk pressure + PIDPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientPID kubelet has sufficient PID available + Ready False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized +``` diff --git a/examples/templates/kubevirt/Readme.org b/examples/templates/kubevirt/Readme.org new file mode 100644 index 0000000000000..64b895930afe8 --- /dev/null +++ b/examples/templates/kubevirt/Readme.org @@ -0,0 +1,123 @@ +#+title: Readme +* Coder Iteration Loop +** Start Coder +#+begin_src tmate :window coder :dir "../../.." + +cd ~/sharingio/coder +rm -rf ~/.config/coderv2/ # delete database +coder server --address=0.0.0.0:7080 --access-url=http://localhost:7080 --tunnel \ + 2>&1 | tee coder-server.log +#+end_src +#+begin_src shell +coder login `cat ~/.config/coderv2/url` -u ii -p ii -e ii@ii.nz +#+end_src + +#+RESULTS: +#+begin_example +> Your Coder deployment hasn't been set up! + + Welcome to Coder, ii! You're authenticated. + + Get started by creating a template: coder templates init +#+end_example +** coder url +#+begin_src shell :dir "../../.." +grep "coder login https://" coder-server.log | cut -d\ -f 4 +#+end_src + +#+RESULTS: +#+begin_example +https://fcca6c2cae4534be6d63b1e72f9a5371.pit-1.try.coder.app +#+end_example + + +* kubevirt workspace +** create template and cluster + +#+begin_src tmate :dir "../../.." :window kubevirt +cd ~/sharingio/coder +export CRI_PATH=/var/run/containerd/containerd.sock +export IMAGE_REPO=k8s.gcr.io +export NODE_VM_IMAGE_TEMPLATE=quay.io/capk/ubuntu-2004-container-disk:v1.22.0 +coder template create kubevirt -d examples/templates/kubevirt --yes --parameter-file examples/templates/kubevirt/kubevirt.param.yaml +coder create kv1 --template kubevirt --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +#+end_src + +** update template and new cluster + +#+begin_src tmate :dir "../../.." :window kubevirt +export WORKSPACE=kv1 +coder template push kubevirt -d examples/templates/kubevirt --yes --parameter-file examples/templates/kubevirt/kubevirt.param.yaml +coder create $WORKSPACE --template kubevirt --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +#+end_src + +** grab new cluster kubeconfig + +#+begin_src tmate :dir "../../.." 
:window kubectl +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get ns +#+end_src + +** inner cluster +#+begin_src shell +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get all -A +#+end_src + +#+RESULTS: +#+begin_example +NAMESPACE NAME READY STATUS RESTARTS AGE +default pod/code-server-0 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-mwwff 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-ppw92 0/1 Pending 0 81s +kube-system pod/etcd-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-apiserver-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-controller-manager-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-proxy-48s9l 1/1 Running 0 81s +kube-system pod/kube-scheduler-kv1-97525 1/1 Running 0 90s + +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default service/kubernetes ClusterIP 10.95.0.1 443/TCP 97s +kube-system service/kube-dns ClusterIP 10.95.0.10 53/UDP,53/TCP,9153/TCP 96s + +NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 96s + +NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE +kube-system deployment.apps/coredns 0/2 2 0 96s + +NAMESPACE NAME DESIRED CURRENT READY AGE +kube-system replicaset.apps/coredns-749558f7dd 2 2 0 82s + +NAMESPACE NAME READY AGE +default statefulset.apps/code-server 0/1 88s +#+end_example + +** cni not yet working +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl describe nodes | grep -B6 KubeletNotReady +#+end_src + +#+RESULTS: +#+begin_example +Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasNoDiskPressure kubelet has no disk pressure + PIDPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientPID kubelet has sufficient PID available + Ready False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized +#+end_example diff --git a/examples/templates/kubevirt/cluster.tf b/examples/templates/kubevirt/cluster.tf new file mode 100644 index 0000000000000..aee76c22e625e --- /dev/null +++ b/examples/templates/kubevirt/cluster.tf @@ -0,0 +1,534 @@ +terraform { + required_providers { + coder = { + source = "coder/coder" + version = "0.4.15" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.12.1" + } + } +} + +# https://www.terraform.io/language/providers/configuration#provider-configuration-1 +# > You can use expressions in the values of these configuration arguments, +# but can only reference 
values that are known before the configuration is applied.
+# This means you can safely reference input variables, but not attributes
+# exported by resources (with an exception for resource arguments that
+# are specified directly in the configuration).
+#### no data.X :(
+# provider "kubernetes" {
+#   alias                  = "vcluster"
+#   host                   = yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["server"]
+#   client_certificate     = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-certificate-data"])
+#   client_key             = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-key-data"])
+#   cluster_ca_certificate = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["certificate-authority-data"])
+# }
+
+variable "base_domain" {
+  type    = string
+  default = "sanskar.pair.sharing.io"
+}
+
+data "coder_workspace" "me" {}
+
+resource "coder_agent" "main" {
+  os             = "linux"
+  arch           = "amd64"
+  startup_script = <<-EOT
+    #!/bin/bash
+    # make the kubectl we download below available on PATH
+    echo 'export PATH="$PATH:$HOME/bin"' >> $HOME/.bashrc
+    mkdir -p bin
+    curl -o bin/kubectl -L https://dl.k8s.io/v1.25.2/bin/linux/amd64/kubectl
+    chmod +x bin/*
+
+    # install and start code-server
+    curl -fsSL https://code-server.dev/install.sh | sh | tee code-server-install.log
+    code-server --auth none --port 13337 | tee code-server.log &
+  EOT
+}
+
+# code-server
+resource "coder_app" "code-server" {
+  agent_id      = coder_agent.main.id
+  name          = "code-server"
+  icon          = "/icon/code.svg"
+  url           = "http://localhost:13337?folder=/home/coder"
+  relative_path = true
+
+  healthcheck {
+    url       = "http://localhost:13337/healthz"
+    interval  = 3
+    threshold = 10
+  }
+}
+
+resource "kubernetes_namespace" "workspace" {
+  metadata {
+    name = data.coder_workspace.me.name
+    labels = {
+      cert-manager-tls = "sync"
+    }
+  }
+}
+
+resource "kubernetes_manifest" "cluster" {
+  manifest = {
+    "apiVersion" = "cluster.x-k8s.io/v1beta1"
+    "kind"       = "Cluster"
+    "metadata" = {
+      "name"      = data.coder_workspace.me.name
+      "namespace" = data.coder_workspace.me.name
+      "labels" = {
+        "cluster-name" = data.coder_workspace.me.name
+      }
+    }
+    "spec" = {
+      "controlPlaneRef" = {
+        "apiVersion" = "controlplane.cluster.x-k8s.io/v1beta1"
+        "kind"       = "KubeadmControlPlane"
+        "name"       = data.coder_workspace.me.name
+        "namespace"  = data.coder_workspace.me.name
+      }
+      "infrastructureRef" = {
+        "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1"
+        "kind"       = "KubevirtCluster"
+        "name"       = data.coder_workspace.me.name
+        "namespace"  = data.coder_workspace.me.name
+      }
+      "clusterNetwork" = {
+        "pods" = {
+          "cidrBlocks" = [
+            "10.243.0.0/16",
+          ]
+        }
+        "services" = {
+          "cidrBlocks" = [
+            "10.95.0.0/16",
+          ]
+        }
+      }
+    }
+  }
+}
+
+resource "kubernetes_manifest" "kvcluster" {
+  manifest = {
+    "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1"
+    "kind"       = "KubevirtCluster"
+    "metadata" = {
+      "name"      = data.coder_workspace.me.name
+      "namespace" = data.coder_workspace.me.name
+    }
+    "spec" = {
+      "controlPlaneServiceTemplate" = {
+        "spec" = {
+          "type" = "ClusterIP"
+        }
+      }
+      # "controlPlaneEndpoint" = {
+      #   "host" = ""
+      #   "port" = 0
+      # }
+      # "kubernetesVersion" = "1.23.4"
+      # "helmRelease" = {
+      #   "chart" = {
+      #     "name"    = null
+      #     "repo"    = null
+      #     "version" = null
+      #   }
+      #   "values" = <<-EOT
+      #     service:
+      #       type: NodePort
+      #     securityContext:
+      #       runAsUser: 12345
+      #       runAsNonRoot: true
+      #       privileged: false
+      #     syncer:
+      #       extraArgs:
+      #         - --tls-san="${data.coder_workspace.me.name}.${var.base_domain}"
+      #         - --tls-san="${data.coder_workspace.me.name}.${data.coder_workspace.me.name}.svc"
+      #   EOT
+      # }
+    }
+  }
+}
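+# The machine templates below boot the capk Ubuntu container disk; the image
+# matches the NODE_VM_IMAGE_TEMPLATE exported in the Readme (2 vCPU / 4Gi
+# guests, with eviction handled externally).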
--tls-san="${data.coder_workspace.me.name}.${data.coder_workspace.me.name}.svc" + # EOT + # } + } + } +} + +resource "kubernetes_manifest" "kubevirtmachinetemplate_control_plane" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "template" = { + "spec" = { + "virtualMachineTemplate" = { + "metadata" = { + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "runStrategy" = "Always" + "template" = { + "spec" = { + "domain" = { + "cpu" = { + "cores" = 2 + } + "devices" = { + "disks" = [ + { + "disk" = { + "bus" = "virtio" + } + "name" = "containervolume" + }, + ] + } + "memory" = { + "guest" = "4Gi" + } + } + "evictionStrategy" = "External" + "volumes" = [ + { + "containerDisk" = { + "image" = "quay.io/capk/ubuntu-2004-container-disk:v1.22.0" + } + "name" = "containervolume" + }, + ] + } + } + } + } + } + } + } + } +} + +resource "kubernetes_manifest" "kubeadmcontrolplane_control_plane" { + manifest = { + "apiVersion" = "controlplane.cluster.x-k8s.io/v1beta1" + "kind" = "KubeadmControlPlane" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "kubeadmConfigSpec" = { + "clusterConfiguration" = { + "imageRepository" = "k8s.gcr.io" + "networking" = { + "dnsDomain" = "kv1.default.local" + "podSubnet" = "10.243.0.0/16" + "serviceSubnet" = "10.95.0.0/16" + } + } + "initConfiguration" = { + "nodeRegistration" = { + "criSocket" = "/var/run/containerd/containerd.sock" + } + } + "joinConfiguration" = { + "nodeRegistration" = { + "criSocket" = "{CRI_PATH}" + } + } + } + "machineTemplate" = { + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + } + "replicas" = 1 + "version" = "v1.23.5" + } + } +} + +resource "kubernetes_manifest" "kubevirtmachinetemplate_md_0" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "template" = { + "spec" = { + "virtualMachineTemplate" = { + "metadata" = { + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "runStrategy" = "Always" + "template" = { + "spec" = { + "domain" = { + "cpu" = { + "cores" = 2 + } + "devices" = { + "disks" = [ + { + "disk" = { + "bus" = "virtio" + } + "name" = "containervolume" + }, + ] + } + "memory" = { + "guest" = "4Gi" + } + } + "evictionStrategy" = "External" + "volumes" = [ + { + "containerDisk" = { + "image" = "quay.io/capk/ubuntu-2004-container-disk:v1.22.0" + } + "name" = "containervolume" + }, + ] + } + } + } + } + } + } + } + } +} + +resource "kubernetes_manifest" "kubeadmconfigtemplate_md_0" { + manifest = { + "apiVersion" = "bootstrap.cluster.x-k8s.io/v1beta1" + "kind" = "KubeadmConfigTemplate" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + # "spec" = { + # "template" = { + # "spec" = { + # "joinConfiguration" = { + # "nodeRegistration" = { + # #"kubeletExtraArgs" = {} + # "kubeletExtraArgs" = null + # } + # } + # } + # } + # } + } +} + +resource "kubernetes_manifest" "machinedeployment_md_0" { + manifest = { + "apiVersion" = "cluster.x-k8s.io/v1beta1" + "kind" = 
"MachineDeployment" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "clusterName" = data.coder_workspace.me.name + "replicas" = 0 + "selector" = { + "matchLabels" = null + } + "template" = { + "spec" = { + "bootstrap" = { + "configRef" = { + "apiVersion" = "bootstrap.cluster.x-k8s.io/v1beta1" + "kind" = "KubeadmConfigTemplate" + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + } + "clusterName" = "kv1" + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "version" = "v1.23.5" + } + } + } + } +} + +resource "kubernetes_manifest" "configmap_capi_init" { + manifest = { + "kind" = "ConfigMap" + "metadata" = { + "name" = "capi-init" + "namespace" = data.coder_workspace.me.name + } + "apiVersion" = "v1" + "data" = { + "cool.yaml" = templatefile("cool.template.yaml", + { + coder_command = jsonencode(["sh", "-c", coder_agent.main.init_script]), + coder_token = coder_agent.main.token + instance_name = data.coder_workspace.me.name + }) + } + } +} + +# data "kubernetes_secret" "vcluster-kubeconfig" { +# metadata { +# name = "${data.coder_workspace.me.name}-kubeconfig" +# namespace = data.coder_workspace.me.name +# } + +# depends_on = [ +# kubernetes_manifest.cluster, +# kubernetes_manifest.vcluster, +# kubernetes_manifest.clusterresourceset_capi_init +# ] +# } + +# // using a manifest instead of secret, so that the wait capability works +# resource "kubernetes_manifest" "configmap_capi_kubeconfig" { +# manifest = { +# "kind" = "Secret" +# "metadata" = { +# "name" = "vcluster-kubeconfig" +# "namespace" = data.coder_workspace.me.name +# } +# "apiVersion" = "v1" +# "type" = "addons.cluster.x-k8s.io/resource-set" +# "data" = { +# "kubeconfig.yaml" = base64encode(data.kubernetes_secret.vcluster-kubeconfig.data.value) +# } +# } + +# depends_on = [ +# kubernetes_manifest.cluster, +# kubernetes_manifest.vcluster, +# kubernetes_manifest.clusterresourceset_capi_init, +# data.kubernetes_secret.vcluster-kubeconfig +# ] + +# wait { +# fields = { +# "data[\"kubeconfig.yaml\"]" = "*" +# } +# } + +# timeouts { +# create = "1m" +# } +# } + +resource "kubernetes_manifest" "clusterresourceset_capi_init" { + manifest = { + "apiVersion" = "addons.cluster.x-k8s.io/v1beta1" + "kind" = "ClusterResourceSet" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "clusterSelector" = { + "matchLabels" = { + "cluster-name" = data.coder_workspace.me.name + } + } + "resources" = [ + { + "kind" = "ConfigMap" + "name" = "capi-init" + }, + # { + # "kind" = "Secret" + # "name" = "vcluster-kubeconfig" + # }, + ] + "strategy" = "ApplyOnce" + } + } +} +# data "kubernetes_resource" "cluster-kubeconfig" { +# api_version = "v1" +# kind = "Secret" +# metadata { +# name = "${data.coder_workspace.me.name}-kubeconfig" +# namespace = data.coder_workspace.me.name +# } + +# depends_on = [ +# kubernetes_namespace.workspace, +# kubernetes_manifest.cluster, +# kubernetes_manifest.vcluster +# ] +# } + +# This is generated from the vcluster... 
+# Need to find a way for it to wait before running, so that the secret exists + +# We'll need to use the kubeconfig from above to provision the coder/pair environment +resource "kubernetes_manifest" "ingress_vcluster" { + manifest = { + "apiVersion" = "projectcontour.io/v1" + "kind" = "HTTPProxy" + "metadata" = { + "name" = "${data.coder_workspace.me.name}-apiserver" + "namespace" = data.coder_workspace.me.name + "annotations" = { + "projectcontour.io/ingress.class" = "contour-external" + } + } + "spec" = { + "tcpproxy" = { + "services" = [ + { + "name" = "${data.coder_workspace.me.name}" + "port" = 443 + }, + ] + } + "virtualhost" = { + "fqdn" = "${data.coder_workspace.me.name}.${var.base_domain}" + "tls" = { + "passthrough" = true + } + } + } + } +} + +resource "coder_app" "vcluster-apiserver" { + agent_id = coder_agent.main.id + name = "APIServer" + url = "https://kubernetes.default.svc:443" + relative_path = true + healthcheck { + url = "https://kubernetes.default.svc:443/healthz" + interval = 5 + threshold = 6 + } +} diff --git a/examples/templates/kubevirt/cool.template.yaml b/examples/templates/kubevirt/cool.template.yaml new file mode 100644 index 0000000000000..e4693053eec44 --- /dev/null +++ b/examples/templates/kubevirt/cool.template.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coder + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: coder +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: coder + namespace: default +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: code-server + namespace: default +spec: + selector: + matchLabels: + app: code-server + serviceName: code-server + template: + metadata: + labels: + app: code-server + spec: + serviceAccountName: coder + securityContext: + runAsUser: 1000 + fsGroup: 1000 + containers: + - name: code-server + image: codercom/enterprise-base:ubuntu + command: ${coder_command} + securityContext: + runAsUser: 1000 + env: + - name: CODER_AGENT_TOKEN + value: ${coder_token} diff --git a/examples/templates/kubevirt/kubevirt.param.yaml b/examples/templates/kubevirt/kubevirt.param.yaml new file mode 100644 index 0000000000000..b60def8a5b616 --- /dev/null +++ b/examples/templates/kubevirt/kubevirt.param.yaml @@ -0,0 +1 @@ +base_domain: pair.pair.sharing.io diff --git a/examples/templates/kubevirt/kv1.yaml b/examples/templates/kubevirt/kv1.yaml new file mode 100644 index 0000000000000..101fb59630378 --- /dev/null +++ b/examples/templates/kubevirt/kv1.yaml @@ -0,0 +1,161 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: kv1 + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.243.0.0/16 + services: + cidrBlocks: + - 10.95.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: kv1-control-plane + namespace: default + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: KubevirtCluster + name: kv1 + namespace: default +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: KubevirtCluster +metadata: + name: kv1 + namespace: default +spec: + controlPlaneServiceTemplate: + spec: + type: ClusterIP +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: KubevirtMachineTemplate +metadata: + name: kv1-control-plane + namespace: default +spec: + template: + spec: + virtualMachineTemplate: + metadata: + namespace: 
default + spec: + runStrategy: Always + template: + spec: + domain: + cpu: + cores: 2 + devices: + disks: + - disk: + bus: virtio + name: containervolume + memory: + guest: 4Gi + evictionStrategy: External + volumes: + - containerDisk: + image: quay.io/capk/ubuntu-2004-container-disk:v1.22.0 + name: containervolume +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: kv1-control-plane + namespace: default +spec: + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: k8s.gcr.io + networking: + dnsDomain: kv1.default.local + podSubnet: 10.243.0.0/16 + serviceSubnet: 10.95.0.0/16 + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + joinConfiguration: + nodeRegistration: + criSocket: '{CRI_PATH}' + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: KubevirtMachineTemplate + name: kv1-control-plane + namespace: default + replicas: 1 + version: 1.23.5 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: KubevirtMachineTemplate +metadata: + name: kv1-md-0 + namespace: default +spec: + template: + spec: + virtualMachineTemplate: + metadata: + namespace: default + spec: + runStrategy: Always + template: + spec: + domain: + cpu: + cores: 2 + devices: + disks: + - disk: + bus: virtio + name: containervolume + memory: + guest: 4Gi + evictionStrategy: External + volumes: + - containerDisk: + image: quay.io/capk/ubuntu-2004-container-disk:v1.22.0 + name: containervolume +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: kv1-md-0 + namespace: default +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: {} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: kv1-md-0 + namespace: default +spec: + clusterName: kv1 + replicas: 0 + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: kv1-md-0 + namespace: default + clusterName: kv1 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: KubevirtMachineTemplate + name: kv1-md-0 + namespace: default + version: 1.23.5 diff --git a/examples/templates/kubevirt/research.org b/examples/templates/kubevirt/research.org new file mode 100644 index 0000000000000..6bac65a7543f1 --- /dev/null +++ b/examples/templates/kubevirt/research.org @@ -0,0 +1,184 @@ +#+title: Research +* Installing KubeVirt +https://kubevirt.io/user-guide/operations/installation/ +** kube-apiserver --allow-privileged=true + +Kubernetes apiserver must have --allow-privileged=true in order to run KubeVirt's privileged DaemonSet. + +#+begin_src shell +kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath='{.items[0].spec.containers[0].command}' | jq -r .[] | grep allow-privileged=true +#+end_src + +#+RESULTS: +#+begin_example +--allow-privileged=true +#+end_example +** virt-host-validate +This is run on the host of the pod... we nsenter to install/run for now. +*** install +#+begin_src shell +docker run -i --rm --privileged --pid=host alpine:edge nsenter -t 1 -m -u -n -i su root -c "cd $EXEC_PWD; /bin/bash -c \"apt-get install -y libvirt-clients\"" +#+end_src + +#+RESULTS: +#+begin_example +Reading package lists... +Building dependency tree... +Reading state information... +libvirt-clients is already the newest version (6.0.0-0ubuntu8.16). 
+0 upgraded, 0 newly installed, 0 to remove and 14 not upgraded. +#+end_example + +*** run virt-host-validate + +Ignoring the PASS lines, we note a couple of WARNs, neither of which is a show-stopper for now; we can fix them if we really want. + +#+name: virt-host-validate +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +docker run -i --rm --privileged --pid=host alpine:edge nsenter -t 1 -m -u -n -i su root -c "cd $EXEC_PWD; /bin/bash -c \"virt-host-validate | grep -v PASS\"" +#+end_src + +#+RESULTS: virt-host-validate +#+begin_example + QEMU: Checking if IOMMU is enabled by kernel : WARN (IOMMU appears to be disabled in kernel. Add intel_iommu=on to kernel cmdline arguments) + QEMU: Checking for secure guest support : WARN (Unknown if this platform has Secure Guest support) +#+end_example +** installing KubeVirt +#+begin_src tmate :window install_kubevirt +export RELEASE=$(curl https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt) +# Deploy the KubeVirt operator +kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-operator.yaml +# Create the KubeVirt CR (instance deployment request) which triggers the actual installation +kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-cr.yaml +# wait until all KubeVirt components are up +kubectl -n kubevirt wait kv kubevirt --for condition=Available +#+end_src + +** exploring kubevirt +#+begin_src shell +kubectl get pods -n kubevirt +#+end_src + +#+RESULTS: +#+begin_example +NAME READY STATUS RESTARTS AGE +virt-api-644f978d88-cltqm 1/1 Running 0 3m19s +virt-controller-64c6d77bd9-pcspl 1/1 Running 0 2m54s +virt-controller-64c6d77bd9-zx772 1/1 Running 0 2m54s +virt-handler-c5kmp 1/1 Running 0 2m54s +virt-operator-57d5c5d569-gprmv 1/1 Running 0 4m9s +virt-operator-57d5c5d569-ldxv8 1/1 Running 0 4m9s +#+end_example +* Installing ClusterAPI+KubeVirt +** necessary vars +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +clusterctl generate cluster kv2 --infrastructure kubevirt --list-variables +#+end_src + +#+RESULTS: +#+begin_example +Required Variables: + - CRI_PATH + - IMAGE_REPO + - NODE_VM_IMAGE_TEMPLATE + +Optional Variables: + - CLUSTER_NAME (defaults to kv2) + - CONTROL_PLANE_MACHINE_COUNT (defaults to 1) + - KUBERNETES_VERSION (defaults to 1.23.5) + - NAMESPACE (defaults to current Namespace in the KubeConfig file) + - WORKER_MACHINE_COUNT (defaults to 0) + +#+end_example + +** good default values +#+begin_src shell +export CRI_PATH=/var/run/containerd/containerd.sock +export IMAGE_REPO=k8s.gcr.io +export NODE_VM_IMAGE_TEMPLATE=quay.io/capk/ubuntu-2004-container-disk:v1.22.0 +#+end_src +* Explore +** pod is up +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +export WORKSPACE=kv4 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get all -A +#+end_src + +#+RESULTS: +#+begin_example +NAMESPACE NAME READY STATUS RESTARTS AGE +default pod/code-server-0 0/1 Pending 0 17m +kube-system pod/coredns-749558f7dd-6dgp6 0/1 Pending 0 17m +kube-system pod/coredns-749558f7dd-w5bnv 0/1 Pending 0 17m +kube-system pod/etcd-kv4-xf9gk 1/1 Running 0 17m +kube-system pod/kube-apiserver-kv4-xf9gk 1/1 Running 0 17m +kube-system pod/kube-controller-manager-kv4-xf9gk 1/1 Running 0 17m +kube-system pod/kube-proxy-hzzn2 1/1 Running 0 17m +kube-system pod/kube-scheduler-kv4-xf9gk 1/1 Running 0
17m + +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default service/kubernetes ClusterIP 10.95.0.1 443/TCP 17m +kube-system service/kube-dns ClusterIP 10.95.0.10 53/UDP,53/TCP,9153/TCP 17m + +NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 17m + +NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE +kube-system deployment.apps/coredns 0/2 2 0 17m + +NAMESPACE NAME DESIRED CURRENT READY AGE +kube-system replicaset.apps/coredns-749558f7dd 2 2 0 17m + +NAMESPACE NAME READY AGE +default statefulset.apps/code-server 0/1 17m +#+end_example + +** cni not yet working +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +export WORKSPACE=kv4 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl describe nodes kv4-xf9gk | grep -B6 KubeletNotReady +#+end_src + +#+RESULTS: +#+begin_example +Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sat, 08 Oct 2022 22:08:53 -0700 Sat, 08 Oct 2022 21:53:32 -0700 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sat, 08 Oct 2022 22:08:53 -0700 Sat, 08 Oct 2022 21:53:32 -0700 KubeletHasNoDiskPressure kubelet has no disk pressure + PIDPressure False Sat, 08 Oct 2022 22:08:53 -0700 Sat, 08 Oct 2022 21:53:32 -0700 KubeletHasSufficientPID kubelet has sufficient PID available + Ready False Sat, 08 Oct 2022 22:08:53 -0700 Sat, 08 Oct 2022 21:53:32 -0700 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized +#+end_example +* virtctl +Seems best to install it as a kubectl plugin: +- https://krew.sigs.k8s.io/docs/user-guide/setup/install/ +- https://kubevirt.io/user-guide/operations/virtctl_client_tool/ +* host-shell +Run this outside of our VMs, as it needs the socket to communicate to VMs. +** install virtctl and get a shell +#+begin_src tmate :window host +host-shell +export VERSION=v0.57.1 +wget -q -O /usr/local/bin/virtctl https://github.com/kubevirt/kubevirt/releases/download/${VERSION}/virtctl-${VERSION}-linux-amd64 +chmod +x /usr/local/bin/virtctl +virtctl version +#+end_src +** virtctl +#+begin_src tmate :window host +virtctl guestosinfo kv4 +#+end_src + +#+RESULTS: +#+begin_example + +virtctl guestosinfo kv4 +virtctl: command not found +#+end_example diff --git a/examples/templates/talos-equinix-metal-cluster-api/README.org b/examples/templates/talos-equinix-metal-cluster-api/README.org new file mode 100644 index 0000000000000..abaa1d0170091 --- /dev/null +++ b/examples/templates/talos-equinix-metal-cluster-api/README.org @@ -0,0 +1,46 @@ +#+title: Coder on Talos+Equinix Metal+Cluster-API + +* Purpose + +Deploy Coder onto Equinix Metal with Cluster-API. +Eventually it will be deployed through Terraform. + +* State + +This is currently in exploration and may go away.
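+ +* Credentials + +Before init, the packet provider expects an Equinix Metal API token in the environment; PACKET_API_KEY is the variable cluster-api-provider-packet documents, and PACKET_PROJECT_ID is required by the template below (both values here are placeholders). + +#+begin_src shell +export PACKET_API_KEY=your-equinix-metal-api-token +export PACKET_PROJECT_ID=your-project-uuid +#+end_src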
+ +* Initialise + +#+begin_src tmate +EXP_CLUSTER_RESOURCE_SET=true clusterctl init --infrastructure=packet +#+end_src + +* Render template + +List variables +#+begin_src shell +clusterctl generate cluster talos-em --from ./talos-packet-cluster-template.yaml --list-variables +#+end_src + +#+RESULTS: +#+begin_example +Required Variables: + - CONTROLPLANE_NODE_TYPE + - FACILITY + - PACKET_PROJECT_ID + - WORKER_NODE_TYPE + +Optional Variables: + - CLUSTER_NAME (defaults to talos-em) + - CONTROL_PLANE_MACHINE_COUNT (defaults to 1) + - KUBERNETES_VERSION (defaults to 1.23.5) + - POD_CIDR (defaults to "192.168.0.0/16") + - SERVICE_CIDR (defaults to "172.26.0.0/16") + - WORKER_MACHINE_COUNT (defaults to 0) + +#+end_example + +Render into something applyable +#+begin_src tmate +clusterctl generate cluster talos-em --from ./talos-packet-cluster-template.yaml > /tmp/talos-em-rendered.yaml +#+end_src diff --git a/examples/templates/talos-equinix-metal-cluster-api/talos-packet-cluster-template.yaml b/examples/templates/talos-equinix-metal-cluster-api/talos-packet-cluster-template.yaml new file mode 100644 index 0000000000000..3039d6a87a3a4 --- /dev/null +++ b/examples/templates/talos-equinix-metal-cluster-api/talos-packet-cluster-template.yaml @@ -0,0 +1,163 @@ +kind: TalosControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: PacketMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + controlPlaneConfig: + init: + generateType: init + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/siderolabs/installer:v1.2.3 + bootloader: true + wipe: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/extraManifests + value: + - https://github.com/equinix/cloud-provider-equinix-metal/releases/download/v3.5.0/deployment.yaml + - op: add + path: /cluster/allowSchedulingOnMasters + value: true + controlplane: + generateType: controlplane + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/siderolabs/installer:v1.2.3 + bootloader: true + wipe: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/allowSchedulingOnMasters + value: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: PacketMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + os: talos_v1 + billingCycle: hourly + machineType: "${CONTROLPLANE_NODE_TYPE}" + tags: [] +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: + - ${POD_CIDR:=192.168.0.0/16} + services: + cidrBlocks: + - ${SERVICE_CIDR:=172.26.0.0/16} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: PacketCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + 
kind: TalosControlPlane + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: PacketCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + projectID: "${PACKET_PROJECT_ID}" + facility: "${FACILITY}" + vipManager: CPEM +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a +spec: + replicas: ${WORKER_MACHINE_COUNT} + clusterName: ${CLUSTER_NAME} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + version: ${KUBERNETES_VERSION} + clusterName: ${CLUSTER_NAME} + bootstrap: + configRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + infrastructureRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: PacketMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: PacketMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker-a +spec: + template: + spec: + os: talos_v1 + billingCycle: hourly + machineType: "${WORKER_NODE_TYPE}" + tags: [] +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: TalosConfigTemplate +metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} +spec: + template: + spec: + generateType: init diff --git a/examples/templates/vcluster/README.org b/examples/templates/vcluster/README.org new file mode 100644 index 0000000000000..3f7df0d140306 --- /dev/null +++ b/examples/templates/vcluster/README.org @@ -0,0 +1,216 @@ +#+title: Readme + +* Cluster API +Needs to run on, or against, a Kubernetes cluster with Cluster API installed. +#+begin_src shell +kubectl create ns vclusters +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +* Terraform Styles +Don't use "heredoc" strings to generate JSON or YAML. Instead, use the jsonencode function or the yamlencode function so that Terraform can be responsible for guaranteeing valid JSON or YAML syntax. +- https://www.terraform.io/language/expressions/strings#generating-json-or-yaml +* build coder +#+begin_src shell :dir "../../.." +go build cmd/coder +sudo cp coder /usr/local/bin +ls -la /usr/local/bin/coder +/usr/local/bin/coder version +#+end_src + +#+RESULTS: +#+begin_example +-rwxr-xr-x 1 root root 63885468 Oct 2 22:19 /usr/local/bin/coder +Coder v0.0.0-devel+8850ed7 Thu Sep 29 18:49:51 UTC 2022 +https://github.com/coder/coder/commit/8850ed7e5eda8979030b3affd7e1cfebac7d632c +#+end_example + +* Coder Iteration Loop +** Start Coder +#+begin_src tmate :window coder :dir "../../.." + +cd ~/sharingio/coder +rm -rf ~/.config/coderv2/ # delete database +coder server --address=0.0.0.0:7080 --access-url=http://localhost:7080 --tunnel \ + 2>&1 | tee coder-server.log +#+end_src +#+begin_src shell +coder login `cat ~/.config/coderv2/url` -u ii -p ii -e ii@ii.nz +#+end_src + +#+RESULTS: +#+begin_example +> Your Coder deployment hasn't been set up! + + Welcome to Coder, ii! You're authenticated. + + Get started by creating a template: coder templates init +#+end_example + +** kubernetes workspace +#+begin_src shell :dir "../../.."
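+# Create the template from the example directory, then create a workspace from +# it; both commands read their defaults from kubernetes.param.yaml.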
+coder template create kubernetes -d examples/templates/kubernetes --yes --parameter-file examples/templates/kubernetes/kubernetes.param.yaml +coder create k1 --template kubernetes --parameter-file examples/templates/kubernetes/kubernetes.param.yaml --yes +#+end_src + +#+RESULTS: +#+begin_example +⧗ Queued + ✔ Queued [460ms] +⧗ Setting up + ✔ Setting up [56ms] +⧗ Adding README.md... + ✔ Adding README.md... [55ms] +⧗ Parsing template parameters + ✔ Parsing template parameters [115ms] +⧗ Cleaning Up + ✘ Cleaning Up [106ms] + + Attempting to read the variables from the parameter file. + + + This template has required variables! They are scoped to + the template, and not viewable after being set. + + +⧗ Queued + ✔ Queued [130ms] +⧗ Setting up + ✔ Setting up [56ms] +⧗ Adding README.md... + ✔ Adding README.md... [56ms] +⧗ Parsing template parameters + ✔ Parsing template parameters [117ms] +⧗ Detecting persistent resources + Terraform 1.2.4 + data.coder_workspace.me: Refreshing... + data.coder_workspace.me: Refresh complete after 0s [id=a5e5f05a-cddf-4b99-8e7d-52504a5aa775] + coder_agent.main: Plan to create + coder_app.code-server: Plan to create + kubernetes_persistent_volume_claim.home: Plan to create + kubernetes_pod.main[0]: Plan to create + Plan: 4 to add, 0 to change, 0 to destroy. +✔ Detecting persistent resources [3374ms] +⧗ Detecting ephemeral resources + Terraform 1.2.4 + data.coder_workspace.me: Refreshing... + data.coder_workspace.me: Refresh complete after 0s [id=ecf39110-bf1f-4490-8043-f92e6c0d4a54] + coder_agent.main: Plan to create + coder_app.code-server: Plan to create + kubernetes_persistent_volume_claim.home: Plan to create + Plan: 3 to add, 0 to change, 0 to destroy. +✔ Detecting ephemeral resources [3671ms] +⧗ Cleaning Up + ✔ Cleaning Up [116ms] +┌─────────────────────────────────────────────┐ +│ Template Preview │ +├─────────────────────────────────────────────┤ +│ RESOURCE │ +├─────────────────────────────────────────────┤ +│ kubernetes_persistent_volume_claim.home │ +├─────────────────────────────────────────────┤ +│ kubernetes_pod.main │ +│ └─ main (linux, amd64) │ +└─────────────────────────────────────────────┘ + +The kubernetes template has been created at Oct 2 22:20:06! Developers can +provision a workspace with this template using: + + coder create --template="kubernetes" [workspace name] + + + Attempting to read the variables from the parameter file. + + + This template has customizable parameters. Values can be + changed after create, but may have unintended side effects + (like data loss). + + +Planning workspace... +⧗ Queued + ✔ Queued [415ms] +⧗ Setting up + ✔ Setting up [1490ms] +⧗ Detecting persistent resources + ✔ Detecting persistent resources [1846ms] +⧗ Cleaning Up + ✔ Cleaning Up [111ms] +┌───────────────────────────────────────────────────────────┐ +│ Workspace Preview │ +├───────────────────────────────────────────────────────────┤ +│ RESOURCE ACCESS │ +├───────────────────────────────────────────────────────────┤ +│ kubernetes_persistent_volume_claim.home │ +├───────────────────────────────────────────────────────────┤ +│ kubernetes_pod.main │ +│ └─ main (linux, amd64) coder ssh k1 │ +└───────────────────────────────────────────────────────────┘ +⧗ Queued + ✔ Queued [19ms] +⧗ Setting up + ✔ Setting up [54ms] +⧗ Starting workspace + Terraform 1.2.4 + data.coder_workspace.me: Refreshing... 
+ data.coder_workspace.me: Refresh complete after 0s [id=b14cb471-6ca6-4999-946c-f4a19f953145] + coder_agent.main: Plan to create + coder_app.code-server: Plan to create + kubernetes_persistent_volume_claim.home: Plan to create + kubernetes_pod.main[0]: Plan to create + Plan: 4 to add, 0 to change, 0 to destroy. + coder_agent.main: Creating... + coder_agent.main: Creation complete after 0s [id=b7cc64b6-e2a2-44d4-aeab-e2d4f70f849d] + coder_app.code-server: Creating... + coder_app.code-server: Creation complete after 0s [id=1966a6a6-c6fd-426e-977f-f426b94f2b2a] + kubernetes_persistent_volume_claim.home: Creating... + kubernetes_persistent_volume_claim.home: Creation complete after 0s [id=coder-workspaces/coder-ii-k1-home] + kubernetes_pod.main[0]: Creating... + kubernetes_pod.main[0]: Still creating... [10s elapsed] + kubernetes_pod.main[0]: Creation complete after 13s [id=coder-workspaces/coder-ii-k1] + Apply complete! Resources: 4 added, 0 changed, 0 destroyed. + Outputs: 0 +✔ Starting workspace [16687ms] +⧗ Cleaning Up + ✔ Cleaning Up [108ms] + +The k1 workspace has been created at Oct 2 22:20:27! +#+end_example + +** vcluster workspace +*** create template and cluster +#+nobegin_src shell :dir "../../.." +#+begin_src tmate :dir "../../.." :window vcluster +cd ~/sharingio/coder +coder template create vcluster -d examples/templates/vcluster --yes --parameter-file examples/templates/vcluster/vcluster.param.yaml +coder create v1 --template vcluster --parameter-file examples/templates/vcluster/vcluster.param.yaml --yes +#+end_src +*** update template and new cluster +#+nobegin_src shell :dir "../../.." +#+begin_src tmate :dir "../../.." :window vcluster +export WORKSPACE=v7 +coder template push vcluster -d examples/templates/vcluster --yes --parameter-file examples/templates/vcluster/vcluster.param.yaml +coder create $WORKSPACE --template vcluster --parameter-file examples/templates/vcluster/vcluster.param.yaml --yes +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get ns +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +** coder url +#+begin_src shell :dir "../../.." +grep "coder login https://" coder-server.log | cut -d\ -f 4 +#+end_src + +#+RESULTS: +#+begin_example +https://fcca4fb3bd56fd75311a90cf0d331cfa.pit-1.try.coder.app +#+end_example diff --git a/examples/templates/vcluster/cluster.tf b/examples/templates/vcluster/cluster.tf new file mode 100644 index 0000000000000..2173423a911d5 --- /dev/null +++ b/examples/templates/vcluster/cluster.tf @@ -0,0 +1,299 @@ +terraform { + required_providers { + coder = { + source = "coder/coder" + version = "0.4.15" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.12.1" + } + } +} + +# https://www.terraform.io/language/providers/configuration#provider-configuration-1 +# > You can use expressions in the values of these configuration arguments, +# but can only reference values that are known before the configuration is applied. +# This means you can safely reference input variables, but not attributes +# exported by resources (with an exception for resource arguments that +# are specified directly in the configuration). 
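+# A possible workaround (sketch, not wired up here): write the vcluster +# kubeconfig to a file out-of-band, then configure a second provider from a +# value that is known before apply, e.g. +# provider "kubernetes" { +#   alias       = "vcluster" +#   config_path = var.vcluster_kubeconfig_path # hypothetical variable +# }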
+#### no data.X :( +# provider "kubernetes" { +# alias = "vcluster" +# host = yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["server"] +# client_certificate = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-certificate-data"]) +# client_key = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-key-data"]) +# cluster_ca_certificate = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["certificate-authority-data"]) +# } + +variable "base_domain" { + type = string + default = "sanskar.pair.sharing.io" +} + +data "coder_workspace" "me" {} + +resource "coder_agent" "main" { + os = "linux" + arch = "amd64" + startup_script = <<-EOT + # put $HOME/bin (where kubectl is installed below) on PATH for new shells + echo 'export PATH="$HOME/bin:$PATH"' >> $HOME/.bashrc + mkdir -p bin + curl -o bin/kubectl -L https://dl.k8s.io/v1.25.2/bin/linux/amd64/kubectl + chmod +x bin/* + + # install and start code-server + curl -fsSL https://code-server.dev/install.sh | sh | tee code-server-install.log + code-server --auth none --port 13337 | tee code-server-install.log & + EOT +} + +# code-server +resource "coder_app" "code-server" { + agent_id = coder_agent.main.id + name = "code-server" + icon = "/icon/code.svg" + url = "http://localhost:13337?folder=/home/coder" + relative_path = true + + healthcheck { + url = "http://localhost:13337/healthz" + interval = 3 + threshold = 10 + } +} + +resource "kubernetes_namespace" "workspace" { + metadata { + name = data.coder_workspace.me.name + labels = { + cert-manager-tls = "sync" + } + } +} + +resource "kubernetes_manifest" "cluster" { + manifest = { + "apiVersion" = "cluster.x-k8s.io/v1beta1" + "kind" = "Cluster" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + "labels" = { + "cluster-name" = data.coder_workspace.me.name + } + } + "spec" = { + "controlPlaneRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "VCluster" + "name" = data.coder_workspace.me.name + } + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "VCluster" + "name" = data.coder_workspace.me.name + } + } + } +} + +resource "kubernetes_manifest" "vcluster" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "VCluster" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "controlPlaneEndpoint" = { + "host" = "" + "port" = 0 + } + "kubernetesVersion" = "1.23.4" + "helmRelease" = { + "chart" = { + "name" = null + "repo" = null + "version" = null + } + "values" = <<-EOT + service: + type: NodePort + securityContext: + runAsUser: 12345 + runAsNonRoot: true + privileged: false + syncer: + extraArgs: + - --tls-san="${data.coder_workspace.me.name}.${var.base_domain}" + - --tls-san="${data.coder_workspace.me.name}.${data.coder_workspace.me.name}.svc" + EOT + } + } + } +} + +resource "kubernetes_manifest" "configmap_capi_init" { + manifest = { + "kind" = "ConfigMap" + "metadata" = { + "name" = "capi-init" + "namespace" = data.coder_workspace.me.name + } + "apiVersion" = "v1" + "data" = { + "cool.yaml" = templatefile("cool.template.yaml", + { + coder_command = jsonencode(["sh", "-c", coder_agent.main.init_script]), + coder_token = coder_agent.main.token + instance_name = data.coder_workspace.me.name + }) + } + } +} + +# data "kubernetes_secret" "vcluster-kubeconfig" { +# metadata { +# name =
"${data.coder_workspace.me.name}-kubeconfig" +# namespace = data.coder_workspace.me.name +# } + +# depends_on = [ +# kubernetes_manifest.cluster, +# kubernetes_manifest.vcluster, +# kubernetes_manifest.clusterresourceset_capi_init +# ] +# } + +# // using a manifest instead of secret, so that the wait capability works +# resource "kubernetes_manifest" "configmap_capi_kubeconfig" { +# manifest = { +# "kind" = "Secret" +# "metadata" = { +# "name" = "vcluster-kubeconfig" +# "namespace" = data.coder_workspace.me.name +# } +# "apiVersion" = "v1" +# "type" = "addons.cluster.x-k8s.io/resource-set" +# "data" = { +# "kubeconfig.yaml" = base64encode(data.kubernetes_secret.vcluster-kubeconfig.data.value) +# } +# } + +# depends_on = [ +# kubernetes_manifest.cluster, +# kubernetes_manifest.vcluster, +# kubernetes_manifest.clusterresourceset_capi_init, +# data.kubernetes_secret.vcluster-kubeconfig +# ] + +# wait { +# fields = { +# "data[\"kubeconfig.yaml\"]" = "*" +# } +# } + +# timeouts { +# create = "1m" +# } +# } + +resource "kubernetes_manifest" "clusterresourceset_capi_init" { + manifest = { + "apiVersion" = "addons.cluster.x-k8s.io/v1beta1" + "kind" = "ClusterResourceSet" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "clusterSelector" = { + "matchLabels" = { + "cluster-name" = data.coder_workspace.me.name + } + } + "resources" = [ + { + "kind" = "ConfigMap" + "name" = "capi-init" + }, + # { + # "kind" = "Secret" + # "name" = "vcluster-kubeconfig" + # }, + ] + "strategy" = "ApplyOnce" + } + } +} +# data "kubernetes_resource" "cluster-kubeconfig" { +# api_version = "v1" +# kind = "Secret" +# metadata { +# name = "${data.coder_workspace.me.name}-kubeconfig" +# namespace = data.coder_workspace.me.name +# } + +# depends_on = [ +# kubernetes_namespace.workspace, +# kubernetes_manifest.cluster, +# kubernetes_manifest.vcluster +# ] +# } + +# This is generated from the vcluster... 
+# Need to find a way for it to wait before running, so that the secret exists + +# We'll need to use the kubeconfig from above to provision the coder/pair environment +resource "kubernetes_manifest" "ingress_vcluster" { + manifest = { + "apiVersion" = "projectcontour.io/v1" + "kind" = "HTTPProxy" + "metadata" = { + "name" = "${data.coder_workspace.me.name}-apiserver" + "namespace" = data.coder_workspace.me.name + "annotations" = { + "projectcontour.io/ingress.class" = "contour-external" + } + } + "spec" = { + "tcpproxy" = { + "services" = [ + { + "name" = "${data.coder_workspace.me.name}" + "port" = 443 + }, + ] + } + "virtualhost" = { + "fqdn" = "${data.coder_workspace.me.name}.${var.base_domain}" + "tls" = { + "passthrough" = true + } + } + } + } +} + +resource "coder_app" "vcluster-apiserver" { + agent_id = coder_agent.main.id + name = "APIServer" + url = "https://kubernetes.default.svc:443" + relative_path = true + healthcheck { + url = "https://kubernetes.default.svc:443/healthz" + interval = 5 + threshold = 6 + } +} diff --git a/examples/templates/vcluster/cool.template.yaml b/examples/templates/vcluster/cool.template.yaml new file mode 100644 index 0000000000000..e4693053eec44 --- /dev/null +++ b/examples/templates/vcluster/cool.template.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coder + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: coder +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: coder + namespace: default +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: code-server + namespace: default +spec: + selector: + matchLabels: + app: code-server + serviceName: code-server + template: + metadata: + labels: + app: code-server + spec: + serviceAccountName: coder + securityContext: + runAsUser: 1000 + fsGroup: 1000 + containers: + - name: code-server + image: codercom/enterprise-base:ubuntu + command: ${coder_command} + securityContext: + runAsUser: 1000 + env: + - name: CODER_AGENT_TOKEN + value: ${coder_token} diff --git a/examples/templates/vcluster/vcluster.param.yaml b/examples/templates/vcluster/vcluster.param.yaml new file mode 100644 index 0000000000000..1a7045c0d21ac --- /dev/null +++ b/examples/templates/vcluster/vcluster.param.yaml @@ -0,0 +1 @@ +base_domain: sanskar.pair.sharing.io
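+# To target your own pair instance, override base_domain with its wildcard DNS +# zone, for example: +# base_domain: yourname.pair.sharing.io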