From 012e1800fa816250cb5974efc08bbac7deb2a273 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Tue, 4 Oct 2022 16:54:04 +1300 Subject: [PATCH 01/19] Add kubeconfig to CRS and init settings --- examples/templates/vcluster/cluster.tf | 311 +++++++++++++++++++++++++ 1 file changed, 311 insertions(+) create mode 100644 examples/templates/vcluster/cluster.tf diff --git a/examples/templates/vcluster/cluster.tf b/examples/templates/vcluster/cluster.tf new file mode 100644 index 0000000000000..ade42b2cca2b0 --- /dev/null +++ b/examples/templates/vcluster/cluster.tf @@ -0,0 +1,311 @@ +terraform { + required_providers { + coder = { + source = "coder/coder" + version = "0.4.15" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.12.1" + } + } +} + +# https://www.terraform.io/language/providers/configuration#provider-configuration-1 +# > You can use expressions in the values of these configuration arguments, +# but can only reference values that are known before the configuration is applied. +# This means you can safely reference input variables, but not attributes +# exported by resources (with an exception for resource arguments that +# are specified directly in the configuration). +#### no data.X :( +# provider "kubernetes" { +# alias = "vcluster" +# host = yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["server"] +# client_certificate = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-certificate-data"]) +# client_key = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-key-data"]) +# cluster_ca_certificate = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["certificate-authority-data"]) +# } + +variable "base_domain" { + type = string + default = "sanskar.pair.sharing.io" +} + +data "coder_workspace" "me" {} + +resource "coder_agent" "main" { + os = "linux" + arch = "amd64" + startup_script = <> $HOME/.bashrc + mkdir -p bin + curl -o bin/kubectl -L https://dl.k8s.io/v1.25.2/bin/linux/amd64/kubectl + chmod +x bin/* + + # install and start code-server + curl -fsSL https://code-server.dev/install.sh | sh | tee code-server-install.log + code-server --auth none --port 13337 | tee code-server-install.log & + EOT +} + +# code-server +resource "coder_app" "code-server" { + agent_id = coder_agent.main.id + name = "code-server" + icon = "/icon/code.svg" + url = "http://localhost:13337?folder=/home/coder" + relative_path = true + + healthcheck { + url = "http://localhost:13337/healthz" + interval = 3 + threshold = 10 + } +} + +resource "kubernetes_namespace" "workspace" { + metadata { + name = data.coder_workspace.me.name + labels = { + cert-manager-tls = "sync" + } + } +} + +resource "kubernetes_manifest" "cluster" { + manifest = { + "apiVersion" = "cluster.x-k8s.io/v1beta1" + "kind" = "Cluster" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + "labels" = { + "cluster-name" = data.coder_workspace.me.name + } + } + "spec" = { + "controlPlaneRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "VCluster" + "name" = data.coder_workspace.me.name + } + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "VCluster" + "name" = data.coder_workspace.me.name + } + } + } +} + +resource "kubernetes_manifest" "vcluster" { + manifest = { + "apiVersion" = 
"infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "VCluster" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "controlPlaneEndpoint" = { + "host" = "" + "port" = 0 + } + "kubernetesVersion" = "1.23.4" + "helmRelease" = { + "chart" = { + "name" = null + "repo" = null + "version" = null + } + "values" = <<-EOT + service: + type: NodePort + syncer: + extraArgs: + - --tls-san="${data.coder_workspace.me.name}.${var.base_domain}" + - --tls-san="${data.coder_workspace.me.name}.${data.coder_workspace.me.name}.svc" + EOT + } + } + } +} + +resource "kubernetes_manifest" "configmap_capi_init" { + manifest = { + "kind" = "ConfigMap" + "metadata" = { + "name" = "capi-init" + "namespace" = data.coder_workspace.me.name + } + "apiVersion" = "v1" + "data" = { + "cool.yaml" = templatefile("cool.template.yaml", + { + coder_command = jsonencode(["sh", "-c", coder_agent.main.init_script]), + coder_token = coder_agent.main.token + instance_name = data.coder_workspace.me.name + }) + } + } +} + +data "kubernetes_secret" "vcluster-kubeconfig" { + metadata { + name = "${data.coder_workspace.me.name}-kubeconfig" + namespace = data.coder_workspace.me.name + } + + depends_on = [ + kubernetes_manifest.cluster, + kubernetes_manifest.vcluster, + kubernetes_manifest.clusterresourceset_capi_init + ] +} + +// using a manifest instead of secret, so that the wait capability works +resource "kubernetes_manifest" "configmap_capi_kubeconfig" { + manifest = { + "kind" = "Secret" + "metadata" = { + "name" = "vcluster-kubeconfig" + "namespace" = data.coder_workspace.me.name + } + "apiVersion" = "v1" + "type" = "addons.cluster.x-k8s.io/resource-set" + "data" = { + "kubeconfig.yaml" = base64encode(data.kubernetes_secret.vcluster-kubeconfig.data.value) + } + } + + depends_on = [ + kubernetes_manifest.cluster, + kubernetes_manifest.vcluster, + kubernetes_manifest.clusterresourceset_capi_init, + data.kubernetes_secret.vcluster-kubeconfig + ] + + wait { + fields = { + "data[\"kubeconfig.yaml\"]" = "*" + } + } + + timeouts { + create = "1m" + } +} + +resource "kubernetes_manifest" "clusterresourceset_capi_init" { + manifest = { + "apiVersion" = "addons.cluster.x-k8s.io/v1beta1" + "kind" = "ClusterResourceSet" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "clusterSelector" = { + "matchLabels" = { + "cluster-name" = data.coder_workspace.me.name + } + } + "resources" = [ + { + "kind" = "ConfigMap" + "name" = "capi-init" + }, + { + "kind" = "Secret" + "name" = "vcluster-kubeconfig" + }, + ] + "strategy" = "ApplyOnce" + } + } +} +# data "kubernetes_resource" "cluster-kubeconfig" { +# api_version = "v1" +# kind = "Secret" +# metadata { +# name = "${data.coder_workspace.me.name}-kubeconfig" +# namespace = data.coder_workspace.me.name +# } + +# depends_on = [ +# kubernetes_namespace.workspace, +# kubernetes_manifest.cluster, +# kubernetes_manifest.vcluster +# ] +# } + +# This is generated from the vcluster... 
+# Need to find a way for it to wait before running, so that the secret exists + +# We'll need to use the kubeconfig from above to provision the coder/pair environment +resource "kubernetes_manifest" "ingress_capi_kubeapi" { + manifest = { + "apiVersion" = "networking.k8s.io/v1" + "kind" = "Ingress" + "metadata" = { + "annotations" = { + "nginx.ingress.kubernetes.io/backend-protocol" = "HTTPS" + "nginx.ingress.kubernetes.io/ssl-redirect" = "true" + } + "name" = "kubeapi" + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "ingressClassName" = "contour-external" + "rules" = [ + { + "host" = "${data.coder_workspace.me.name}.${var.base_domain}" + "http" = { + "paths" = [ + { + "backend" = { + "service" = { + "name" = "vcluster1" + "port" = { + "number" = 443 + } + } + } + "path" = "/" + "pathType" = "ImplementationSpecific" + }, + ] + } + }, + ] + "tls" = [ + { + "hosts" = [ + "${data.coder_workspace.me.name}.${var.base_domain}" + ] + }, + ] + } + } +} + +resource "coder_app" "vcluster-apiserver" { + agent_id = coder_agent.main.id + name = "APIServer" + url = "https://kubernetes.default.svc:443" + relative_path = true + healthcheck { + url = "https://kubernetes.default.svc:443/healthz" + interval = 5 + threshold = 6 + } +} From 281fabcdb2f0ba9348c062c28f408568cf39dacc Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Thu, 6 Oct 2022 11:34:03 +1300 Subject: [PATCH 02/19] Update APIServer ingress and security --- examples/templates/vcluster/cluster.tf | 148 ++++++++++++------------- 1 file changed, 68 insertions(+), 80 deletions(-) diff --git a/examples/templates/vcluster/cluster.tf b/examples/templates/vcluster/cluster.tf index ade42b2cca2b0..2173423a911d5 100644 --- a/examples/templates/vcluster/cluster.tf +++ b/examples/templates/vcluster/cluster.tf @@ -130,6 +130,10 @@ resource "kubernetes_manifest" "vcluster" { "values" = <<-EOT service: type: NodePort + securityContext: + runAsUser: 12345 + runAsNonRoot: true + privileged: false syncer: extraArgs: - --tls-san="${data.coder_workspace.me.name}.${var.base_domain}" @@ -159,51 +163,51 @@ resource "kubernetes_manifest" "configmap_capi_init" { } } -data "kubernetes_secret" "vcluster-kubeconfig" { - metadata { - name = "${data.coder_workspace.me.name}-kubeconfig" - namespace = data.coder_workspace.me.name - } +# data "kubernetes_secret" "vcluster-kubeconfig" { +# metadata { +# name = "${data.coder_workspace.me.name}-kubeconfig" +# namespace = data.coder_workspace.me.name +# } - depends_on = [ - kubernetes_manifest.cluster, - kubernetes_manifest.vcluster, - kubernetes_manifest.clusterresourceset_capi_init - ] -} +# depends_on = [ +# kubernetes_manifest.cluster, +# kubernetes_manifest.vcluster, +# kubernetes_manifest.clusterresourceset_capi_init +# ] +# } -// using a manifest instead of secret, so that the wait capability works -resource "kubernetes_manifest" "configmap_capi_kubeconfig" { - manifest = { - "kind" = "Secret" - "metadata" = { - "name" = "vcluster-kubeconfig" - "namespace" = data.coder_workspace.me.name - } - "apiVersion" = "v1" - "type" = "addons.cluster.x-k8s.io/resource-set" - "data" = { - "kubeconfig.yaml" = base64encode(data.kubernetes_secret.vcluster-kubeconfig.data.value) - } - } +# // using a manifest instead of secret, so that the wait capability works +# resource "kubernetes_manifest" "configmap_capi_kubeconfig" { +# manifest = { +# "kind" = "Secret" +# "metadata" = { +# "name" = "vcluster-kubeconfig" +# "namespace" = data.coder_workspace.me.name +# } +# "apiVersion" = "v1" +# "type" = 
"addons.cluster.x-k8s.io/resource-set" +# "data" = { +# "kubeconfig.yaml" = base64encode(data.kubernetes_secret.vcluster-kubeconfig.data.value) +# } +# } - depends_on = [ - kubernetes_manifest.cluster, - kubernetes_manifest.vcluster, - kubernetes_manifest.clusterresourceset_capi_init, - data.kubernetes_secret.vcluster-kubeconfig - ] +# depends_on = [ +# kubernetes_manifest.cluster, +# kubernetes_manifest.vcluster, +# kubernetes_manifest.clusterresourceset_capi_init, +# data.kubernetes_secret.vcluster-kubeconfig +# ] - wait { - fields = { - "data[\"kubeconfig.yaml\"]" = "*" - } - } +# wait { +# fields = { +# "data[\"kubeconfig.yaml\"]" = "*" +# } +# } - timeouts { - create = "1m" - } -} +# timeouts { +# create = "1m" +# } +# } resource "kubernetes_manifest" "clusterresourceset_capi_init" { manifest = { @@ -224,10 +228,10 @@ resource "kubernetes_manifest" "clusterresourceset_capi_init" { "kind" = "ConfigMap" "name" = "capi-init" }, - { - "kind" = "Secret" - "name" = "vcluster-kubeconfig" - }, + # { + # "kind" = "Secret" + # "name" = "vcluster-kubeconfig" + # }, ] "strategy" = "ApplyOnce" } @@ -252,48 +256,32 @@ resource "kubernetes_manifest" "clusterresourceset_capi_init" { # Need to find a way for it to wait before running, so that the secret exists # We'll need to use the kubeconfig from above to provision the coder/pair environment -resource "kubernetes_manifest" "ingress_capi_kubeapi" { +resource "kubernetes_manifest" "ingress_vcluster" { manifest = { - "apiVersion" = "networking.k8s.io/v1" - "kind" = "Ingress" + "apiVersion" = "projectcontour.io/v1" + "kind" = "HTTPProxy" "metadata" = { + "name" = "${data.coder_workspace.me.name}-apiserver" + "namespace" = data.coder_workspace.me.name "annotations" = { - "nginx.ingress.kubernetes.io/backend-protocol" = "HTTPS" - "nginx.ingress.kubernetes.io/ssl-redirect" = "true" + "projectcontour.io/ingress.class" = "contour-external" } - "name" = "kubeapi" - "namespace" = data.coder_workspace.me.name } "spec" = { - "ingressClassName" = "contour-external" - "rules" = [ - { - "host" = "${data.coder_workspace.me.name}.${var.base_domain}" - "http" = { - "paths" = [ - { - "backend" = { - "service" = { - "name" = "vcluster1" - "port" = { - "number" = 443 - } - } - } - "path" = "/" - "pathType" = "ImplementationSpecific" - }, - ] - } - }, - ] - "tls" = [ - { - "hosts" = [ - "${data.coder_workspace.me.name}.${var.base_domain}" - ] - }, - ] + "tcpproxy" = { + "services" = [ + { + "name" = "${data.coder_workspace.me.name}" + "port" = 443 + }, + ] + } + "virtualhost" = { + "fqdn" = "${data.coder_workspace.me.name}.${var.base_domain}" + "tls" = { + "passthrough" = true + } + } } } } From ce8c9b038f93ee6a1ffcbaf8b31d441e9006f66b Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Thu, 6 Oct 2022 16:58:26 +1300 Subject: [PATCH 03/19] Add talos equinix metal cluster-api --- .../README.org | 46 +++++ .../talos-packet-cluster-template.yaml | 163 ++++++++++++++++++ 2 files changed, 209 insertions(+) create mode 100644 examples/templates/talos-equinix-metal-cluster-api/README.org create mode 100644 examples/templates/talos-equinix-metal-cluster-api/talos-packet-cluster-template.yaml diff --git a/examples/templates/talos-equinix-metal-cluster-api/README.org b/examples/templates/talos-equinix-metal-cluster-api/README.org new file mode 100644 index 0000000000000..abaa1d0170091 --- /dev/null +++ b/examples/templates/talos-equinix-metal-cluster-api/README.org @@ -0,0 +1,46 @@ +#+title: Coder on Talos+Equinix Metal+Cluster-API + +* Purpose + +Deploy Coder onto Equinix Metal 
with Cluster-API. +Eventually deployed through Terraform. + +* State + +This is currently in exploration and may go away. + +* Initialise + +#+begin_src tmate +EXP_CLUSTER_RESOURCE_SET=true clusterctl init --infrastructure=packet +#+end_src + +* Render template + +List variables +#+begin_src shell +clusterctl generate cluster talos-em --from ./talos-packet-cluster-template.yaml --list-variables +#+end_src + +#+RESULTS: +#+begin_example +Required Variables: + - CONTROLPLANE_NODE_TYPE + - FACILITY + - PACKET_PROJECT_ID + - WORKER_NODE_TYPE + +Optional Variables: + - CLUSTER_NAME (defaults to talos-em) + - CONTROL_PLANE_MACHINE_COUNT (defaults to 1) + - KUBERNETES_VERSION (defaults to 1.23.5) + - POD_CIDR (defaults to "192.168.0.0/16") + - SERVICE_CIDR (defaults to "172.26.0.0/16") + - WORKER_MACHINE_COUNT (defaults to 0) + +#+end_example + +Render into something applyable +#+begin_src tmate +clusterctl generate cluster talos-em --from ./talos-packet-cluster-template.yaml > /tmp/talos-em-rendered.yaml +#+end_src diff --git a/examples/templates/talos-equinix-metal-cluster-api/talos-packet-cluster-template.yaml b/examples/templates/talos-equinix-metal-cluster-api/talos-packet-cluster-template.yaml new file mode 100644 index 0000000000000..3039d6a87a3a4 --- /dev/null +++ b/examples/templates/talos-equinix-metal-cluster-api/talos-packet-cluster-template.yaml @@ -0,0 +1,163 @@ +kind: TalosControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: PacketMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + controlPlaneConfig: + init: + generateType: init + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/siderolabs/installer:v1.2.3 + bootloader: true + wipe: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/extraManifests + value: + - https://github.com/equinix/cloud-provider-equinix-metal/releases/download/v3.5.0/deployment.yaml + - op: add + path: /cluster/allowSchedulingOnMasters + value: true + controlplane: + generateType: controlplane + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/siderolabs/installer:v1.2.3 + bootloader: true + wipe: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/allowSchedulingOnMasters + value: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: PacketMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + os: talos_v1 + billingCycle: hourly + machineType: "${CONTROLPLANE_NODE_TYPE}" + tags: [] +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: + - ${POD_CIDR:=192.168.0.0/16} + services: + cidrBlocks: + - ${SERVICE_CIDR:=172.26.0.0/16} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + 
kind: PacketCluster
    name: "${CLUSTER_NAME}"
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
    kind: TalosControlPlane
    name: "${CLUSTER_NAME}-control-plane"
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: PacketCluster
metadata:
  name: "${CLUSTER_NAME}"
spec:
  projectID: "${PACKET_PROJECT_ID}"
  facility: "${FACILITY}"
  vipManager: CPEM
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: ${CLUSTER_NAME}-worker-a
  labels:
    cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
    pool: worker-a
spec:
  replicas: ${WORKER_MACHINE_COUNT}
  clusterName: ${CLUSTER_NAME}
  selector:
    matchLabels:
      cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
      pool: worker-a
  template:
    metadata:
      labels:
        cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
        pool: worker-a
    spec:
      version: ${KUBERNETES_VERSION}
      clusterName: ${CLUSTER_NAME}
      bootstrap:
        configRef:
          name: ${CLUSTER_NAME}-worker-a
          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
          kind: TalosConfigTemplate
      infrastructureRef:
        name: ${CLUSTER_NAME}-worker-a
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
        kind: PacketMachineTemplate
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: PacketMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-worker-a
spec:
  template:
    spec:
      os: talos_v1
      billingCycle: hourly
      machineType: "${WORKER_NODE_TYPE}"
      tags: []
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
kind: TalosConfigTemplate
metadata:
  name: ${CLUSTER_NAME}-worker-a
  labels:
    cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
spec:
  template:
    spec:
      generateType: init

From e9542d87a82fcedf5ab37bb8df2b46d2f240baf9 Mon Sep 17 00:00:00 2001
From: Hippie Hacker
Date: Sat, 8 Oct 2022 19:57:42 -0700
Subject: [PATCH 04/19] bring back vcluster template yaml

---
 examples/templates/vcluster/README.org        | 216 ++++++++++++++++++
 .../templates/vcluster/cool.template.yaml     |  47 ++++
 .../templates/vcluster/vcluster.param.yaml    |   1 +
 3 files changed, 264 insertions(+)
 create mode 100644 examples/templates/vcluster/README.org
 create mode 100644 examples/templates/vcluster/cool.template.yaml
 create mode 100644 examples/templates/vcluster/vcluster.param.yaml

diff --git a/examples/templates/vcluster/README.org b/examples/templates/vcluster/README.org
new file mode 100644
index 0000000000000..3f7df0d140306
--- /dev/null
+++ b/examples/templates/vcluster/README.org
@@ -0,0 +1,216 @@
#+title: Readme

* Cluster API
Needs to run on, or against, a Kubernetes cluster with Cluster API installed.
#+begin_src shell
kubectl create ns vclusters
#+end_src

#+RESULTS:
#+begin_example
#+end_example

* Terraform Styles
Don't use "heredoc" strings to generate JSON or YAML. Instead, use the jsonencode function or the yamlencode function so that Terraform can be responsible for guaranteeing valid JSON or YAML syntax.
- https://www.terraform.io/language/expressions/strings#generating-json-or-yaml
* build coder
#+begin_src shell :dir "../../.."
go build cmd/coder
sudo cp coder /usr/local/bin
ls -la /usr/local/bin/coder
/usr/local/bin/coder version
#+end_src

#+RESULTS:
#+begin_example
-rwxr-xr-x 1 root root 63885468 Oct  2 22:19 /usr/local/bin/coder
Coder v0.0.0-devel+8850ed7 Thu Sep 29 18:49:51 UTC 2022
https://github.com/coder/coder/commit/8850ed7e5eda8979030b3affd7e1cfebac7d632c
#+end_example

* Coder Iteration Loop
** Start Coder
#+begin_src tmate :window coder :dir "../../.." 
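# Start a dev build of Coder with a clean local database; keep coder-server.log,
# since the "coder url" section below greps it for the tunnel access URL.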
+ +cd ~/sharingio/coder +rm -rf ~/.config/coderv2/ # delete database +coder server --address=0.0.0.0:7080 --access-url=http://localhost:7080 --tunnel \ + 2>&1 | tee coder-server.log +#+end_src +#+begin_src shell +coder login `cat ~/.config/coderv2/url` -u ii -p ii -e ii@ii.nz +#+end_src + +#+RESULTS: +#+begin_example +> Your Coder deployment hasn't been set up! + + Welcome to Coder, ii! You're authenticated. + + Get started by creating a template: coder templates init +#+end_example + +** kubernetes workspace +#+begin_src shell :dir "../../.." +coder template create kubernetes -d examples/templates/kubernetes --yes --parameter-file examples/templates/kubernetes/kubernetes.param.yaml +coder create k1 --template kubernetes --parameter-file examples/templates/kubernetes/kubernetes.param.yaml --yes +#+end_src + +#+RESULTS: +#+begin_example +⧗ Queued + ✔ Queued [460ms] +⧗ Setting up + ✔ Setting up [56ms] +⧗ Adding README.md... + ✔ Adding README.md... [55ms] +⧗ Parsing template parameters + ✔ Parsing template parameters [115ms] +⧗ Cleaning Up + ✘ Cleaning Up [106ms] + + Attempting to read the variables from the parameter file. + + + This template has required variables! They are scoped to + the template, and not viewable after being set. + + +⧗ Queued + ✔ Queued [130ms] +⧗ Setting up + ✔ Setting up [56ms] +⧗ Adding README.md... + ✔ Adding README.md... [56ms] +⧗ Parsing template parameters + ✔ Parsing template parameters [117ms] +⧗ Detecting persistent resources + Terraform 1.2.4 + data.coder_workspace.me: Refreshing... + data.coder_workspace.me: Refresh complete after 0s [id=a5e5f05a-cddf-4b99-8e7d-52504a5aa775] + coder_agent.main: Plan to create + coder_app.code-server: Plan to create + kubernetes_persistent_volume_claim.home: Plan to create + kubernetes_pod.main[0]: Plan to create + Plan: 4 to add, 0 to change, 0 to destroy. +✔ Detecting persistent resources [3374ms] +⧗ Detecting ephemeral resources + Terraform 1.2.4 + data.coder_workspace.me: Refreshing... + data.coder_workspace.me: Refresh complete after 0s [id=ecf39110-bf1f-4490-8043-f92e6c0d4a54] + coder_agent.main: Plan to create + coder_app.code-server: Plan to create + kubernetes_persistent_volume_claim.home: Plan to create + Plan: 3 to add, 0 to change, 0 to destroy. +✔ Detecting ephemeral resources [3671ms] +⧗ Cleaning Up + ✔ Cleaning Up [116ms] +┌─────────────────────────────────────────────┐ +│ Template Preview │ +├─────────────────────────────────────────────┤ +│ RESOURCE │ +├─────────────────────────────────────────────┤ +│ kubernetes_persistent_volume_claim.home │ +├─────────────────────────────────────────────┤ +│ kubernetes_pod.main │ +│ └─ main (linux, amd64) │ +└─────────────────────────────────────────────┘ + +The kubernetes template has been created at Oct 2 22:20:06! Developers can +provision a workspace with this template using: + + coder create --template="kubernetes" [workspace name] + + + Attempting to read the variables from the parameter file. + + + This template has customizable parameters. Values can be + changed after create, but may have unintended side effects + (like data loss). + + +Planning workspace... 
+⧗ Queued + ✔ Queued [415ms] +⧗ Setting up + ✔ Setting up [1490ms] +⧗ Detecting persistent resources + ✔ Detecting persistent resources [1846ms] +⧗ Cleaning Up + ✔ Cleaning Up [111ms] +┌───────────────────────────────────────────────────────────┐ +│ Workspace Preview │ +├───────────────────────────────────────────────────────────┤ +│ RESOURCE ACCESS │ +├───────────────────────────────────────────────────────────┤ +│ kubernetes_persistent_volume_claim.home │ +├───────────────────────────────────────────────────────────┤ +│ kubernetes_pod.main │ +│ └─ main (linux, amd64) coder ssh k1 │ +└───────────────────────────────────────────────────────────┘ +⧗ Queued + ✔ Queued [19ms] +⧗ Setting up + ✔ Setting up [54ms] +⧗ Starting workspace + Terraform 1.2.4 + data.coder_workspace.me: Refreshing... + data.coder_workspace.me: Refresh complete after 0s [id=b14cb471-6ca6-4999-946c-f4a19f953145] + coder_agent.main: Plan to create + coder_app.code-server: Plan to create + kubernetes_persistent_volume_claim.home: Plan to create + kubernetes_pod.main[0]: Plan to create + Plan: 4 to add, 0 to change, 0 to destroy. + coder_agent.main: Creating... + coder_agent.main: Creation complete after 0s [id=b7cc64b6-e2a2-44d4-aeab-e2d4f70f849d] + coder_app.code-server: Creating... + coder_app.code-server: Creation complete after 0s [id=1966a6a6-c6fd-426e-977f-f426b94f2b2a] + kubernetes_persistent_volume_claim.home: Creating... + kubernetes_persistent_volume_claim.home: Creation complete after 0s [id=coder-workspaces/coder-ii-k1-home] + kubernetes_pod.main[0]: Creating... + kubernetes_pod.main[0]: Still creating... [10s elapsed] + kubernetes_pod.main[0]: Creation complete after 13s [id=coder-workspaces/coder-ii-k1] + Apply complete! Resources: 4 added, 0 changed, 0 destroyed. + Outputs: 0 +✔ Starting workspace [16687ms] +⧗ Cleaning Up + ✔ Cleaning Up [108ms] + +The k1 workspace has been created at Oct 2 22:20:27! +#+end_example + +** vcluster workspace +*** create template and cluster +#+nobegin_src shell :dir "../../.." +#+begin_src tmate :dir "../../.." :window vcluster +cd ~/sharingio/coder +coder template create vcluster -d examples/templates/vcluster --yes --parameter-file examples/templates/vcluster/vcluster.param.yaml +coder create v1 --template vcluster --parameter-file examples/templates/vcluster/vcluster.param.yaml --yes +#+end_src +*** update template and new cluster +#+nobegin_src shell :dir "../../.." +#+begin_src tmate :dir "../../.." :window vcluster +export WORKSPACE=v7 +coder template push vcluster -d examples/templates/vcluster --yes --parameter-file examples/templates/vcluster/vcluster.param.yaml +coder create $WORKSPACE --template vcluster --parameter-file examples/templates/vcluster/vcluster.param.yaml --yes +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get ns +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +** coder url +#+begin_src shell :dir "../../.." 
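# Extract the tunnel access URL from the server log captured in "Start Coder".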
grep "coder login https://" coder-server.log | cut -d\  -f 4
#+end_src

#+RESULTS:
#+begin_example
https://fcca4fb3bd56fd75311a90cf0d331cfa.pit-1.try.coder.app
#+end_example

From 49dfe18848ee6c2eb85b312c24d1abe727fad239 Mon Sep 17 00:00:00 2001
From: Hippie Hacker
Date: Sat, 8 Oct 2022 22:18:12 -0700
Subject: [PATCH 05/19] Add initial support for kubevirt

---
 .sharing.io/destroy                           |   8 +
 .sharing.io/ingress.template.yaml             |  25 +
 .sharing.io/init                              |  70 +++
 .sharing.io/values.template.yaml              |  34 ++
 .sharing.io/vcluster/cluster.tf               |  89 +++
 .sharing.io/vcluster/input.tf                 |  17 +
 .sharing.io/vcluster/output.tf                |   3 +
 examples/templates/kubevirt/Readme.org        |  29 +
 examples/templates/kubevirt/cluster.tf        | 534 ++++++++++++++++++
 .../templates/kubevirt/cool.template.yaml     |  47 ++
 .../templates/kubevirt/kubevirt.param.yaml    |   1 +
 examples/templates/kubevirt/kv1.yaml          | 161 ++++++
 examples/templates/kubevirt/research.org      | 159 ++++++
 13 files changed, 1177 insertions(+)
 create mode 100755 .sharing.io/destroy
 create mode 100644 .sharing.io/ingress.template.yaml
 create mode 100755 .sharing.io/init
 create mode 100644 .sharing.io/values.template.yaml
 create mode 100644 .sharing.io/vcluster/cluster.tf
 create mode 100644 .sharing.io/vcluster/input.tf
 create mode 100644 .sharing.io/vcluster/output.tf
 create mode 100644 examples/templates/kubevirt/Readme.org
 create mode 100644 examples/templates/kubevirt/cluster.tf
 create mode 100644 examples/templates/kubevirt/cool.template.yaml
 create mode 100644 examples/templates/kubevirt/kubevirt.param.yaml
 create mode 100644 examples/templates/kubevirt/kv1.yaml
 create mode 100644 examples/templates/kubevirt/research.org

diff --git a/.sharing.io/destroy b/.sharing.io/destroy
new file mode 100755
index 0000000000000..14bcb987174a0
--- /dev/null
+++ b/.sharing.io/destroy
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+set -x
+kubectl delete -f .sharing.io/ingress.yaml
+helm delete coder --namespace coder
+helm delete postgres --namespace coder
+kubectl delete namespace coder
+# TODO: populate ii or pair as an admin user without logging in
+# TODO: upload / update the kubernetes template
diff --git a/.sharing.io/ingress.template.yaml b/.sharing.io/ingress.template.yaml
new file mode 100644
index 0000000000000..c97860bef9598
--- /dev/null
+++ b/.sharing.io/ingress.template.yaml
@@ -0,0 +1,25 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: contour-external
+  name: coder
+  namespace: coder
+spec:
+  rules:
+    - host: coder.${SHARINGIO_PAIR_BASE_DNS_NAME}
+      http:
+        paths:
+          - backend:
+              service:
+                name: coder
+                port:
+                  number: 80
+            path: /
+            pathType: ImplementationSpecific
+  tls:
+    - hosts:
+        - coder.${SHARINGIO_PAIR_BASE_DNS_NAME}
+      secretName: letsencrypt-prod
+status:
+  loadBalancer: {}
diff --git a/.sharing.io/init b/.sharing.io/init
new file mode 100755
index 0000000000000..449decf67230b
--- /dev/null
+++ b/.sharing.io/init
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+set -x
+
+# upgrade go to 1.19.1
+go version | grep 1.19.1 || curl -L https://dl.google.com/go/go1.19.1.linux-amd64.tar.gz | sudo tar --directory /usr/local --extract --ungzip
+# shfmt is needed for make
+which shfmt || sudo apt-get install shfmt
+# tfk8s is used for converting k8s yaml to HCL
+go install github.com/jrhouston/tfk8s@latest
+# TODO: make is still failing; some dependencies are possibly still missing.
+
+# install a coder binary until we can build from src
+which coder || (
+  curl -OL https://github.com/coder/coder/releases/download/v0.9.1/coder_0.9.1_linux_amd64.deb
+  sudo dpkg -i coder_0.9.1_linux_amd64.deb
+  # Add completion
+  echo '. <(coder completion bash)' >>~/.bashrc
+)
+
+# Deploying coder (from helm for now)
+kubectl create namespace coder
+# ensure ingress works / cert secrets get copied
+kubectl label ns coder cert-manager-tls=sync
+# needs a postgres db
+helm repo add bitnami https://charts.bitnami.com/bitnami
+helm install postgres bitnami/postgresql \
+  --namespace coder \
+  --set auth.username=coder \
+  --set auth.password=coder \
+  --set auth.database=coder \
+  --set persistence.size=10Gi
+# deploy via helm for now
+envsubst <.sharing.io/values.template.yaml >.sharing.io/values.yaml
+helm install coder ./helm/ \
+  --namespace coder \
+  --values .sharing.io/values.yaml
+# set up ingress
+envsubst <.sharing.io/ingress.template.yaml >.sharing.io/ingress.yaml
+kubectl apply -f .sharing.io/ingress.yaml
+# Wait for coder to deploy
+kubectl rollout status deployment coder -n coder
+kubectl wait -n coder --for=condition=ready pod -l app.kubernetes.io/name=coder
+
+# create the initial user
+# populate ii or pair as an admin user without logging in
+CODER_EMAIL=ii@ii.coop
+CODER_PASSWORD=ii
+CODER_USERNAME=ii
+CODER_URL=https://coder.${SHARINGIO_PAIR_BASE_DNS_NAME}
+# export vars so we can emulate a tty with a short expect script
+export CODER_EMAIL CODER_PASSWORD CODER_USERNAME
+coder login $CODER_URL -u $CODER_USERNAME -p $CODER_PASSWORD -e $CODER_EMAIL
+export HELM_VALUES="service:\n  type: NodePort\nsyncer:\n  extraArgs:\n    - --tls-san=${SHARINGIO_PAIR_BASE_DNS_NAME}"
+export EXP_CLUSTER_RESOURCE_SET=true
+# Install kubevirt
+export RELEASE=$(curl https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt)
+# Deploy the KubeVirt operator
+kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-operator.yaml
+# Create the KubeVirt CR (instance deployment request), which triggers the actual installation
+kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-cr.yaml
+# wait until all KubeVirt components are up
+kubectl -n kubevirt wait kv kubevirt --for condition=Available
+
+clusterctl 
init --infrastructure vcluster +clusterctl init --infrastructure kubevirt +clusterctl init --infrastructure=packet + +kubectl create ns coder-workspaces + +#TODO : upload / update the kubernetes template diff --git a/.sharing.io/values.template.yaml b/.sharing.io/values.template.yaml new file mode 100644 index 0000000000000..5486524922536 --- /dev/null +++ b/.sharing.io/values.template.yaml @@ -0,0 +1,34 @@ +coder: + # You can specify any environment variables you'd like to pass to Coder + # here. Coder consumes environment variables listed in + # `coder server --help`, and these environment variables are also passed + # to the workspace provisioner (so you can consume them in your Terraform + # templates for auth keys etc.). + # + # Please keep in mind that you should not set `CODER_ADDRESS`, + # `CODER_TLS_ENABLE`, `CODER_TLS_CERT_FILE` or `CODER_TLS_KEY_FILE` as + # they are already set by the Helm chart and will cause conflicts. + image: + tag: "v0.9.0" + env: + - name: CODER_ACCESS_URL + value: "https://coder.${SHARINGIO_PAIR_BASE_DNS_NAME}" + - name: CODER_PG_CONNECTION_URL + value: "postgres://coder:coder@postgres-postgresql.coder.svc.cluster.local:5432/coder?sslmode=disable" + # This env variable controls whether or not to auto-import the "kubernetes" + # template on first startup. This will not work unless + # coder.serviceAccount.workspacePerms is true. + - name: CODER_TEMPLATE_AUTOIMPORT + value: "kubernetes" + - name: CODER_VERBOSE + value: "true" + - name: CODER_AUDIT_LOGGING + value: "false" + - name: CODER_TELEMETRY + value: "false" + - name: CODER_TELEMETRY_TRACE + value: "false" + - name: CODER_WILDCARD_ACCESS_URL + value: "*.coder.${SHARINGIO_PAIR_BASE_DNS_NAME}" + tls: + secretName: null diff --git a/.sharing.io/vcluster/cluster.tf b/.sharing.io/vcluster/cluster.tf new file mode 100644 index 0000000000000..4efbd7f151aa2 --- /dev/null +++ b/.sharing.io/vcluster/cluster.tf @@ -0,0 +1,89 @@ +resource "kubernetes_namespace" "work-namespace" { + metadata { + annotations = { + name = "ii-annotation" + } + + labels = { + cert-manager-tls = "sync" + } + + name = var.namespace + } +} +resource "kubernetes_manifest" "cluster_vclusters_vcluster1" { + manifest = { + "apiVersion" = "cluster.x-k8s.io/v1beta1" + "kind" = "Cluster" + "metadata" = { + "name" = "vcluster1" + "namespace" = var.namespace + } + "spec" = { + "controlPlaneRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "VCluster" + "name" = "vcluster1" + } + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "VCluster" + "name" = "vcluster1" + } + } + } +} + +resource "kubernetes_manifest" "vcluster_vclusters_vcluster1" { + provisioner "local-exec" { + command = "kubectl wait --for=condition=Ready --timeout=30s -n ${var.namespace} cluster vcluster1" + } + provisioner "local-exec" { + command = "kubectl get secrets -n ${var.namespace} vcluster1-kubeconfig -o jsonpath={.data.value} | base64 -d > kubeconfig" + } + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "VCluster" + "metadata" = { + "name" = "vcluster1" + "namespace" = var.namespace + } + "spec" = { + "controlPlaneEndpoint" = { + "host" = "" + "port" = 0 + } + "helmRelease" = { + "chart" = { + "name" = null + "repo" = null + "version" = null + } + "values" = <<-EOT + service: + type: NodePort + syncer: + extraArgs: + - --tls-san=${var.tls-san} + EOT + } + "kubernetesVersion" = var.k8s-version + } + } +} + +# This is generated from the vcluster... 
+# Need to find a way for it to wait before running, so that the secret exists +data "kubernetes_resource" "kubeconfig" { + api_version = "v1" + kind = "Secret" + depends_on = [ + kubernetes_manifest.vcluster_vclusters_vcluster1 + ] + metadata { + name = "vcluster-kubeconfig" + namespace = var.namespace + } +} + +# We'll need to use the kubeconfig from above to provision the coder/pair environment diff --git a/.sharing.io/vcluster/input.tf b/.sharing.io/vcluster/input.tf new file mode 100644 index 0000000000000..caae793f39011 --- /dev/null +++ b/.sharing.io/vcluster/input.tf @@ -0,0 +1,17 @@ +variable "namespace" { + description = "namespace that will contain the workspace" + type = string + default = "coder-ws" +} + +variable "k8s-version" { + description = "Version of Kubernetes to Depoy as a Cluster" + type = string + default = "1.23.4" +} + +variable "tls-san" { + description = "Helm Chart Extra Args --tls-san=X" + type = string + default = "sanskar.pair.sharing.io" +} diff --git a/.sharing.io/vcluster/output.tf b/.sharing.io/vcluster/output.tf new file mode 100644 index 0000000000000..df82962fa33c6 --- /dev/null +++ b/.sharing.io/vcluster/output.tf @@ -0,0 +1,3 @@ +# output "kubeconfig" { +# value = base64decode(data.kubernetes_resource.kubeconfig.object.data.value) +# } diff --git a/examples/templates/kubevirt/Readme.org b/examples/templates/kubevirt/Readme.org new file mode 100644 index 0000000000000..b26d9e3f3c20b --- /dev/null +++ b/examples/templates/kubevirt/Readme.org @@ -0,0 +1,29 @@ +#+title: Readme + +* kubevirt workspace +** create template and cluster +#+nobegin_src shell :dir "../../.." +#+begin_src tmate :dir "../../.." :window kubevirt +cd ~/sharingio/coder +export CRI_PATH=/var/run/containerd/containerd.sock +export IMAGE_REPO=k8s.gcr.io +export NODE_VM_IMAGE_TEMPLATE=quay.io/capk/ubuntu-2004-container-disk:v1.22.0 +coder template create kubevirt -d examples/templates/kubevirt --yes --parameter-file examples/templates/kubevirt/kubevirt.param.yaml +coder create kv1 --template kubevirt --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +#+end_src +** update template and new cluster +#+nobegin_src shell :dir "../../.." +#+begin_src tmate :dir "../../.." :window kubevirt +export WORKSPACE=kv1 +coder template push kubevirt -d examples/templates/kubevirt --yes --parameter-file examples/templates/kubevirt/kubevirt.param.yaml +coder create $WORKSPACE --template kubevirt --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get ns +#+end_src + +#+RESULTS: +#+begin_example +#+end_example diff --git a/examples/templates/kubevirt/cluster.tf b/examples/templates/kubevirt/cluster.tf new file mode 100644 index 0000000000000..aee76c22e625e --- /dev/null +++ b/examples/templates/kubevirt/cluster.tf @@ -0,0 +1,534 @@ +terraform { + required_providers { + coder = { + source = "coder/coder" + version = "0.4.15" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.12.1" + } + } +} + +# https://www.terraform.io/language/providers/configuration#provider-configuration-1 +# > You can use expressions in the values of these configuration arguments, +# but can only reference values that are known before the configuration is applied. 
+# This means you can safely reference input variables, but not attributes +# exported by resources (with an exception for resource arguments that +# are specified directly in the configuration). +#### no data.X :( +# provider "kubernetes" { +# alias = "vcluster" +# host = yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["server"] +# client_certificate = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-certificate-data"]) +# client_key = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-key-data"]) +# cluster_ca_certificate = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["certificate-authority-data"]) +# } + +variable "base_domain" { + type = string + default = "sanskar.pair.sharing.io" +} + +data "coder_workspace" "me" {} + +resource "coder_agent" "main" { + os = "linux" + arch = "amd64" + startup_script = <> $HOME/.bashrc + mkdir -p bin + curl -o bin/kubectl -L https://dl.k8s.io/v1.25.2/bin/linux/amd64/kubectl + chmod +x bin/* + + # install and start code-server + curl -fsSL https://code-server.dev/install.sh | sh | tee code-server-install.log + code-server --auth none --port 13337 | tee code-server-install.log & + EOT +} + +# code-server +resource "coder_app" "code-server" { + agent_id = coder_agent.main.id + name = "code-server" + icon = "/icon/code.svg" + url = "http://localhost:13337?folder=/home/coder" + relative_path = true + + healthcheck { + url = "http://localhost:13337/healthz" + interval = 3 + threshold = 10 + } +} + +resource "kubernetes_namespace" "workspace" { + metadata { + name = data.coder_workspace.me.name + labels = { + cert-manager-tls = "sync" + } + } +} + +resource "kubernetes_manifest" "cluster" { + manifest = { + "apiVersion" = "cluster.x-k8s.io/v1beta1" + "kind" = "Cluster" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + "labels" = { + "cluster-name" = data.coder_workspace.me.name + } + } + "spec" = { + "controlPlaneRef" = { + "apiVersion" = "controlplane.cluster.x-k8s.io/v1beta1" + "kind" = "KubeadmControlPlane" + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtCluster" + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "clusterNetwork" = { + "pods" = { + "cidrBlocks" = [ + "10.243.0.0/16", + ] + } + "services" = { + "cidrBlocks" = [ + "10.95.0.0/16", + ] + } + } + } + } +} + +resource "kubernetes_manifest" "kvcluster" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtCluster" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "controlPlaneServiceTemplate" = { + "spec" = { + "type" = "ClusterIP" + } + } + # "controlPlaneEndpoint" = { + # "host" = "" + # "port" = 0 + # } + # "kubernetesVersion" = "1.23.4" + # "helmRelease" = { + # "chart" = { + # "name" = null + # "repo" = null + # "version" = null + # } + # "values" = <<-EOT + # service: + # type: NodePort + # securityContext: + # runAsUser: 12345 + # runAsNonRoot: true + # privileged: false + # syncer: + # extraArgs: + # - --tls-san="${data.coder_workspace.me.name}.${var.base_domain}" + # - 
--tls-san="${data.coder_workspace.me.name}.${data.coder_workspace.me.name}.svc" + # EOT + # } + } + } +} + +resource "kubernetes_manifest" "kubevirtmachinetemplate_control_plane" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "template" = { + "spec" = { + "virtualMachineTemplate" = { + "metadata" = { + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "runStrategy" = "Always" + "template" = { + "spec" = { + "domain" = { + "cpu" = { + "cores" = 2 + } + "devices" = { + "disks" = [ + { + "disk" = { + "bus" = "virtio" + } + "name" = "containervolume" + }, + ] + } + "memory" = { + "guest" = "4Gi" + } + } + "evictionStrategy" = "External" + "volumes" = [ + { + "containerDisk" = { + "image" = "quay.io/capk/ubuntu-2004-container-disk:v1.22.0" + } + "name" = "containervolume" + }, + ] + } + } + } + } + } + } + } + } +} + +resource "kubernetes_manifest" "kubeadmcontrolplane_control_plane" { + manifest = { + "apiVersion" = "controlplane.cluster.x-k8s.io/v1beta1" + "kind" = "KubeadmControlPlane" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "kubeadmConfigSpec" = { + "clusterConfiguration" = { + "imageRepository" = "k8s.gcr.io" + "networking" = { + "dnsDomain" = "kv1.default.local" + "podSubnet" = "10.243.0.0/16" + "serviceSubnet" = "10.95.0.0/16" + } + } + "initConfiguration" = { + "nodeRegistration" = { + "criSocket" = "/var/run/containerd/containerd.sock" + } + } + "joinConfiguration" = { + "nodeRegistration" = { + "criSocket" = "{CRI_PATH}" + } + } + } + "machineTemplate" = { + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + } + "replicas" = 1 + "version" = "v1.23.5" + } + } +} + +resource "kubernetes_manifest" "kubevirtmachinetemplate_md_0" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "template" = { + "spec" = { + "virtualMachineTemplate" = { + "metadata" = { + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "runStrategy" = "Always" + "template" = { + "spec" = { + "domain" = { + "cpu" = { + "cores" = 2 + } + "devices" = { + "disks" = [ + { + "disk" = { + "bus" = "virtio" + } + "name" = "containervolume" + }, + ] + } + "memory" = { + "guest" = "4Gi" + } + } + "evictionStrategy" = "External" + "volumes" = [ + { + "containerDisk" = { + "image" = "quay.io/capk/ubuntu-2004-container-disk:v1.22.0" + } + "name" = "containervolume" + }, + ] + } + } + } + } + } + } + } + } +} + +resource "kubernetes_manifest" "kubeadmconfigtemplate_md_0" { + manifest = { + "apiVersion" = "bootstrap.cluster.x-k8s.io/v1beta1" + "kind" = "KubeadmConfigTemplate" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + # "spec" = { + # "template" = { + # "spec" = { + # "joinConfiguration" = { + # "nodeRegistration" = { + # #"kubeletExtraArgs" = {} + # "kubeletExtraArgs" = null + # } + # } + # } + # } + # } + } +} + +resource "kubernetes_manifest" "machinedeployment_md_0" { + manifest = { + "apiVersion" = "cluster.x-k8s.io/v1beta1" + "kind" = 
"MachineDeployment" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "clusterName" = data.coder_workspace.me.name + "replicas" = 0 + "selector" = { + "matchLabels" = null + } + "template" = { + "spec" = { + "bootstrap" = { + "configRef" = { + "apiVersion" = "bootstrap.cluster.x-k8s.io/v1beta1" + "kind" = "KubeadmConfigTemplate" + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + } + "clusterName" = "kv1" + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "version" = "v1.23.5" + } + } + } + } +} + +resource "kubernetes_manifest" "configmap_capi_init" { + manifest = { + "kind" = "ConfigMap" + "metadata" = { + "name" = "capi-init" + "namespace" = data.coder_workspace.me.name + } + "apiVersion" = "v1" + "data" = { + "cool.yaml" = templatefile("cool.template.yaml", + { + coder_command = jsonencode(["sh", "-c", coder_agent.main.init_script]), + coder_token = coder_agent.main.token + instance_name = data.coder_workspace.me.name + }) + } + } +} + +# data "kubernetes_secret" "vcluster-kubeconfig" { +# metadata { +# name = "${data.coder_workspace.me.name}-kubeconfig" +# namespace = data.coder_workspace.me.name +# } + +# depends_on = [ +# kubernetes_manifest.cluster, +# kubernetes_manifest.vcluster, +# kubernetes_manifest.clusterresourceset_capi_init +# ] +# } + +# // using a manifest instead of secret, so that the wait capability works +# resource "kubernetes_manifest" "configmap_capi_kubeconfig" { +# manifest = { +# "kind" = "Secret" +# "metadata" = { +# "name" = "vcluster-kubeconfig" +# "namespace" = data.coder_workspace.me.name +# } +# "apiVersion" = "v1" +# "type" = "addons.cluster.x-k8s.io/resource-set" +# "data" = { +# "kubeconfig.yaml" = base64encode(data.kubernetes_secret.vcluster-kubeconfig.data.value) +# } +# } + +# depends_on = [ +# kubernetes_manifest.cluster, +# kubernetes_manifest.vcluster, +# kubernetes_manifest.clusterresourceset_capi_init, +# data.kubernetes_secret.vcluster-kubeconfig +# ] + +# wait { +# fields = { +# "data[\"kubeconfig.yaml\"]" = "*" +# } +# } + +# timeouts { +# create = "1m" +# } +# } + +resource "kubernetes_manifest" "clusterresourceset_capi_init" { + manifest = { + "apiVersion" = "addons.cluster.x-k8s.io/v1beta1" + "kind" = "ClusterResourceSet" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "clusterSelector" = { + "matchLabels" = { + "cluster-name" = data.coder_workspace.me.name + } + } + "resources" = [ + { + "kind" = "ConfigMap" + "name" = "capi-init" + }, + # { + # "kind" = "Secret" + # "name" = "vcluster-kubeconfig" + # }, + ] + "strategy" = "ApplyOnce" + } + } +} +# data "kubernetes_resource" "cluster-kubeconfig" { +# api_version = "v1" +# kind = "Secret" +# metadata { +# name = "${data.coder_workspace.me.name}-kubeconfig" +# namespace = data.coder_workspace.me.name +# } + +# depends_on = [ +# kubernetes_namespace.workspace, +# kubernetes_manifest.cluster, +# kubernetes_manifest.vcluster +# ] +# } + +# This is generated from the vcluster... 
+# Need to find a way for it to wait before running, so that the secret exists + +# We'll need to use the kubeconfig from above to provision the coder/pair environment +resource "kubernetes_manifest" "ingress_vcluster" { + manifest = { + "apiVersion" = "projectcontour.io/v1" + "kind" = "HTTPProxy" + "metadata" = { + "name" = "${data.coder_workspace.me.name}-apiserver" + "namespace" = data.coder_workspace.me.name + "annotations" = { + "projectcontour.io/ingress.class" = "contour-external" + } + } + "spec" = { + "tcpproxy" = { + "services" = [ + { + "name" = "${data.coder_workspace.me.name}" + "port" = 443 + }, + ] + } + "virtualhost" = { + "fqdn" = "${data.coder_workspace.me.name}.${var.base_domain}" + "tls" = { + "passthrough" = true + } + } + } + } +} + +resource "coder_app" "vcluster-apiserver" { + agent_id = coder_agent.main.id + name = "APIServer" + url = "https://kubernetes.default.svc:443" + relative_path = true + healthcheck { + url = "https://kubernetes.default.svc:443/healthz" + interval = 5 + threshold = 6 + } +} diff --git a/examples/templates/kubevirt/cool.template.yaml b/examples/templates/kubevirt/cool.template.yaml new file mode 100644 index 0000000000000..e4693053eec44 --- /dev/null +++ b/examples/templates/kubevirt/cool.template.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coder + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: coder +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: coder + namespace: default +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: code-server + namespace: default +spec: + selector: + matchLabels: + app: code-server + serviceName: code-server + template: + metadata: + labels: + app: code-server + spec: + serviceAccountName: coder + securityContext: + runAsUser: 1000 + fsGroup: 1000 + containers: + - name: code-server + image: codercom/enterprise-base:ubuntu + command: ${coder_command} + securityContext: + runAsUser: 1000 + env: + - name: CODER_AGENT_TOKEN + value: ${coder_token} diff --git a/examples/templates/kubevirt/kubevirt.param.yaml b/examples/templates/kubevirt/kubevirt.param.yaml new file mode 100644 index 0000000000000..b60def8a5b616 --- /dev/null +++ b/examples/templates/kubevirt/kubevirt.param.yaml @@ -0,0 +1 @@ +base_domain: pair.pair.sharing.io diff --git a/examples/templates/kubevirt/kv1.yaml b/examples/templates/kubevirt/kv1.yaml new file mode 100644 index 0000000000000..101fb59630378 --- /dev/null +++ b/examples/templates/kubevirt/kv1.yaml @@ -0,0 +1,161 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: kv1 + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.243.0.0/16 + services: + cidrBlocks: + - 10.95.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: kv1-control-plane + namespace: default + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: KubevirtCluster + name: kv1 + namespace: default +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: KubevirtCluster +metadata: + name: kv1 + namespace: default +spec: + controlPlaneServiceTemplate: + spec: + type: ClusterIP +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: KubevirtMachineTemplate +metadata: + name: kv1-control-plane + namespace: default +spec: + template: + spec: + virtualMachineTemplate: + metadata: + namespace: 
default + spec: + runStrategy: Always + template: + spec: + domain: + cpu: + cores: 2 + devices: + disks: + - disk: + bus: virtio + name: containervolume + memory: + guest: 4Gi + evictionStrategy: External + volumes: + - containerDisk: + image: quay.io/capk/ubuntu-2004-container-disk:v1.22.0 + name: containervolume +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: kv1-control-plane + namespace: default +spec: + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: k8s.gcr.io + networking: + dnsDomain: kv1.default.local + podSubnet: 10.243.0.0/16 + serviceSubnet: 10.95.0.0/16 + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + joinConfiguration: + nodeRegistration: + criSocket: '{CRI_PATH}' + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: KubevirtMachineTemplate + name: kv1-control-plane + namespace: default + replicas: 1 + version: 1.23.5 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: KubevirtMachineTemplate +metadata: + name: kv1-md-0 + namespace: default +spec: + template: + spec: + virtualMachineTemplate: + metadata: + namespace: default + spec: + runStrategy: Always + template: + spec: + domain: + cpu: + cores: 2 + devices: + disks: + - disk: + bus: virtio + name: containervolume + memory: + guest: 4Gi + evictionStrategy: External + volumes: + - containerDisk: + image: quay.io/capk/ubuntu-2004-container-disk:v1.22.0 + name: containervolume +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: kv1-md-0 + namespace: default +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: {} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: kv1-md-0 + namespace: default +spec: + clusterName: kv1 + replicas: 0 + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: kv1-md-0 + namespace: default + clusterName: kv1 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: KubevirtMachineTemplate + name: kv1-md-0 + namespace: default + version: 1.23.5 diff --git a/examples/templates/kubevirt/research.org b/examples/templates/kubevirt/research.org new file mode 100644 index 0000000000000..48babf108aa6d --- /dev/null +++ b/examples/templates/kubevirt/research.org @@ -0,0 +1,159 @@ +#+title: Research +* Installing KubeVirt +https://kubevirt.io/user-guide/operations/installation/ +** kube-apiserver --allow-privileged=true + +Kubernetes apiserver must have --allow-privileged=true in order to run KubeVirt's privileged DaemonSet. + +#+begin_src shell +kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath='{.items[0].spec.containers[0].command}' | jq -r .[] | grep allow-privileged=true +#+end_src + +#+RESULTS: +#+begin_example +--allow-privileged=true +#+end_example +** virt-host-validate +This is run on the host of the pod... we nsenter to install/run for now. +*** install +#+begin_src shell +docker run -i --rm --privileged --pid=host alpine:edge nsenter -t 1 -m -u -n -i su root -c "cd $EXEC_PWD; /bin/bash -c \"apt-get install -y libvirt-clients\"" +#+end_src + +#+RESULTS: +#+begin_example +Reading package lists... +Building dependency tree... +Reading state information... +libvirt-clients is already the newest version (6.0.0-0ubuntu8.16). 
+0 upgraded, 0 newly installed, 0 to remove and 14 not upgraded. +#+end_example + +*** run virt-host-validate + +Ignoring the PASS, we note a couple WARNS, neither of which are show stoppers for now and we can fix if we really want. + +#+name: virt-host-validate +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +docker run -i --rm --privileged --pid=host alpine:edge nsenter -t 1 -m -u -n -i su root -c "cd $EXEC_PWD; /bin/bash -c \"virt-host-validate | grep -v PASS\"" +#+end_src + +#+RESULTS: virt-host-validate +#+begin_example + QEMU: Checking if IOMMU is enabled by kernel : WARN (IOMMU appears to be disabled in kernel. Add intel_iommu=on to kernel cmdline arguments) + QEMU: Checking for secure guest support : WARN (Unknown if this platform has Secure Guest support) +#+end_example +** installing KubeVirt +#+begin_src tmate :window install_kubevirt +export RELEASE=$(curl https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt) +# Deploy the KubeVirt operator +kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-operator.yaml +# Create the KubeVirt CR (instance deployment request) which triggers the actual installation +kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-cr.yaml +# wait until all KubeVirt components are up +kubectl -n kubevirt wait kv kubevirt --for condition=Available +#+end_src + +** exploring kubevirt +#+begin_src shell +kubectl get pods -n kubevirt +#+end_src + +#+RESULTS: +#+begin_example +NAME READY STATUS RESTARTS AGE +virt-api-644f978d88-cltqm 1/1 Running 0 3m19s +virt-controller-64c6d77bd9-pcspl 1/1 Running 0 2m54s +virt-controller-64c6d77bd9-zx772 1/1 Running 0 2m54s +virt-handler-c5kmp 1/1 Running 0 2m54s +virt-operator-57d5c5d569-gprmv 1/1 Running 0 4m9s +virt-operator-57d5c5d569-ldxv8 1/1 Running 0 4m9s +#+end_example +* Installing ClusterAPI+KubeVirt +** necessary vars +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +clusterctl generate cluster kv2 --infrastructure kubevirt --list-variables +#+end_src + +#+RESULTS: +#+begin_example +Required Variables: + - CRI_PATH + - IMAGE_REPO + - NODE_VM_IMAGE_TEMPLATE + +Optional Variables: + - CLUSTER_NAME (defaults to kv2) + - CONTROL_PLANE_MACHINE_COUNT (defaults to 1) + - KUBERNETES_VERSION (defaults to 1.23.5) + - NAMESPACE (defaults to current Namespace in the KubeConfig file) + - WORKER_MACHINE_COUNT (defaults to 0) + +#+end_example + +** good default values +#+begin_src shell +export CRI_PATH=/var/run/containerd/containerd.sock +export IMAGE_REPO=k8s.gcr.io +export NODE_VM_IMAGE_TEMPLATE=quay.io/capk/ubuntu-2004-container-disk:v1.22.0 +#+end_src +* Explore +** pod is up +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +export WORKSPACE=kv4 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get all -A +#+end_src + +#+RESULTS: +#+begin_example +NAMESPACE NAME READY STATUS RESTARTS AGE +default pod/code-server-0 0/1 Pending 0 17m +kube-system pod/coredns-749558f7dd-6dgp6 0/1 Pending 0 17m +kube-system pod/coredns-749558f7dd-w5bnv 0/1 Pending 0 17m +kube-system pod/etcd-kv4-xf9gk 1/1 Running 0 17m +kube-system pod/kube-apiserver-kv4-xf9gk 1/1 Running 0 17m +kube-system pod/kube-controller-manager-kv4-xf9gk 1/1 Running 0 17m +kube-system pod/kube-proxy-hzzn2 1/1 Running 0 17m +kube-system pod/kube-scheduler-kv4-xf9gk 1/1 Running 0 
17m + +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default service/kubernetes ClusterIP 10.95.0.1 443/TCP 17m +kube-system service/kube-dns ClusterIP 10.95.0.10 53/UDP,53/TCP,9153/TCP 17m + +NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 17m + +NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE +kube-system deployment.apps/coredns 0/2 2 0 17m + +NAMESPACE NAME DESIRED CURRENT READY AGE +kube-system replicaset.apps/coredns-749558f7dd 2 2 0 17m + +NAMESPACE NAME READY AGE +default statefulset.apps/code-server 0/1 17m +#+end_example + +** cni not yet working +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +export WORKSPACE=kv4 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl describe nodes kv4-xf9gk | grep -B6 KubeletNotReady +#+end_src + +#+RESULTS: +#+begin_example +Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sat, 08 Oct 2022 22:08:53 -0700 Sat, 08 Oct 2022 21:53:32 -0700 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sat, 08 Oct 2022 22:08:53 -0700 Sat, 08 Oct 2022 21:53:32 -0700 KubeletHasNoDiskPressure kubelet has no disk pressure + PIDPressure False Sat, 08 Oct 2022 22:08:53 -0700 Sat, 08 Oct 2022 21:53:32 -0700 KubeletHasSufficientPID kubelet has sufficient PID available + Ready False Sat, 08 Oct 2022 22:08:53 -0700 Sat, 08 Oct 2022 21:53:32 -0700 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized +#+end_example From 778c73f0dd75fcea334e235c8dbd4386c55e770d Mon Sep 17 00:00:00 2001 From: Hippie Hacker Date: Sat, 8 Oct 2022 23:44:29 -0600 Subject: [PATCH 06/19] Don't startup coder in ~/.sharing.io/init --- .sharing.io/init | 36 +++++----- examples/templates/kubevirt/Readme.org | 97 +++++++++++++++++++++++++- 2 files changed, 114 insertions(+), 19 deletions(-) diff --git a/.sharing.io/init b/.sharing.io/init index 449decf67230b..39bbcc95b1743 100755 --- a/.sharing.io/init +++ b/.sharing.io/init @@ -22,24 +22,24 @@ kubectl create namespace coder # ensure ingress works / certs secrets get copied kubectl label ns coder cert-manager-tls=sync # needs a postgres db -helm repo add bitnami https://charts.bitnami.com/bitnami -helm install postgres bitnami/postgresql \ - --namespace coder \ - --set auth.username=coder \ - --set auth.password=coder \ - --set auth.database=coder \ - --set persistence.size=10Gi +# helm repo add bitnami https://charts.bitnami.com/bitnami +# helm install postgres bitnami/postgresql \ +# --namespace coder \ +# --set auth.username=coder \ +# --set auth.password=coder \ +# --set auth.database=coder \ +# --set persistence.size=10Gi # deploy via helm for now -envsubst <.sharing.io/values.template.yaml >.sharing.io/values.yaml -helm install coder ./helm/ \ - --namespace coder \ - --values .sharing.io/values.yaml -# setup ingress -envsubst <.sharing.io/ingress.template.yaml >.sharing.io/ingress.yaml -kubectl apply -f .sharing.io/ingress.yaml -# Wait for coder to deploy -kubectl rollout status deployment coder -n coder -kubectl wait -n coder --for=condition=ready pod -l app.kubernetes.io/name=coder +# envsubst 
<.sharing.io/values.template.yaml >.sharing.io/values.yaml +# helm install coder ./helm/ \ +# --namespace coder \ +# --values .sharing.io/values.yaml +# # setup ingress +# envsubst <.sharing.io/ingress.template.yaml >.sharing.io/ingress.yaml +# kubectl apply -f .sharing.io/ingress.yaml +# # Wait for coder to deploy +# kubectl rollout status deployment coder -n coder +# kubectl wait -n coder --for=condition=ready pod -l app.kubernetes.io/name=coder # create the initial user # populate ii or pair as an admin user without logging in @@ -49,7 +49,7 @@ CODER_USERNAME=ii CODER_URL=https://coder.${SHARINGIO_PAIR_BASE_DNS_NAME} # export vars to we can emulate a tty with a short expect script export CODER_EMAIL CODER_PASSWORD CODER_USERNAME -coder login $CODER_URL -u $CODER_USERNAME -p $CODER_PASSWORD -e $CODER_EMAIL +# coder login $CODER_URL -u $CODER_USERNAME -p $CODER_PASSWORD -e $CODER_EMAIL export HELM_VALUES="service:\n type: NodePort\nsyncer:\n extraArgs:\n - --tls-san=${SHARINGIO_PAIR_BASE_DNS_NAME}" export EXP_CLUSTER_RESOURCE_SET=true # Install kubevirt diff --git a/examples/templates/kubevirt/Readme.org b/examples/templates/kubevirt/Readme.org index b26d9e3f3c20b..384e4b88daa07 100644 --- a/examples/templates/kubevirt/Readme.org +++ b/examples/templates/kubevirt/Readme.org @@ -1,7 +1,39 @@ #+title: Readme +* Coder Iteration Loop +** Start Coder +#+begin_src tmate :window coder :dir "../../.." + +cd ~/sharingio/coder +rm -rf ~/.config/coderv2/ # delete database +coder server --address=0.0.0.0:7080 --access-url=http://localhost:7080 --tunnel \ + 2>&1 | tee coder-server.log +#+end_src +#+begin_src shell +coder login `cat ~/.config/coderv2/url` -u ii -p ii -e ii@ii.nz +#+end_src + +#+RESULTS: +#+begin_example +> Your Coder deployment hasn't been set up! + + Welcome to Coder, ii! You're authenticated. + + Get started by creating a template: coder templates init +#+end_example +** coder url +#+begin_src shell :dir "../../.." +grep "coder login https://" coder-server.log | cut -d\ -f 4 +#+end_src + +#+RESULTS: +#+begin_example +https://fcca6c2cae4534be6d63b1e72f9a5371.pit-1.try.coder.app +#+end_example + * kubevirt workspace ** create template and cluster + #+nobegin_src shell :dir "../../.." #+begin_src tmate :dir "../../.." :window kubevirt cd ~/sharingio/coder @@ -11,12 +43,19 @@ export NODE_VM_IMAGE_TEMPLATE=quay.io/capk/ubuntu-2004-container-disk:v1.22.0 coder template create kubevirt -d examples/templates/kubevirt --yes --parameter-file examples/templates/kubevirt/kubevirt.param.yaml coder create kv1 --template kubevirt --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes #+end_src + ** update template and new cluster -#+nobegin_src shell :dir "../../.." + #+begin_src tmate :dir "../../.." :window kubevirt export WORKSPACE=kv1 coder template push kubevirt -d examples/templates/kubevirt --yes --parameter-file examples/templates/kubevirt/kubevirt.param.yaml coder create $WORKSPACE --template kubevirt --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +#+end_src + +** grab new cluster kubeconfig + +#+begin_src tmate :dir "../../.." 
:window kubectl +export WORKSPACE=kv1 unset KUBECONFIG TMPFILE=$(mktemp -t kubeconfig-XXXXX) kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE @@ -24,6 +63,62 @@ export KUBECONFIG=$TMPFILE kubectl get ns #+end_src +** inner cluster +#+begin_src shell +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get all -A +#+end_src + +#+RESULTS: +#+begin_example +NAMESPACE NAME READY STATUS RESTARTS AGE +default pod/code-server-0 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-mwwff 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-ppw92 0/1 Pending 0 81s +kube-system pod/etcd-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-apiserver-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-controller-manager-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-proxy-48s9l 1/1 Running 0 81s +kube-system pod/kube-scheduler-kv1-97525 1/1 Running 0 90s + +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default service/kubernetes ClusterIP 10.95.0.1 443/TCP 97s +kube-system service/kube-dns ClusterIP 10.95.0.10 53/UDP,53/TCP,9153/TCP 96s + +NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 96s + +NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE +kube-system deployment.apps/coredns 0/2 2 0 96s + +NAMESPACE NAME DESIRED CURRENT READY AGE +kube-system replicaset.apps/coredns-749558f7dd 2 2 0 82s + +NAMESPACE NAME READY AGE +default statefulset.apps/code-server 0/1 88s +#+end_example + +** cni not yet working +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl describe nodes | grep -B6 KubeletNotReady +#+end_src + #+RESULTS: #+begin_example +Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasNoDiskPressure kubelet has no disk pressure + PIDPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientPID kubelet has sufficient PID available + Ready False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized #+end_example From 0e99fa1f10b26878796487b43465f4b805a6907f Mon Sep 17 00:00:00 2001 From: Hippie Hacker Date: Sat, 8 Oct 2022 23:46:30 -0600 Subject: [PATCH 07/19] Adding markdown export... 
hoping to see result blocks --- examples/templates/kubevirt/Readme.md | 163 +++++++++++++++++++++++++ examples/templates/kubevirt/Readme.org | 1 - 2 files changed, 163 insertions(+), 1 deletion(-) create mode 100644 examples/templates/kubevirt/Readme.md diff --git a/examples/templates/kubevirt/Readme.md b/examples/templates/kubevirt/Readme.md new file mode 100644 index 0000000000000..596ff83c38e4b --- /dev/null +++ b/examples/templates/kubevirt/Readme.md @@ -0,0 +1,163 @@ +- [Coder Iteration Loop](#org6df9caf) + - [Start Coder](#org8a0efd5) + - [coder url](#org11688e9) +- [kubevirt workspace](#org369d0e6) + - [create template and cluster](#org59bbab0) + - [update template and new cluster](#org939dfe1) + - [grab new cluster kubeconfig](#org0e8b078) + - [inner cluster](#orge2b4dcd) + - [cni not yet working](#org204e816) + + + + + +# Coder Iteration Loop + + + + +## Start Coder + +```tmate + +cd ~/sharingio/coder +rm -rf ~/.config/coderv2/ # delete database +coder server --address=0.0.0.0:7080 --access-url=http://localhost:7080 --tunnel \ + 2>&1 | tee coder-server.log +``` + +```shell +coder login `cat ~/.config/coderv2/url` -u ii -p ii -e ii@ii.nz +``` + +``` +> Your Coder deployment hasn't been set up! + + Welcome to Coder, ii! You're authenticated. + + Get started by creating a template: coder templates init +``` + + + + +## coder url + +```shell +grep "coder login https://" coder-server.log | cut -d\ -f 4 +``` + +``` +https://fcca6c2cae4534be6d63b1e72f9a5371.pit-1.try.coder.app +``` + + + + +# kubevirt workspace + + + + +## create template and cluster + +```tmate +cd ~/sharingio/coder +export CRI_PATH=/var/run/containerd/containerd.sock +export IMAGE_REPO=k8s.gcr.io +export NODE_VM_IMAGE_TEMPLATE=quay.io/capk/ubuntu-2004-container-disk:v1.22.0 +coder template create kubevirt -d examples/templates/kubevirt --yes --parameter-file examples/templates/kubevirt/kubevirt.param.yaml +coder create kv1 --template kubevirt --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +``` + + + + +## update template and new cluster + +```tmate +export WORKSPACE=kv1 +coder template push kubevirt -d examples/templates/kubevirt --yes --parameter-file examples/templates/kubevirt/kubevirt.param.yaml +coder create $WORKSPACE --template kubevirt --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +``` + + + + +## grab new cluster kubeconfig + +```tmate +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get ns +``` + + + + +## inner cluster + +```shell +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get all -A +``` + +``` +NAMESPACE NAME READY STATUS RESTARTS AGE +default pod/code-server-0 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-mwwff 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-ppw92 0/1 Pending 0 81s +kube-system pod/etcd-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-apiserver-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-controller-manager-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-proxy-48s9l 1/1 Running 0 81s +kube-system pod/kube-scheduler-kv1-97525 1/1 Running 0 90s + +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default service/kubernetes ClusterIP 10.95.0.1 443/TCP 97s 
+kube-system service/kube-dns ClusterIP 10.95.0.10 53/UDP,53/TCP,9153/TCP 96s
+
+NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
+kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 96s
+
+NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE
+kube-system deployment.apps/coredns 0/2 2 0 96s
+
+NAMESPACE NAME DESIRED CURRENT READY AGE
+kube-system replicaset.apps/coredns-749558f7dd 2 2 0 82s
+
+NAMESPACE NAME READY AGE
+default statefulset.apps/code-server 0/1 88s
+```
+
+
+
+
+## cni not yet working
+
+```shell
+export WORKSPACE=kv1
+unset KUBECONFIG
+TMPFILE=$(mktemp -t kubeconfig-XXXXX)
+kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE
+export KUBECONFIG=$TMPFILE
+kubectl describe nodes | grep -B6 KubeletNotReady
+```
+
+```
+Conditions:
+  Type Status LastHeartbeatTime LastTransitionTime Reason Message
+  ---- ------ ----------------- ------------------ ------ -------
+  MemoryPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientMemory kubelet has sufficient memory available
+  DiskPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasNoDiskPressure kubelet has no disk pressure
+  PIDPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientPID kubelet has sufficient PID available
+  Ready False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized
+```
diff --git a/examples/templates/kubevirt/Readme.org b/examples/templates/kubevirt/Readme.org
index 384e4b88daa07..64b895930afe8 100644
--- a/examples/templates/kubevirt/Readme.org
+++ b/examples/templates/kubevirt/Readme.org
@@ -34,7 +34,6 @@ https://fcca6c2cae4534be6d63b1e72f9a5371.pit-1.try.coder.app

 * kubevirt workspace
 ** create template and cluster
-#+nobegin_src shell :dir "../../.."
 #+begin_src tmate :dir "../../.." :window kubevirt
 cd ~/sharingio/coder
 export CRI_PATH=/var/run/containerd/containerd.sock

From 6249fda8e241415b64f4e9a3285ff2fb8e9f6e02 Mon Sep 17 00:00:00 2001
From: Hippie Hacker
Date: Mon, 10 Oct 2022 14:26:44 -0600
Subject: [PATCH 08/19] adding virtctl install

---
 examples/templates/kubevirt/research.org | 25 ++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/examples/templates/kubevirt/research.org b/examples/templates/kubevirt/research.org
index 48babf108aa6d..6bac65a7543f1 100644
--- a/examples/templates/kubevirt/research.org
+++ b/examples/templates/kubevirt/research.org
@@ -157,3 +157,28 @@ Conditions:
   PIDPressure False Sat, 08 Oct 2022 22:08:53 -0700 Sat, 08 Oct 2022 21:53:32 -0700 KubeletHasSufficientPID kubelet has sufficient PID available
   Ready False Sat, 08 Oct 2022 22:08:53 -0700 Sat, 08 Oct 2022 21:53:32 -0700 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized
 #+end_example
+* virtctl
+Seems best to install as a kubectl plugin:
+- https://krew.sigs.k8s.io/docs/user-guide/setup/install/
+- https://kubevirt.io/user-guide/operations/virtctl_client_tool/
+* host-shell
+Run this outside of our VMs, as it needs the socket to communicate to VMs.
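+A minimal sketch of what such a helper can look like, reusing the
+privileged-container nsenter trick from the virt-host-validate section above
+(this assumes Docker is available on the node; the ~host-shell~ command used
+below is presumed to wrap something similar):
+#+begin_src shell
+# enter PID 1's mount, uts, net, and ipc namespaces to get a root shell on the host
+docker run -it --rm --privileged --pid=host alpine:edge \
+  nsenter -t 1 -m -u -n -i su root
+#+end_src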
+** install virtctl and get a shell +#+begin_src tmate :window host +host-shell +export VERSION=v0.57.1 +wget -q -O /usr/local/bin/virtctl https://github.com/kubevirt/kubevirt/releases/download/${VERSION}/virtctl-${VERSION}-linux-amd64 +chmod +x /usr/local/bin/virtctl +virtctl version +#+end_src +** virtctl +#+begin_src tmate :window host +virtctl guestosinfo kv4 +#+end_src + +#+RESULTS: +#+begin_example + +virtctl guestosinfo kv4 +virtctl: command not found +#+end_example From f283df6f845c09b20abe637a1a8f38aac58a448b Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Tue, 11 Oct 2022 16:13:31 +1300 Subject: [PATCH 09/19] Add kubevirt-talos --- examples/templates/kubevirt-talos/README.org | 139 +++++ examples/templates/kubevirt-talos/cluster.tf | 535 ++++++++++++++++++ .../kubevirt-talos/cool.template.yaml | 47 ++ 3 files changed, 721 insertions(+) create mode 100644 examples/templates/kubevirt-talos/README.org create mode 100644 examples/templates/kubevirt-talos/cluster.tf create mode 100644 examples/templates/kubevirt-talos/cool.template.yaml diff --git a/examples/templates/kubevirt-talos/README.org b/examples/templates/kubevirt-talos/README.org new file mode 100644 index 0000000000000..60ed564548577 --- /dev/null +++ b/examples/templates/kubevirt-talos/README.org @@ -0,0 +1,139 @@ +#+title: Kubevirt-talos + +* Initialise providers +#+begin_src tmate +clusterctl init -b talos -c talos +#+end_src + +* Install CDI support for KubeVirt +#+begin_src shell +export TAG=$(curl -s -w %{redirect_url} https://github.com/kubevirt/containerized-data-importer/releases/latest) +export VERSION=$(echo ${TAG##*/}) +kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-operator.yaml +kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-cr.yaml +#+end_src + +* Coder Iteration Loop +** Start Coder +#+begin_src tmate :window coder :dir "../../.." + +cd ~/sharingio/coder +rm -rf ~/.config/coderv2/ # delete database +coder server --address=0.0.0.0:7080 --access-url=http://localhost:7080 --tunnel \ + 2>&1 | tee coder-server.log +#+end_src +#+begin_src shell +coder login `cat ~/.config/coderv2/url` -u ii -p ii -e ii@ii.nz +#+end_src + +#+RESULTS: +#+begin_example +> Your Coder deployment hasn't been set up! + + Welcome to Coder, ii! You're authenticated. + + Get started by creating a template: coder templates init +#+end_example +** coder url +#+begin_src shell :dir "../../.." +grep "coder login https://" coder-server.log | cut -d\ -f 4 +#+end_src + +#+RESULTS: +#+begin_example +https://fcca300a80606183a90494dccb9d8ffc.pit-1.try.coder.app +#+end_example + + +* kubevirt workspace +** create template and cluster + +#+begin_src tmate :dir "../../.." :window kubevirt +cd ~/sharingio/coder +export CRI_PATH=/var/run/containerd/containerd.sock +export IMAGE_REPO=k8s.gcr.io +export NODE_VM_IMAGE_TEMPLATE=quay.io/capk/ubuntu-2004-container-disk:v1.22.0 +coder template create kubevirt-talos \ + -d examples/templates/kubevirt-sidero-talos - + --yes +coder create kv1 --template kubevirt-talos --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +#+end_src + +** update template and new cluster + +#+begin_src tmate :dir "../../.." 
:window kubevirt +export WORKSPACE=kv1 +coder template push kubevirt -d examples/templates/kubevirt-talos --yes --parameter-file examples/templates/kubevirt/kubevirt.param.yaml +coder create $WORKSPACE --template kubevirt-talos --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +#+end_src + +** grab new cluster kubeconfig + +#+begin_src tmate :dir "../../.." :window kubectl +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get ns +#+end_src + +** inner cluster +#+begin_src shell +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl get all -A +#+end_src + +#+RESULTS: +#+begin_example +NAMESPACE NAME READY STATUS RESTARTS AGE +default pod/code-server-0 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-mwwff 0/1 Pending 0 81s +kube-system pod/coredns-749558f7dd-ppw92 0/1 Pending 0 81s +kube-system pod/etcd-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-apiserver-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-controller-manager-kv1-97525 1/1 Running 0 90s +kube-system pod/kube-proxy-48s9l 1/1 Running 0 81s +kube-system pod/kube-scheduler-kv1-97525 1/1 Running 0 90s + +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +default service/kubernetes ClusterIP 10.95.0.1 443/TCP 97s +kube-system service/kube-dns ClusterIP 10.95.0.10 53/UDP,53/TCP,9153/TCP 96s + +NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 96s + +NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE +kube-system deployment.apps/coredns 0/2 2 0 96s + +NAMESPACE NAME DESIRED CURRENT READY AGE +kube-system replicaset.apps/coredns-749558f7dd 2 2 0 82s + +NAMESPACE NAME READY AGE +default statefulset.apps/code-server 0/1 88s +#+end_example + +** cni not yet working +#+begin_src shell :prologue "(\n" :epilogue "\n) 2>&1\n:\n" +export WORKSPACE=kv1 +unset KUBECONFIG +TMPFILE=$(mktemp -t kubeconfig-XXXXX) +kubectl get secrets -n $WORKSPACE ${WORKSPACE}-kubeconfig -o jsonpath={.data.value} | base64 -d > $TMPFILE +export KUBECONFIG=$TMPFILE +kubectl describe nodes | grep -B6 KubeletNotReady +#+end_src + +#+RESULTS: +#+begin_example +Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasNoDiskPressure kubelet has no disk pressure + PIDPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientPID kubelet has sufficient PID available + Ready False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized +#+end_example diff --git a/examples/templates/kubevirt-talos/cluster.tf b/examples/templates/kubevirt-talos/cluster.tf new file mode 100644 index 0000000000000..1ef6a9702cac8 --- /dev/null +++ 
b/examples/templates/kubevirt-talos/cluster.tf @@ -0,0 +1,535 @@ +terraform { + required_providers { + coder = { + source = "coder/coder" + version = "0.4.15" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.12.1" + } + } +} + +# https://www.terraform.io/language/providers/configuration#provider-configuration-1 +# > You can use expressions in the values of these configuration arguments, +# but can only reference values that are known before the configuration is applied. +# This means you can safely reference input variables, but not attributes +# exported by resources (with an exception for resource arguments that +# are specified directly in the configuration). +#### no data.X :( +# provider "kubernetes" { +# alias = "vcluster" +# host = yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["server"] +# client_certificate = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-certificate-data"]) +# client_key = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["users"][0]["user"]["client-key-data"]) +# cluster_ca_certificate = base64decode(yamldecode(data.kubernetes_resource.kubeconfig.data)["value"]["clusters"][0]["cluster"]["certificate-authority-data"]) +# } + +variable "base_domain" { + type = string + default = "sanskar.pair.sharing.io" +} + +data "coder_workspace" "me" {} + +resource "coder_agent" "main" { + os = "linux" + arch = "amd64" + startup_script = <> $HOME/.bashrc + mkdir -p bin + curl -o bin/kubectl -L https://dl.k8s.io/v1.25.2/bin/linux/amd64/kubectl + chmod +x bin/* + + # install and start code-server + curl -fsSL https://code-server.dev/install.sh | sh | tee code-server-install.log + code-server --auth none --port 13337 | tee code-server-install.log & + EOT +} + +# code-server +resource "coder_app" "code-server" { + agent_id = coder_agent.main.id + name = "code-server" + icon = "/icon/code.svg" + url = "http://localhost:13337?folder=/home/coder" + relative_path = true + + healthcheck { + url = "http://localhost:13337/healthz" + interval = 3 + threshold = 10 + } +} + +resource "kubernetes_namespace" "workspace" { + metadata { + name = data.coder_workspace.me.name + labels = { + cert-manager-tls = "sync" + } + } +} + +resource "kubernetes_manifest" "cluster" { + manifest = { + "apiVersion" = "cluster.x-k8s.io/v1beta1" + "kind" = "Cluster" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + "labels" = { + "cluster-name" = data.coder_workspace.me.name + } + } + "spec" = { + "controlPlaneRef" = { + "apiVersion" = "controlplane.cluster.x-k8s.io/v1beta1" + "kind" = "TalosControlPlane" + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtCluster" + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "clusterNetwork" = { + "pods" = { + "cidrBlocks" = [ + "192.168.0.0/16", + ] + } + "services" = { + "cidrBlocks" = [ + "172.26.0.0/16", + ] + } + } + } + } +} + +resource "kubernetes_manifest" "kvcluster" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtCluster" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "controlPlaneServiceTemplate" = { + "spec" = { + "type" = "ClusterIP" + } + } + # 
"controlPlaneEndpoint" = { + # "host" = "" + # "port" = 0 + # } + # "kubernetesVersion" = "1.23.4" + # "helmRelease" = { + # "chart" = { + # "name" = null + # "repo" = null + # "version" = null + # } + # "values" = <<-EOT + # service: + # type: NodePort + # securityContext: + # runAsUser: 12345 + # runAsNonRoot: true + # privileged: false + # syncer: + # extraArgs: + # - --tls-san="${data.coder_workspace.me.name}.${var.base_domain}" + # - --tls-san="${data.coder_workspace.me.name}.${data.coder_workspace.me.name}.svc" + # EOT + # } + } + } +} + +resource "kubernetes_manifest" "kubevirtmachinetemplate_control_plane" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "metadata" = { + "name" = "${data.coder_workspace.me.name}-cp" + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "template" = { + "spec" = { + "virtualMachineTemplate" = { + "metadata" = { + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "runStrategy" = "Always" + "dataVolumeTemplates" = [ + { + "metadata" = { + "name" = "vmdisk-dv" + } + "spec" = { + "pvc" = { + "accessModes" = ["ReadWriteOnce"] + "resources" = { + "requests" = { + "storage" = "15Gi" + } + } + } + "source" = { + "http" = { + "url" = "https://github.com/siderolabs/talos/releases/download/v1.2.4/talos-amd64.iso" + } + } + } + }, + ] + "template" = { + "spec" = { + "domain" = { + "cpu" = { + "cores" = 2 + } + "devices" = { + "disks" = [ + { + "disk" = { + "bus" = "virtio" + } + "name" = "vmdisk" + }, + ] + } + "memory" = { + "guest" = "4Gi" + } + } + "evictionStrategy" = "External" + "volumes" = [ + { + "dataVolume" = { + "name" = "vmdisk-dv" + } + "name" = "vmdisk" + }, + ] + } + } + } + } + } + } + } + } +} + +resource "kubernetes_manifest" "taloscontrolplane_talos_em_control_plane" { + manifest = { + "apiVersion" = "controlplane.cluster.x-k8s.io/v1alpha3" + "kind" = "TalosControlPlane" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "controlPlaneConfig" = { + "controlplane" = { + "configPatches" = [ + { + "op" = "replace" + "path" = "/machine/install" + "value" = { + "bootloader" = true + "disk" = "/dev/sda" + "image" = "ghcr.io/siderolabs/installer:v1.2.4" + "wipe" = false + } + }, + { + "op" = "add" + "path" = "/machine/kubelet/extraArgs" + "value" = { + "cloud-provider" = "external" + } + }, + { + "op" = "add" + "path" = "/cluster/apiServer/extraArgs" + "value" = { + "cloud-provider" = "external" + } + }, + { + "op" = "add" + "path" = "/cluster/controllerManager/extraArgs" + "value" = { + "cloud-provider" = "external" + } + }, + { + "op" = "add" + "path" = "/cluster/allowSchedulingOnMasters" + "value" = true + }, + ] + "generateType" = "controlplane" + } + "init" = { + "configPatches" = [ + { + "op" = "replace" + "path" = "/machine/install" + "value" = { + "bootloader" = true + "disk" = "/dev/sda" + "image" = "ghcr.io/siderolabs/installer:v1.2.4" + "wipe" = false + } + }, + { + "op" = "add" + "path" = "/machine/kubelet/extraArgs" + "value" = { + "cloud-provider" = "external" + } + }, + { + "op" = "add" + "path" = "/cluster/apiServer/extraArgs" + "value" = { + "cloud-provider" = "external" + } + }, + { + "op" = "add" + "path" = "/cluster/controllerManager/extraArgs" + "value" = { + "cloud-provider" = "external" + } + }, + { + "op" = "add" + "path" = "/cluster/allowSchedulingOnMasters" + "value" = true + }, + ] + "generateType" = "init" + } + } + "infrastructureTemplate" = { + "apiVersion" = 
"infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "name" = "${data.coder_workspace.me.name}-cp" + } + "replicas" = 1 + "version" = "v1.25.2" + } + } +} + +// TODO check resource cross references + +Resource "kubernetes_manifest" "kubevirtmachinetemplate_md_0" { + manifest = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "template" = { + "spec" = { + "virtualMachineTemplate" = { + "metadata" = { + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "runStrategy" = "Always" + "dataVolumeTemplates" = [ + { + "metadata" = { + "name" = "vmdisk-dv" + } + "spec" = { + "pvc" = { + "accessModes" = [ + "ReadWriteOnce" + ] + "resources" = { + "requests" = { + "storage" = "15Gi" + } + } + } + "source" = { + "http" = { + "url" = "https://github.com/siderolabs/talos/releases/download/v1.2.4/talos-amd64.iso" + } + } + } + }, + ] + "template" = { + "spec" = { + "domain" = { + "cpu" = { + "cores" = 2 + } + "devices" = { + "disks" = [ + { + "disk" = { + "bus" = "virtio" + } + "name" = "vmdisk" + }, + ] + } + "memory" = { + "guest" = "4Gi" + } + } + "evictionStrategy" = "External" + "volumes" = [ + { + "dataVolume" = { + "name" = "vmdisk-dv" + } + "name" = "vmdisk" + }, + ] + } + } + } + } + } + } + } + } +} + +resource "kubernetes_manifest" "talosconfigtemplate_talos_em_worker_a" { + manifest = { + "apiVersion" = "bootstrap.cluster.x-k8s.io/v1alpha3" + "kind" = "TalosConfigTemplate" + "metadata" = { + "labels" = { + "cluster.x-k8s.io/cluster-name" = data.coder_workspace.me.name + } + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "template" = { + "spec" = { + "generateType" = "init" + } + } + } + } +} + +resource "kubernetes_manifest" "machinedeployment_md_0" { + manifest = { + "apiVersion" = "cluster.x-k8s.io/v1beta1" + "kind" = "MachineDeployment" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "clusterName" = data.coder_workspace.me.name + "replicas" = 0 + "selector" = { + "matchLabels" = null + } + "template" = { + "spec" = { + "bootstrap" = { + "configRef" = { + "apiVersion" = "bootstrap.cluster.x-k8s.io/v1beta1" + "kind" = "TalosConfigTemplate" + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + } + "clusterName" = "kv1" + "infrastructureRef" = { + "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" + "kind" = "KubevirtMachineTemplate" + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "version" = "v1.23.5" + } + } + } + } +} + +resource "kubernetes_manifest" "configmap_capi_init" { + manifest = { + "kind" = "ConfigMap" + "metadata" = { + "name" = "capi-init" + "namespace" = data.coder_workspace.me.name + } + "apiVersion" = "v1" + "data" = { + "cool.yaml" = templatefile("cool.template.yaml", + { + coder_command = jsonencode(["sh", "-c", coder_agent.main.init_script]), + coder_token = coder_agent.main.token + instance_name = data.coder_workspace.me.name + }) + } + } +} + +resource "kubernetes_manifest" "clusterresourceset_capi_init" { + manifest = { + "apiVersion" = "addons.cluster.x-k8s.io/v1beta1" + "kind" = "ClusterResourceSet" + "metadata" = { + "name" = data.coder_workspace.me.name + "namespace" = data.coder_workspace.me.name + } + "spec" = { + "clusterSelector" = { + 
"matchLabels" = { + "cluster-name" = data.coder_workspace.me.name + } + } + "resources" = [ + { + "kind" = "ConfigMap" + "name" = "capi-init" + }, + # { + # "kind" = "Secret" + # "name" = "vcluster-kubeconfig" + # }, + ] + "strategy" = "ApplyOnce" + } + } +} diff --git a/examples/templates/kubevirt-talos/cool.template.yaml b/examples/templates/kubevirt-talos/cool.template.yaml new file mode 100644 index 0000000000000..e4693053eec44 --- /dev/null +++ b/examples/templates/kubevirt-talos/cool.template.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coder + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: coder +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: coder + namespace: default +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: code-server + namespace: default +spec: + selector: + matchLabels: + app: code-server + serviceName: code-server + template: + metadata: + labels: + app: code-server + spec: + serviceAccountName: coder + securityContext: + runAsUser: 1000 + fsGroup: 1000 + containers: + - name: code-server + image: codercom/enterprise-base:ubuntu + command: ${coder_command} + securityContext: + runAsUser: 1000 + env: + - name: CODER_AGENT_TOKEN + value: ${coder_token} From 61b199d6a92792724f82b3bd7482d1044fc0673f Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 12 Oct 2022 13:50:01 +1300 Subject: [PATCH 10/19] Update kubernetes/main.tf and values.template.yaml --- .sharing.io/values.template.yaml | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.sharing.io/values.template.yaml b/.sharing.io/values.template.yaml index 5486524922536..bf9209306f9a8 100644 --- a/.sharing.io/values.template.yaml +++ b/.sharing.io/values.template.yaml @@ -9,7 +9,7 @@ coder: # `CODER_TLS_ENABLE`, `CODER_TLS_CERT_FILE` or `CODER_TLS_KEY_FILE` as # they are already set by the Helm chart and will cause conflicts. image: - tag: "v0.9.0" + tag: "v0.9.8" env: - name: CODER_ACCESS_URL value: "https://coder.${SHARINGIO_PAIR_BASE_DNS_NAME}" @@ -30,5 +30,12 @@ coder: value: "false" - name: CODER_WILDCARD_ACCESS_URL value: "*.coder.${SHARINGIO_PAIR_BASE_DNS_NAME}" - tls: - secretName: null + ingress: + enable: true + className: $SHARINGIO_PAIR_INSTANCE_INGRESS_CLASS_NAME + host: "coder.${SHARINGIO_PAIR_BASE_DNS_NAME}" + wildcardHost: "*.${SHARINGIO_PAIR_BASE_DNS_NAME}" + tls: + enable: true + secretName: letsencrypt-prod + wildcardSecretName: "letsencrypt-prod" From e43cac49d9d369ba54921a9699517e54200b40cb Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 12 Oct 2022 14:00:11 +1300 Subject: [PATCH 11/19] Update init Initialise the environment better to work for us all --- .sharing.io/init | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/.sharing.io/init b/.sharing.io/init index 39bbcc95b1743..05a5afa23e4ba 100755 --- a/.sharing.io/init +++ b/.sharing.io/init @@ -1,18 +1,16 @@ #!/bin/env bash set -x -# upgrade go to 1.19.1 -go version | grep 1.19.1 || curl -L https://dl.google.com/go/go1.19.1.linux-amd64.tar.gz | sudo tar --directory /usr/local --extract --ungzip # shfmt needed for make -which shfmt || sudo apt-get install shfmt +which shfmt || sudo apt-get install -y shfmt # for converting k8s yaml to HCL go install github.com/jrhouston/tfk8s@latest # TODO: Make still failing, possible dependencies still missing. 
# install coder binary until we can build from src which coder || ( - curl -OL https://github.com/coder/coder/releases/download/v0.9.1/coder_0.9.1_linux_amd64.deb - sudo dpkg -i coder_0.9.1_linux_amd64.deb + curl -L -o /tmp/coder.deb https://github.com/coder/coder/releases/download/v0.9.8/coder_0.9.8_linux_amd64.deb + sudo apt install -y /tmp/coder.deb # Add completion echo '. <(coder completion bash)' >>~/.bashrc ) @@ -22,21 +20,18 @@ kubectl create namespace coder # ensure ingress works / certs secrets get copied kubectl label ns coder cert-manager-tls=sync # needs a postgres db -# helm repo add bitnami https://charts.bitnami.com/bitnami -# helm install postgres bitnami/postgresql \ -# --namespace coder \ -# --set auth.username=coder \ -# --set auth.password=coder \ -# --set auth.database=coder \ -# --set persistence.size=10Gi +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install postgres bitnami/postgresql \ + --namespace coder \ + --set auth.username=coder \ + --set auth.password=coder \ + --set auth.database=coder \ + --set persistence.size=10Gi # deploy via helm for now -# envsubst <.sharing.io/values.template.yaml >.sharing.io/values.yaml -# helm install coder ./helm/ \ -# --namespace coder \ -# --values .sharing.io/values.yaml -# # setup ingress -# envsubst <.sharing.io/ingress.template.yaml >.sharing.io/ingress.yaml -# kubectl apply -f .sharing.io/ingress.yaml +envsubst <.sharing.io/values.template.yaml >.sharing.io/values.yaml +helm install coder ./helm/ \ + --namespace coder \ + --values .sharing.io/values.yaml # # Wait for coder to deploy # kubectl rollout status deployment coder -n coder # kubectl wait -n coder --for=condition=ready pod -l app.kubernetes.io/name=coder @@ -49,7 +44,7 @@ CODER_USERNAME=ii CODER_URL=https://coder.${SHARINGIO_PAIR_BASE_DNS_NAME} # export vars to we can emulate a tty with a short expect script export CODER_EMAIL CODER_PASSWORD CODER_USERNAME -# coder login $CODER_URL -u $CODER_USERNAME -p $CODER_PASSWORD -e $CODER_EMAIL +coder login $CODER_URL -u $CODER_USERNAME -p $CODER_PASSWORD -e $CODER_EMAIL export HELM_VALUES="service:\n type: NodePort\nsyncer:\n extraArgs:\n - --tls-san=${SHARINGIO_PAIR_BASE_DNS_NAME}" export EXP_CLUSTER_RESOURCE_SET=true # Install kubevirt @@ -64,6 +59,7 @@ kubectl -n kubevirt wait kv kubevirt --for condition=Available clusterctl init --infrastructure vcluster clusterctl init --infrastructure kubevirt clusterctl init --infrastructure=packet +clusterctl init --bootstrap talos --control-plane talos kubectl create ns coder-workspaces From a4298919796ca5237b7bad123a7b91e9989e96da Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 12 Oct 2022 16:16:20 +1300 Subject: [PATCH 12/19] Update kubevirt-talos --- examples/templates/kubevirt-talos/README.org | 28 +++++-- examples/templates/kubevirt-talos/cluster.tf | 79 ++------------------ 2 files changed, 29 insertions(+), 78 deletions(-) diff --git a/examples/templates/kubevirt-talos/README.org b/examples/templates/kubevirt-talos/README.org index 60ed564548577..1234449609308 100644 --- a/examples/templates/kubevirt-talos/README.org +++ b/examples/templates/kubevirt-talos/README.org @@ -13,13 +13,27 @@ kubectl create -f https://github.com/kubevirt/containerized-data-importer/releas kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-cr.yaml #+end_src +#+RESULTS: +#+begin_example +namespace/cdi created +customresourcedefinition.apiextensions.k8s.io/cdis.cdi.kubevirt.io created 
+clusterrole.rbac.authorization.k8s.io/cdi-operator-cluster created +clusterrolebinding.rbac.authorization.k8s.io/cdi-operator created +serviceaccount/cdi-operator created +role.rbac.authorization.k8s.io/cdi-operator created +rolebinding.rbac.authorization.k8s.io/cdi-operator created +deployment.apps/cdi-operator created +configmap/cdi-operator-leader-election-helper created +cdi.cdi.kubevirt.io/cdi created +#+end_example + * Coder Iteration Loop ** Start Coder #+begin_src tmate :window coder :dir "../../.."  cd ~/sharingio/coder rm -rf ~/.config/coderv2/ # delete database -coder server --address=0.0.0.0:7080 --access-url=http://localhost:7080 --tunnel \ +coder server --address=0.0.0.0:7080 --access-url=http://localhost:7080 \ 2>&1 | tee coder-server.log #+end_src #+begin_src shell @@ -29,10 +43,10 @@ coder login `cat ~/.config/coderv2/url` -u ii -p ii -e ii@ii.nz #+RESULTS: #+begin_example > Your Coder deployment hasn't been set up! - - Welcome to Coder, ii! You're authenticated. - - Get started by creating a template: coder templates init + + Welcome to Coder, ii! You're authenticated. + + Get started by creating a template: coder templates init #+end_example ** coder url #+begin_src shell :dir "../../.." @@ -54,9 +68,9 @@ export CRI_PATH=/var/run/containerd/containerd.sock export IMAGE_REPO=k8s.gcr.io export NODE_VM_IMAGE_TEMPLATE=quay.io/capk/ubuntu-2004-container-disk:v1.22.0 coder template create kubevirt-talos \ - -d examples/templates/kubevirt-sidero-talos - + -d examples/templates/kubevirt-talos \ --yes -coder create kv1 --template kubevirt-talos --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes +coder create kv1 --template kubevirt-talos #+end_src ** update template and new cluster diff --git a/examples/templates/kubevirt-talos/cluster.tf b/examples/templates/kubevirt-talos/cluster.tf index 1ef6a9702cac8..e6b7a3f3514aa 100644 --- a/examples/templates/kubevirt-talos/cluster.tf +++ b/examples/templates/kubevirt-talos/cluster.tf @@ -135,30 +135,6 @@ resource "kubernetes_manifest" "kvcluster" { "type" = "ClusterIP" } } - # "controlPlaneEndpoint" = { - # "host" = "" - # "port" = 0 - # } - # "kubernetesVersion" = "1.23.4" - # "helmRelease" = { - # "chart" = { - # "name" = null - # "repo" = null - # "version" = null - # } - # "values" = <<-EOT - # service: - # type: NodePort - # securityContext: - # runAsUser: 12345 - # runAsNonRoot: true - # privileged: false - # syncer: - # extraArgs: - # - --tls-san="${data.coder_workspace.me.name}.${var.base_domain}" - # - --tls-san="${data.coder_workspace.me.name}.${data.coder_workspace.me.name}.svc" - # EOT - # } } } } @@ -257,10 +233,11 @@ resource "kubernetes_manifest" "taloscontrolplane_talos_em_control_plane" { "op" = "replace" "path" = "/machine/install" "value" = { - "bootloader" = true - "disk" = "/dev/sda" - "image" = "ghcr.io/siderolabs/installer:v1.2.4" - "wipe" = false + "bootloader" = true + "disk" = "/dev/vda" + "image" = "ghcr.io/siderolabs/installer:v1.2.4" + "wipe" = false + "extraKernelArgs" = ["console=ttyS0"] } }, { @@ -292,47 +269,6 @@ resource "kubernetes_manifest" "taloscontrolplane_talos_em_control_plane" { ] "generateType" = "controlplane" } - "init" = { - "configPatches" = [ - { - "op" = "replace" - "path" = "/machine/install" - "value" = { - "bootloader" = true - "disk" = "/dev/sda" - "image" = "ghcr.io/siderolabs/installer:v1.2.4" - "wipe" = false - } - }, - { - "op" = "add" - "path" = "/machine/kubelet/extraArgs" - "value" = { - "cloud-provider" = "external" - } - }, - { - "op" = "add" - "path" = 
"/cluster/apiServer/extraArgs" - "value" = { - "cloud-provider" = "external" - } - }, - { - "op" = "add" - "path" = "/cluster/controllerManager/extraArgs" - "value" = { - "cloud-provider" = "external" - } - }, - { - "op" = "add" - "path" = "/cluster/allowSchedulingOnMasters" - "value" = true - }, - ] - "generateType" = "init" - } } "infrastructureTemplate" = { "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" @@ -347,7 +283,7 @@ resource "kubernetes_manifest" "taloscontrolplane_talos_em_control_plane" { // TODO check resource cross references -Resource "kubernetes_manifest" "kubevirtmachinetemplate_md_0" { +resource "kubernetes_manifest" "kubevirtmachinetemplate_md_0" { manifest = { "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" "kind" = "KubevirtMachineTemplate" @@ -441,7 +377,8 @@ resource "kubernetes_manifest" "talosconfigtemplate_talos_em_worker_a" { "spec" = { "template" = { "spec" = { - "generateType" = "init" + "generateType" = "join" + "talosVersion" = "v1.2.4" } } } From 5a7fbfd3154079073dcda6510aeb5dffec249519 Mon Sep 17 00:00:00 2001 From: Caleb Woodbine Date: Wed, 12 Oct 2022 17:17:39 +1300 Subject: [PATCH 13/19] Add init patches back --- examples/templates/kubevirt-talos/cluster.tf | 42 ++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/examples/templates/kubevirt-talos/cluster.tf b/examples/templates/kubevirt-talos/cluster.tf index e6b7a3f3514aa..4dfd98d615ccc 100644 --- a/examples/templates/kubevirt-talos/cluster.tf +++ b/examples/templates/kubevirt-talos/cluster.tf @@ -269,6 +269,48 @@ resource "kubernetes_manifest" "taloscontrolplane_talos_em_control_plane" { ] "generateType" = "controlplane" } + "init" = { + "configPatches" = [ + { + "op" = "replace" + "path" = "/machine/install" + "value" = { + "bootloader" = true + "disk" = "/dev/vda" + "image" = "ghcr.io/siderolabs/installer:v1.2.4" + "wipe" = false + "extraKernelArgs" = ["console=ttyS0"] + } + }, + { + "op" = "add" + "path" = "/machine/kubelet/extraArgs" + "value" = { + "cloud-provider" = "external" + } + }, + { + "op" = "add" + "path" = "/cluster/apiServer/extraArgs" + "value" = { + "cloud-provider" = "external" + } + }, + { + "op" = "add" + "path" = "/cluster/controllerManager/extraArgs" + "value" = { + "cloud-provider" = "external" + } + }, + { + "op" = "add" + "path" = "/cluster/allowSchedulingOnMasters" + "value" = true + }, + ] + "generateType" = "init" + } } "infrastructureTemplate" = { "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1" From 327af8c156f1b48f3f73fb0eb5d182904c01b6d9 Mon Sep 17 00:00:00 2001 From: Hippie Hacker Date: Wed, 12 Oct 2022 09:37:57 -0600 Subject: [PATCH 14/19] Ensure coder is up before coder login --- .sharing.io/init | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.sharing.io/init b/.sharing.io/init index 05a5afa23e4ba..4cbc343553543 100755 --- a/.sharing.io/init +++ b/.sharing.io/init @@ -33,8 +33,8 @@ helm install coder ./helm/ \ --namespace coder \ --values .sharing.io/values.yaml # # Wait for coder to deploy -# kubectl rollout status deployment coder -n coder -# kubectl wait -n coder --for=condition=ready pod -l app.kubernetes.io/name=coder +# kubectl rollout status deployment coder -n coder... 
so we can create the initial user
+kubectl wait -n coder --for=condition=ready pod -l app.kubernetes.io/name=coder

 # create the initial user
 # populate ii or pair as an admin user without logging in

From 0a9a7645b1e18de25e86a1fb66bc64c37658bd87 Mon Sep 17 00:00:00 2001
From: Hippie Hacker
Date: Wed, 12 Oct 2022 17:15:27 -0600
Subject: [PATCH 15/19] Add cluster role binding for coder/kubevirt+talos

---
 .sharing.io/init | 5 +
 .../kubevirt-talos/role+binding.yaml | 94 +++++++++++++++++++
 2 files changed, 99 insertions(+)
 create mode 100644 examples/templates/kubevirt-talos/role+binding.yaml

diff --git a/.sharing.io/init b/.sharing.io/init
index 4cbc343553543..a585ec1538895 100755
--- a/.sharing.io/init
+++ b/.sharing.io/init
@@ -61,6 +61,11 @@ clusterctl init --infrastructure kubevirt
 clusterctl init --infrastructure=packet
 clusterctl init --bootstrap talos --control-plane talos

+# we'll need these extra rolebindings for the coder service account for our template to work
+# must be applied after coder helm chart is run and clusterctl init -- talos
+kubectl apply -f ./examples/templates/kubevirt-talos/role+binding.yaml
+
 kubectl create ns coder-workspaces

 #TODO : upload / update the kubernetes template
diff --git a/examples/templates/kubevirt-talos/role+binding.yaml b/examples/templates/kubevirt-talos/role+binding.yaml
new file mode 100644
index 0000000000000..89c0594b09f41
--- /dev/null
+++ b/examples/templates/kubevirt-talos/role+binding.yaml
@@ -0,0 +1,94 @@
+# Requires:
+#   clusterctl init --infrastructure kubevirt
+#   clusterctl init --bootstrap talos --control-plane talos
+# Some are at Cluster Level, some are at the coder namespace level
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: coder-clusterapi-perms
+rules:
+  - apiGroups:
+      - "apiextensions.k8s.io"
+    resources:
+      - "customresourcedefinitions"
+    verbs:
+      - "list"
+      - "get"
+  - apiGroups:
+      - ""
+      - "cluster.x-k8s.io"
+      - "bootstrap.cluster.x-k8s.io"
+      - "controlplane.cluster.x-k8s.io"
+      - "infrastructure.cluster.x-k8s.io"
+      - "addons.cluster.x-k8s.io"
+    resources:
+      - "namespaces"
+      - "configmaps"
+      - "clusters"
+      - "machinedeployments"
+      - "talosconfigtemplates"
+      - "taloscontrolplanes"
+      - "kubevirtclusters"
+      - "kubevirtmachinetemplates"
+      - "clusterresourcesets"
+    verbs:
+      - "list"
+      - "get"
+      - "patch"
+      - "create"
+      - "delete"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: coder-clusterapi
+  namespace: coder
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: coder-clusterapi-perms
+subjects:
+  - kind: ServiceAccount
+    name: coder
+    namespace: coder
+# ---
+# apiVersion: rbac.authorization.k8s.io/v1
+# kind: Role
+# metadata:
+#   name: coder-clusterapi-perms
+#   namespace: coder
+# rules:
+#   - apiGroups:
+#       - ""
+#     resources:
+#       - "configmaps"
+#     verbs:
+#       - "list"
+#       - "get"
+# ---
+# apiVersion: rbac.authorization.k8s.io/v1
+# kind: RoleBinding
+# metadata:
+#   name: coder-clusterapi
+#   namespace: coder
+# roleRef:
+#   apiGroup: rbac.authorization.k8s.io
+#   kind: Role
+#   name: coder-clusterapi-perms
+# subjects:
+#   - kind: ServiceAccount
+#     name: coder
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: coder-clusterapi-cluster
+  namespace: coder
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: coder-clusterapi-perms
+subjects:
+  - kind: ServiceAccount
+    name: coder
+    namespace: coder

From 95b1bc7282720019ef569dac15549499b30da90d Mon Sep 17 00:00:00 2001
From:
Caleb Woodbine Date: Thu, 13 Oct 2022 16:35:24 +1300 Subject: [PATCH 16/19] Update boot disk. pvc size, talos configuration Co-Authored-By: Hippie Hacker Co-Authored-By: Andrew Rynhard Co-Authored-By: Kat Morgan --- examples/templates/kubevirt-talos/README.org | 86 ++++++++++++-- examples/templates/kubevirt-talos/cluster.tf | 112 +++++++++---------- 2 files changed, 134 insertions(+), 64 deletions(-) diff --git a/examples/templates/kubevirt-talos/README.org b/examples/templates/kubevirt-talos/README.org index 1234449609308..cdaed46778e3d 100644 --- a/examples/templates/kubevirt-talos/README.org +++ b/examples/templates/kubevirt-talos/README.org @@ -33,7 +33,7 @@ cdi.cdi.kubevirt.io/cdi created  cd ~/sharingio/coder rm -rf ~/.config/coderv2/ # delete database -coder server --address=0.0.0.0:7080 --access-url=http://localhost:7080 \ +coder server --address=0.0.0.0:7080 --access-url=https://coder.$SHARINGIO_PAIR_BASE_DNS_NAME \ 2>&1 | tee coder-server.log #+end_src #+begin_src shell @@ -43,10 +43,10 @@ coder login `cat ~/.config/coderv2/url` -u ii -p ii -e ii@ii.nz #+RESULTS: #+begin_example > Your Coder deployment hasn't been set up! - - Welcome to Coder, ii! You're authenticated. - - Get started by creating a template: coder templates init + + Welcome to Coder, ii! You're authenticated. + + Get started by creating a template: coder templates init #+end_example ** coder url #+begin_src shell :dir "../../.." @@ -55,7 +55,7 @@ grep "coder login https://" coder-server.log | cut -d\ -f 4 #+RESULTS: #+begin_example -https://fcca300a80606183a90494dccb9d8ffc.pit-1.try.coder.app +https://coder.bobymcbobs.pair.sharing.io #+end_example @@ -77,7 +77,7 @@ coder create kv1 --template kubevirt-talos #+begin_src tmate :dir "../../.." :window kubevirt export WORKSPACE=kv1 -coder template push kubevirt -d examples/templates/kubevirt-talos --yes --parameter-file examples/templates/kubevirt/kubevirt.param.yaml +coder template push kubevirt-talos -d examples/templates/kubevirt-talos --yes coder create $WORKSPACE --template kubevirt-talos --parameter-file examples/templates/kubevirt/kubevirt.param.yaml --yes #+end_src @@ -151,3 +151,75 @@ Conditions: PIDPressure False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletHasSufficientPID kubelet has sufficient PID available Ready False Sat, 08 Oct 2022 23:39:08 -0600 Sat, 08 Oct 2022 23:38:52 -0600 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized #+end_example + +** list all relevant resources +#+begin_src shell +kubectl get $(kubectl api-resources | grep -E 'x-k8s|sidero' | awk '{print $1}' | xargs | tr ' ' ','),secrets,configmaps,pods,svc,pvc,vmis -o wide -n kv10 +#+end_src + +#+RESULTS: +#+begin_example +NAME AGE +clusterresourceset.addons.cluster.x-k8s.io/kv10 25s + +NAME AGE +talosconfig.bootstrap.cluster.x-k8s.io/kv10-nz842 5s + +NAME AGE +talosconfigtemplate.bootstrap.cluster.x-k8s.io/kv10 27s + +NAME PHASE AGE VERSION +cluster.cluster.x-k8s.io/kv10 Provisioned 24s + +NAME CLUSTER DESIRED REPLICAS READY UPDATED UNAVAILABLE PHASE AGE VERSION +machinedeployment.cluster.x-k8s.io/kv10 kv10 0 Running 23s v1.23.5 + +NAME CLUSTER NODENAME PROVIDERID PHASE AGE VERSION +machine.cluster.x-k8s.io/kv10-mknmr kv10 Provisioning 5s v1.25.2 + +NAME CLUSTER DESIRED REPLICAS READY AVAILABLE AGE VERSION +machineset.cluster.x-k8s.io/kv10-7c6b4779d9 kv10 0 22s v1.23.5 + +NAME READY INITIALIZED REPLICAS READY REPLICAS UNAVAILABLE REPLICAS 
+taloscontrolplane.controlplane.cluster.x-k8s.io/kv10                         1                           1
+
+NAME                                                   AGE
+kubevirtcluster.infrastructure.cluster.x-k8s.io/kv10   26s
+
+NAME                                                            AGE
+kubevirtmachine.infrastructure.cluster.x-k8s.io/kv10-cp-9klxl   5s
+
+NAME                                                               AGE
+kubevirtmachinetemplate.infrastructure.cluster.x-k8s.io/kv10      22s
+kubevirtmachinetemplate.infrastructure.cluster.x-k8s.io/kv10-cp   22s
+
+NAME                                        TYPE                                  DATA   AGE
+secret/default-token-5f5r8                  kubernetes.io/service-account-token   3      30s
+secret/kv10-ca                              Opaque                                2      5s
+secret/kv10-kubeconfig                      cluster.x-k8s.io/secret               1      4s
+secret/kv10-mknmr-bootstrap-data            Opaque                                1      5s
+secret/kv10-mknmr-bootstrap-data-userdata   cluster.x-k8s.io/secret               1      5s
+secret/kv10-ssh-keys                        cluster.x-k8s.io/secret               2      24s
+secret/kv10-talos                           Opaque                                1      5s
+secret/kv10-talosconfig                     Opaque                                1      5s
+secret/letsencrypt-prod                     kubernetes.io/tls                     2      30s
+
+NAME                         DATA   AGE
+configmap/capi-init          1      24s
+configmap/kube-root-ca.crt   1      30s
+
+NAME                                   READY   STATUS    RESTARTS   AGE   IP   NODE   NOMINATED NODE   READINESS GATES
+pod/importer-kv10-cp-9klxl-vmdisk-dv   0/1     Pending   0          5s
+
+NAME              TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)    AGE   SELECTOR
+service/kv10-lb   ClusterIP   10.97.239.4                 6443/TCP   24s   cluster.x-k8s.io/cluster-name=kv10,cluster.x-k8s.io/role=control-plane
+
+NAME                                            STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE   VOLUMEMODE
+persistentvolumeclaim/kv10-cp-9klxl-vmdisk-dv   Pending                                      local-path     5s    Filesystem
+#+end_example
+
+* Tear down
+
+#+begin_src tmate :window kubevirt
+coder delete kv9
+#+end_src
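While a workspace is provisioning, the flat resource listing above pairs well with clusterctl's condition tree (a sketch, reusing the kv10 workspace namespace from the example output):

#+begin_src shell
# one condensed readiness view over the Cluster, TalosControlPlane,
# KubevirtMachines, etc. listed above
clusterctl describe cluster kv10 -n kv10 --show-conditions all
#+end_src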
diff --git a/examples/templates/kubevirt-talos/cluster.tf b/examples/templates/kubevirt-talos/cluster.tf
index 4dfd98d615ccc..6aa747a652811 100644
--- a/examples/templates/kubevirt-talos/cluster.tf
+++ b/examples/templates/kubevirt-talos/cluster.tf
@@ -166,13 +166,13 @@ resource "kubernetes_manifest" "kubevirtmachinetemplate_control_plane" {
           "accessModes" = ["ReadWriteOnce"]
           "resources" = {
             "requests" = {
-              "storage" = "15Gi"
+              "storage" = "50Gi"
             }
           }
         }
         "source" = {
           "http" = {
-            "url" = "https://github.com/siderolabs/talos/releases/download/v1.2.4/talos-amd64.iso"
+            "url" = "https://github.com/siderolabs/talos/releases/download/v1.2.5/nocloud-amd64.raw.xz"
           }
         }
       }
@@ -188,7 +188,7 @@ resource "kubernetes_manifest" "kubevirtmachinetemplate_control_plane" {
           "disks" = [
             {
               "disk" = {
-                "bus" = "virtio"
+                "bus" = "scsi"
               }
               "name" = "vmdisk"
             },
@@ -234,36 +234,36 @@ resource "kubernetes_manifest" "taloscontrolplane_talos_em_control_plane" {
               "path" = "/machine/install"
               "value" = {
                 "bootloader" = true
-                "disk" = "/dev/vda"
-                "image" = "ghcr.io/siderolabs/installer:v1.2.4"
                 "wipe" = false
+                "disk" = "/dev/sda"
+                "image" = "ghcr.io/siderolabs/installer:v1.2.5"
                 "extraKernelArgs" = ["console=ttyS0"]
               }
             },
-            {
-              "op" = "add"
-              "path" = "/machine/kubelet/extraArgs"
-              "value" = {
-                "cloud-provider" = "external"
-              }
-            },
-            {
-              "op" = "add"
-              "path" = "/cluster/apiServer/extraArgs"
-              "value" = {
-                "cloud-provider" = "external"
-              }
-            },
-            {
-              "op" = "add"
-              "path" = "/cluster/controllerManager/extraArgs"
-              "value" = {
-                "cloud-provider" = "external"
-              }
-            },
+            # {
+            #   "op" = "add"
+            #   "path" = "/machine/kubelet/extraArgs"
+            #   "value" = {
+            #     "cloud-provider" = "external"
+            #   }
+            # },
+            # {
+            #   "op" = "add"
+            #   "path" = "/cluster/apiServer/extraArgs"
+            #   "value" = {
+            #     "cloud-provider" = "external"
+            #   }
+            # },
+            # {
+            #   "op" = "add"
+            #   "path" = "/cluster/controllerManager/extraArgs"
+            #   "value" = {
+            #     "cloud-provider" = "external"
+            #   }
+            # },
             {
               "op" = "add"
-              "path" = "/cluster/allowSchedulingOnMasters"
+              "path" = "/cluster/allowSchedulingOnControlPlanes"
               "value" = true
             },
           ]
@@ -276,36 +276,36 @@ resource "kubernetes_manifest" "taloscontrolplane_talos_em_control_plane" {
               "path" = "/machine/install"
               "value" = {
                 "bootloader" = true
-                "disk" = "/dev/vda"
-                "image" = "ghcr.io/siderolabs/installer:v1.2.4"
                 "wipe" = false
+                "disk" = "/dev/sda"
+                "image" = "ghcr.io/siderolabs/installer:v1.2.5"
                 "extraKernelArgs" = ["console=ttyS0"]
               }
             },
-            {
-              "op" = "add"
-              "path" = "/machine/kubelet/extraArgs"
-              "value" = {
-                "cloud-provider" = "external"
-              }
-            },
-            {
-              "op" = "add"
-              "path" = "/cluster/apiServer/extraArgs"
-              "value" = {
-                "cloud-provider" = "external"
-              }
-            },
-            {
-              "op" = "add"
-              "path" = "/cluster/controllerManager/extraArgs"
-              "value" = {
-                "cloud-provider" = "external"
-              }
-            },
+            # {
+            #   "op" = "add"
+            #   "path" = "/machine/kubelet/extraArgs"
+            #   "value" = {
+            #     "cloud-provider" = "external"
+            #   }
+            # },
+            # {
+            #   "op" = "add"
+            #   "path" = "/cluster/apiServer/extraArgs"
+            #   "value" = {
+            #     "cloud-provider" = "external"
+            #   }
+            # },
+            # {
+            #   "op" = "add"
+            #   "path" = "/cluster/controllerManager/extraArgs"
+            #   "value" = {
+            #     "cloud-provider" = "external"
+            #   }
+            # },
             {
               "op" = "add"
-              "path" = "/cluster/allowSchedulingOnMasters"
+              "path" = "/cluster/allowSchedulingOnControlPlanes"
               "value" = true
             },
           ]
@@ -323,8 +323,6 @@ resource "kubernetes_manifest" "taloscontrolplane_talos_em_control_plane" {
   }
 }
 
-// TODO check resource cross references
-
 resource "kubernetes_manifest" "kubevirtmachinetemplate_md_0" {
   manifest = {
     "apiVersion" = "infrastructure.cluster.x-k8s.io/v1alpha1"
@@ -354,13 +352,13 @@ resource "kubernetes_manifest" "kubevirtmachinetemplate_md_0" {
           ]
           "resources" = {
             "requests" = {
-              "storage" = "15Gi"
+              "storage" = "50Gi"
             }
           }
         }
         "source" = {
           "http" = {
-            "url" = "https://github.com/siderolabs/talos/releases/download/v1.2.4/talos-amd64.iso"
+            "url" = "https://github.com/siderolabs/talos/releases/download/v1.2.5/nocloud-amd64.raw.xz"
           }
         }
       }
@@ -376,7 +374,7 @@ resource "kubernetes_manifest" "kubevirtmachinetemplate_md_0" {
           "disks" = [
             {
               "disk" = {
-                "bus" = "virtio"
+                "bus" = "scsi"
              }
               "name" = "vmdisk"
             },
@@ -420,7 +418,7 @@ resource "kubernetes_manifest" "talosconfigtemplate_talos_em_worker_a" {
     "template" = {
       "spec" = {
         "generateType" = "join"
-        "talosVersion" = "v1.2.4"
+        "talosVersion" = "v1.2.5"
       }
     }
   }
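The bus and install-disk changes above belong together: with a scsi disk the Talos guest enumerates the volume as /dev/sda, so /machine/install/disk has to move off /dev/vda. The device names can be confirmed from inside the guest (a sketch; the talosconfig secret key and the node IP are assumptions to substitute from your own cluster):

#+begin_src shell
# pull the workspace talosconfig and list the disks Talos actually sees
kubectl get secret kv10-talosconfig -n kv10 \
  -o jsonpath='{.data.talosconfig}' | base64 -d > talosconfig
talosctl --talosconfig talosconfig -n <node-ip> disks
#+end_src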
From 3470d02ff30391a386bc0441404f4d873779288a Mon Sep 17 00:00:00 2001
From: Hippie Hacker
Date: Thu, 13 Oct 2022 08:07:42 -0600
Subject: [PATCH 17/19] add kvtalos manifest

---
 examples/templates/kubevirt-talos/cluster.tf  |   2 +-
 .../kubevirt-talos/manifest/kvtalos.yaml      | 213 ++++++++++++++++++
 2 files changed, 214 insertions(+), 1 deletion(-)
 create mode 100644 examples/templates/kubevirt-talos/manifest/kvtalos.yaml

diff --git a/examples/templates/kubevirt-talos/cluster.tf b/examples/templates/kubevirt-talos/cluster.tf
index 6aa747a652811..94a59cf317c7f 100644
--- a/examples/templates/kubevirt-talos/cluster.tf
+++ b/examples/templates/kubevirt-talos/cluster.tf
@@ -228,6 +228,7 @@ resource "kubernetes_manifest" "taloscontrolplane_talos_em_control_plane" {
     "spec" = {
       "controlPlaneConfig" = {
         "controlplane" = {
+          "generateType" = "controlplane"
           "configPatches" = [
             {
               "op" = "replace"
@@ -267,7 +268,6 @@ resource "kubernetes_manifest" "taloscontrolplane_talos_em_control_plane" {
               "value" = true
             },
           ]
-          "generateType" = "controlplane"
         }
         "init" = {
           "configPatches" = [

diff --git a/examples/templates/kubevirt-talos/manifest/kvtalos.yaml b/examples/templates/kubevirt-talos/manifest/kvtalos.yaml
new file mode 100644
index 0000000000000..16fa214df3f73
--- /dev/null
+++ b/examples/templates/kubevirt-talos/manifest/kvtalos.yaml
@@ -0,0 +1,213 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  labels:
+    cluster-name: talos
+  name: talos
+spec:
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+    kind: TalosControlPlane
+    name: talos
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+    kind: KubevirtCluster
+    name: talos
+  clusterNetwork:
+    pods:
+      cidrBlocks:
+        - 192.168.0.0/16
+    services:
+      cidrBlocks:
+        - 172.26.0.0/16
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: KubevirtCluster
+metadata:
+  name: talos
+spec:
+  controlPlaneServiceTemplate:
+    spec:
+      type: ClusterIP
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+  name: talos
+spec:
+  clusterName: talos
+  replicas: 0
+  selector:
+    matchLabels: null
+  template:
+    spec:
+      clusterName: talos
+      version: v1.23.5
+      bootstrap:
+        configRef:
+          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+          kind: TalosConfigTemplate
+          name: talos
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+        kind: KubevirtMachineTemplate
+        name: talos
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+kind: TalosControlPlane
+metadata:
+  name: talos
+spec:
+  replicas: 1
+  version: "v1.25.2"
+  infrastructureTemplate:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+    kind: KubevirtMachineTemplate
+    name: talos-cp
+  controlPlaneConfig:
+    init:
+      generateType: init
+      configPatches:
+        - path: /machine/install
+          op: replace
+          value:
+            bootloader: true
+            disk: /dev/vda
+            image: ghcr.io/siderolabs/installer:v1.2.4
+            wipe: false
+            extraKernelArgs:
+              - console=ttyS0
+        - path: /machine/kubelet/extraArgs
+          op: add
+          value:
+            cloud-provider: external
+        - path: /cluster/apiServer/extraArgs
+          op: add
+          value:
+            cloud-provider: external
+        - path: /cluster/controllerManager/extraArgs
+          op: add
+          value:
+            cloud-provider: external
+        - path: /cluster/allowSchedulingOnMasters
+          op: add
+          value: true
+    controlplane:
+      generateType: controlplane
+      configPatches:
+        - path: /machine/install
+          op: replace
+          value:
+            bootloader: true
+            disk: /dev/vda
+            image: ghcr.io/siderolabs/installer:v1.2.4
+            wipe: false
+            extraKernelArgs:
+              - console=ttyS0
+        - path: /machine/kubelet/extraArgs
+          op: add
+          value:
+            cloud-provider: external
+        - path: /cluster/apiServer/extraArgs
+          op: add
+          value:
+            cloud-provider: external
+        - path: /cluster/controllerManager/extraArgs
+          op: add
+          value:
+            cloud-provider: external
+        - path: /cluster/allowSchedulingOnMasters
+          op: add
+          value: true
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: KubevirtMachineTemplate
+metadata:
+  name: talos-cp
+spec:
+  template:
+    spec:
+      virtualMachineTemplate:
+        spec:
+          runStrategy: Always
+          template:
+            spec:
+              evictionStrategy: External
+              volumes:
+                - name: vmdisk
+                  dataVolume:
+                    name: vmdisk-dv
+              domain:
+                cpu:
+                  cores: 2
+                devices:
+                  disks:
+                    - name: vmdisk
+                      disk:
+                        bus: virtio
+                memory:
+                  guest: "4Gi"
+          dataVolumeTemplates:
+            - metadata:
+                name: vmdisk-dv
+              spec:
+                pvc:
+                  accessModes:
+                    - ReadWriteOnce
+                  resources:
+                    requests:
+                      storage: "15Gi"
+                source:
+                  http:
+                    url: "https://github.com/siderolabs/talos/releases/download/v1.2.4/talos-amd64.iso"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: KubevirtMachineTemplate
+metadata:
+  name: talos
+spec:
+  template:
+    spec:
+      virtualMachineTemplate:
+        spec:
+          runStrategy: Always
+          template:
+            spec:
+              evictionStrategy: External
+              volumes:
+                - name: vmdisk
+                  dataVolume:
+                    name: vmdisk-dv
+              domain:
+                cpu:
+                  cores: 2
+                devices:
+                  disks:
+                    - name: vmdisk
+                      disk:
+                        bus: virtio
+                memory:
+                  guest: "4Gi"
+          dataVolumeTemplates:
+            - metadata:
+                name: vmdisk-dv
+              spec:
+                pvc:
+                  accessModes:
+                    - ReadWriteOnce
+                  resources:
+                    requests:
+                      storage: "15Gi"
+                source:
+                  http:
+                    url: "https://github.com/siderolabs/talos/releases/download/v1.2.4/talos-amd64.iso"
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+kind: TalosConfigTemplate
+metadata:
+  name: talos
+spec:
+  template:
+    spec:
+      generateType: join
+      talosVersion: v1.2.4
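Because every object in kvtalos.yaml hardcodes the name talos and references its siblings by name, the manifest can be exercised on its own, outside the Coder template (a sketch; the namespace name is arbitrary):

#+begin_src shell
kubectl create namespace talos-test
kubectl apply -n talos-test -f examples/templates/kubevirt-talos/manifest/kvtalos.yaml
# watch the cluster objects come up
kubectl get cluster,taloscontrolplane,kubevirtcluster -n talos-test
#+end_src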
From dce7e4967e3d09e0f19e3c0b4219830d9eee6bf0 Mon Sep 17 00:00:00 2001
From: Hippie Hacker
Date: Thu, 13 Oct 2022 08:15:24 -0600
Subject: [PATCH 18/19] Include terraform changes in manifest

---
 .../templates/kubevirt-talos/manifest/kvtalos.yaml | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/examples/templates/kubevirt-talos/manifest/kvtalos.yaml b/examples/templates/kubevirt-talos/manifest/kvtalos.yaml
index 16fa214df3f73..a609441e8b5ad 100644
--- a/examples/templates/kubevirt-talos/manifest/kvtalos.yaml
+++ b/examples/templates/kubevirt-talos/manifest/kvtalos.yaml
@@ -73,7 +73,7 @@ spec:
           value:
             bootloader: true
             disk: /dev/vda
-            image: ghcr.io/siderolabs/installer:v1.2.4
+            image: ghcr.io/siderolabs/installer:v1.2.5
             wipe: false
             extraKernelArgs:
               - console=ttyS0
@@ -100,7 +100,7 @@ spec:
           value:
             bootloader: true
             disk: /dev/vda
-            image: ghcr.io/siderolabs/installer:v1.2.4
+            image: ghcr.io/siderolabs/installer:v1.2.5
             wipe: false
             extraKernelArgs:
               - console=ttyS0
@@ -144,7 +144,7 @@ spec:
                   disks:
                     - name: vmdisk
                       disk:
-                        bus: virtio
+                        bus: scsi
                 memory:
                   guest: "4Gi"
           dataVolumeTemplates:
@@ -159,7 +159,7 @@ spec:
                       storage: "15Gi"
                 source:
                   http:
-                    url: "https://github.com/siderolabs/talos/releases/download/v1.2.4/talos-amd64.iso"
+                    url: "https://github.com/siderolabs/talos/releases/download/v1.2.5/nocloud-amd64.raw.xz"
 ---
 apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
 kind: KubevirtMachineTemplate
@@ -185,7 +185,7 @@ spec:
                   disks:
                     - name: vmdisk
                       disk:
-                        bus: virtio
+                        bus: scsi
                 memory:
                   guest: "4Gi"
           dataVolumeTemplates:
@@ -200,7 +200,7 @@ spec:
                       storage: "15Gi"
                 source:
                   http:
-                    url: "https://github.com/siderolabs/talos/releases/download/v1.2.4/talos-amd64.iso"
+                    url: "https://github.com/siderolabs/talos/releases/download/v1.2.5/nocloud-amd64.raw.xz"
 ---
 apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
 kind: TalosConfigTemplate
@@ -210,4 +210,4 @@ spec:
   template:
     spec:
       generateType: join
-      talosVersion: v1.2.4
+      talosVersion: v1.2.5
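A bump like this has to land in both the terraform template and the standalone manifest; a crude drift check (a sketch, nothing more) is to grep the pinned versions and disk settings in both files and eyeball that they agree:

#+begin_src shell
grep -n 'siderolabs' \
  examples/templates/kubevirt-talos/cluster.tf \
  examples/templates/kubevirt-talos/manifest/kvtalos.yaml
grep -n 'bus' \
  examples/templates/kubevirt-talos/cluster.tf \
  examples/templates/kubevirt-talos/manifest/kvtalos.yaml
#+end_src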
From f42b149d220a7f9f5fb786f82f43e1089eb7a666 Mon Sep 17 00:00:00 2001
From: Hippie Hacker
Date: Thu, 13 Oct 2022 13:47:03 -0600
Subject: [PATCH 19/19] Ensure CDI support for KubeVirt in .sharing.io/init

Co-Authored-By: BobyMCbobs
---
 .sharing.io/init | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/.sharing.io/init b/.sharing.io/init
index a585ec1538895..14a9cc27802b2 100755
--- a/.sharing.io/init
+++ b/.sharing.io/init
@@ -47,6 +47,7 @@ export CODER_EMAIL CODER_PASSWORD CODER_USERNAME
 coder login $CODER_URL -u $CODER_USERNAME -p $CODER_PASSWORD -e $CODER_EMAIL
 export HELM_VALUES="service:\n  type: NodePort\nsyncer:\n  extraArgs:\n    - --tls-san=${SHARINGIO_PAIR_BASE_DNS_NAME}"
 export EXP_CLUSTER_RESOURCE_SET=true
+
 # Install kubevirt
 export RELEASE=$(curl https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt)
 # Deploy the KubeVirt operator
@@ -56,6 +57,13 @@ kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}
 # wait until all KubeVirt components are up
 kubectl -n kubevirt wait kv kubevirt --for condition=Available
 
+# install CDI support for KubeVirt
+export TAG=$(curl -s -w %{redirect_url} https://github.com/kubevirt/containerized-data-importer/releases/latest)
+export VERSION=$(echo ${TAG##*/})
+kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-operator.yaml
+kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-cr.yaml
+
+# cluster-api bootstrapping
 clusterctl init --infrastructure vcluster
 clusterctl init --infrastructure kubevirt
 clusterctl init --infrastructure=packet
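CDI has to be ready before the importer pods can populate the vmdisk DataVolumes used by the template; the same wait pattern used for kubevirt above applies (a sketch, assuming the default install's cluster-scoped CDI resource named cdi):

#+begin_src shell
kubectl wait cdi cdi --for=condition=Available --timeout=5m
kubectl get pods -n cdi
#+end_src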