diff --git a/.prow/e2e-features.yaml b/.prow/e2e-features.yaml index 93a9e3e22..aa4c12907 100644 --- a/.prow/e2e-features.yaml +++ b/.prow/e2e-features.yaml @@ -16,7 +16,6 @@ presubmits: - name: pull-machine-controller-e2e-invalid-objects-get-rejected always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-aws: "true" @@ -35,7 +34,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -44,13 +43,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-custom-ca always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-openstack: "true" @@ -63,7 +63,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -72,13 +72,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-ubuntu-upgrade always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-openstack: "true" @@ -90,7 +91,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -99,13 +100,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-deployment-upgrade always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-hetzner: "true" @@ -116,7 +118,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -125,5 +127,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/postsubmits.yaml b/.prow/postsubmits.yaml index f3e9361c8..baba3fd92 100644 --- a/.prow/postsubmits.yaml +++ b/.prow/postsubmits.yaml @@ -26,7 +26,7 @@ postsubmits: preset-goproxy: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - /bin/bash - -c @@ -54,7 +54,7 @@ postsubmits: preset-goproxy: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/upload-gocache.sh" resources: diff --git a/.prow/provider-alibaba.yaml b/.prow/provider-alibaba.yaml index da5dd7f80..e0cfc2ed2 100644 --- a/.prow/provider-alibaba.yaml +++ b/.prow/provider-alibaba.yaml @@ -17,7 +17,6 @@ presubmits: optional: true always_run: false decorate: true - error_on_eviction: true clone_uri: 
"ssh://git@github.com/kubermatic/machine-controller.git" max_concurrency: 1 labels: @@ -30,7 +29,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -39,5 +38,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-anexia.yaml b/.prow/provider-anexia.yaml index 50825a12b..96a7ab09e 100644 --- a/.prow/provider-anexia.yaml +++ b/.prow/provider-anexia.yaml @@ -16,7 +16,6 @@ presubmits: - name: pull-machine-controller-e2e-anexia always_run: false decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-hetzner: "true" @@ -28,7 +27,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -37,5 +36,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-aws.yaml b/.prow/provider-aws.yaml index 834031126..1cc78bcc3 100644 --- a/.prow/provider-aws.yaml +++ b/.prow/provider-aws.yaml @@ -16,7 +16,6 @@ presubmits: - name: pull-machine-controller-e2e-aws always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-aws: "true" @@ -29,7 +28,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -38,13 +37,46 @@ presubmits: privileged: true resources: requests: - memory: 4Gi - cpu: 1 + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi + + - name: pull-machine-controller-e2e-aws-legacy-userdata + always_run: true + decorate: true + clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + labels: + preset-aws: "true" + preset-hetzner: "true" + preset-e2e-ssh: "true" + preset-rhel: "true" + preset-goproxy: "true" + preset-kind-volume-mounts: "true" + preset-docker-mirror: "true" + preset-kubeconfig-ci: "true" + spec: + containers: + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 + env: + - name: OPERATING_SYSTEM_MANAGER + value: "false" + command: + - "./hack/ci/run-e2e-tests.sh" + args: + - "TestAWSProvisioningE2E" + securityContext: + privileged: true + resources: + requests: + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-aws-arm always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-aws: "true" @@ -56,7 +88,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -65,13 +97,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-aws-ebs-encryption-enabled always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: 
preset-aws: "true" @@ -83,7 +116,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -92,13 +125,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-aws-flatcar-containerd always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-aws: "true" @@ -110,7 +144,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -119,13 +153,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-aws-spot-instance always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-aws: "true" @@ -138,7 +173,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -147,13 +182,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-aws-sles always_run: false decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-aws: "true" @@ -165,7 +201,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -174,13 +210,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-aws-flatcar-coreos-cloud-init always_run: false decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-aws: "true" @@ -192,7 +229,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -201,13 +238,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-aws-centos8 always_run: false decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-aws: "true" @@ -219,7 +257,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -228,13 +266,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-aws-assume-role always_run: false decorate: true - error_on_eviction: 
true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-aws-assume-role: "true" @@ -246,7 +285,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -255,5 +294,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-azure.yaml b/.prow/provider-azure.yaml index d1e9f689d..6bee8b9f2 100644 --- a/.prow/provider-azure.yaml +++ b/.prow/provider-azure.yaml @@ -16,7 +16,6 @@ presubmits: - name: pull-machine-controller-e2e-azure always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-azure: "true" @@ -29,7 +28,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -38,13 +37,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-azure-custom-image-reference always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-azure: "true" @@ -57,7 +57,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -66,14 +66,15 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-azure-redhat-satellite optional: true always_run: false decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-azure: "true" @@ -86,7 +87,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -95,5 +96,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-digitalocean.yaml b/.prow/provider-digitalocean.yaml index 7989696f6..45f53e484 100644 --- a/.prow/provider-digitalocean.yaml +++ b/.prow/provider-digitalocean.yaml @@ -16,7 +16,6 @@ presubmits: - name: pull-machine-controller-e2e-digitalocean always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-digitalocean: "true" @@ -28,7 +27,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -37,5 +36,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-equinix-metal.yaml b/.prow/provider-equinix-metal.yaml index 9754203b5..4193213d6 100644 --- a/.prow/provider-equinix-metal.yaml +++ b/.prow/provider-equinix-metal.yaml 
@@ -17,7 +17,6 @@ presubmits: optional: true run_if_changed: pkg\/cloudprovider\/provider\/equinixmetal\/.* decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-hetzner: "true" @@ -29,7 +28,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -38,5 +37,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-gcp.yaml b/.prow/provider-gcp.yaml index c2788019a..d16d63ae7 100644 --- a/.prow/provider-gcp.yaml +++ b/.prow/provider-gcp.yaml @@ -16,7 +16,6 @@ presubmits: - name: pull-machine-controller-e2e-gce always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-gce: "true" @@ -29,7 +28,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -38,5 +37,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-hetzner.yaml b/.prow/provider-hetzner.yaml index 6f52328b1..493507eec 100644 --- a/.prow/provider-hetzner.yaml +++ b/.prow/provider-hetzner.yaml @@ -16,7 +16,6 @@ presubmits: - name: pull-machine-controller-e2e-hetzner always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-hetzner: "true" @@ -27,7 +26,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -36,5 +35,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-kubevirt.yaml b/.prow/provider-kubevirt.yaml index 477f9ada2..29184045a 100644 --- a/.prow/provider-kubevirt.yaml +++ b/.prow/provider-kubevirt.yaml @@ -16,7 +16,6 @@ presubmits: - name: pull-machine-controller-e2e-kubevirt always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" max_concurrency: 1 labels: @@ -30,7 +29,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -39,5 +38,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-linode.yaml b/.prow/provider-linode.yaml index f1b416667..2e3cf370f 100644 --- a/.prow/provider-linode.yaml +++ b/.prow/provider-linode.yaml @@ -17,7 +17,6 @@ presubmits: always_run: false optional: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-hetzner: "true" @@ -29,7 +28,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: 
quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -38,5 +37,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-nutanix.yaml b/.prow/provider-nutanix.yaml index 6b3c68fc0..a1e28f26b 100644 --- a/.prow/provider-nutanix.yaml +++ b/.prow/provider-nutanix.yaml @@ -19,7 +19,6 @@ presubmits: # TODO uncomment this when Nutanix is in a working condition #run_if_changed: "(pkg/cloudprovider/provider/nutanix/)" decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-hetzner: "true" @@ -31,7 +30,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -40,5 +39,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-openstack.yaml b/.prow/provider-openstack.yaml index 7fec6859a..a9205eeb4 100644 --- a/.prow/provider-openstack.yaml +++ b/.prow/provider-openstack.yaml @@ -16,7 +16,6 @@ presubmits: - name: pull-machine-controller-e2e-openstack always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-openstack: "true" @@ -29,7 +28,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -38,13 +37,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-openstack-project-auth always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-openstack: "true" @@ -57,7 +57,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -66,5 +66,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-scaleway.yaml b/.prow/provider-scaleway.yaml index 3fe04bfc7..1a2f06e0c 100644 --- a/.prow/provider-scaleway.yaml +++ b/.prow/provider-scaleway.yaml @@ -16,7 +16,6 @@ presubmits: - name: pull-machine-controller-e2e-scaleway always_run: false decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-scaleway: "true" @@ -28,7 +27,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -37,5 +36,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-vmware-cloud-director.yaml b/.prow/provider-vmware-cloud-director.yaml index 99704f8b6..32052adcd 100644 --- a/.prow/provider-vmware-cloud-director.yaml +++ b/.prow/provider-vmware-cloud-director.yaml @@ -16,7 
+16,6 @@ presubmits: - name: pull-machine-controller-e2e-vmware-cloud-director always_run: false decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" run_if_changed: "(pkg/cloudprovider/provider/vmwareclouddirector/|pkg/userdata)" labels: @@ -30,7 +29,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -39,5 +38,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/provider-vsphere.yaml b/.prow/provider-vsphere.yaml index 10a9d9ed9..104d807c8 100644 --- a/.prow/provider-vsphere.yaml +++ b/.prow/provider-vsphere.yaml @@ -16,12 +16,11 @@ presubmits: - name: pull-machine-controller-e2e-vsphere always_run: true decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-hetzner: "true" preset-e2e-ssh: "true" - preset-vsphere: "true" + preset-vsphere-legacy: "true" preset-rhel: "true" preset-goproxy: "true" preset-kind-volume-mounts: "true" @@ -29,7 +28,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -38,13 +37,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-vsphere-datastore-cluster always_run: false decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-vsphere: "true" @@ -57,7 +57,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -66,13 +66,14 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-e2e-vsphere-resource-pool always_run: false decorate: true - error_on_eviction: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" labels: preset-vsphere: "true" @@ -85,7 +86,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-5 + - image: quay.io/kubermatic/build:go-1.18-node-16-kind-0.14-9 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -94,5 +95,7 @@ presubmits: privileged: true resources: requests: - memory: 1Gi - cpu: 500m + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi diff --git a/.prow/verify.yaml b/.prow/verify.yaml index dc256eec7..43de08569 100644 --- a/.prow/verify.yaml +++ b/.prow/verify.yaml @@ -21,7 +21,7 @@ presubmits: preset-goproxy: "true" spec: containers: - - image: golang:1.18.2 + - image: golang:1.18.3 command: - make args: @@ -29,7 +29,10 @@ presubmits: - all resources: requests: - cpu: 1 + memory: 6Gi + cpu: 2 + limits: + memory: 6Gi - name: pull-machine-controller-dependencies always_run: true @@ -39,14 +42,18 @@ presubmits: preset-goproxy: "true" spec: containers: - - image: golang:1.18.2 + - image: golang:1.18.3 command: - make args: - check-dependencies resources: requests: - cpu: 800m + memory: 32Mi + cpu: 50m + limits: + 
memory: 256Mi + cpu: 250m - name: pull-machine-controller-lint always_run: true @@ -65,6 +72,8 @@ presubmits: requests: cpu: 800m memory: 6Gi + limits: + memory: 6Gi - name: pull-machine-controller-yamllint always_run: true @@ -81,7 +90,11 @@ presubmits: - "yamllint -c .yamllint.conf ." resources: requests: - cpu: 200m + memory: 32Mi + cpu: 50m + limits: + memory: 256Mi + cpu: 250m - name: pre-machine-controller-verify-shfmt run_if_changed: "^hack/" @@ -122,8 +135,11 @@ presubmits: - "./hack/verify-boilerplate.sh" resources: requests: - memory: 64Mi - cpu: 100m + memory: 32Mi + cpu: 50m + limits: + memory: 256Mi + cpu: 250m - name: pull-machine-controller-license-validation run_if_changed: "^go.(mod|sum)$" @@ -138,8 +154,11 @@ presubmits: - ./hack/verify-licenses.sh resources: requests: - memory: 512Mi - cpu: 1 + memory: 32Mi + cpu: 50m + limits: + memory: 256Mi + cpu: 250m - name: pull-machine-controller-test always_run: true @@ -149,7 +168,7 @@ presubmits: preset-goproxy: "true" spec: containers: - - image: golang:1.18.2 + - image: golang:1.18.3 command: - make args: @@ -157,4 +176,7 @@ presubmits: - test-unit resources: requests: - cpu: 800m + cpu: 3 + memory: 6Gi + limits: + memory: 6Gi diff --git a/Dockerfile b/Dockerfile index 9485e86db..1a835fe9c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG GO_VERSION=1.18.2 +ARG GO_VERSION=1.18.3 FROM docker.io/golang:${GO_VERSION} AS builder WORKDIR /go/src/github.com/kubermatic/machine-controller COPY . . RUN make all -FROM alpine:3.12 +FROM alpine:3.16 RUN apk add --no-cache ca-certificates cdrkit diff --git a/Makefile b/Makefile index 8cce2f7a5..bfb27d53b 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ SHELL = /bin/bash -eu -o pipefail -GO_VERSION ?= 1.18.2 +GO_VERSION ?= 1.18.3 GOOS ?= $(shell go env GOOS) @@ -95,8 +95,7 @@ test-unit-docker: .PHONY: test-unit test-unit: - @#The `-race` flag requires CGO - CGO_ENABLED=1 go test -v -race ./... + go test -v ./... 
 .PHONY: build-tests
 build-tests:
diff --git a/README.md b/README.md
index 9674c1d0d..2627af999 100644
--- a/README.md
+++ b/README.md
@@ -43,7 +43,6 @@ Currently supported K8S versions are:
 - 1.24
 - 1.23
 - 1.22
-- 1.21
 
 ## What does not work
 
diff --git a/cmd/machine-controller/main.go b/cmd/machine-controller/main.go
index fe5a52183..1f4f8f9c1 100644
--- a/cmd/machine-controller/main.go
+++ b/cmd/machine-controller/main.go
@@ -152,7 +152,7 @@ func main() {
 	flag.StringVar(&healthProbeAddress, "health-probe-address", "127.0.0.1:8085", "The address on which the liveness check on /healthz and readiness check on /readyz will be available")
 	flag.StringVar(&metricsAddress, "metrics-address", "127.0.0.1:8080", "The address on which Prometheus metrics will be available under /metrics")
 	flag.StringVar(&name, "name", "", "When set, the controller will only process machines with the label \"machine.k8s.io/controller\": name")
-	flag.StringVar(&joinClusterTimeout, "join-cluster-timeout", "", "when set, machines that have an owner and do not join the cluster within the configured duration will be deleted, so the owner re-creats them")
+	flag.StringVar(&joinClusterTimeout, "join-cluster-timeout", "", "when set, machines that have an owner and do not join the cluster within the configured duration will be deleted, so the owner re-creates them")
 	flag.StringVar(&bootstrapTokenServiceAccountName, "bootstrap-token-service-account-name", "", "When set use the service account token from this SA as bootstrap token instead of creating a temporary one. Passed in namespace/name format")
 	flag.BoolVar(&profiling, "enable-profiling", false, "when set, enables the endpoints on the http server under /debug/pprof/")
 	flag.DurationVar(&skipEvictionAfter, "skip-eviction-after", 2*time.Hour, "Skips the eviction if a machine is not gone after the specified duration.")
@@ -378,7 +378,7 @@ func (bs *controllerBootstrap) Start(ctx context.Context) error {
 	}
 
 	// Migrate providerConfig field to providerSpec field.
- if err := migrations.MigrateProviderConfigToProviderSpecIfNecesary(ctx, bs.opt.cfg, client); err != nil { + if err := migrations.MigrateProviderConfigToProviderSpecIfNecessary(ctx, bs.opt.cfg, client); err != nil { return fmt.Errorf("migration of providerConfig field to providerSpec field failed: %w", err) } diff --git a/examples/machine-controller.yaml b/examples/machine-controller.yaml index fe5a3409e..b724ba5e8 100644 --- a/examples/machine-controller.yaml +++ b/examples/machine-controller.yaml @@ -218,6 +218,7 @@ spec: - -cluster-dns=10.10.10.10 - -metrics-address=0.0.0.0:8080 - -health-probe-address=0.0.0.0:8085 + - -use-osm=true # Machines that fail to join the cluster within this timeout and # are owned by a MachineSet will get deleted so the MachineSet # controller re-creates them @@ -260,6 +261,8 @@ spec: - /usr/local/bin/webhook - -logtostderr - -v=6 + - -use-osm=true + - -namespace=kube-system - -listen-address=0.0.0.0:9876 volumeMounts: - name: machine-controller-admission-cert @@ -566,6 +569,34 @@ subjects: name: machine-controller namespace: kube-system --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: machine-controller-webhook + namespace: kube-system +rules: + - apiGroups: + - operatingsystemmanager.k8c.io + resources: + - operatingsystemprofiles + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: machine-controller-webhook + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: machine-controller-webhook +subjects: + - kind: ServiceAccount + name: machine-controller + namespace: kube-system +--- apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: diff --git a/examples/openstack-machinedeployment.yaml b/examples/openstack-machinedeployment.yaml index cc7d4e23c..a5bc3eedc 100644 --- a/examples/openstack-machinedeployment.yaml +++ b/examples/openstack-machinedeployment.yaml @@ -129,7 +129,7 @@ spec: image: "Ubuntu 18.04 amd64" flavor: "m1.small" # UUID of the server group - # used to configure affinity or anti-affinity of the VM instaces relative to hypervisor + # used to configure affinity or anti-affinity of the VM instances relative to hypervisor serverGroup: "" securityGroups: - configMapKeyRef: diff --git a/examples/operating-system-manager.yaml b/examples/operating-system-manager.yaml new file mode 100644 index 000000000..150871828 --- /dev/null +++ b/examples/operating-system-manager.yaml @@ -0,0 +1,1412 @@ +# Source: https://github.com/kubermatic/operating-system-manager/tree/v0.5.0/deploy +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: operatingsystemconfigs.operatingsystemmanager.k8c.io +spec: + group: operatingsystemmanager.k8c.io + names: + kind: OperatingSystemConfig + listKind: OperatingSystemConfigList + plural: operatingsystemconfigs + shortNames: + - osc + singular: operatingsystemconfig + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: OperatingSystemConfig is the object that represents the OperatingSystemConfig + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + OperatingSystemConfigSpec represents the operating system + configuration spec. + properties: + bootstrapConfig: + description: + BootstrapConfig is used for initial configuration of + machine and to fetch the kubernetes secret that contains the provisioning + config. + properties: + files: + description: + Files is a list of files that should exist in the + instance + items: + description: + File is a file that should get written to the host's + file system. The content can either be inlined or referenced + from a secret in the same namespace. + properties: + content: + description: Content describe the file's content. + properties: + inline: + description: + Inline is a struct that contains information + about the inlined data. + properties: + data: + description: Data is the file's data. + type: string + encoding: + description: + Encoding is the file's encoding (e.g. + base64). + type: string + required: + - data + type: object + type: object + path: + description: + Path is the path of the file system where the + file should get written to. + type: string + permissions: + default: 644 + description: + Permissions describes with which permissions + the file should get written to the file system. Should + be in decimal base and without any leading zeroes. + format: int32 + type: integer + required: + - content + - path + type: object + type: array + modules: + description: + CloudInitModules contains the supported cloud-init + modules + properties: + bootcmd: + description: + BootCMD module runs arbitrary commands very early + in the boot process, only slightly after a boothook would + run. + items: + type: string + type: array + rh_subscription: + additionalProperties: + type: string + description: + RHSubscription registers a Red Hat system either + by username and password or activation and org + type: object + runcmd: + description: + RunCMD Run arbitrary commands at a rc.local like + level with output to the console. + items: + type: string + type: array + yum_repo_dir: + description: + "YumRepoDir the repo parts directory where individual + yum repo config files will be written. Default: /etc/yum.repos.d" + type: string + yum_repos: + additionalProperties: + additionalProperties: + type: string + type: object + description: + YumRepos adds yum repository configuration to + the system. + type: object + type: object + units: + description: + Units a list of the systemd unit files which will + run on the instance + items: + description: + Unit is a systemd unit used for the operating system + config. + properties: + content: + description: Content is the unit's content. + type: string + dropIns: + description: DropIns is a list of drop-ins for this unit. + items: + description: + DropIn is a drop-in configuration for a systemd + unit. + properties: + content: + description: Content is the content of the drop-in. + type: string + name: + description: Name is the name of the drop-in. 
+ type: string + required: + - content + - name + type: object + type: array + enable: + description: + Enable describes whether the unit is enabled + or not. + type: boolean + mask: + description: + Mask describes whether the unit is masked or + not. + type: boolean + name: + description: Name is the name of a unit. + type: string + required: + - name + type: object + type: array + userSSHKeys: + description: UserSSHKeys is a list of attached user ssh keys + items: + type: string + type: array + type: object + cloudProvider: + description: + CloudProvider represent the cloud provider that support + the given operating system version + properties: + name: + description: Name represents the name of the supported cloud provider + enum: + - aws + - azure + - digitalocean + - gce + - hetzner + - kubevirt + - linode + - nutanix + - openstack + - equinixmetal + - vsphere + - fake + - alibaba + - anexia + - scaleway + - baremetal + - external + - vmware-cloud-director + type: string + spec: + description: + Spec represents the os/image reference in the supported + cloud provider + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - name + type: object + osName: + description: "OSType represent the operating system name e.g: ubuntu" + enum: + - flatcar + - rhel + - centos + - ubuntu + - sles + - amzn2 + - rockylinux + type: string + osVersion: + description: OSVersion the version of the operating system + type: string + provisioningConfig: + description: + ProvisioningConfig is used for provisioning the worker + node. + properties: + files: + description: + Files is a list of files that should exist in the + instance + items: + description: + File is a file that should get written to the host's + file system. The content can either be inlined or referenced + from a secret in the same namespace. + properties: + content: + description: Content describe the file's content. + properties: + inline: + description: + Inline is a struct that contains information + about the inlined data. + properties: + data: + description: Data is the file's data. + type: string + encoding: + description: + Encoding is the file's encoding (e.g. + base64). + type: string + required: + - data + type: object + type: object + path: + description: + Path is the path of the file system where the + file should get written to. + type: string + permissions: + default: 644 + description: + Permissions describes with which permissions + the file should get written to the file system. Should + be in decimal base and without any leading zeroes. + format: int32 + type: integer + required: + - content + - path + type: object + type: array + modules: + description: + CloudInitModules contains the supported cloud-init + modules + properties: + bootcmd: + description: + BootCMD module runs arbitrary commands very early + in the boot process, only slightly after a boothook would + run. + items: + type: string + type: array + rh_subscription: + additionalProperties: + type: string + description: + RHSubscription registers a Red Hat system either + by username and password or activation and org + type: object + runcmd: + description: + RunCMD Run arbitrary commands at a rc.local like + level with output to the console. + items: + type: string + type: array + yum_repo_dir: + description: + "YumRepoDir the repo parts directory where individual + yum repo config files will be written. 
Default: /etc/yum.repos.d" + type: string + yum_repos: + additionalProperties: + additionalProperties: + type: string + type: object + description: + YumRepos adds yum repository configuration to + the system. + type: object + type: object + units: + description: + Units a list of the systemd unit files which will + run on the instance + items: + description: + Unit is a systemd unit used for the operating system + config. + properties: + content: + description: Content is the unit's content. + type: string + dropIns: + description: DropIns is a list of drop-ins for this unit. + items: + description: + DropIn is a drop-in configuration for a systemd + unit. + properties: + content: + description: Content is the content of the drop-in. + type: string + name: + description: Name is the name of the drop-in. + type: string + required: + - content + - name + type: object + type: array + enable: + description: + Enable describes whether the unit is enabled + or not. + type: boolean + mask: + description: + Mask describes whether the unit is masked or + not. + type: boolean + name: + description: Name is the name of a unit. + type: string + required: + - name + type: object + type: array + userSSHKeys: + description: UserSSHKeys is a list of attached user ssh keys + items: + type: string + type: array + type: object + required: + - bootstrapConfig + - cloudProvider + - osName + - osVersion + - provisioningConfig + type: object + required: + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: operatingsystemprofiles.operatingsystemmanager.k8c.io +spec: + group: operatingsystemmanager.k8c.io + names: + kind: OperatingSystemProfile + listKind: OperatingSystemProfileList + plural: operatingsystemprofiles + shortNames: + - osp + singular: operatingsystemprofile + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: OperatingSystemProfile is the object that represents the OperatingSystemProfile + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + OperatingSystemProfileSpec represents the operating system + configuration spec. + properties: + bootstrapConfig: + description: + BootstrapConfig is used for initial configuration of + machine and to fetch the kubernetes secret that contains the provisioning + config. + properties: + files: + description: + Files is a list of files that should exist in the + instance + items: + description: + File is a file that should get written to the host's + file system. The content can either be inlined or referenced + from a secret in the same namespace. + properties: + content: + description: Content describe the file's content. 
+ properties: + inline: + description: + Inline is a struct that contains information + about the inlined data. + properties: + data: + description: Data is the file's data. + type: string + encoding: + description: + Encoding is the file's encoding (e.g. + base64). + type: string + required: + - data + type: object + type: object + path: + description: + Path is the path of the file system where the + file should get written to. + type: string + permissions: + default: 644 + description: + Permissions describes with which permissions + the file should get written to the file system. Should + be in decimal base and without any leading zeroes. + format: int32 + type: integer + required: + - content + - path + type: object + type: array + modules: + description: + CloudInitModules field contains the optional cloud-init + modules which are supported by OSM + properties: + bootcmd: + description: + BootCMD module runs arbitrary commands very early + in the boot process, only slightly after a boothook would + run. + items: + type: string + type: array + rh_subscription: + additionalProperties: + type: string + description: + RHSubscription registers a Red Hat system either + by username and password or activation and org + type: object + runcmd: + description: + RunCMD Run arbitrary commands at a rc.local like + level with output to the console. + items: + type: string + type: array + yum_repo_dir: + description: + "YumRepoDir the repo parts directory where individual + yum repo config files will be written. Default: /etc/yum.repos.d" + type: string + yum_repos: + additionalProperties: + additionalProperties: + type: string + type: object + description: + YumRepos adds yum repository configuration to + the system. + type: object + type: object + supportedContainerRuntimes: + description: + SupportedContainerRuntimes represents the container + runtimes supported by the given OS + items: + description: + ContainerRuntimeSpec aggregates information about + a specific container runtime + properties: + files: + description: + Files to add to the main files list when the + containerRuntime is selected + items: + description: + File is a file that should get written to + the host's file system. The content can either be inlined + or referenced from a secret in the same namespace. + properties: + content: + description: Content describe the file's content. + properties: + inline: + description: + Inline is a struct that contains + information about the inlined data. + properties: + data: + description: Data is the file's data. + type: string + encoding: + description: + Encoding is the file's encoding + (e.g. base64). + type: string + required: + - data + type: object + type: object + path: + description: + Path is the path of the file system where + the file should get written to. + type: string + permissions: + default: 644 + description: + Permissions describes with which permissions + the file should get written to the file system. + Should be in decimal base and without any leading + zeroes. 
+ format: int32 + type: integer + required: + - content + - path + type: object + type: array + name: + description: Name of the Container runtime + enum: + - docker + - containerd + type: string + templates: + additionalProperties: + type: string + description: + Templates to add to the available templates + when the containerRuntime is selected + type: object + required: + - files + - name + type: object + type: array + templates: + additionalProperties: + type: string + description: Templates to be included in units and files + type: object + units: + description: + Units a list of the systemd unit files which will + run on the instance + items: + description: + Unit is a systemd unit used for the operating system + config. + properties: + content: + description: Content is the unit's content. + type: string + dropIns: + description: DropIns is a list of drop-ins for this unit. + items: + description: + DropIn is a drop-in configuration for a systemd + unit. + properties: + content: + description: Content is the content of the drop-in. + type: string + name: + description: Name is the name of the drop-in. + type: string + required: + - content + - name + type: object + type: array + enable: + description: + Enable describes whether the unit is enabled + or not. + type: boolean + mask: + description: + Mask describes whether the unit is masked or + not. + type: boolean + name: + description: Name is the name of a unit. + type: string + required: + - name + type: object + type: array + type: object + osName: + description: "OSType represent the operating system name e.g: ubuntu" + enum: + - flatcar + - rhel + - centos + - ubuntu + - sles + - amzn2 + - rockylinux + type: string + osVersion: + description: OSVersion the version of the operating system + type: string + provisioningConfig: + description: + ProvisioningConfig is used for provisioning the worker + node. + properties: + files: + description: + Files is a list of files that should exist in the + instance + items: + description: + File is a file that should get written to the host's + file system. The content can either be inlined or referenced + from a secret in the same namespace. + properties: + content: + description: Content describe the file's content. + properties: + inline: + description: + Inline is a struct that contains information + about the inlined data. + properties: + data: + description: Data is the file's data. + type: string + encoding: + description: + Encoding is the file's encoding (e.g. + base64). + type: string + required: + - data + type: object + type: object + path: + description: + Path is the path of the file system where the + file should get written to. + type: string + permissions: + default: 644 + description: + Permissions describes with which permissions + the file should get written to the file system. Should + be in decimal base and without any leading zeroes. + format: int32 + type: integer + required: + - content + - path + type: object + type: array + modules: + description: + CloudInitModules field contains the optional cloud-init + modules which are supported by OSM + properties: + bootcmd: + description: + BootCMD module runs arbitrary commands very early + in the boot process, only slightly after a boothook would + run. 
+ items: + type: string + type: array + rh_subscription: + additionalProperties: + type: string + description: + RHSubscription registers a Red Hat system either + by username and password or activation and org + type: object + runcmd: + description: + RunCMD Run arbitrary commands at a rc.local like + level with output to the console. + items: + type: string + type: array + yum_repo_dir: + description: + "YumRepoDir the repo parts directory where individual + yum repo config files will be written. Default: /etc/yum.repos.d" + type: string + yum_repos: + additionalProperties: + additionalProperties: + type: string + type: object + description: + YumRepos adds yum repository configuration to + the system. + type: object + type: object + supportedContainerRuntimes: + description: + SupportedContainerRuntimes represents the container + runtimes supported by the given OS + items: + description: + ContainerRuntimeSpec aggregates information about + a specific container runtime + properties: + files: + description: + Files to add to the main files list when the + containerRuntime is selected + items: + description: + File is a file that should get written to + the host's file system. The content can either be inlined + or referenced from a secret in the same namespace. + properties: + content: + description: Content describe the file's content. + properties: + inline: + description: + Inline is a struct that contains + information about the inlined data. + properties: + data: + description: Data is the file's data. + type: string + encoding: + description: + Encoding is the file's encoding + (e.g. base64). + type: string + required: + - data + type: object + type: object + path: + description: + Path is the path of the file system where + the file should get written to. + type: string + permissions: + default: 644 + description: + Permissions describes with which permissions + the file should get written to the file system. + Should be in decimal base and without any leading + zeroes. + format: int32 + type: integer + required: + - content + - path + type: object + type: array + name: + description: Name of the Container runtime + enum: + - docker + - containerd + type: string + templates: + additionalProperties: + type: string + description: + Templates to add to the available templates + when the containerRuntime is selected + type: object + required: + - files + - name + type: object + type: array + templates: + additionalProperties: + type: string + description: Templates to be included in units and files + type: object + units: + description: + Units a list of the systemd unit files which will + run on the instance + items: + description: + Unit is a systemd unit used for the operating system + config. + properties: + content: + description: Content is the unit's content. + type: string + dropIns: + description: DropIns is a list of drop-ins for this unit. + items: + description: + DropIn is a drop-in configuration for a systemd + unit. + properties: + content: + description: Content is the content of the drop-in. + type: string + name: + description: Name is the name of the drop-in. + type: string + required: + - content + - name + type: object + type: array + enable: + description: + Enable describes whether the unit is enabled + or not. + type: boolean + mask: + description: + Mask describes whether the unit is masked or + not. + type: boolean + name: + description: Name is the name of a unit. 
+ type: string + required: + - name + type: object + type: array + type: object + supportedCloudProviders: + description: + SupportedCloudProviders represent the cloud providers + that support the given operating system version + items: + description: + CloudProviderSpec contains the os/image reference for + a specific supported cloud provider + properties: + name: + description: + Name represents the name of the supported cloud + provider + enum: + - aws + - azure + - digitalocean + - gce + - hetzner + - kubevirt + - linode + - nutanix + - openstack + - equinixmetal + - vsphere + - fake + - alibaba + - anexia + - scaleway + - baremetal + - external + - vmware-cloud-director + type: string + spec: + description: + Spec represents the os/image reference in the supported + cloud provider + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - name + type: object + type: array + version: + description: Version is the version of the operating System Profile + pattern: v(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + type: string + required: + - bootstrapConfig + - osName + - osVersion + - provisioningConfig + - supportedCloudProviders + - version + type: object + required: + - spec + type: object + served: true + storage: true +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: operating-system-manager-selfsigned-issuer + namespace: kube-system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: operating-system-manager-serving-cert + namespace: kube-system +spec: + dnsNames: + - "operating-system-manager-webhook.kube-system.svc" + - "operating-system-manager-webhook.kube-system.svc.cluster.local" + issuerRef: + kind: Issuer + name: operating-system-manager-selfsigned-issuer + secretName: webhook-server-cert +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cloud-init-settings +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-init-getter + namespace: cloud-init-settings +--- +apiVersion: v1 +kind: Secret +type: kubernetes.io/service-account-token +metadata: + name: cloud-init-getter-token + namespace: cloud-init-settings + annotations: + kubernetes.io/service-account.name: "cloud-init-getter" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cloud-init-getter + namespace: cloud-init-settings +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cloud-init-getter + namespace: cloud-init-settings +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cloud-init-getter +subjects: + - kind: ServiceAccount + name: cloud-init-getter + namespace: cloud-init-settings +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: operating-system-manager-webhook + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + name: operating-system-manager-webhook + namespace: kube-system +spec: + ports: + - name: 443-9443 + port: 443 + protocol: TCP + targetPort: 9443 + selector: + app: operating-system-manager-webhook + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: operating-system-manager-webhook + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app: operating-system-manager-webhook + template: + metadata: + labels: + app: 
operating-system-manager-webhook + spec: + serviceAccountName: operating-system-manager-webhook + containers: + - image: quay.io/kubermatic/operating-system-manager:v0.6.0 + imagePullPolicy: IfNotPresent + name: webhook + command: + - /usr/local/bin/webhook + - -logtostderr + - -v=6 + - -namespace=kube-system + volumeMounts: + - name: operating-system-manager-admission-cert + mountPath: /tmp/k8s-webhook-server/serving-certs + readOnly: true + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + requests: + memory: 256Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 100m + volumes: + - name: operating-system-manager-admission-cert + secret: + defaultMode: 420 + secretName: webhook-server-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: operatingsystemmanager.k8c.io + annotations: + cert-manager.io/inject-ca-from: kube-system/operating-system-manager-serving-cert +webhooks: + - name: operatingsystemprofiles.operatingsystemmanager.k8c.io + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: ["v1", "v1beta1"] + rules: + - apiGroups: + - "operatingsystemmanager.k8c.io" + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - operatingsystemprofiles + clientConfig: + service: + namespace: kube-system + name: operating-system-manager-webhook + path: /operatingsystemprofile + - name: operatingsystemconfigs.operatingsystemmanager.k8c.io + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: ["v1", "v1beta1"] + rules: + - apiGroups: + - "operatingsystemmanager.k8c.io" + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - operatingsystemconfigs + clientConfig: + service: + namespace: kube-system + name: operating-system-manager-webhook + path: /operatingsystemconfig +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: operating-system-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: operating-system-manager + namespace: kube-system +rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - create + - update + - list + - get + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: operating-system-manager + namespace: cloud-init-settings +rules: + # Secrets access is required for managing provisioning configurations + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - create + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: operating-system-manager + namespace: kube-public +rules: + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - cluster-info + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: operating-system-manager + namespace: default +rules: + - apiGroups: + - "" + resources: + - endpoints + resourceNames: + - kubernetes + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: operating-system-manager + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + 
name: operating-system-manager +subjects: + - kind: ServiceAccount + name: operating-system-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: operating-system-manager + namespace: cloud-init-settings +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: operating-system-manager +subjects: + - kind: ServiceAccount + name: operating-system-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: operating-system-manager + namespace: kube-public +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: operating-system-manager +subjects: + - kind: ServiceAccount + name: operating-system-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: operating-system-manager + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: operating-system-manager +subjects: + - kind: ServiceAccount + name: operating-system-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:operating-system-manager +rules: + - apiGroups: + - operatingsystemmanager.k8c.io + resources: + - operatingsystemprofiles + - operatingsystemconfigs + verbs: + - "*" + - apiGroups: + - cluster.k8s.io + resources: + - machinedeployments + verbs: + - get + - list + - watch + - patch + - update + # Secrets and configmaps are needed for the bootstrap token creation and when a ref is used for a + # value in the machineSpec + - apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - list + - get + - watch + - apiGroups: + - apps + resources: + - deployments + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:operating-system-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:operating-system-manager +subjects: + - kind: ServiceAccount + name: operating-system-manager + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: operating-system-manager + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app: operating-system-manager + template: + metadata: + annotations: + "prometheus.io/scrape": "true" + "prometheus.io/port": "8080" + "prometheus.io/path": "/metrics" + labels: + app: operating-system-manager + spec: + serviceAccountName: operating-system-manager + containers: + - image: quay.io/kubermatic/operating-system-manager:v0.6.0 + imagePullPolicy: IfNotPresent + name: operating-system-manager + command: + - /usr/local/bin/osm-controller + - -logtostderr + - -v=5 + - -worker-count=5 + - -cluster-dns=10.10.10.10 + - -metrics-address=0.0.0.0:8080 + - -health-probe-address=0.0.0.0:8085 + - -namespace=kube-system + - -container-runtime=containerd + ports: + - containerPort: 8085 + livenessProbe: + httpGet: + path: /readyz + port: 8085 + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + httpGet: + path: /healthz + port: 8085 + periodSeconds: 5 + resources: + requests: + memory: 256Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 100m diff --git a/go.mod b/go.mod index b257202df..6ece1c6ae 100644 --- a/go.mod +++ b/go.mod @@ -36,20 +36,21 @@ require ( github.com/vmware/go-vcloud-director/v2 v2.15.0 github.com/vmware/govmomi v0.28.0 go.anx.io/go-anxcloud v0.4.4 - golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f - golang.org/x/oauth2 
v0.0.0-20220411215720-9780585627b5 + golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e + golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb gomodules.xyz/jsonpatch/v2 v2.2.0 google.golang.org/api v0.74.0 google.golang.org/grpc v1.45.0 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/yaml.v3 v3.0.1 - k8c.io/operating-system-manager v0.4.4 - k8s.io/api v0.24.0 - k8s.io/apiextensions-apiserver v0.24.0 - k8s.io/apimachinery v0.24.0 + // Please ensure that you update the image tags in `examples/operating-system-manager.yaml` as well. + k8c.io/operating-system-manager v0.6.0 + k8s.io/api v0.24.2 + k8s.io/apiextensions-apiserver v0.24.2 + k8s.io/apimachinery v0.24.2 k8s.io/client-go v12.0.0+incompatible k8s.io/klog v1.0.0 - k8s.io/kubelet v0.24.0 + k8s.io/kubelet v0.24.2 k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 kubevirt.io/api v0.54.0 kubevirt.io/containerized-data-importer-api v1.50.0 @@ -69,8 +70,6 @@ require ( github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 // indirect github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect @@ -82,27 +81,29 @@ require ( github.com/coreos/ignition v0.35.0 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/emicklei/go-restful v2.15.0+incompatible // indirect + github.com/emicklei/go-restful/v3 v3.8.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/flatcar-linux/container-linux-config-transpiler v0.9.3 // indirect + github.com/flatcar-linux/ignition v0.36.1 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.21.1 // indirect github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.7 // indirect + github.com/google/gnostic v0.6.9 // indirect + github.com/google/go-cmp v0.5.8 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/googleapis/gax-go/v2 v2.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hashicorp/go-version v1.2.0 // indirect github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -120,21 +121,24 @@ require ( github.com/openshift/custom-resource-status v1.1.2 // indirect github.com/peterhellberg/link v1.1.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.34.0 // indirect + 
github.com/prometheus/common v0.35.0 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/shopspring/decimal v1.3.1 // indirect - github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/vincent-petithory/dataurl v1.0.0 // indirect go.opencensus.io v0.23.0 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.21.0 // indirect go4.org v0.0.0-20201209231011-d4a079459e60 // indirect - golang.org/x/net v0.0.0-20220412020605-290c469a71a5 // indirect + golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 // indirect golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c // indirect + golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect + golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9 // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -143,17 +147,15 @@ require ( gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/component-base v0.24.0 // indirect + k8s.io/component-base v0.24.2 // indirect k8s.io/klog/v2 v2.60.1 // indirect - k8s.io/kube-openapi v0.0.0-20220413171646-5e7f5fdc6da6 // indirect + k8s.io/kube-openapi v0.0.0-20220614142933-1062c7ade5f8 // indirect kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect - sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect + sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect ) replace ( - github.com/packethost/packngo => github.com/packethost/packngo v0.1.1-0.20190410075950-a02c426e4888 - - k8s.io/client-go => k8s.io/client-go v0.24.0 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.0 + k8s.io/client-go => k8s.io/client-go v0.24.2 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.2 ) diff --git a/go.sum b/go.sum index 83230eabc..b138f343f 100644 --- a/go.sum +++ b/go.sum @@ -100,14 +100,13 @@ github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXn github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= 
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajeddeloh/go-json v0.0.0-20160803184958-73d058cf8437/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 h1:4SPQljF/GJ8Q+QlCWMWxRBepub4DresnOm4eI2ebFGc= github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd h1:NlKlOv3aVJ5ODMC0JWPvddw05KENkL3cZttIuu8kJRo= @@ -118,6 +117,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1645 h1:IEL/Da0Dtg9j/36UnzyxD84n0eDj0JIoTKTKobN2eks= @@ -137,11 +137,13 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.8.39/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.44.37 h1:KvDxCX6dfJeEDC77U5GPGSP0ErecmNnhDHFxw+NIvlI= github.com/aws/aws-sdk-go v1.44.37/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -151,6 +153,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/briandowns/spinner v1.8.0/go.mod h1://Zf9tMcxfRUA36V23M6YGEAv+kECGfvpnLTnb8n4XQ= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff 
v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -187,10 +190,12 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.1.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -216,6 +221,7 @@ github.com/digitalocean/godo v1.81.0 h1:sjb3fOfPfSlUQUK22E87BcI8Zx2qtnF7VUCCO4UK github.com/digitalocean/godo v1.81.0/go.mod h1:BPCqvwbjbGqxuUnIKB4EvS/AX7IDnNmt5fwvIkWo+ew= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= @@ -236,8 +242,9 @@ github.com/embik/nutanix-client-go v0.1.0 h1:yPcozUczE2a12RRD/mfk8CehhKPAJWVpisP github.com/embik/nutanix-client-go v0.1.0/go.mod h1:gkKNSxfEt3QtYG3S/wKiN8OmrJ4fpU7JbTlbnrMDOL8= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.15.0+incompatible h1:8KpYO/Xl/ZudZs5RNOEhWMBY4hmzlZhhRd9cu+jrZP4= github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= +github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -256,11 +263,17 @@ github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/fatih/color 
v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flatcar-linux/container-linux-config-transpiler v0.9.3 h1:0Leh4HX8Wpe/PYuNidytk6v+2mIFHybK50DWipiCnng= +github.com/flatcar-linux/container-linux-config-transpiler v0.9.3/go.mod h1:AGVTulMzeIKwurV9ExYH3UiokET1Ur65g+EIeRDMwzM= +github.com/flatcar-linux/ignition v0.36.1 h1:yNvS9sQvm9HJ8VgxXskx88DsF73qdF35ALJkbTwcYhY= +github.com/flatcar-linux/ignition v0.36.1/go.mod h1:0jS5n4AopgOdwgi7QDo5MFgkMx/fQUDYjuxlGJC1Txg= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= @@ -275,6 +288,7 @@ github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aev github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -300,8 +314,9 @@ github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUe github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -327,6 +342,7 @@ 
github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8z github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus v0.0.0-20181025153459-66d97aec3384/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -384,8 +400,9 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -398,8 +415,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -496,13 +514,14 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4FwCTKeG3oo7hrHJAoznj9nag= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -666,13 +685,14 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/packethost/packngo v0.1.1-0.20190410075950-a02c426e4888 h1:ARzSqjQJcSR9IXGMEkXN1IQEZChwd2MqXWm6YNGvO5o= -github.com/packethost/packngo v0.1.1-0.20190410075950-a02c426e4888/go.mod h1:RQHg5xR1F614BwJyepfMqrKN+32IH0i7yX+ey43rEeQ= +github.com/packethost/packngo v0.25.0 h1:ujGXL3lVqTiaQoX2/Go74lQAlYfTeop7jBNy5w99w2A= +github.com/packethost/packngo v0.25.0/go.mod h1:/UHguFdPs6Lf6FOkkSEPnRY5tgS0fsVM+Zv/bvBrmt0= github.com/packethost/pkg v0.0.0-20200903155310-0433e0605550/go.mod h1:GSv7cTtIjns4yc0pyajaM1RE/KE4djJONoblFIRDrxA= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -684,6 +704,7 @@ github.com/peterhellberg/link v1.1.0 h1:s2+RH8EGuI/mI4QwrWGSYQCRz7uNgip9BaM04HKu github.com/peterhellberg/link v1.1.0/go.mod h1:gtSlOT4jmkY8P47hbTc8PTgiDDWpdPbFYl75keYyBB8= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -720,8 +741,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common 
v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.34.0 h1:RBmGO9d/FVjqHT0yUGQwBJhkwKV+wPCn7KGpvfab0uE= -github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= +github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE= +github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -762,6 +783,8 @@ github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFR github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sigma/bdoor v0.0.0-20160202064022-babf2a4017b0/go.mod h1:WBu7REWbxC/s/J06jsk//d+9DOz9BbsmcIrimuGRFbs= +github.com/sigma/vmw-guestinfo v0.0.0-20160204083807-95dd4126d6e8/go.mod h1:JrRFFC0veyh0cibh0DAhriSY7/gV3kDdNaVUOmfx01U= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -769,7 +792,9 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= @@ -779,8 +804,8 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod 
h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.1-0.20200713175500-884edc58ad08/go.mod h1:yk5b0mALVusDL5fMM6Rd1wgnoO5jUPhwsQ6LQAJTidQ= @@ -828,7 +853,12 @@ github.com/vmware/go-vcloud-director/v2 v2.15.0 h1:idQ9NsHLr2dOSLBC8KIdBMq7XOvPi github.com/vmware/go-vcloud-director/v2 v2.15.0/go.mod h1:2BS1yw61VN34WI0/nUYoInFvBc3Zcuf84d4ESiAAl68= github.com/vmware/govmomi v0.28.0 h1:VgeQ/Rvz79U9G8QIKLdgpsN9AndHJL+5iMJLgYIrBGI= github.com/vmware/govmomi v0.28.0/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY= +github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= +github.com/vmware/vmw-ovflib v0.0.0-20170608004843-1f217b9dc714/go.mod h1:jiPk45kn7klhByRvUq5i2vo1RtHKBHj+iWGFpxbXuuI= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -885,13 +915,16 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= @@ -899,12 +932,13 @@ go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go4.org v0.0.0-20160314031811-03efcb870d84/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20201209231011-d4a079459e60 
h1:iqAGo78tVOJXELHQFRjR6TMwItrvXH4hrGJ32I/NFF8= go4.org v0.0.0-20201209231011-d4a079459e60/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -915,6 +949,7 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -922,8 +957,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f h1:OeJjE6G4dgCY4PIXvIRQbE8+RX+uXZyGhUy/ksMGJoc= -golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -972,6 +1007,7 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1019,8 +1055,8 @@ golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 h1:Yqz/iviulwKwAREEeUd3nbBFn0XuyJqkoft2IlrvOhc= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1040,8 +1076,8 @@ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb h1:8tDJ3aechhddbdPAxpycgXHJRMLpk/Ab+aa4OgdN5/g= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1064,7 +1100,6 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1145,14 +1180,17 @@ 
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c h1:aFV+BgZ4svzjfabn8ERpuB4JI4N6/rdy1iusx77G3oU= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20190321115727-fe223c5a2583/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1165,8 +1203,9 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1467,6 +1506,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v0.0.0-20181223230014-1083505acf35/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= @@ -1481,23 +1521,23 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8c.io/operating-system-manager v0.4.4 h1:uFwZN1WPVQYmXTV0PzZ6jnk5bApY3GnJTsudLpiAQMs= -k8c.io/operating-system-manager v0.4.4/go.mod h1:yxUFYirh0ge8Hf5wUFGDdu7A0czc+2QVzWEWD0hXDs4= +k8c.io/operating-system-manager v0.6.0 h1:c+WJOV+BlW9NgSi7/QCNKCTXVwcW89s3PlWQDqQBRhA= +k8c.io/operating-system-manager v0.6.0/go.mod h1:8Q1xpjJomTG9X6lfx/y3+yGHCackHtqxuYEk0TIPMfA= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.24.0 h1:J0hann2hfxWr1hinZIDefw7Q96wmCBx6SSB8IY0MdDg= -k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= -k8s.io/apiextensions-apiserver v0.24.0 h1:JfgFqbA8gKJ/uDT++feAqk9jBIwNnL9YGdQvaI9DLtY= -k8s.io/apiextensions-apiserver v0.24.0/go.mod h1:iuVe4aEpe6827lvO6yWQVxiPSpPoSKVjkq+MIdg84cM= +k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI= +k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= +k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= +k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.24.0 h1:ydFCyC/DjCvFCHK5OPMKBlxayQytB8pxy8YQInd5UyQ= -k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apiserver v0.24.0/go.mod h1:WFx2yiOMawnogNToVvUYT9nn1jaIkMKj41ZYCVycsBA= -k8s.io/client-go v0.24.0 h1:lbE4aB1gTHvYFSwm6eD3OF14NhFDKCejlnsGYlSJe5U= -k8s.io/client-go v0.24.0/go.mod h1:VFPQET+cAFpYxh6Bq6f4xyMY80G6jKKktU6G0m00VDw= +k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM= +k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= +k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= +k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= -k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= -k8s.io/component-base v0.24.0 h1:h5jieHZQoHrY/lHG+HyrSbJeyfuitheBvqvKwKHVC0g= -k8s.io/component-base v0.24.0/go.mod h1:Dgazgon0i7KYUsS8krG8muGiMVtUZxG037l1MKyXgrA= +k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= +k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU= +k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0 
h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= @@ -1511,10 +1551,10 @@ k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/kube-openapi v0.0.0-20220413171646-5e7f5fdc6da6 h1:nBQrWPlrNIiw0BsX6a6MKr1itkm0ZS0Nl97kNLitFfI= -k8s.io/kube-openapi v0.0.0-20220413171646-5e7f5fdc6da6/go.mod h1:daOouuuwd9JXpv1L7Y34iV3yf6nxzipkKMWWlqlvK9M= -k8s.io/kubelet v0.24.0 h1:fH+D6mSr4DGIeHp/O2+mCEJhkVq3Gpgv9BVOHI+GrWY= -k8s.io/kubelet v0.24.0/go.mod h1:p3BBacmHTCMpUf+nluhlyzuGHmONKAspqCvpu9oPAyA= +k8s.io/kube-openapi v0.0.0-20220614142933-1062c7ade5f8 h1:IyQ1DifCBk589JD4Cm2CT2poIdO3lfPzz3WwVh1Ugf8= +k8s.io/kube-openapi v0.0.0-20220614142933-1062c7ade5f8/go.mod h1:guXtiQW/y/AWAfPSOaI/1eY0TGBAmL5OygiIyUOKDRc= +k8s.io/kubelet v0.24.2 h1:VAvULig8RiylCtyxudgHV7nhKsLnNIrdVBCRD4bXQ3Y= +k8s.io/kubelet v0.24.2/go.mod h1:Xm9DkWQjwOs+uGOUIIGIPMvvmenvj0lDVOErvIKOOt0= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= @@ -1532,8 +1572,9 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lR sigs.k8s.io/controller-runtime v0.12.1 h1:4BJY01xe9zKQti8oRjj/NeHKRXthf1YkYJAgLONFFoI= sigs.k8s.io/controller-runtime v0.12.1/go.mod h1:BKhxlA4l7FPK4AQcsuL4X6vZeWnKDXez/vp1Y8dxTU0= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= +sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 h1:2sgAQQcY0dEW2SsQwTXhQV4vO6+rSslYx8K3XmM5hqQ= +sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= diff --git a/hack/ci/download-gocache.sh b/hack/ci/download-gocache.sh index 4f5178079..650d67032 100755 --- a/hack/ci/download-gocache.sh +++ b/hack/ci/download-gocache.sh @@ -21,6 +21,8 @@ set -euo pipefail # receives a SIGINT set -o monitor +source $(dirname $0)/../lib.sh + # The gocache needs a matching go version to work, so append that to the name GO_VERSION="$(go version | awk '{ print $3 }' | sed 's/go//g')" @@ -34,7 +36,7 @@ exit_gracefully() { trap exit_gracefully EXIT if [ -z "${GOCACHE_MINIO_ADDRESS:-}" ]; then - echo "env var GOCACHE_MINIO_ADDRESS unset, can not download gocache" + echodate "env var GOCACHE_MINIO_ADDRESS unset, cannot download gocache" exit 0 fi @@ -53,25 +55,40 @@ if [[ -z "${CACHE_VERSION}" ]]; then GIT_BRANCH="master" fi -if [ -z "${PULL_NUMBER:-}" ]; then - # Special case: This is called in a Postubmit. 
Go one revision back, - # as there can't be a cache for the current revision - CACHE_VERSION="$(git rev-parse ${CACHE_VERSION}~1)" -fi - # normalize branch name to prevent accidental directories being created GIT_BRANCH="$(echo "$GIT_BRANCH" | sed 's#/#-#g')" ARCHIVE_NAME="${CACHE_VERSION}-${GO_VERSION}.tar" URL="${GOCACHE_MINIO_ADDRESS}/machine-controller/${GIT_BRANCH}/${ARCHIVE_NAME}" -# Do not go through the retry loop when there is nothing -if ! curl --head --silent --fail "${URL}" > /dev/null; then - echo "Remote has no gocache ${ARCHIVE_NAME}, exiting" +# Do not go through the retry loop when there is nothing, but do try the +# first few parents if no cache was found. This is helpful for retests happening +# quickly after something got merged to master and no gocache for the most +# recent commit exists yet. In this case, taking the previous commit's +# cache is better than nothing. This also helps for postsubmits, where the current +# commit (the one that got merged) cannot have a cache yet. +HAS_CACHE=false +for i in $(seq 1 5); do + # check if we have a cache for the given git revision + if curl --head --silent --fail "${URL}" > /dev/null; then + HAS_CACHE=true + break + fi + echodate "No gocache machine-controller/${GIT_BRANCH}/${ARCHIVE_NAME} available, trying previous commit as a fallback..." + + CACHE_VERSION="$(git rev-parse ${CACHE_VERSION}~1)" + ARCHIVE_NAME="${CACHE_VERSION}-${GO_VERSION}.tar" + URL="${GOCACHE_MINIO_ADDRESS}/machine-controller/${GIT_BRANCH}/${ARCHIVE_NAME}" +done +if ! $HAS_CACHE; then + echodate "Could not find any suitable gocaches, giving up." exit 0 fi -echo "Downloading and extracting gocache" -curl --fail --header "Content-Type: application/octet-stream" "${URL}" | tar -C $GOCACHE -xf - - -echo "Successfully fetched gocache into $GOCACHE" +echodate "Downloading and extracting gocache" +TEST_NAME="Download and extract gocache" +# Passing the Headers as space-separated literals doesn't seem to work +# in conjunction with the retry func, so we just put them in a file instead +echo 'Content-Type: application/octet-stream' > /tmp/headers +retry 5 curl --fail -H @/tmp/headers "${URL}" | tar -C $GOCACHE -xf - +echodate "Successfully fetched gocache into $GOCACHE" diff --git a/hack/ci/run-e2e-tests.sh b/hack/ci/run-e2e-tests.sh index ba4140807..95b4687aa 100755 --- a/hack/ci/run-e2e-tests.sh +++ b/hack/ci/run-e2e-tests.sh @@ -33,6 +33,7 @@ trap cleanup EXIT export GIT_HEAD_HASH="$(git rev-parse HEAD)" export MC_VERSION="${GIT_HEAD_HASH}" +export OPERATING_SYSTEM_MANAGER="${OPERATING_SYSTEM_MANAGER:-true}" TEST_NAME="Pre-warm Go build cache" echodate "Attempting to pre-warm Go build cache" diff --git a/hack/ci/setup-kind-cluster.sh b/hack/ci/setup-kind-cluster.sh index bd8587761..d03becb3a 100755 --- a/hack/ci/setup-kind-cluster.sh +++ b/hack/ci/setup-kind-cluster.sh @@ -191,7 +191,7 @@ if [ -z "${DISABLE_CLUSTER_EXPOSER:-}" ]; then TEST_NAME="Wait for cluster exposer" echodate "Waiting for cluster exposer to be running" - retry 5 curl -s --fail http://127.0.0.1:2047/metrics -o /dev/null + retry 10 curl -s --fail http://127.0.0.1:2047/metrics -o /dev/null echodate "Cluster exposer is running" echodate "Setting up iptables rules to make nodeports available" diff --git a/hack/ci/setup-machine-controller-in-kind.sh b/hack/ci/setup-machine-controller-in-kind.sh index 3ba2a8c87..d34d5b934 100755 --- a/hack/ci/setup-machine-controller-in-kind.sh +++ b/hack/ci/setup-machine-controller-in-kind.sh @@ -22,6 +22,7 @@ if [ -z "${KIND_CLUSTER_NAME:-}" ]; then fi export 
MC_VERSION="${MC_VERSION:-$(git rev-parse HEAD)}" +export OPERATING_SYSTEM_MANAGER="${OPERATING_SYSTEM_MANAGER:-true}" # Build the Docker image for machine-controller beforeDockerBuild=$(nowms) @@ -45,10 +46,37 @@ if [ ! -f machine-controller-deployed ]; then # This is required for running e2e tests in KIND url="-override-bootstrap-kubelet-apiserver=$MASTER_URL" sed -i "s;-node-csr-approver=true;$url;g" examples/machine-controller.yaml + + # Ensure that we update `use-osm` flag if OSM is disabled + if [[ "$OPERATING_SYSTEM_MANAGER" == "false" ]]; then + sed -i "s;-use-osm=true;-use-osm=false;g" examples/machine-controller.yaml + fi + make deploy touch machine-controller-deployed fi +if [[ "$OPERATING_SYSTEM_MANAGER" == "true" ]]; then + # cert-manager is required by OSM for generating TLS Certificates + echodate "Installing cert-manager" + ( + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.7.1/cert-manager.yaml + # Wait for cert-manager to be ready + kubectl -n cert-manager rollout status deploy/cert-manager + kubectl -n cert-manager rollout status deploy/cert-manager-cainjector + kubectl -n cert-manager rollout status deploy/cert-manager-webhook + ) + + echodate "Installing operating-system-manager" + ( + # This is required for running e2e tests in KIND + url="-override-bootstrap-kubelet-apiserver=$MASTER_URL" + sed -i "s;-container-runtime=containerd;$url;g" examples/operating-system-manager.yaml + sed -i -e 's/-worker-count=5/-worker-count=50/g' examples/operating-system-manager.yaml + kubectl apply -f examples/operating-system-manager.yaml + ) +fi + sleep 10 retry 10 check_all_deployments_ready kube-system diff --git a/hack/ci/upload-gocache.sh b/hack/ci/upload-gocache.sh index 2ff04c5ad..5db05684f 100755 --- a/hack/ci/upload-gocache.sh +++ b/hack/ci/upload-gocache.sh @@ -55,7 +55,7 @@ ARCHIVE_FILE="/tmp/${GIT_HEAD_HASH}.tar" # No compression because that needs quite a bit of CPU tar -C "$GOCACHE" -cf "$ARCHIVE_FILE" . -echo "Uploading gocache archive" +echo "Uploading gocache archive machine-controller/${GIT_BRANCH}/${GIT_HEAD_HASH}-${GO_VERSION}.tar" curl \ --fail \ --upload-file "${ARCHIVE_FILE}" \ diff --git a/hack/run-machine-controller.sh b/hack/run-machine-controller.sh index 69cc68125..7718af663 100755 --- a/hack/run-machine-controller.sh +++ b/hack/run-machine-controller.sh @@ -21,7 +21,7 @@ set -e MC_KUBECONFIG=${MC_KUBECONFIG:-$(dirname $0)/../.kubeconfig} # If you want to use the default kubeconfig `export MC_KUBECONFIG=$KUBECONFIG` -# `-use-osm` flag can be specified if https://github.com/kubermatic/operating-system-manager is used to manage user data. +# `-use-osm` flag can be removed to use legacy userdata that is generated by machine-controller. make -C $(dirname $0)/.. build-machine-controller $(dirname $0)/../machine-controller \ @@ -29,8 +29,9 @@ $(dirname $0)/../machine-controller \ -worker-count=50 \ -logtostderr \ -v=6 \ - -cluster-dns=172.16.0.10 \ + -cluster-dns=169.254.20.10 \ -enable-profiling \ -metrics-address=0.0.0.0:8080 \ -health-probe-address=0.0.0.0:8085 \ + -use-osm=true \ -node-container-runtime=containerd diff --git a/hack/update-fixtures.sh b/hack/update-fixtures.sh index a85226f41..f91ab4a2f 100755 --- a/hack/update-fixtures.sh +++ b/hack/update-fixtures.sh @@ -19,7 +19,7 @@ set -euo pipefail cd $(dirname $0)/.. source hack/lib.sh -CONTAINERIZE_IMAGE=golang:1.18.2 containerize ./hack/update-fixtures.sh +CONTAINERIZE_IMAGE=golang:1.18.3 containerize ./hack/update-fixtures.sh go test ./... 
-v -update || go test ./... diff --git a/pkg/admission/machinedeployments_validation.go b/pkg/admission/machinedeployments_validation.go index 57442b589..e7b13c2b5 100644 --- a/pkg/admission/machinedeployments_validation.go +++ b/pkg/admission/machinedeployments_validation.go @@ -157,7 +157,8 @@ func ensureOSPAnnotation(md *v1alpha1.MachineDeployment, providerConfig provider // Annotation not specified, populate default OSP annotation switch providerConfig.OperatingSystem { case providerconfigtypes.OperatingSystemUbuntu, providerconfigtypes.OperatingSystemCentOS, providerconfigtypes.OperatingSystemFlatcar, - providerconfigtypes.OperatingSystemAmazonLinux2: + providerconfigtypes.OperatingSystemAmazonLinux2, providerconfigtypes.OperatingSystemRockyLinux, providerconfigtypes.OperatingSystemSLES, + providerconfigtypes.OperatingSystemRHEL: md.Annotations[osmresources.MachineDeploymentOSPAnnotation] = fmt.Sprintf(ospNamePattern, providerConfig.OperatingSystem) return nil diff --git a/pkg/admission/machines.go b/pkg/admission/machines.go index 295ebb135..21e4f0111 100644 --- a/pkg/admission/machines.go +++ b/pkg/admission/machines.go @@ -27,6 +27,7 @@ import ( "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" "github.com/kubermatic/machine-controller/pkg/cloudprovider" + controllerutil "github.com/kubermatic/machine-controller/pkg/controller/util" "github.com/kubermatic/machine-controller/pkg/providerconfig" providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" @@ -97,6 +98,14 @@ func (ad *admissionData) mutateMachines(ctx context.Context, ar admissionv1.Admi common.SetOSLabel(&machine.Spec, string(providerConfig.OperatingSystem)) } + // Set LegacyMachineControllerUserDataLabel to false if OSM was used for managing the machine configuration. + if ad.useOSM { + if machine.Labels == nil { + machine.Labels = make(map[string]string) + } + machine.Labels[controllerutil.LegacyMachineControllerUserDataLabel] = "false" + } + return createAdmissionResponse(machineOriginal, &machine) } @@ -160,6 +169,7 @@ func (ad *admissionData) defaultAndValidateMachineSpec(ctx context.Context, spec providerConfig.OperatingSystem, providerConfig.CloudProvider, providerConfig.OperatingSystemSpec, + ad.useOSM, ) if err != nil { return err diff --git a/pkg/apis/cluster/v1alpha1/migrations/migrations.go b/pkg/apis/cluster/v1alpha1/migrations/migrations.go index 0d1f712fa..284bfa18a 100644 --- a/pkg/apis/cluster/v1alpha1/migrations/migrations.go +++ b/pkg/apis/cluster/v1alpha1/migrations/migrations.go @@ -51,7 +51,7 @@ import ( ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -func MigrateProviderConfigToProviderSpecIfNecesary(ctx context.Context, config *restclient.Config, client ctrlruntimeclient.Client) error { +func MigrateProviderConfigToProviderSpecIfNecessary(ctx context.Context, config *restclient.Config, client ctrlruntimeclient.Client) error { klog.Infof("Starting to migrate providerConfigs to providerSpecs") dynamicClient, err := dynamicclient.NewForConfig(config) if err != nil { diff --git a/pkg/cloudprovider/instance/instance.go b/pkg/cloudprovider/instance/instance.go index 64299ada4..f97c327a9 100644 --- a/pkg/cloudprovider/instance/instance.go +++ b/pkg/cloudprovider/instance/instance.go @@ -24,6 +24,8 @@ type Instance interface { Name() string // ID returns the instance identifier. 
ID() string + // ProviderID returns the expected providerID for the instance + ProviderID() string // Addresses returns a list of addresses associated with the instance. Addresses() map[string]v1.NodeAddressType // Status returns the instance status. diff --git a/pkg/cloudprovider/provider/alibaba/provider.go b/pkg/cloudprovider/provider/alibaba/provider.go index 64cd3339a..3b4f773f9 100644 --- a/pkg/cloudprovider/provider/alibaba/provider.go +++ b/pkg/cloudprovider/provider/alibaba/provider.go @@ -86,6 +86,11 @@ func (a *alibabaInstance) ID() string { return a.instance.InstanceId } +// TODO: Implement once we start supporting Alibaba CCM. +func (a *alibabaInstance) ProviderID() string { + return "" +} + func (a *alibabaInstance) Addresses() map[string]v1.NodeAddressType { primaryIPAddresses := map[string]v1.NodeAddressType{} for _, networkInterface := range a.instance.NetworkInterfaces.NetworkInterface { diff --git a/pkg/cloudprovider/provider/anexia/instance.go b/pkg/cloudprovider/provider/anexia/instance.go index 80c404d38..c8cf6400c 100644 --- a/pkg/cloudprovider/provider/anexia/instance.go +++ b/pkg/cloudprovider/provider/anexia/instance.go @@ -45,6 +45,11 @@ func (ai *anexiaInstance) ID() string { return ai.info.Identifier } +// TODO(xmudrii): Implement this. +func (ai *anexiaInstance) ProviderID() string { + return "" +} + func (ai *anexiaInstance) Addresses() map[string]v1.NodeAddressType { addresses := map[string]v1.NodeAddressType{} diff --git a/pkg/cloudprovider/provider/anexia/provider.go b/pkg/cloudprovider/provider/anexia/provider.go index 7d99885e2..7d451e84b 100644 --- a/pkg/cloudprovider/provider/anexia/provider.go +++ b/pkg/cloudprovider/provider/anexia/provider.go @@ -228,7 +228,7 @@ func getIPAddress(ctx context.Context, client anxclient.Client) (string, error) klog.Info("reusing already provisioned ip", "IP", status.ReservedIP) return status.ReservedIP, nil } - klog.Info(fmt.Sprintf("Creating a new IP for machine ''%s", reconcileContext.Machine.Name)) + klog.Info(fmt.Sprintf("Creating a new IP for machine %q", reconcileContext.Machine.Name)) addrAPI := anxaddr.NewAPI(client) config := reconcileContext.Config res, err := addrAPI.ReserveRandom(ctx, anxaddr.ReserveRandom{ diff --git a/pkg/cloudprovider/provider/anexia/provider_test.go b/pkg/cloudprovider/provider/anexia/provider_test.go index d03feda8a..8592f5764 100644 --- a/pkg/cloudprovider/provider/anexia/provider_test.go +++ b/pkg/cloudprovider/provider/anexia/provider_test.go @@ -79,7 +79,7 @@ func TestAnexiaProvider(t *testing.T) { } if providerStatus.InstanceID != TestIdentifier { - t.Errorf("Excpected InstanceID to be set") + t.Error("Expected InstanceID to be set") } }) diff --git a/pkg/cloudprovider/provider/anexia/types/errors.go b/pkg/cloudprovider/provider/anexia/types/errors.go index 28d3dd7bb..65f7ab6d2 100644 --- a/pkg/cloudprovider/provider/anexia/types/errors.go +++ b/pkg/cloudprovider/provider/anexia/types/errors.go @@ -29,7 +29,7 @@ func (r MultiErrors) Error() string { for i, err := range r { errString[i] = fmt.Sprintf("Error %d: %s", i, err) } - return fmt.Sprintf("Multiple errors occoured:\n%s", strings.Join(errString, "\n")) + return fmt.Sprintf("Multiple errors occurred:\n%s", strings.Join(errString, "\n")) } func NewMultiError(errs ...error) error { diff --git a/pkg/cloudprovider/provider/aws/provider.go b/pkg/cloudprovider/provider/aws/provider.go index a6436caef..ee28c0570 100644 --- a/pkg/cloudprovider/provider/aws/provider.go +++ b/pkg/cloudprovider/provider/aws/provider.go @@ -1004,6 
+1004,17 @@ func (d *awsInstance) ID() string { return aws.StringValue(d.instance.InstanceId) } +func (d *awsInstance) ProviderID() string { + if d.instance.InstanceId == nil { + return "" + } + if d.instance.Placement.AvailabilityZone == nil { + return "aws:///" + *d.instance.InstanceId + } + + return "aws:///" + *d.instance.Placement.AvailabilityZone + "/" + *d.instance.InstanceId +} + func (d *awsInstance) Addresses() map[string]v1.NodeAddressType { addresses := map[string]v1.NodeAddressType{ aws.StringValue(d.instance.PublicIpAddress): v1.NodeExternalIP, @@ -1126,7 +1137,7 @@ func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) e } type ec2Credentials struct { - acccessKeyID string + accessKeyID string secretAccessKey string region string assumeRoleARN string @@ -1144,7 +1155,7 @@ func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) e // Very simple and very stupid machineEc2Credentials[fmt.Sprintf("%s/%s/%s/%s/%s", config.AccessKeyID, config.SecretAccessKey, config.Region, config.AssumeRoleARN, config.AssumeRoleExternalID)] = ec2Credentials{ - acccessKeyID: config.AccessKeyID, + accessKeyID: config.AccessKeyID, secretAccessKey: config.SecretAccessKey, region: config.Region, assumeRoleARN: config.AssumeRoleARN, @@ -1154,7 +1165,7 @@ func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) e allReservations := []*ec2.Reservation{} for _, cred := range machineEc2Credentials { - ec2Client, err := getEC2client(cred.acccessKeyID, cred.secretAccessKey, cred.region, cred.assumeRoleARN, cred.assumeRoleExternalID) + ec2Client, err := getEC2client(cred.accessKeyID, cred.secretAccessKey, cred.region, cred.assumeRoleARN, cred.assumeRoleExternalID) if err != nil { machineErrors = append(machineErrors, fmt.Errorf("failed to get EC2 client: %w", err)) continue @@ -1169,7 +1180,7 @@ func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) e for _, machine := range machines.Items { metricInstancesForMachines.WithLabelValues(fmt.Sprintf("%s/%s", machine.Namespace, machine.Name)).Set( - getIntanceCountForMachine(machine, allReservations)) + getInstanceCountForMachine(machine, allReservations)) } if len(machineErrors) > 0 { @@ -1179,7 +1190,7 @@ func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) e return nil } -func getIntanceCountForMachine(machine clusterv1alpha1.Machine, reservations []*ec2.Reservation) float64 { +func getInstanceCountForMachine(machine clusterv1alpha1.Machine, reservations []*ec2.Reservation) float64 { var count float64 for _, reservation := range reservations { for _, i := range reservation.Instances { diff --git a/pkg/cloudprovider/provider/azure/create_delete_resources.go b/pkg/cloudprovider/provider/azure/create_delete_resources.go index 90ddce7ea..68931e11a 100644 --- a/pkg/cloudprovider/provider/azure/create_delete_resources.go +++ b/pkg/cloudprovider/provider/azure/create_delete_resources.go @@ -319,7 +319,7 @@ func getVirtualNetwork(ctx context.Context, c *config) (network.VirtualNetwork, return virtualNetworksClient.Get(ctx, c.VNetResourceGroup, c.VNetName, "") } -func createOrUpdateNetworkInterface(ctx context.Context, ifName string, machineUID types.UID, config *config, publicIP, publicIPv6 *network.PublicIPAddress, ipFamily util.IPFamily) (*network.Interface, error) { +func createOrUpdateNetworkInterface(ctx context.Context, ifName string, machineUID types.UID, config *config, publicIP, publicIPv6 *network.PublicIPAddress, ipFamily util.IPFamily, 
enableAcceleratedNetworking *bool) (*network.Interface, error) { ifClient, err := getInterfacesClient(config) if err != nil { return nil, fmt.Errorf("failed to create interfaces client: %w", err) @@ -362,6 +362,8 @@ func createOrUpdateNetworkInterface(ctx context.Context, ifName string, machineU }) } + ifSpec.InterfacePropertiesFormat.EnableAcceleratedNetworking = enableAcceleratedNetworking + if config.SecurityGroupName != "" { authorizer, err := auth.NewClientCredentialsConfig(config.ClientID, config.ClientSecret, config.TenantID).Authorizer() if err != nil { diff --git a/pkg/cloudprovider/provider/azure/provider.go b/pkg/cloudprovider/provider/azure/provider.go index b5b8e7003..ecf60fc54 100644 --- a/pkg/cloudprovider/provider/azure/provider.go +++ b/pkg/cloudprovider/provider/azure/provider.go @@ -49,9 +49,10 @@ import ( ) const ( - CapabilityPremiumIO = "PremiumIO" - CapabilityUltraSSD = "UltraSSDAvailable" - CapabilityValueTrue = "True" + CapabilityPremiumIO = "PremiumIO" + CapabilityUltraSSD = "UltraSSDAvailable" + CapabilityValueTrue = "True" + capabilityAcceleratedNetworking = "AcceleratedNetworkingEnabled" machineUIDTag = "Machine-UID" @@ -100,8 +101,9 @@ type config struct { DataDiskSize int32 DataDiskSKU *compute.StorageAccountTypes - AssignPublicIP bool - Tags map[string]string + AssignPublicIP bool + EnableAcceleratedNetworking *bool + Tags map[string]string } type azureVM struct { @@ -122,6 +124,14 @@ func (vm *azureVM) Name() string { return *vm.vm.Name } +func (vm *azureVM) ProviderID() string { + if vm.vm.ID == nil { + return "" + } + + return "azure://" + *vm.vm.ID +} + func (vm *azureVM) Status() instance.Status { return vm.status } @@ -316,6 +326,7 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*config, *p } c.AssignAvailabilitySet = rawCfg.AssignAvailabilitySet + c.EnableAcceleratedNetworking = rawCfg.EnableAcceleratedNetworking c.AvailabilitySet, err = p.configVarResolver.GetConfigVarStringValue(rawCfg.AvailabilitySet) if err != nil { @@ -612,7 +623,7 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, return nil, err } - iface, err := createOrUpdateNetworkInterface(ctx, ifaceName(machine), machine.UID, config, publicIP, publicIPv6, ipFamily) + iface, err := createOrUpdateNetworkInterface(ctx, ifaceName(machine), machine.UID, config, publicIP, publicIPv6, ipFamily, config.EnableAcceleratedNetworking) if err != nil { return nil, fmt.Errorf("failed to generate main network interface: %w", err) } @@ -935,13 +946,8 @@ func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config stri return s, "azure", nil } -func validateDiskSKUs(ctx context.Context, c *config) error { +func validateDiskSKUs(ctx context.Context, c *config, sku compute.ResourceSku) error { if c.OSDiskSKU != nil || c.DataDiskSKU != nil { - sku, err := getSKU(ctx, c) - if err != nil { - return fmt.Errorf("failed to get VM SKU: %w", err) - } - if c.OSDiskSKU != nil { if _, ok := osDiskSKUs[*c.OSDiskSKU]; !ok { return fmt.Errorf("invalid OS disk SKU '%s'", *c.OSDiskSKU) @@ -972,6 +978,15 @@ func validateDiskSKUs(ctx context.Context, c *config) error { return nil } +func validateSKUCapabilities(ctx context.Context, c *config, sku compute.ResourceSku) error { + if c.EnableAcceleratedNetworking != nil && *c.EnableAcceleratedNetworking { + if !SKUHasCapability(sku, capabilityAcceleratedNetworking) { + return fmt.Errorf("VM size %q does not support accelerated networking", c.VMSize) + } + } + return nil +} + func (p *provider) Validate(ctx 
context.Context, spec clusterv1alpha1.MachineSpec) error { c, providerConfig, err := p.getConfig(spec.ProviderSpec) if err != nil { @@ -1039,10 +1054,19 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe return fmt.Errorf("failed to get subnet: %w", err) } - if err := validateDiskSKUs(ctx, c); err != nil { + sku, err := getSKU(ctx, c) + if err != nil { + return fmt.Errorf("failed to get VM SKU: %w", err) + } + + if err := validateDiskSKUs(ctx, c, sku); err != nil { return fmt.Errorf("failed to validate disk SKUs: %w", err) } + if err := validateSKUCapabilities(ctx, c, sku); err != nil { + return fmt.Errorf("failed to validate SKU capabilities: %w", err) + } + _, err = getOSImageReference(c, providerConfig.OperatingSystem) return err } @@ -1092,7 +1116,7 @@ func (p *provider) MigrateUID(ctx context.Context, machine *clusterv1alpha1.Mach } if kuberneteshelper.HasFinalizer(machine, finalizerNIC) { - _, err = createOrUpdateNetworkInterface(ctx, ifaceName(machine), newUID, config, publicIP, publicIPv6, util.Unspecified) + _, err = createOrUpdateNetworkInterface(ctx, ifaceName(machine), newUID, config, publicIP, publicIPv6, util.Unspecified, config.EnableAcceleratedNetworking) if err != nil { return fmt.Errorf("failed to update UID on main network interface: %w", err) } @@ -1239,3 +1263,14 @@ func supportsDiskSKU(vmSKU compute.ResourceSku, diskSKU compute.StorageAccountTy return nil } + +func SKUHasCapability(sku compute.ResourceSku, name string) bool { + if sku.Capabilities != nil { + for _, capability := range *sku.Capabilities { + if capability.Name != nil && *capability.Name == name && *capability.Value == CapabilityValueTrue { + return true + } + } + } + return false +} diff --git a/pkg/cloudprovider/provider/azure/types/types.go b/pkg/cloudprovider/provider/azure/types/types.go index de6de2166..f5705b60c 100644 --- a/pkg/cloudprovider/provider/azure/types/types.go +++ b/pkg/cloudprovider/provider/azure/types/types.go @@ -28,20 +28,21 @@ type RawConfig struct { ClientID providerconfigtypes.ConfigVarString `json:"clientID,omitempty"` ClientSecret providerconfigtypes.ConfigVarString `json:"clientSecret,omitempty"` - Location providerconfigtypes.ConfigVarString `json:"location"` - ResourceGroup providerconfigtypes.ConfigVarString `json:"resourceGroup"` - VNetResourceGroup providerconfigtypes.ConfigVarString `json:"vnetResourceGroup"` - VMSize providerconfigtypes.ConfigVarString `json:"vmSize"` - VNetName providerconfigtypes.ConfigVarString `json:"vnetName"` - SubnetName providerconfigtypes.ConfigVarString `json:"subnetName"` - LoadBalancerSku providerconfigtypes.ConfigVarString `json:"loadBalancerSku"` - RouteTableName providerconfigtypes.ConfigVarString `json:"routeTableName"` - AvailabilitySet providerconfigtypes.ConfigVarString `json:"availabilitySet"` - AssignAvailabilitySet *bool `json:"assignAvailabilitySet"` - SecurityGroupName providerconfigtypes.ConfigVarString `json:"securityGroupName"` - Zones []string `json:"zones"` - ImagePlan *ImagePlan `json:"imagePlan,omitempty"` - ImageReference *ImageReference `json:"imageReference,omitempty"` + Location providerconfigtypes.ConfigVarString `json:"location"` + ResourceGroup providerconfigtypes.ConfigVarString `json:"resourceGroup"` + VNetResourceGroup providerconfigtypes.ConfigVarString `json:"vnetResourceGroup"` + VMSize providerconfigtypes.ConfigVarString `json:"vmSize"` + VNetName providerconfigtypes.ConfigVarString `json:"vnetName"` + SubnetName providerconfigtypes.ConfigVarString `json:"subnetName"` + 
LoadBalancerSku providerconfigtypes.ConfigVarString `json:"loadBalancerSku"` + RouteTableName providerconfigtypes.ConfigVarString `json:"routeTableName"` + AvailabilitySet providerconfigtypes.ConfigVarString `json:"availabilitySet"` + AssignAvailabilitySet *bool `json:"assignAvailabilitySet"` + SecurityGroupName providerconfigtypes.ConfigVarString `json:"securityGroupName"` + Zones []string `json:"zones"` + ImagePlan *ImagePlan `json:"imagePlan,omitempty"` + ImageReference *ImageReference `json:"imageReference,omitempty"` + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking"` ImageID providerconfigtypes.ConfigVarString `json:"imageID"` OSDiskSize int32 `json:"osDiskSize"` diff --git a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/driver.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/driver.go index 356593248..8c08ff8c4 100644 --- a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/driver.go +++ b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/driver.go @@ -172,7 +172,7 @@ func (d *driver) ProvisionServer(ctx context.Context, uid types.UID, cfg *plugin } if _, err := d.workflowClient.Create(ctx, workflowTemplate.Id, hw.GetID()); err != nil { - return nil, fmt.Errorf("failed to provisioing server id %s running template id %s: %w", workflowTemplate.Id, hw.GetID(), err) + return nil, fmt.Errorf("failed to provision server id %s running template id %s: %w", workflowTemplate.Id, hw.GetID(), err) } return &hw, nil diff --git a/pkg/cloudprovider/provider/baremetal/provider.go b/pkg/cloudprovider/provider/baremetal/provider.go index e62e757ff..1203cb786 100644 --- a/pkg/cloudprovider/provider/baremetal/provider.go +++ b/pkg/cloudprovider/provider/baremetal/provider.go @@ -53,6 +53,11 @@ func (b bareMetalServer) ID() string { return b.server.GetID() } +// TODO: Tinkerbell doesn't have a CCM. 
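The Azure accelerated-networking support added above is gated on a SKU capability lookup before the VM is created. Below is a minimal, self-contained sketch of that lookup pattern; it uses simplified stand-in types instead of the Azure compute SDK, and only the capability name and the "True" sentinel are taken from the patch.

```go
package main

import "fmt"

// Simplified stand-ins for the Azure SDK's ResourceSku / ResourceSkuCapabilities types.
type skuCapability struct {
	Name  *string
	Value *string
}

type resourceSKU struct {
	Name         *string
	Capabilities *[]skuCapability
}

const capabilityValueTrue = "True"

// skuHasCapability mirrors the check above: scan the SKU's capability list
// for a matching name whose value is the string "True".
func skuHasCapability(sku resourceSKU, name string) bool {
	if sku.Capabilities == nil {
		return false
	}
	for _, c := range *sku.Capabilities {
		if c.Name != nil && *c.Name == name && c.Value != nil && *c.Value == capabilityValueTrue {
			return true
		}
	}
	return false
}

func main() {
	name, acc, val := "Standard_D2s_v3", "AcceleratedNetworkingEnabled", capabilityValueTrue
	sku := resourceSKU{Name: &name, Capabilities: &[]skuCapability{{Name: &acc, Value: &val}}}

	if !skuHasCapability(sku, "AcceleratedNetworkingEnabled") {
		fmt.Println("VM size does not support accelerated networking")
		return
	}
	fmt.Println("accelerated networking supported")
}
```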
+func (b bareMetalServer) ProviderID() string { + return "" +} + func (b bareMetalServer) Addresses() map[string]corev1.NodeAddressType { return map[string]corev1.NodeAddressType{ b.server.GetIPAddress(): corev1.NodeInternalIP, diff --git a/pkg/cloudprovider/provider/digitalocean/provider.go b/pkg/cloudprovider/provider/digitalocean/provider.go index 1c5ba1f58..fbb121f6f 100644 --- a/pkg/cloudprovider/provider/digitalocean/provider.go +++ b/pkg/cloudprovider/provider/digitalocean/provider.go @@ -503,6 +503,10 @@ func (d *doInstance) ID() string { return strconv.Itoa(d.droplet.ID) } +func (d *doInstance) ProviderID() string { + return fmt.Sprintf("digitalocean://%d", d.droplet.ID) +} + func (d *doInstance) Addresses() map[string]v1.NodeAddressType { addresses := map[string]v1.NodeAddressType{} for _, n := range d.droplet.Networks.V4 { diff --git a/pkg/cloudprovider/provider/equinixmetal/provider.go b/pkg/cloudprovider/provider/equinixmetal/provider.go index 94ac01b82..0fd67b6c8 100644 --- a/pkg/cloudprovider/provider/equinixmetal/provider.go +++ b/pkg/cloudprovider/provider/equinixmetal/provider.go @@ -56,6 +56,7 @@ type Config struct { ProjectID string BillingCycle string InstanceType string + Metro string Facilities []string Tags []string } @@ -140,6 +141,10 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *e } c.Facilities = append(c.Facilities, facilityValue) } + c.Metro, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Metro) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get the value of \"metro\" field, error = %w", err) + } // ensure we have defaults c.populateDefaults() @@ -187,18 +192,38 @@ func (p *provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) client := getClient(c.Token) - if len(c.Facilities) == 0 || c.Facilities[0] == "" { - return fmt.Errorf("must have at least one non-blank facility") + if c.Metro == "" && (len(c.Facilities) == 0 || c.Facilities[0] == "") { + return fmt.Errorf("must have at least one non-blank facility or a metro") } - // get all valid facilities - facilities, _, err := client.Facilities.List(nil) - if err != nil { - return fmt.Errorf("failed to list facilities: %w", err) + if c.Facilities != nil && (len(c.Facilities) > 0 || c.Facilities[0] != "") { + // get all valid facilities + facilities, _, err := client.Facilities.List(nil) + if err != nil { + return fmt.Errorf("failed to list facilities: %w", err) + } + // ensure our requested facilities are in those facilities + if missingFacilities := itemsNotInList(facilityProp(facilities, "Code"), c.Facilities); len(missingFacilities) > 0 { + return fmt.Errorf("unknown facilities: %s", strings.Join(missingFacilities, ",")) + } } - // ensure our requested facilities are in those facilities - if missingFacilities := itemsNotInList(facilityProp(facilities, "Code"), c.Facilities); len(missingFacilities) > 0 { - return fmt.Errorf("unknown facilities: %s", strings.Join(missingFacilities, ",")) + + if c.Metro != "" { + metros, _, err := client.Metros.List(nil) + if err != nil { + return fmt.Errorf("failed to list metros: %w", err) + } + + var metroExists bool + for _, metro := range metros { + if strings.EqualFold(metro.Code, c.Metro) { + metroExists = true + } + } + + if !metroExists { + return fmt.Errorf("unknown metro: %s", c.Metro) + } } // get all valid plans a.k.a. 
instance types @@ -239,6 +264,7 @@ func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, d UserData: userdata, ProjectID: c.ProjectID, Facility: c.Facilities, + Metro: c.Metro, BillingCycle: c.BillingCycle, Plan: c.InstanceType, OS: imageName, @@ -273,7 +299,7 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine } client := getClient(c.Token) - res, err := client.Devices.Delete(instance.(*metalDevice).device.ID) + res, err := client.Devices.Delete(instance.(*metalDevice).device.ID, false) if err != nil { return false, metalErrorToTerminalError(err, res, "failed to delete the server") } @@ -373,6 +399,10 @@ func (s *metalDevice) ID() string { return s.device.ID } +func (s *metalDevice) ProviderID() string { + return "equinixmetal://" + s.device.ID +} + func (s *metalDevice) Addresses() map[string]v1.NodeAddressType { // returns addresses in CIDR format addresses := map[string]v1.NodeAddressType{} @@ -446,6 +476,8 @@ func getNameForOS(os providerconfigtypes.OperatingSystem) (string, error) { return "centos_7", nil case providerconfigtypes.OperatingSystemFlatcar: return "flatcar_stable", nil + case providerconfigtypes.OperatingSystemRockyLinux: + return "rocky_8", nil } return "", providerconfigtypes.ErrOSNotSupported } diff --git a/pkg/cloudprovider/provider/equinixmetal/types/types.go b/pkg/cloudprovider/provider/equinixmetal/types/types.go index 676c0f14a..b34625af0 100644 --- a/pkg/cloudprovider/provider/equinixmetal/types/types.go +++ b/pkg/cloudprovider/provider/equinixmetal/types/types.go @@ -26,7 +26,8 @@ type RawConfig struct { ProjectID providerconfigtypes.ConfigVarString `json:"projectID,omitempty"` BillingCycle providerconfigtypes.ConfigVarString `json:"billingCycle"` InstanceType providerconfigtypes.ConfigVarString `json:"instanceType"` - Facilities []providerconfigtypes.ConfigVarString `json:"facilities"` + Metro providerconfigtypes.ConfigVarString `json:"metro,omitempty"` + Facilities []providerconfigtypes.ConfigVarString `json:"facilities,omitempty"` Tags []providerconfigtypes.ConfigVarString `json:"tags,omitempty"` } diff --git a/pkg/cloudprovider/provider/fake/provider.go b/pkg/cloudprovider/provider/fake/provider.go index b3d116d59..c1d0c7c56 100644 --- a/pkg/cloudprovider/provider/fake/provider.go +++ b/pkg/cloudprovider/provider/fake/provider.go @@ -48,6 +48,10 @@ func (f CloudProviderInstance) ID() string { return "" } +func (f CloudProviderInstance) ProviderID() string { + return "" +} + func (f CloudProviderInstance) Addresses() map[string]corev1.NodeAddressType { return nil } diff --git a/pkg/cloudprovider/provider/gce/instance.go b/pkg/cloudprovider/provider/gce/instance.go index f53967ae4..1d61d4bae 100644 --- a/pkg/cloudprovider/provider/gce/instance.go +++ b/pkg/cloudprovider/provider/gce/instance.go @@ -60,6 +60,10 @@ func (gi *googleInstance) ID() string { return strconv.FormatUint(gi.ci.Id, 10) } +func (gi *googleInstance) ProviderID() string { + return fmt.Sprintf("gce://%s/%s/%s", gi.projectID, gi.zone, gi.ci.Name) +} + // Addresses implements instance.Instance. 
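Stepping back to the Equinix Metal validation above: a machine spec is now accepted with either a metro or a facility list, and a given metro code is matched case-insensitively against the ones the API reports. A rough sketch of that rule follows, using plain slices in place of the real API client; the metro codes in main are made up for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

// validatePlacement mirrors the rule above: a config must name either a metro
// or at least one non-blank facility, and a requested metro must exist in the
// list returned by the API (matched case-insensitively).
func validatePlacement(metro string, facilities, knownMetros []string) error {
	if metro == "" && (len(facilities) == 0 || facilities[0] == "") {
		return fmt.Errorf("must have at least one non-blank facility or a metro")
	}
	if metro != "" {
		for _, known := range knownMetros {
			if strings.EqualFold(known, metro) {
				return nil
			}
		}
		return fmt.Errorf("unknown metro: %s", metro)
	}
	return nil
}

func main() {
	// Hypothetical metro codes, for illustration only.
	known := []string{"am", "ny", "sv"}
	fmt.Println(validatePlacement("AM", nil, known)) // nil: matches "am" case-insensitively
	fmt.Println(validatePlacement("", nil, known))   // error: neither metro nor facility
	fmt.Println(validatePlacement("xx", nil, known)) // error: unknown metro
}
```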
func (gi *googleInstance) Addresses() map[string]v1.NodeAddressType { addrs := map[string]v1.NodeAddressType{} diff --git a/pkg/cloudprovider/provider/hetzner/provider.go b/pkg/cloudprovider/provider/hetzner/provider.go index 076514949..81dcb476d 100644 --- a/pkg/cloudprovider/provider/hetzner/provider.go +++ b/pkg/cloudprovider/provider/hetzner/provider.go @@ -533,6 +533,10 @@ func (s *hetznerServer) ID() string { return strconv.Itoa(s.server.ID) } +func (s *hetznerServer) ProviderID() string { + return fmt.Sprintf("hcloud://%d", s.server.ID) +} + func (s *hetznerServer) Addresses() map[string]v1.NodeAddressType { addresses := map[string]v1.NodeAddressType{} for _, fips := range s.server.PublicNet.FloatingIPs { diff --git a/pkg/cloudprovider/provider/kubevirt/provider.go b/pkg/cloudprovider/provider/kubevirt/provider.go index 423a42909..27175f320 100644 --- a/pkg/cloudprovider/provider/kubevirt/provider.go +++ b/pkg/cloudprovider/provider/kubevirt/provider.go @@ -161,6 +161,10 @@ func (k *kubeVirtServer) ID() string { return string(k.vmi.UID) } +func (k *kubeVirtServer) ProviderID() string { + return "kubevirt://" + k.vmi.Name +} + func (k *kubeVirtServer) Addresses() map[string]corev1.NodeAddressType { addresses := map[string]corev1.NodeAddressType{} for _, kvInterface := range k.vmi.Status.Interfaces { @@ -491,7 +495,7 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, resourceRequirements := kubevirtv1.ResourceRequirements{} labels := map[string]string{"kubevirt.io/vm": machine.Name} // Add a common label to all VirtualMachines spawned by the same MachineDeployment (= MachineDeployment name). - if mdName, err := controllerutil.GetMachineDeploymentNameForMachine(ctx, machine, data.Client); err == nil { + if mdName, _, err := controllerutil.GetMachineDeploymentNameAndRevisionForMachine(ctx, machine, data.Client); err == nil { labels[machineDeploymentLabelKey] = mdName } diff --git a/pkg/cloudprovider/provider/linode/provider.go b/pkg/cloudprovider/provider/linode/provider.go index 2738c7d01..030fe4585 100644 --- a/pkg/cloudprovider/provider/linode/provider.go +++ b/pkg/cloudprovider/provider/linode/provider.go @@ -403,6 +403,11 @@ func (d *linodeInstance) ID() string { return strconv.Itoa(d.linode.ID) } +// TODO: Implement once we start supporting Linode CCM. +func (d *linodeInstance) ProviderID() string { + return "" +} + func (d *linodeInstance) Addresses() map[string]v1.NodeAddressType { addresses := map[string]v1.NodeAddressType{} for _, n := range d.linode.IPv4 { diff --git a/pkg/cloudprovider/provider/nutanix/client.go b/pkg/cloudprovider/provider/nutanix/client.go index e2ef6c9b3..0ebf070e4 100644 --- a/pkg/cloudprovider/provider/nutanix/client.go +++ b/pkg/cloudprovider/provider/nutanix/client.go @@ -57,7 +57,7 @@ func GetClientSet(config *Config) (*ClientSet, error) { } if config.Password == "" { - return nil, errors.New("no password specificed") + return nil, errors.New("no password specified") } if config.Endpoint == "" { diff --git a/pkg/cloudprovider/provider/nutanix/provider.go b/pkg/cloudprovider/provider/nutanix/provider.go index 697a7c363..70c2b701e 100644 --- a/pkg/cloudprovider/provider/nutanix/provider.go +++ b/pkg/cloudprovider/provider/nutanix/provider.go @@ -85,6 +85,11 @@ func (nutanixServer Server) ID() string { return nutanixServer.id } +// NB: Nutanix doesn't have a CCM. 
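Most of the provider changes above add a ProviderID() accessor that reports the instance ID in the scheme its cloud-controller-manager expects (for example hcloud://<id> or aws:///<zone>/<id>) and returns an empty string where no CCM exists. The sketch below shows how a consumer might use that convention; the interface is trimmed down and the fallback behaviour is an assumption for illustration, not the controller's actual logic.

```go
package main

import "fmt"

// instance is a trimmed-down stand-in for the cloud-provider Instance
// interface extended above with ProviderID().
type instance interface {
	ID() string
	ProviderID() string
}

type hetznerLike struct{ id int }

func (h hetznerLike) ID() string         { return fmt.Sprintf("%d", h.id) }
func (h hetznerLike) ProviderID() string { return fmt.Sprintf("hcloud://%d", h.id) }

type noCCMProvider struct{ id string }

func (n noCCMProvider) ID() string         { return n.id }
func (n noCCMProvider) ProviderID() string { return "" } // provider has no CCM

// nodeProviderID returns the value a consumer could place into
// node.Spec.ProviderID, skipping providers that do not report one.
func nodeProviderID(i instance) (string, bool) {
	if pid := i.ProviderID(); pid != "" {
		return pid, true
	}
	return "", false
}

func main() {
	for _, i := range []instance{hetznerLike{id: 42}, noCCMProvider{id: "abc"}} {
		if pid, ok := nodeProviderID(i); ok {
			fmt.Println("providerID:", pid)
		} else {
			fmt.Println("no providerID for instance", i.ID())
		}
	}
}
```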
+func (nutanixServer Server) ProviderID() string { + return "" +} + func (nutanixServer Server) Addresses() map[string]corev1.NodeAddressType { return nutanixServer.addresses } diff --git a/pkg/cloudprovider/provider/openstack/provider.go b/pkg/cloudprovider/provider/openstack/provider.go index 8b591c1c6..705b5f724 100644 --- a/pkg/cloudprovider/provider/openstack/provider.go +++ b/pkg/cloudprovider/provider/openstack/provider.go @@ -934,6 +934,10 @@ func (d *osInstance) ID() string { return d.server.ID } +func (d *osInstance) ProviderID() string { + return "openstack:///" + d.server.ID +} + func (d *osInstance) Addresses() map[string]corev1.NodeAddressType { addresses := map[string]corev1.NodeAddressType{} for _, networkAddresses := range d.server.Addresses { diff --git a/pkg/cloudprovider/provider/scaleway/provider.go b/pkg/cloudprovider/provider/scaleway/provider.go index cb69420ce..6a2aa4887 100644 --- a/pkg/cloudprovider/provider/scaleway/provider.go +++ b/pkg/cloudprovider/provider/scaleway/provider.go @@ -379,6 +379,11 @@ func (s *scwServer) ID() string { return s.server.ID } +// TODO: Implement once we start supporting Scaleway CCM. +func (s *scwServer) ProviderID() string { + return "" +} + func (s *scwServer) Addresses() map[string]corev1.NodeAddressType { addresses := map[string]corev1.NodeAddressType{} if s.server.PrivateIP != nil { @@ -415,12 +420,12 @@ func (s *scwServer) Status() cloudInstance.Status { // if the given error doesn't qualify the error passed as // an argument will be returned. func scalewayErrToTerminalError(err error) error { - var deinedErr *scw.PermissionsDeniedError + var deniedErr *scw.PermissionsDeniedError var invalidArgErr *scw.InvalidArgumentsError var outOfStackErr *scw.OutOfStockError var quotaErr *scw.QuotasExceededError - if errors.As(err, &deinedErr) { + if errors.As(err, &deniedErr) { return cloudprovidererrors.TerminalError{ Reason: common.InvalidConfigurationMachineError, Message: "A request has been rejected due to invalid credentials which were taken from the MachineSpec", diff --git a/pkg/cloudprovider/provider/vmwareclouddirector/helper.go b/pkg/cloudprovider/provider/vmwareclouddirector/helper.go index 48dcdc8f1..1b21f0ef9 100644 --- a/pkg/cloudprovider/provider/vmwareclouddirector/helper.go +++ b/pkg/cloudprovider/provider/vmwareclouddirector/helper.go @@ -28,7 +28,6 @@ import ( vcdapitypes "github.com/vmware/go-vcloud-director/v2/types/v56" clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" "k8s.io/utils/pointer" ) @@ -238,14 +237,20 @@ func recomposeComputeAndDisk(config *Config, vm *govcd.VM) (*govcd.VM, error) { return vm, nil } -func setUserData(userdata string, vm *govcd.VM, providerConfig *providerconfigtypes.Config) error { +func setUserData(userdata string, vm *govcd.VM, isFlatcar bool) error { userdataBase64 := base64.StdEncoding.EncodeToString([]byte(userdata)) props := map[string]string{ - "user-data": userdataBase64, "disk.enableUUID": "1", "instance-id": vm.VM.Name, } + if isFlatcar { + props["guestinfo.ignition.config.data"] = userdataBase64 + props["guestinfo.ignition.config.data.encoding"] = "base64" + } else { + props["user-data"] = userdataBase64 + } + vmProperties := &vcdapitypes.ProductSectionList{ ProductSection: &vcdapitypes.ProductSection{ Info: "Custom properties", diff --git a/pkg/cloudprovider/provider/vmwareclouddirector/provider.go 
b/pkg/cloudprovider/provider/vmwareclouddirector/provider.go index 2414c1a39..9c06152d1 100644 --- a/pkg/cloudprovider/provider/vmwareclouddirector/provider.go +++ b/pkg/cloudprovider/provider/vmwareclouddirector/provider.go @@ -120,6 +120,11 @@ func (s Server) ID() string { return s.id } +// TODO: Implement once we start supporting vCloud Director CCM. +func (s Server) ProviderID() string { + return "" +} + func (s Server) Addresses() map[string]corev1.NodeAddressType { return s.addresses } @@ -246,7 +251,7 @@ func (p *provider) create(ctx context.Context, machine *clusterv1alpha1.Machine, // 5. Before powering on the VM, configure customization to attach userdata with the VM // update guest properties. - err = setUserData(userdata, vm, providerConfig) + err = setUserData(userdata, vm, providerConfig.OperatingSystem == providerconfigtypes.OperatingSystemFlatcar) if err != nil { return nil, err } diff --git a/pkg/cloudprovider/provider/vmwareclouddirector/types/types.go b/pkg/cloudprovider/provider/vmwareclouddirector/types/types.go index c8c5aa3de..188e3c2bd 100644 --- a/pkg/cloudprovider/provider/vmwareclouddirector/types/types.go +++ b/pkg/cloudprovider/provider/vmwareclouddirector/types/types.go @@ -46,7 +46,7 @@ type RawConfig struct { // Network configuration. Network providerconfigtypes.ConfigVarString `json:"network"` - IPAllocationMode IPAllocationMode `json:"ipAllocationMode"` + IPAllocationMode IPAllocationMode `json:"ipAllocationMode,omitempty"` // Compute configuration. CPUs int64 `json:"cpus"` diff --git a/pkg/cloudprovider/provider/vsphere/provider.go b/pkg/cloudprovider/provider/vsphere/provider.go index 1d47efeb7..a16f58b62 100644 --- a/pkg/cloudprovider/provider/vsphere/provider.go +++ b/pkg/cloudprovider/provider/vsphere/provider.go @@ -82,6 +82,7 @@ var _ instance.Instance = &Server{} type Server struct { name string id string + uuid string status instance.Status addresses map[string]corev1.NodeAddressType } @@ -94,6 +95,10 @@ func (vsphereServer Server) ID() string { return vsphereServer.id } +func (vsphereServer Server) ProviderID() string { + return "vsphere://" + vsphereServer.uuid +} + func (vsphereServer Server) Addresses() map[string]corev1.NodeAddressType { return vsphereServer.addresses } @@ -372,7 +377,7 @@ func (p *provider) create(ctx context.Context, machine *clusterv1alpha1.Machine, return nil, fmt.Errorf("error when waiting for vm powerOn task: %w", err) } - return Server{name: virtualMachine.Name(), status: instance.StatusRunning, id: virtualMachine.Reference().Value}, nil + return Server{name: virtualMachine.Name(), status: instance.StatusRunning, id: virtualMachine.Reference().Value, uuid: virtualMachine.UUID(ctx)}, nil } func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { @@ -502,7 +507,7 @@ func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, da } // We must return here because the vendored code for determining if the guest // utils are running yields an NPD when using with an instance that is not running - return Server{name: virtualMachine.Name(), status: instance.StatusUnknown}, nil + return Server{name: virtualMachine.Name(), status: instance.StatusUnknown, uuid: virtualMachine.UUID(ctx)}, nil } // virtualMachine.IsToolsRunning panics when executed on a VM that is not powered on @@ -530,7 +535,7 @@ func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, da klog.V(3).Infof("Can't fetch the IP addresses for machine %s, 
the VMware guest utils are not running yet. This might take a few minutes", machine.Spec.Name) } - return Server{name: virtualMachine.Name(), status: instance.StatusRunning, addresses: addresses, id: virtualMachine.Reference().Value}, nil + return Server{name: virtualMachine.Name(), status: instance.StatusRunning, addresses: addresses, id: virtualMachine.Reference().Value, uuid: virtualMachine.UUID(ctx)}, nil } func (p *provider) MigrateUID(_ context.Context, _ *clusterv1alpha1.Machine, _ ktypes.UID) error { diff --git a/pkg/cloudprovider/provider/vsphere/types/cloudconfig.go b/pkg/cloudprovider/provider/vsphere/types/cloudconfig.go index 073262a8b..9e9f997bf 100644 --- a/pkg/cloudprovider/provider/vsphere/types/cloudconfig.go +++ b/pkg/cloudprovider/provider/vsphere/types/cloudconfig.go @@ -36,6 +36,9 @@ working-dir = {{ .Global.WorkingDir | iniEscape }} datacenter = {{ .Global.Datacenter | iniEscape }} datastore = {{ .Global.DefaultDatastore | iniEscape }} server = {{ .Global.VCenterIP | iniEscape }} +{{- if .Global.IPFamily }} +ip-family = {{ .Global.IPFamily | iniEscape }} +{{- end }} [Disk] scsicontrollertype = {{ .Disk.SCSIControllerType | iniEscape }} @@ -53,6 +56,9 @@ user = {{ $vc.User | iniEscape }} password = {{ $vc.Password | iniEscape }} port = {{ $vc.VCenterPort }} datacenters = {{ $vc.Datacenters | iniEscape }} +{{- if $vc.IPFamily }} +ip-family = {{ $vc.IPFamily | iniEscape }} +{{- end }} {{ end }} ` ) @@ -79,6 +85,7 @@ type GlobalOpts struct { DefaultDatastore string `gcfg:"datastore"` VCenterIP string `gcfg:"server"` ClusterID string `gcfg:"cluster-id"` + IPFamily string `gcfg:"ip-family"` // NOTE: supported only in case of out-of-tree CCM } type VirtualCenterConfig struct { @@ -86,6 +93,7 @@ type VirtualCenterConfig struct { Password string `gcfg:"password"` VCenterPort string `gcfg:"port"` Datacenters string `gcfg:"datacenters"` + IPFamily string `gcfg:"ip-family"` // NOTE: supported only in case of out-of-tree CCM } // CloudConfig is used to read and store information from the cloud configuration file. 
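The vSphere cloud-config template above only emits the ip-family key when the field is set, so configs that never specify it render exactly as before. Here is a small sketch of that conditional rendering with text/template; the iniQuote helper is a simplified stand-in for the real iniEscape function, and only the {{- if }} gating mirrors the patch.

```go
package main

import (
	"os"
	"strconv"
	"text/template"
)

// globalOpts carries only the fields needed to demonstrate the optional entry.
type globalOpts struct {
	VCenterIP string
	IPFamily  string // e.g. "ipv4,ipv6"; empty means "do not emit the key"
}

const globalSection = `[Global]
server = {{ .VCenterIP | iniQuote }}
{{- if .IPFamily }}
ip-family = {{ .IPFamily | iniQuote }}
{{- end }}
`

func main() {
	tmpl := template.Must(template.New("global").
		// iniQuote is a simplified stand-in for the real iniEscape helper.
		Funcs(template.FuncMap{"iniQuote": strconv.Quote}).
		Parse(globalSection))

	// With IPFamily set, the ip-family line is rendered; without it, the
	// line disappears entirely instead of producing `ip-family = ""`.
	_ = tmpl.Execute(os.Stdout, globalOpts{VCenterIP: "https://127.0.0.1:8443", IPFamily: "ipv4,ipv6"})
	_ = tmpl.Execute(os.Stdout, globalOpts{VCenterIP: "https://127.0.0.1:8443"})
}
```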
diff --git a/pkg/cloudprovider/provider/vsphere/types/cloudconfig_test.go b/pkg/cloudprovider/provider/vsphere/types/cloudconfig_test.go index f63c60cbd..399f31231 100644 --- a/pkg/cloudprovider/provider/vsphere/types/cloudconfig_test.go +++ b/pkg/cloudprovider/provider/vsphere/types/cloudconfig_test.go @@ -87,6 +87,36 @@ func TestCloudConfigToString(t *testing.T) { }, }, }, + { + name: "3-dual-stack", + config: &CloudConfig{ + Global: GlobalOpts{ + User: "admin", + Password: "password", + InsecureFlag: true, + IPFamily: "ipv4,ipv6", + }, + Workspace: WorkspaceOpts{ + VCenterIP: "https://127.0.0.1:8443", + ResourcePoolPath: "/some-resource-pool", + DefaultDatastore: "Datastore", + Folder: "some-folder", + Datacenter: "Datacenter", + }, + Disk: DiskOpts{ + SCSIControllerType: "pvscsi", + }, + VirtualCenter: map[string]*VirtualCenterConfig{ + "vc1": { + User: "1-some-user", + Password: "1-some-password", + VCenterPort: "443", + Datacenters: "1-foo", + IPFamily: "ipv4,ipv6", + }, + }, + }, + }, } for _, test := range tests { diff --git a/pkg/cloudprovider/provider/vsphere/types/testdata/3-dual-stack.golden b/pkg/cloudprovider/provider/vsphere/types/testdata/3-dual-stack.golden new file mode 100644 index 000000000..88343530b --- /dev/null +++ b/pkg/cloudprovider/provider/vsphere/types/testdata/3-dual-stack.golden @@ -0,0 +1,29 @@ +[Global] +user = "admin" +password = "password" +port = "" +insecure-flag = true +working-dir = "" +datacenter = "" +datastore = "" +server = "" +ip-family = "ipv4,ipv6" + +[Disk] +scsicontrollertype = "pvscsi" + +[Workspace] +server = "https://127.0.0.1:8443" +datacenter = "Datacenter" +folder = "some-folder" +default-datastore = "Datastore" +resourcepool-path = "/some-resource-pool" + + +[VirtualCenter "vc1"] +user = "1-some-user" +password = "1-some-password" +port = 443 +datacenters = "1-foo" +ip-family = "ipv4,ipv6" + diff --git a/pkg/cloudprovider/util/cloud_init_settings.go b/pkg/cloudprovider/util/cloud_init_settings.go index fc199b92d..ed32c6e5a 100644 --- a/pkg/cloudprovider/util/cloud_init_settings.go +++ b/pkg/cloudprovider/util/cloud_init_settings.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "strings" "gopkg.in/yaml.v3" @@ -33,55 +32,27 @@ import ( ) const ( - CloudInitNamespace = "cloud-init-settings" - jwtTokenNamePrefix = "cloud-init-getter-token" + CloudInitNamespace = "cloud-init-settings" + cloudInitGetterSecret = "cloud-init-getter-token" ) -func ExtractAPIServerToken(ctx context.Context, client ctrlruntimeclient.Client) (string, error) { - secretList := corev1.SecretList{} - if err := client.List(ctx, &secretList, &ctrlruntimeclient.ListOptions{Namespace: CloudInitNamespace}); err != nil { - return "", fmt.Errorf("failed to list secrets in namespace %s: %w", CloudInitNamespace, err) +func ExtractTokenAndAPIServer(ctx context.Context, userdata string, client ctrlruntimeclient.Client) (string, string, error) { + secret := &corev1.Secret{} + if err := client.Get(ctx, types.NamespacedName{Name: cloudInitGetterSecret, Namespace: CloudInitNamespace}, secret); err != nil { + return "", "", fmt.Errorf("failed to get %s secrets in namespace %s: %w", cloudInitGetterSecret, CloudInitNamespace, err) } - for _, secret := range secretList.Items { - if strings.HasPrefix(secret.Name, jwtTokenNamePrefix) { - if secret.Data != nil { - jwtToken := secret.Data["token"] - if jwtToken != nil { - token := string(jwtToken) - return token, nil - } - } - } + token := secret.Data["token"] + if token == nil { + return "", "", errors.New("failed to extract token 
from cloud-init secret") } - return "", errors.New("failed to fetch api server token") -} - -func ExtractTokenAndAPIServer(ctx context.Context, userdata string, client ctrlruntimeclient.Client) (token string, apiServer string, err error) { - secretList := corev1.SecretList{} - if err := client.List(ctx, &secretList, &ctrlruntimeclient.ListOptions{Namespace: CloudInitNamespace}); err != nil { - return "", "", fmt.Errorf("failed to list secrets in namespace %s: %w", CloudInitNamespace, err) - } - - apiServer, err = extractAPIServer(userdata) + apiServer, err := extractAPIServer(userdata) if err != nil { return "", "", fmt.Errorf("failed to extract api server address: %w", err) } - for _, secret := range secretList.Items { - if strings.HasPrefix(secret.Name, jwtTokenNamePrefix) { - if secret.Data != nil { - jwtToken := secret.Data["token"] - if jwtToken != nil { - token = string(jwtToken) - return token, apiServer, nil - } - } - } - } - - return "", "", errors.New("failed to find cloud-init secret") + return string(token), apiServer, nil } func CreateMachineCloudInitSecret(ctx context.Context, userdata, machineName string, client ctrlruntimeclient.Client) error { diff --git a/pkg/cloudprovider/util/cloud_init_settings_test.go b/pkg/cloudprovider/util/cloud_init_settings_test.go index a4bf41131..8a72079f2 100644 --- a/pkg/cloudprovider/util/cloud_init_settings_test.go +++ b/pkg/cloudprovider/util/cloud_init_settings_test.go @@ -39,7 +39,7 @@ var testData = []struct { userdata: "./testdata/userdata.yaml", secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: jwtTokenNamePrefix, + Name: cloudInitGetterSecret, Namespace: CloudInitNamespace, }, Data: map[string][]byte{ diff --git a/pkg/containerruntime/docker.go b/pkg/containerruntime/docker.go index dbbc1e58d..e79920d7b 100644 --- a/pkg/containerruntime/docker.go +++ b/pkg/containerruntime/docker.go @@ -28,8 +28,8 @@ import ( const ( DefaultDockerContainerdVersion = "1.4" - DefaultDockerVersion = "19.03" - LegacyDockerVersion = "18.09" + DefaultDockerVersion = "20.10" + LegacyDockerVersion = "19.03" ) type Docker struct { diff --git a/pkg/controller/machine/bootstrap.go b/pkg/controller/machine/bootstrap.go index 6e86b8fe3..68ea4a656 100644 --- a/pkg/controller/machine/bootstrap.go +++ b/pkg/controller/machine/bootstrap.go @@ -17,388 +17,31 @@ limitations under the License. 
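The cloud-init settings change above replaces listing every secret in the namespace and matching on a name prefix with a direct Get of the well-known cloud-init-getter-token secret. A condensed sketch of that lookup with the controller-runtime client, assuming the same secret name and namespace constants as the patch:

```go
package cloudinit

import (
	"context"
	"errors"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

const (
	cloudInitNamespace    = "cloud-init-settings"
	cloudInitGetterSecret = "cloud-init-getter-token"
)

// extractToken fetches the bootstrap token from the well-known secret instead
// of listing the namespace and matching a name prefix.
func extractToken(ctx context.Context, client ctrlruntimeclient.Client) (string, error) {
	secret := &corev1.Secret{}
	key := types.NamespacedName{Name: cloudInitGetterSecret, Namespace: cloudInitNamespace}
	if err := client.Get(ctx, key, secret); err != nil {
		return "", fmt.Errorf("failed to get secret %s/%s: %w", cloudInitNamespace, cloudInitGetterSecret, err)
	}

	token, ok := secret.Data["token"]
	if !ok || len(token) == 0 {
		return "", errors.New("failed to extract token from cloud-init secret")
	}
	return string(token), nil
}
```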
package controller import ( - "bytes" - "context" - "encoding/base64" - "fmt" + "net/url" "regexp" - "text/template" + "strings" - "github.com/Masterminds/sprig/v3" - - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - "github.com/kubermatic/machine-controller/pkg/userdata/convert" - "github.com/kubermatic/machine-controller/pkg/userdata/helper" - "github.com/kubermatic/machine-controller/pkg/userdata/rhel" - - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + corev1 "k8s.io/api/core/v1" ) -func getOSMBootstrapUserdata(ctx context.Context, client ctrlruntimeclient.Client, req plugin.UserDataRequest, secretName string) (string, error) { - var clusterName string - for key := range req.Kubeconfig.Clusters { - clusterName = key - } - - token, err := util.ExtractAPIServerToken(ctx, client) - if err != nil { - return "", fmt.Errorf("failed to fetch api-server token: %w", err) - } - - // Retrieve provider config from machine - pconfig, err := providerconfigtypes.GetConfig(req.MachineSpec.ProviderSpec) - if err != nil { - return "", fmt.Errorf("failed to get providerSpec: %w", err) - } - - bootstrapKubeconfig, err := helper.StringifyKubeconfig(req.Kubeconfig) - if err != nil { - return "", fmt.Errorf("failed to format bootstrap kubeconfig: %w", err) - } - - // Regardless if the provisioningUtility is set to use cloud-init, we only allow using ignition to provision flatcar - // machines with osm. - if pconfig.OperatingSystem == providerconfigtypes.OperatingSystemFlatcar { - return getOSMBootstrapUserDataForIgnition(req, pconfig.SSHPublicKeys, token, secretName, clusterName, bootstrapKubeconfig) - } - - // cloud-init is used for all other operating systems. - return getOSMBootstrapUserDataForCloudInit(req, pconfig, token, secretName, clusterName, bootstrapKubeconfig) -} - -// getOSMBootstrapUserDataForIgnition returns the userdata for the ignition bootstrap config. 
-func getOSMBootstrapUserDataForIgnition(req plugin.UserDataRequest, sshPublicKeys []string, token, secretName, clusterName, bootstrapKfg string) (string, error) { - data := struct { - Token string - SecretName string - ServerURL string - }{ - Token: token, - SecretName: secretName, - ServerURL: req.Kubeconfig.Clusters[clusterName].Server, - } - bsScript, err := template.New("bootstrap-script").Parse(ignitionBootstrapBinContentTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse bootstrapBinContentTemplate template for ignition: %w", err) - } - script := &bytes.Buffer{} - err = bsScript.Execute(script, data) - if err != nil { - return "", fmt.Errorf("failed to execute bootstrapBinContentTemplate template for ignition: %w", err) - } - bsIgnitionConfig, err := template.New("bootstrap-ignition-config").Funcs(sprig.TxtFuncMap()).Parse(ignitionTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse bootstrap-ignition-config template: %w", err) - } +const hostnamePlaceholder = "" - ignitionConfig := &bytes.Buffer{} - err = bsIgnitionConfig.Execute(ignitionConfig, struct { - plugin.UserDataRequest - Script string - Service string - SSHPublicKeys []string - BootstrapKubeconfig string - }{ - UserDataRequest: req, - Script: script.String(), - Service: bootstrapServiceContentTemplate, - SSHPublicKeys: sshPublicKeys, - BootstrapKubeconfig: bootstrapKfg, - }) - if err != nil { - return "", fmt.Errorf("failed to execute ignitionTemplate template: %w", err) - } +func getOSMBootstrapUserdata(machineName string, bootstrapSecret corev1.Secret) string { + bootstrapConfig := string(bootstrapSecret.Data["cloud-config"]) - return convert.ToIgnition(ignitionConfig.String()) -} - -// getOSMBootstrapUserDataForCloudInit returns the userdata for the cloud-init bootstrap script. 
-func getOSMBootstrapUserDataForCloudInit(req plugin.UserDataRequest, pconfig *providerconfigtypes.Config, token, secretName, clusterName, bootstrapKfg string) (string, error) { - data := struct { - Token string - SecretName string - ServerURL string - MachineName string - EnterpriseLinux bool - ProviderSpec *providerconfigtypes.Config - RHELConfig rhel.Config - }{ - Token: token, - SecretName: secretName, - ServerURL: req.Kubeconfig.Clusters[clusterName].Server, - MachineName: req.MachineSpec.Name, - ProviderSpec: pconfig, - } - - var ( - rhelConfig *rhel.Config - bsScript *template.Template - err error - ) - - switch pconfig.OperatingSystem { - case providerconfigtypes.OperatingSystemUbuntu: - bsScript, err = template.New("bootstrap-cloud-init").Parse(bootstrapAptBinContentTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse bootstrapAptBinContentTemplate template: %w", err) - } - case providerconfigtypes.OperatingSystemCentOS: - data.EnterpriseLinux = true - bsScript, err = template.New("bootstrap-cloud-init").Parse(bootstrapYumBinContentTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse bootstrapYumBinContentTemplate template: %w", err) - } - case providerconfigtypes.OperatingSystemAmazonLinux2: - bsScript, err = template.New("bootstrap-cloud-init").Parse(bootstrapYumBinContentTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse bootstrapYumBinContentTemplate template: %w", err) - } - case providerconfigtypes.OperatingSystemSLES: - bsScript, err = template.New("bootstrap-cloud-init").Parse(bootstrapZypperBinContentTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse bootstrapZypperBinContentTemplate template: %w", err) - } - case providerconfigtypes.OperatingSystemRHEL: - rhelConfig, err = rhel.LoadConfig(pconfig.OperatingSystemSpec) - if err != nil { - return "", fmt.Errorf("failed to parse OperatingSystemSpec: %w", err) - } - bsScript, err = template.New("bootstrap-cloud-init").Parse(bootstrapYumBinContentTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse bootstrapYumBinContentTemplate template: %w", err) - } - } - - script := &bytes.Buffer{} - err = bsScript.Execute(script, data) - if err != nil { - return "", fmt.Errorf("failed to execute bootstrap script template: %w", err) - } - bsCloudInit, err := template.New("bootstrap-cloud-init").Parse(cloudInitTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse download-binaries template: %w", err) - } - - cloudInit := &bytes.Buffer{} - err = bsCloudInit.Execute(cloudInit, struct { - Script string - Service string - plugin.UserDataRequest - ProviderSpec *providerconfigtypes.Config - BootstrapKubeconfig string - RHELConfig *rhel.Config - }{ - Script: base64.StdEncoding.EncodeToString(script.Bytes()), - Service: base64.StdEncoding.EncodeToString([]byte(bootstrapServiceContentTemplate)), - UserDataRequest: req, - ProviderSpec: pconfig, - BootstrapKubeconfig: base64.StdEncoding.EncodeToString([]byte(bootstrapKfg)), - RHELConfig: rhelConfig, - }) - if err != nil { - return "", fmt.Errorf("failed to execute cloudInitTemplate template: %w", err) - } - return cloudInit.String(), nil + // We have to inject the hostname i.e. machine name. + bootstrapConfig = strings.ReplaceAll(bootstrapConfig, hostnamePlaceholder, machineName) + // Data is HTML Encoded for ignition. 
+ bootstrapConfig = strings.ReplaceAll(bootstrapConfig, url.QueryEscape(hostnamePlaceholder), url.QueryEscape(machineName)) + return cleanupTemplateOutput(bootstrapConfig) } // cleanupTemplateOutput postprocesses the output of the template processing. Those // may exist due to the working of template functions like those of the sprig package // or template condition. -func cleanupTemplateOutput(output string) (string, error) { +func cleanupTemplateOutput(output string) string { // Valid YAML files are not allowed to have empty lines containing spaces or tabs. // So far only cleanup. woBlankLines := regexp.MustCompile(`(?m)^[ \t]+$`).ReplaceAllString(output, "") - return woBlankLines, nil + return woBlankLines } - -const ( - bootstrapAptBinContentTemplate = `#!/bin/bash -set -xeuo pipefail - -export DEBIAN_FRONTEND=noninteractive -apt update && apt install -y curl jq -curl -s -k -v --header 'Authorization: Bearer {{ .Token }}' {{ .ServerURL }}/api/v1/namespaces/cloud-init-settings/secrets/{{ .SecretName }} | jq '.data["cloud-config"]' -r| base64 -d > /etc/cloud/cloud.cfg.d/{{ .SecretName }}.cfg -cloud-init clean -cloud-init --file /etc/cloud/cloud.cfg.d/{{ .SecretName }}.cfg init -systemctl daemon-reload - -{{- /* The default cloud-init configurations files have some bug on Digital Ocean which causes the machine to be in-accessible on 2nd cloud-init. We have to manually run the module */}} -{{- if and (eq .ProviderSpec.CloudProvider "digitalocean") (eq .ProviderSpec.OperatingSystem "ubuntu") }} -rm /etc/netplan/50-cloud-init.yaml -netplan generate -netplan apply -{{- end }} - -systemctl restart setup.service -systemctl restart kubelet.service -systemctl restart kubelet-healthcheck.service - ` - - bootstrapYumBinContentTemplate = `#!/bin/bash -set -xeuo pipefail -source /etc/os-release -if [ "$ID" == "centos" ] && [ "$VERSION_ID" == "8" ]; then - sudo sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* - sudo sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* -fi -{{- if .EnterpriseLinux }} -yum install epel-release -y -{{- end }} - -yum install -y curl jq - -curl -s -k -v --header 'Authorization: Bearer {{ .Token }}' {{ .ServerURL }}/api/v1/namespaces/cloud-init-settings/secrets/{{ .SecretName }} | jq '.data["cloud-config"]' -r| base64 -d > /etc/cloud/cloud.cfg.d/{{ .SecretName }}.cfg -cloud-init clean -cloud-init --file /etc/cloud/cloud.cfg.d/{{ .SecretName }}.cfg init -systemctl daemon-reload -systemctl restart setup.service -systemctl restart kubelet.service -systemctl restart kubelet-healthcheck.service - ` - - bootstrapZypperBinContentTemplate = `#!/bin/bash -set -xeuo pipefail - -# Install JQ -zypper -n --quiet addrepo -C https://download.opensuse.org/repositories/utilities/openSUSE_Leap_15.3/utilities.repo -zypper -n --no-gpg-checks refresh -zypper -n install jq - -# Install CURL -zypper -n install curl - -curl -s -k -v --header 'Authorization: Bearer {{ .Token }}' {{ .ServerURL }}/api/v1/namespaces/cloud-init-settings/secrets/{{ .SecretName }} | jq '.data["cloud-config"]' -r| base64 -d > /etc/cloud/cloud.cfg.d/{{ .SecretName }}.cfg -cloud-init clean -cloud-init --file /etc/cloud/cloud.cfg.d/{{ .SecretName }}.cfg init -systemctl daemon-reload -systemctl restart setup.service -systemctl restart kubelet.service -systemctl restart kubelet-healthcheck.service - ` - - bootstrapServiceContentTemplate = `[Install] -WantedBy=multi-user.target - -[Unit] -Requires=network-online.target -After=network-online.target -[Service] 
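The new bootstrap path above no longer renders userdata templates inside machine-controller; it takes the cloud-config secret produced by OSM and only swaps in the machine name, once verbatim and once URL-escaped, because ignition-style payloads carry the placeholder in percent-encoded form. A standalone sketch of that substitution; the placeholder value used here is illustrative, since the real constant's value is not visible in this patch view.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// hostnamePlaceholder is illustrative only; the real constant is defined in
// the bootstrap code above.
const hostnamePlaceholder = "<MACHINE_NAME>"

// injectMachineName swaps the placeholder for the real machine name, both in
// plain form and in URL-escaped form, since ignition payloads carry the
// placeholder percent-encoded.
func injectMachineName(bootstrapConfig, machineName string) string {
	bootstrapConfig = strings.ReplaceAll(bootstrapConfig, hostnamePlaceholder, machineName)
	bootstrapConfig = strings.ReplaceAll(bootstrapConfig,
		url.QueryEscape(hostnamePlaceholder), url.QueryEscape(machineName))
	return bootstrapConfig
}

func main() {
	cfg := "hostname: <MACHINE_NAME>\nignition-data: %3CMACHINE_NAME%3E\n"
	fmt.Print(injectMachineName(cfg, "worker-0"))
}
```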
-Type=oneshot -RemainAfterExit=true -ExecStart=/opt/bin/bootstrap - ` - - cloudInitTemplate = `#cloud-config -{{ if ne .CloudProviderName "aws" }} -hostname: {{ .MachineSpec.Name }} -{{- /* Never set the hostname on AWS nodes. Kubernetes(kube-proxy) requires the hostname to be the private dns name */}} -{{ end }} -ssh_pwauth: false - -{{- if .ProviderSpec.SSHPublicKeys }} -ssh_authorized_keys: -{{- range .ProviderSpec.SSHPublicKeys }} -- "{{ . }}" -{{- end }} -{{- end }} - -write_files: -- path: /opt/bin/bootstrap - permissions: '0755' - encoding: b64 - content: | - {{ .Script }} -- path: /etc/kubernetes/bootstrap-kubelet.conf - permissions: '0600' - encoding: b64 - content: | - {{ .BootstrapKubeconfig }} -{{- if and (eq .ProviderSpec.CloudProvider "openstack") (or (eq .ProviderSpec.OperatingSystem "centos") (eq .ProviderSpec.OperatingSystem "rhel")) }} -{{- /* The normal way of setting it via cloud-init is broken, see */}} -{{- /* https://bugs.launchpad.net/cloud-init/+bug/1662542 */}} -- path: /etc/hostname - permissions: '0600' - content: | - {{ .MachineSpec.Name }} -{{ end }} -- path: /etc/systemd/system/bootstrap.service - permissions: '0644' - encoding: b64 - content: | - {{ .Service }} -{{- /* The default cloud-init configurations files have some bug on Digital Ocean which causes the machine to be in-accessible on 2nd cloud-init. Hence we disable network configuration */}} -{{- if and (eq .ProviderSpec.CloudProvider "digitalocean") (eq .ProviderSpec.OperatingSystem "ubuntu") }} -- path: /etc/cloud/cloud.cfg.d/99-custom-networking.cfg - permissions: '0644' - content: | - network: {config: disabled} -{{- end }} -runcmd: -- systemctl restart bootstrap.service -- systemctl daemon-reload -{{- if .RHELConfig }} -rh_subscription: -{{- if .RHELConfig.RHELUseSatelliteServer }} - org: "{{.RHELConfig.RHELOrganizationName}}" - activation-key: "{{.RHELConfig.RHELActivationKey}}" - server-hostname: {{ .RHELConfig.RHELSatelliteServer }} - rhsm-baseurl: https://{{ .RHELConfig.RHELSatelliteServer }}/pulp/repos -{{- else }} - username: "{{.RHELConfig.RHELSubscriptionManagerUser}}" - password: "{{.RHELConfig.RHELSubscriptionManagerPassword}}" - auto-attach: {{.RHELConfig.AttachSubscription}} -{{- end }} -{{- end }} -` - - ignitionBootstrapBinContentTemplate = `#!/bin/bash -set -xeuo pipefail -apt update && apt install -y curl jq -curl -s -k -v --header 'Authorization: Bearer {{ .Token }}' {{ .ServerURL }}/api/v1/namespaces/cloud-init-settings/secrets/{{ .SecretName }} | jq '.data["cloud-config"]' -r| base64 -d > /usr/share/oem/config.ign -touch /boot/flatcar/first_boot -systemctl disable bootstrap.service -rm /etc/systemd/system/bootstrap.service -rm /etc/machine-id -reboot -` - - ignitionTemplate = `passwd: -{{- if ne (len .SSHPublicKeys) 0 }} - users: - - name: core - ssh_authorized_keys: - {{range .SSHPublicKeys }}- {{.}} - {{end}} -{{- end }} -storage: - files: - - path: /etc/kubernetes/bootstrap-kubelet.conf - mode: 0600 - filesystem: root - contents: - inline: | -{{ .BootstrapKubeconfig | indent 10 }} - - path: /opt/bin/bootstrap - mode: 0755 - filesystem: root - contents: - inline: | -{{ .Script | indent 10}} -{{ if ne .CloudProviderName "aws" }} -{{- /* Never set the hostname on AWS nodes. 
Kubernetes(kube-proxy) requires the hostname to be the private dns name */}} - - path: /etc/hostname - mode: 0600 - filesystem: root - contents: - inline: '{{ .MachineSpec.Name }}' -{{ end }} -systemd: - units: - - name: bootstrap.service - enabled: true - contents: | -{{ .Service | indent 10 }} -` -) diff --git a/pkg/controller/machine/kubeconfig_test.go b/pkg/controller/machine/kubeconfig_test.go index 731080762..a98b702a1 100644 --- a/pkg/controller/machine/kubeconfig_test.go +++ b/pkg/controller/machine/kubeconfig_test.go @@ -31,20 +31,20 @@ import ( func TestUpdateSecretExpirationAndGetToken(t *testing.T) { tests := []struct { - initialExperirationTime time.Time - shouldRenew bool + initialExpirationTime time.Time + shouldRenew bool }{ { - initialExperirationTime: time.Now().Add(1 * time.Hour), - shouldRenew: false, + initialExpirationTime: time.Now().Add(1 * time.Hour), + shouldRenew: false, }, { - initialExperirationTime: time.Now().Add(25 * time.Minute), - shouldRenew: true, + initialExpirationTime: time.Now().Add(25 * time.Minute), + shouldRenew: true, }, { - initialExperirationTime: time.Now().Add(-25 * time.Minute), - shouldRenew: true, + initialExpirationTime: time.Now().Add(-25 * time.Minute), + shouldRenew: true, }, } @@ -58,7 +58,7 @@ func TestUpdateSecretExpirationAndGetToken(t *testing.T) { data := map[string][]byte{} data[tokenSecretKey] = []byte("tokenSecret") data[tokenIDKey] = []byte("tokenID") - data[expirationKey] = []byte(testCase.initialExperirationTime.Format(time.RFC3339)) + data[expirationKey] = []byte(testCase.initialExpirationTime.Format(time.RFC3339)) secret.Data = data reconciler.client = ctrlruntimefake. NewClientBuilder(). @@ -79,12 +79,12 @@ func TestUpdateSecretExpirationAndGetToken(t *testing.T) { } if testCase.shouldRenew && - bytes.Equal(updatedSecret.Data[expirationKey], []byte(testCase.initialExperirationTime.Format(time.RFC3339))) { + bytes.Equal(updatedSecret.Data[expirationKey], []byte(testCase.initialExpirationTime.Format(time.RFC3339))) { t.Errorf("Error, token secret did not update but was expected to!") } if !testCase.shouldRenew && - !bytes.Equal(updatedSecret.Data[expirationKey], []byte(testCase.initialExperirationTime.Format(time.RFC3339))) { + !bytes.Equal(updatedSecret.Data[expirationKey], []byte(testCase.initialExpirationTime.Format(time.RFC3339))) { t.Errorf("Error, token secret was expected to get updated, but did not happen!") } diff --git a/pkg/controller/machine/machine_controller.go b/pkg/controller/machine/machine_controller.go index df7fd73a4..9bd7472a4 100644 --- a/pkg/controller/machine/machine_controller.go +++ b/pkg/controller/machine/machine_controller.go @@ -47,6 +47,8 @@ import ( userdatamanager "github.com/kubermatic/machine-controller/pkg/userdata/manager" userdataplugin "github.com/kubermatic/machine-controller/pkg/userdata/plugin" "github.com/kubermatic/machine-controller/pkg/userdata/rhel" + "k8c.io/operating-system-manager/pkg/controllers/osc" + osmresources "k8c.io/operating-system-manager/pkg/controllers/osc/resources" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" @@ -95,7 +97,7 @@ const ( // cluster-api provider to match Nodes to Machines. AnnotationAutoscalerIdentifier = "cluster.k8s.io/machine" - provisioningSuffix = "osc-provisioning" + CloudInitNotReadyError = "cloud-init configuration to %s machine: %v is not ready yet" ) // Reconciler is the controller implementation for machine resources. 
@@ -744,9 +746,14 @@ func (r *Reconciler) ensureInstanceExistsForMachine( if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { klog.V(3).Infof("Validated machine spec of %s", machine.Name) - kubeconfig, err := r.createBootstrapKubeconfig(ctx, machine.Name) - if err != nil { - return nil, fmt.Errorf("failed to create bootstrap kubeconfig: %w", err) + var kubeconfig *clientcmdapi.Config + + // OSM will take care of the bootstrap kubeconfig and token by itself. + if !r.useOSM { + kubeconfig, err = r.createBootstrapKubeconfig(ctx, machine.Name) + if err != nil { + return nil, fmt.Errorf("failed to create bootstrap kubeconfig: %w", err) + } } cloudConfig, kubeletCloudProviderName, err := prov.GetCloudConfig(machine.Spec) @@ -808,33 +815,51 @@ func (r *Reconciler) ensureInstanceExistsForMachine( var userdata string if r.useOSM { - referencedMachineDeployment, err := controllerutil.GetMachineDeploymentNameForMachine(ctx, machine, r.client) + referencedMachineDeployment, machineDeploymentRevision, err := controllerutil.GetMachineDeploymentNameAndRevisionForMachine(ctx, machine, r.client) if err != nil { return nil, fmt.Errorf("failed to find machine's MachineDployment: %w", err) } - cloudConfigSecretName := fmt.Sprintf("%s-%s-%s", + // We need to ensure that both provisioning and bootstrapping secrets have been created, and that the revision + // matches the machine deployment revision provisioningSecretName := fmt.Sprintf(osmresources.CloudConfigSecretNamePattern, referencedMachineDeployment, machine.Namespace, - provisioningSuffix) + osmresources.ProvisioningCloudConfig) - // It is important to check if the secret holding cloud-config exists + // Ensure that the provisioning secret exists + provisioningSecret := &corev1.Secret{} if err := r.client.Get(ctx, - types.NamespacedName{Name: cloudConfigSecretName, Namespace: util.CloudInitNamespace}, - &corev1.Secret{}); err != nil { - klog.Errorf("Cloud init configurations for machine: %v is not ready yet", machine.Name) + types.NamespacedName{Name: provisioningSecretName, Namespace: util.CloudInitNamespace}, + provisioningSecret); err != nil { + klog.Errorf(CloudInitNotReadyError, osmresources.ProvisioningCloudConfig, machine.Name) return nil, err } - userdata, err = getOSMBootstrapUserdata(ctx, r.client, req, cloudConfigSecretName) - if err != nil { - return nil, fmt.Errorf("failed get OSM userdata: %w", err) + provisioningSecretRevision := provisioningSecret.Annotations[osc.MachineDeploymentRevision] + if provisioningSecretRevision != machineDeploymentRevision { + return nil, fmt.Errorf(CloudInitNotReadyError, osmresources.ProvisioningCloudConfig, machine.Name) } - userdata, err = cleanupTemplateOutput(userdata) - if err != nil { - return nil, fmt.Errorf("failed to cleanup user-data template: %w", err) + bootstrapSecretName := fmt.Sprintf(osmresources.CloudConfigSecretNamePattern, + referencedMachineDeployment, + machine.Namespace, + osmresources.BootstrapCloudConfig) + + bootstrapSecret := &corev1.Secret{} + if err := r.client.Get(ctx, + types.NamespacedName{Name: bootstrapSecretName, Namespace: util.CloudInitNamespace}, + bootstrapSecret); err != nil { + klog.Errorf(CloudInitNotReadyError, osmresources.BootstrapCloudConfig, machine.Name) + return nil, err + } + + bootstrapSecretRevision := bootstrapSecret.Annotations[osc.MachineDeploymentRevision] + if bootstrapSecretRevision != machineDeploymentRevision { + return nil, fmt.Errorf(CloudInitNotReadyError, osmresources.BootstrapCloudConfig, machine.Name) } + + userdata = 
getOSMBootstrapUserdata(req.MachineSpec.Name, *bootstrapSecret) } else { userdata, err = userdataPlugin.UserData(req) if err != nil { @@ -878,6 +903,23 @@ func (r *Reconciler) ensureInstanceExistsForMachine( addresses := providerInstance.Addresses() eventMessage := fmt.Sprintf("Found instance at cloud provider, addresses: %v", addresses) r.recorder.Event(machine, corev1.EventTypeNormal, "InstanceFound", eventMessage) + // It might happen that we got here, but we still don't have IP addresses + // for the instance. In that case it doesn't make sense to proceed because: + // * if we match Node by ProviderID, Machine will get NodeOwnerRef, but + // there will be no IP address on that Machine object. Since we + // successfully set NodeOwnerRef, Machine will not be reconciled again, + // so it will never get IP addresses. This breaks the NodeCSRApprover + // workflow because NodeCSRApprover cannot validate certificates without + // IP addresses, resulting in a broken Node + // * if we can't match Node by ProviderID, fallback to matching by IP + // address will not have any result because we still don't have IP + // addresses for that instance + // Considering that, we just retry after 15 seconds, hoping that we'll + // get IP addresses by then. + if len(addresses) == 0 { + return &reconcile.Result{RequeueAfter: 15 * time.Second}, nil + } + machineAddresses := []corev1.NodeAddress{} for address, addressType := range addresses { machineAddresses = append(machineAddresses, corev1.NodeAddress{Address: address, Type: addressType}) @@ -1040,19 +1082,13 @@ func (r *Reconciler) getNode(ctx context.Context, instance instance.Instance, pr return nil, false, err } - // We trim leading slashes in raw ID, since we always want three slashes in full ID - providerID := fmt.Sprintf("%s:///%s", provider, strings.TrimLeft(instance.ID(), "/")) for _, node := range nodes.Items { - if provider == providerconfigtypes.CloudProviderAzure { - // Azure IDs are case-insensitive - if strings.EqualFold(node.Spec.ProviderID, providerID) { - return node.DeepCopy(), true, nil - } - } else { - if node.Spec.ProviderID == providerID { - return node.DeepCopy(), true, nil - } + // Try to find Node by providerID. Should work if CCM is deployed. + if node := findNodeByProviderID(instance, provider, nodes.Items); node != nil { + klog.V(4).Infof("Found node %q by providerID", node.Name) + return node, true, nil } + // If we were unable to find Node by ProviderID, fallback to IP address matching. // This usually happens if there's no CCM deployed in the cluster. // @@ -1079,6 +1115,7 @@ func (r *Reconciler) getNode(ctx context.Context, instance instance.Instance, pr continue } if nodeAddress.Address == instanceAddress { + klog.V(4).Infof("Found node %q by IP address", node.Name) return node.DeepCopy(), true, nil } } @@ -1087,6 +1124,32 @@ func (r *Reconciler) getNode(ctx context.Context, instance instance.Instance, pr return nil, false, nil } +func findNodeByProviderID(instance instance.Instance, provider providerconfigtypes.CloudProvider, nodes []corev1.Node) *corev1.Node { + providerID := instance.ProviderID() + if providerID == "" { + return nil + } + + for _, node := range nodes { + if strings.EqualFold(node.Spec.ProviderID, providerID) { + return node.DeepCopy() + } + + // AWS has two different providerID notations: + // * aws:///<zone>/<instance-id> + // * aws:///<instance-id> + // The first case is handled above, while the second one is handled here. 
+ if provider == providerconfigtypes.CloudProviderAWS { + pid := strings.Split(node.Spec.ProviderID, "aws:///") + if len(pid) == 2 && pid[1] == instance.ID() { + return node.DeepCopy() + } + } + } + + return nil +} + func (r *Reconciler) ReadinessChecks(ctx context.Context) map[string]healthcheck.Check { return map[string]healthcheck.Check{ "valid-info-kubeconfig": func() error { diff --git a/pkg/controller/machine/machine_test.go b/pkg/controller/machine/machine_test.go index d528ec4f6..0fc1f5ccf 100644 --- a/pkg/controller/machine/machine_test.go +++ b/pkg/controller/machine/machine_test.go @@ -48,10 +48,11 @@ func init() { } type fakeInstance struct { - name string - id string - addresses map[string]corev1.NodeAddressType - status instance.Status + name string + id string + providerID string + addresses map[string]corev1.NodeAddressType + status instance.Status } func (i *fakeInstance) Name() string { @@ -62,6 +63,10 @@ func (i *fakeInstance) ID() string { return i.id } +func (i *fakeInstance) ProviderID() string { + return i.providerID +} + func (i *fakeInstance) Status() instance.Status { return i.status } @@ -70,11 +75,7 @@ func (i *fakeInstance) Addresses() map[string]corev1.NodeAddressType { return i.addresses } -func getTestNode(id, provider string) corev1.Node { - providerID := "" - if provider != "" { - providerID = fmt.Sprintf("%s:///%s", provider, id) - } +func getTestNode(id, providerID string) corev1.Node { return corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("node%s", id), @@ -98,10 +99,10 @@ func getTestNode(id, provider string) corev1.Node { } func TestController_GetNode(t *testing.T) { - node1 := getTestNode("1", "aws") - node2 := getTestNode("2", "openstack") + node1 := getTestNode("1", "aws:///i-1") + node2 := getTestNode("2", "openstack:///test") node3 := getTestNode("3", "") - node4 := getTestNode("4", "hetzner") + node4 := getTestNode("4", "hcloud://123") nodeList := []*corev1.Node{&node1, &node2, &node3, &node4} tests := []struct { @@ -134,7 +135,7 @@ func TestController_GetNode(t *testing.T) { resNode: &node1, exists: true, err: nil, - instance: &fakeInstance{id: "1", addresses: map[string]corev1.NodeAddressType{"": ""}}, + instance: &fakeInstance{id: "1", addresses: map[string]corev1.NodeAddressType{"": ""}, providerID: "aws:///i-1"}, }, { name: "node found by internal ip", @@ -182,7 +183,7 @@ func TestController_GetNode(t *testing.T) { resNode: &node4, exists: true, err: nil, - instance: &fakeInstance{id: "4", addresses: map[string]corev1.NodeAddressType{"": ""}}, + instance: &fakeInstance{id: "4", addresses: map[string]corev1.NodeAddressType{"": ""}, providerID: "hcloud://123"}, }, } @@ -257,7 +258,7 @@ func TestControllerDeletesMachinesOnJoinTimeout(t *testing.T) { joinTimeoutConfig: durationPtr(10 * time.Minute), }, { - name: "machine older than joinClusterTimout gets deleted", + name: "machine older than joinClusterTimeout gets deleted", creationTimestamp: metav1.Time{Time: time.Now().Add(-20 * time.Minute)}, hasNode: false, ownerReferences: []metav1.OwnerReference{{Name: "owner", Kind: "MachineSet"}}, @@ -265,7 +266,7 @@ func TestControllerDeletesMachinesOnJoinTimeout(t *testing.T) { joinTimeoutConfig: durationPtr(10 * time.Minute), }, { - name: "machine older than joinClusterTimout does not get deleted when ownerReference.Kind != MachineSet", + name: "machine older than joinClusterTimeout does not get deleted when ownerReference.Kind != MachineSet", creationTimestamp: metav1.Time{Time: time.Now().Add(-20 * time.Minute)}, hasNode: false, 
ownerReferences: []metav1.OwnerReference{{Name: "owner", Kind: "Cat"}}, @@ -647,3 +648,53 @@ func TestControllerDeleteNodeForMachine(t *testing.T) { }) } } + +func TestControllerFindNodeByProviderID(t *testing.T) { + tests := []struct { + name string + instance instance.Instance + provider providerconfigtypes.CloudProvider + nodes []corev1.Node + expectedNode bool + }{ + { + name: "aws providerID type 1", + instance: &fakeInstance{id: "99", providerID: "aws:///some-zone/i-99"}, + provider: providerconfigtypes.CloudProviderAWS, + nodes: []corev1.Node{ + getTestNode("1", "random"), + getTestNode("2", "aws:///some-zone/i-99"), + }, + expectedNode: true, + }, + { + name: "aws providerID type 2", + instance: &fakeInstance{id: "99", providerID: "aws:///i-99"}, + provider: providerconfigtypes.CloudProviderAWS, + nodes: []corev1.Node{ + getTestNode("1", "aws:///i-99"), + getTestNode("2", "random"), + }, + expectedNode: true, + }, + { + name: "azure providerID", + instance: &fakeInstance{id: "99", providerID: "azure:///test/test"}, + provider: providerconfigtypes.CloudProviderAWS, + nodes: []corev1.Node{ + getTestNode("1", "random"), + getTestNode("2", "azure:///test/test"), + }, + expectedNode: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + node := findNodeByProviderID(test.instance, test.provider, test.nodes) + if (node != nil) != test.expectedNode { + t.Errorf("expected %t, but got %t", test.expectedNode, (node != nil)) + } + }) + } +} diff --git a/pkg/controller/util/machine.go b/pkg/controller/util/machine.go index 85a6c5c21..06afef450 100644 --- a/pkg/controller/util/machine.go +++ b/pkg/controller/util/machine.go @@ -26,7 +26,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func GetMachineDeploymentNameForMachine(ctx context.Context, machine *clusterv1alpha1.Machine, c client.Client) (string, error) { +// LegacyMachineControllerUserDataLabel is set to true when machine-controller is used for managing machine configuration. 
+const LegacyMachineControllerUserDataLabel = "machine.clusters.k8s.io/legacy-machine-controller-user-data" + +func GetMachineDeploymentNameAndRevisionForMachine(ctx context.Context, machine *clusterv1alpha1.Machine, c client.Client) (string, string, error) { var ( machineSetName string machineDeploymentName string @@ -40,7 +43,7 @@ func GetMachineDeploymentNameForMachine(ctx context.Context, machine *clusterv1a if machineSetName != "" { machineSet := &clusterv1alpha1.MachineSet{} if err := c.Get(ctx, types.NamespacedName{Name: machineSetName, Namespace: "kube-system"}, machineSet); err != nil { - return "", err + return "", "", err } for _, ownerRef := range machineSet.OwnerReferences { @@ -49,10 +52,11 @@ func GetMachineDeploymentNameForMachine(ctx context.Context, machine *clusterv1a } } + revision := machineSet.Annotations[RevisionAnnotation] if machineDeploymentName != "" { - return machineDeploymentName, nil + return machineDeploymentName, revision, nil } } - return "", fmt.Errorf("failed to find machine deployment reference for the machine %s", machine.Name) + return "", "", fmt.Errorf("failed to find machine deployment reference for the machine %s", machine.Name) } diff --git a/pkg/providerconfig/types.go b/pkg/providerconfig/types.go index 27e73c6a5..0b0879a80 100644 --- a/pkg/providerconfig/types.go +++ b/pkg/providerconfig/types.go @@ -184,6 +184,7 @@ func DefaultOperatingSystemSpec( osys providerconfigtypes.OperatingSystem, cloudProvider providerconfigtypes.CloudProvider, operatingSystemSpec runtime.RawExtension, + operatingSystemManagerEnabled bool, ) (runtime.RawExtension, error) { switch osys { case providerconfigtypes.OperatingSystemAmazonLinux2: @@ -191,7 +192,7 @@ func DefaultOperatingSystemSpec( case providerconfigtypes.OperatingSystemCentOS: return centos.DefaultConfig(operatingSystemSpec), nil case providerconfigtypes.OperatingSystemFlatcar: - return flatcar.DefaultConfigForCloud(operatingSystemSpec, cloudProvider), nil + return flatcar.DefaultConfigForCloud(operatingSystemSpec, cloudProvider, operatingSystemManagerEnabled), nil case providerconfigtypes.OperatingSystemRHEL: return rhel.DefaultConfig(operatingSystemSpec), nil case providerconfigtypes.OperatingSystemSLES: diff --git a/pkg/providerconfig/types/types.go b/pkg/providerconfig/types/types.go index b9ddbbcac..b4e12ad73 100644 --- a/pkg/providerconfig/types/types.go +++ b/pkg/providerconfig/types/types.go @@ -21,6 +21,7 @@ import ( "encoding/json" "errors" "fmt" + "strconv" clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" @@ -226,7 +227,16 @@ func (configVarString *ConfigVarString) UnmarshalJSON(b []byte) error { if !bytes.HasPrefix(b, []byte("{")) { b = bytes.TrimPrefix(b, []byte(`"`)) b = bytes.TrimSuffix(b, []byte(`"`)) - configVarString.Value = string(b) + + // `Unquote` expects the input string to be inside quotation marks. + // Since we can have a string without any quotations, in which case `TrimPrefix` and + // `TrimSuffix` will be noop. We explicitly add quotation marks to the input string + // to make sure that `Unquote` never fails. 
+ s, err := strconv.Unquote("\"" + string(b) + "\"") + if err != nil { + return err + } + configVarString.Value = s return nil } // This type must have the same fields as ConfigVarString but not diff --git a/pkg/providerconfig/types/types_test.go b/pkg/providerconfig/types/types_test.go index 7b0d8601a..d1d7252bd 100644 --- a/pkg/providerconfig/types/types_test.go +++ b/pkg/providerconfig/types/types_test.go @@ -21,7 +21,7 @@ import ( "reflect" "testing" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/utils/pointer" ) @@ -160,6 +160,7 @@ func TestConfigVarBoolMarshalling(t *testing.T) { func TestConfigVarStringMarshallingAndUnmarshalling(t *testing.T) { testCases := []ConfigVarString{ {Value: "val"}, + {Value: "spe /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - - mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=docker.service - Requires=docker.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=docker \ - --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ - --network-plugin=cni \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - 
logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/docker/daemon.json - permissions: "0644" - content: | - {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"}} - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - 
-runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.22-aws.yaml b/pkg/userdata/amzn2/testdata/kubelet-v1.22-aws.yaml index b8845879e..42864bd2e 100644 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.22-aws.yaml +++ b/pkg/userdata/amzn2/testdata/kubelet-v1.22-aws.yaml @@ -90,7 +90,7 @@ write_files: yum install -y \ containerd-1.4* \ - docker-19.03* \ + docker-20.10* \ yum-plugin-versionlock yum versionlock add docker containerd diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.21-aws-external.yaml b/pkg/userdata/amzn2/testdata/kubelet-v1.23-aws-external.yaml similarity index 98% rename from pkg/userdata/amzn2/testdata/kubelet-v1.21-aws-external.yaml rename to pkg/userdata/amzn2/testdata/kubelet-v1.23-aws-external.yaml index 38cf73097..77275d458 100644 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.21-aws-external.yaml +++ b/pkg/userdata/amzn2/testdata/kubelet-v1.23-aws-external.yaml @@ -90,7 +90,7 @@ write_files: yum install -y \ containerd-1.4* \ - docker-19.03* \ + docker-20.10* \ yum-plugin-versionlock yum versionlock add docker containerd @@ -139,7 +139,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.8}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -225,8 +225,6 @@ write_files: --lock-file=/tmp/kubelet.lock \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.23-aws.yaml b/pkg/userdata/amzn2/testdata/kubelet-v1.23-aws.yaml index 96ac2eeda..10cf2cebe 100644 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.23-aws.yaml +++ b/pkg/userdata/amzn2/testdata/kubelet-v1.23-aws.yaml @@ -90,7 +90,7 @@ write_files: yum install -y \ containerd-1.4* \ - docker-19.03* \ + docker-20.10* \ yum-plugin-versionlock yum versionlock add docker containerd @@ -139,7 +139,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.23.0}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.21-vsphere-mirrors.yaml b/pkg/userdata/amzn2/testdata/kubelet-v1.23-vsphere-mirrors.yaml similarity index 98% rename from pkg/userdata/amzn2/testdata/kubelet-v1.21-vsphere-mirrors.yaml rename to pkg/userdata/amzn2/testdata/kubelet-v1.23-vsphere-mirrors.yaml index 3ecb5eba1..bb9ac745e 100644 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.21-vsphere-mirrors.yaml +++ b/pkg/userdata/amzn2/testdata/kubelet-v1.23-vsphere-mirrors.yaml @@ -103,7 +103,7 @@ write_files: yum install -y \ containerd-1.4* \ - docker-19.03* \ + docker-20.10* \ yum-plugin-versionlock yum versionlock add docker containerd @@ -152,7 +152,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.8}" + 
KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -242,8 +242,6 @@ write_files: --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.21-vsphere-proxy.yaml b/pkg/userdata/amzn2/testdata/kubelet-v1.23-vsphere-proxy.yaml similarity index 98% rename from pkg/userdata/amzn2/testdata/kubelet-v1.21-vsphere-proxy.yaml rename to pkg/userdata/amzn2/testdata/kubelet-v1.23-vsphere-proxy.yaml index 563d9a827..9c36c7abd 100644 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.21-vsphere-proxy.yaml +++ b/pkg/userdata/amzn2/testdata/kubelet-v1.23-vsphere-proxy.yaml @@ -103,7 +103,7 @@ write_files: yum install -y \ containerd-1.4* \ - docker-19.03* \ + docker-20.10* \ yum-plugin-versionlock yum versionlock add docker containerd @@ -152,7 +152,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.8}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -242,8 +242,6 @@ write_files: --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.21-vsphere.yaml b/pkg/userdata/amzn2/testdata/kubelet-v1.23-vsphere.yaml similarity index 98% rename from pkg/userdata/amzn2/testdata/kubelet-v1.21-vsphere.yaml rename to pkg/userdata/amzn2/testdata/kubelet-v1.23-vsphere.yaml index 48dbf051b..c6eb43a78 100644 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.21-vsphere.yaml +++ b/pkg/userdata/amzn2/testdata/kubelet-v1.23-vsphere.yaml @@ -95,7 +95,7 @@ write_files: yum install -y \ containerd-1.4* \ - docker-19.03* \ + docker-20.10* \ yum-plugin-versionlock yum versionlock add docker containerd @@ -144,7 +144,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.8}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -233,8 +233,6 @@ write_files: --lock-file=/tmp/kubelet.lock \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/centos/provider.go b/pkg/userdata/centos/provider.go index a2aaae6a3..3943bf0a8 100644 --- a/pkg/userdata/centos/provider.go +++ b/pkg/userdata/centos/provider.go @@ -66,11 +66,6 @@ func (p Provider) UserData(req 
plugin.UserDataRequest) (string, error) { return "", fmt.Errorf("failed to parse OperatingSystemSpec: %w", err) } - serverAddr, err := userdatahelper.GetServerAddressFromKubeconfig(req.Kubeconfig) - if err != nil { - return "", fmt.Errorf("error extracting server address from kubeconfig: %w", err) - } - kubeconfigString, err := userdatahelper.StringifyKubeconfig(req.Kubeconfig) if err != nil { return "", err @@ -102,7 +97,6 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { ProviderSpec *providerconfigtypes.Config OSConfig *Config KubeletVersion string - ServerAddr string Kubeconfig string KubernetesCACert string NodeIPScript string @@ -118,10 +112,9 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { ProviderSpec: pconfig, OSConfig: centosConfig, KubeletVersion: kubeletVersion.String(), - ServerAddr: serverAddr, Kubeconfig: kubeconfigString, KubernetesCACert: kubernetesCACert, - NodeIPScript: userdatahelper.SetupNodeIPEnvScript(), + NodeIPScript: userdatahelper.SetupNodeIPEnvScript(pconfig.Network.GetIPFamily()), ExtraKubeletFlags: crEngine.KubeletFlags(), ContainerRuntimeScript: crScript, ContainerRuntimeConfigFileName: crEngine.ConfigFileName(), @@ -279,7 +272,7 @@ write_files: - path: "/etc/systemd/system/kubelet.service" content: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} +{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} - path: "/etc/kubernetes/cloud-config" permissions: "0600" diff --git a/pkg/userdata/centos/provider_test.go b/pkg/userdata/centos/provider_test.go index 37f36be41..68e8cb472 100644 --- a/pkg/userdata/centos/provider_test.go +++ b/pkg/userdata/centos/provider_test.go @@ -100,89 +100,80 @@ func TestUserDataGeneration(t *testing.T) { tests := []userDataTestCase{ { - name: "kubelet-v1.21-aws", + name: "kubelet-v1.22-aws", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.21.10", + Kubelet: "1.22.7", }, }, }, { - name: "kubelet-v1.21-aws-external", + name: "kubelet-v1.23-aws", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.21.10", + Kubelet: "1.23.5", }, }, - externalCloudProvider: true, }, { - name: "kubelet-v1.21-vsphere", + name: "kubelet-v1.23-nutanix", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.21.10", + Kubelet: "1.23.5", }, }, - cloudProviderName: stringPtr("vsphere"), + cloudProviderName: stringPtr("nutanix"), }, { - name: "kubelet-v1.21-vsphere-proxy", + name: "kubelet-v1.23-aws-external", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.21.10", + Kubelet: "1.23.5", }, }, - cloudProviderName: stringPtr("vsphere"), - httpProxy: "http://192.168.100.100:3128", - noProxy: "192.168.1.0", - insecureRegistries: "192.168.100.100:5000, 10.0.0.1:5000", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", + externalCloudProvider: true, }, { - name: "kubelet-v1.21-vsphere-mirrors", + name: 
"kubelet-v1.23-vsphere", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.21.10", + Kubelet: "1.23.5", }, }, cloudProviderName: stringPtr("vsphere"), - httpProxy: "http://192.168.100.100:3128", - noProxy: "192.168.1.0", - registryMirrors: "https://registry.docker-cn.com", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", }, { - name: "kubelet-v1.22-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.22.7", - }, - }, - }, - { - name: "kubelet-v1.23-aws", + name: "kubelet-v1.23-vsphere-proxy", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ Kubelet: "1.23.5", }, }, + cloudProviderName: stringPtr("vsphere"), + httpProxy: "http://192.168.100.100:3128", + noProxy: "192.168.1.0", + insecureRegistries: "192.168.100.100:5000, 10.0.0.1:5000", + pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", }, { - name: "kubelet-v1.23-nutanix", + name: "kubelet-v1.23-vsphere-mirrors", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ Kubelet: "1.23.5", }, }, - cloudProviderName: stringPtr("nutanix"), + cloudProviderName: stringPtr("vsphere"), + httpProxy: "http://192.168.100.100:3128", + noProxy: "192.168.1.0", + registryMirrors: "https://registry.docker-cn.com", + pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", }, { name: "kubelet-v1.24-aws", @@ -221,10 +212,10 @@ func TestUserDataGeneration(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - emtpyProviderSpec := clusterv1alpha1.ProviderSpec{ + emptyProviderSpec := clusterv1alpha1.ProviderSpec{ Value: &runtime.RawExtension{}, } - test.spec.ProviderSpec = emtpyProviderSpec + test.spec.ProviderSpec = emptyProviderSpec var cloudProvider *fakeCloudConfigProvider if test.cloudProviderName != nil { cloudProvider = &fakeCloudConfigProvider{ diff --git a/pkg/userdata/centos/testdata/kubelet-v1.21-aws.yaml b/pkg/userdata/centos/testdata/kubelet-v1.21-aws.yaml deleted file mode 100644 index 442917bd7..000000000 --- a/pkg/userdata/centos/testdata/kubelet-v1.21-aws.yaml +++ /dev/null @@ -1,446 +0,0 @@ -#cloud-config - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. 
- SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - source /etc/os-release - if [ "$ID" == "centos" ] && [ "$VERSION_ID" == "8" ]; then - sudo sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* - sudo sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* - fi - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=docker.service - Requires=docker.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=docker \ - --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ - --network-plugin=cni \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - 
logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/docker/daemon.json - permissions: "0644" - content: | - {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"}} - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - 
-runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/centos/testdata/kubelet-v1.22-aws.yaml b/pkg/userdata/centos/testdata/kubelet-v1.22-aws.yaml index de96a9328..92555b815 100644 --- a/pkg/userdata/centos/testdata/kubelet-v1.22-aws.yaml +++ b/pkg/userdata/centos/testdata/kubelet-v1.22-aws.yaml @@ -98,9 +98,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/centos/testdata/kubelet-v1.21-aws-external.yaml b/pkg/userdata/centos/testdata/kubelet-v1.23-aws-external.yaml similarity index 98% rename from pkg/userdata/centos/testdata/kubelet-v1.21-aws-external.yaml rename to pkg/userdata/centos/testdata/kubelet-v1.23-aws-external.yaml index f1b92e886..da8f203ec 100644 --- a/pkg/userdata/centos/testdata/kubelet-v1.21-aws-external.yaml +++ b/pkg/userdata/centos/testdata/kubelet-v1.23-aws-external.yaml @@ -98,9 +98,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io @@ -149,7 +149,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -235,8 +235,6 @@ write_files: --lock-file=/tmp/kubelet.lock \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/centos/testdata/kubelet-v1.23-aws.yaml b/pkg/userdata/centos/testdata/kubelet-v1.23-aws.yaml index e912b1131..bcb8c3ade 100644 --- a/pkg/userdata/centos/testdata/kubelet-v1.23-aws.yaml +++ b/pkg/userdata/centos/testdata/kubelet-v1.23-aws.yaml @@ -98,9 +98,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/centos/testdata/kubelet-v1.23-nutanix.yaml b/pkg/userdata/centos/testdata/kubelet-v1.23-nutanix.yaml index 50a5ee4a0..ad28ad691 100644 --- a/pkg/userdata/centos/testdata/kubelet-v1.23-nutanix.yaml +++ b/pkg/userdata/centos/testdata/kubelet-v1.23-nutanix.yaml @@ -105,9 +105,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/centos/testdata/kubelet-v1.21-vsphere-mirrors.yaml b/pkg/userdata/centos/testdata/kubelet-v1.23-vsphere-mirrors.yaml similarity index 98% rename from pkg/userdata/centos/testdata/kubelet-v1.21-vsphere-mirrors.yaml rename to pkg/userdata/centos/testdata/kubelet-v1.23-vsphere-mirrors.yaml index 10d664f39..11c411561 100644 --- a/pkg/userdata/centos/testdata/kubelet-v1.21-vsphere-mirrors.yaml +++ b/pkg/userdata/centos/testdata/kubelet-v1.23-vsphere-mirrors.yaml @@ -111,9 +111,9 @@ write_files: EOF yum install -y \ - 
docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io @@ -162,7 +162,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -252,8 +252,6 @@ write_files: --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/centos/testdata/kubelet-v1.21-vsphere-proxy.yaml b/pkg/userdata/centos/testdata/kubelet-v1.23-vsphere-proxy.yaml similarity index 98% rename from pkg/userdata/centos/testdata/kubelet-v1.21-vsphere-proxy.yaml rename to pkg/userdata/centos/testdata/kubelet-v1.23-vsphere-proxy.yaml index 4ef234434..8cc2eb137 100644 --- a/pkg/userdata/centos/testdata/kubelet-v1.21-vsphere-proxy.yaml +++ b/pkg/userdata/centos/testdata/kubelet-v1.23-vsphere-proxy.yaml @@ -111,9 +111,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io @@ -162,7 +162,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -252,8 +252,6 @@ write_files: --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/centos/testdata/kubelet-v1.21-vsphere.yaml b/pkg/userdata/centos/testdata/kubelet-v1.23-vsphere.yaml similarity index 98% rename from pkg/userdata/centos/testdata/kubelet-v1.21-vsphere.yaml rename to pkg/userdata/centos/testdata/kubelet-v1.23-vsphere.yaml index be2791b17..aee1a886d 100644 --- a/pkg/userdata/centos/testdata/kubelet-v1.21-vsphere.yaml +++ b/pkg/userdata/centos/testdata/kubelet-v1.23-vsphere.yaml @@ -103,9 +103,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io @@ -154,7 +154,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -243,8 +243,6 @@ 
write_files: --lock-file=/tmp/kubelet.lock \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/flatcar/flatcar.go b/pkg/userdata/flatcar/flatcar.go index cdb5166e9..c76c63a35 100644 --- a/pkg/userdata/flatcar/flatcar.go +++ b/pkg/userdata/flatcar/flatcar.go @@ -44,19 +44,29 @@ type Config struct { } func DefaultConfig(operatingSystemSpec runtime.RawExtension) runtime.RawExtension { - return DefaultConfigForCloud(operatingSystemSpec, "") + // The webhook has already performed the defaulting at this point, so the values of the + // cloudProvider and operatingSystemManagerEnabled parameters are insignificant here. + return DefaultConfigForCloud(operatingSystemSpec, "", true) } -func DefaultConfigForCloud(operatingSystemSpec runtime.RawExtension, cloudProvider types.CloudProvider) runtime.RawExtension { +func DefaultConfigForCloud(operatingSystemSpec runtime.RawExtension, cloudProvider types.CloudProvider, operatingSystemManagerEnabled bool) runtime.RawExtension { + // If the userdata is generated by machine-controller and the selected cloud provider is AWS, we + // force cloud-init, because AWS has a very low cap on the maximum size of user-data. With ignition, + // we always exceed that limit, which prevents new EC2 instances from being created. osSpec := Config{} - if operatingSystemSpec.Raw != nil { _ = json.Unmarshal(operatingSystemSpec.Raw, &osSpec) } - if cloudProvider == types.CloudProviderAWS { + // This is not required when OSM is enabled. + if cloudProvider == types.CloudProviderAWS && !operatingSystemManagerEnabled { osSpec.ProvisioningUtility = CloudInit } + // Always default to ignition if no value was provided + if osSpec.ProvisioningUtility == "" { + osSpec.ProvisioningUtility = Ignition + } + operatingSystemSpec.Raw, _ = json.Marshal(osSpec) return operatingSystemSpec diff --git a/pkg/userdata/flatcar/provider.go b/pkg/userdata/flatcar/provider.go index 471ce53ec..8e89415bb 100644 --- a/pkg/userdata/flatcar/provider.go +++ b/pkg/userdata/flatcar/provider.go @@ -120,7 +120,7 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { KubeletVersion: kubeletVersion.String(), Kubeconfig: kubeconfigString, KubernetesCACert: kubernetesCACert, - NodeIPScript: userdatahelper.SetupNodeIPEnvScript(), + NodeIPScript: userdatahelper.SetupNodeIPEnvScript(pconfig.Network.GetIPFamily()), ExtraKubeletFlags: crEngine.KubeletFlags(), ContainerRuntimeScript: crScript, ContainerRuntimeConfigFileName: crEngine.ConfigFileName(), @@ -295,13 +295,17 @@ systemd: contents: | [Service] EnvironmentFile=/etc/kubernetes/nodeip.conf + - name: resolv.conf + contents: | + [Service] + Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - name: 40-download.conf contents: | [Unit] Requires=download-script.service After=download-script.service contents: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .PauseImage .MachineSpec.Taints .ExtraKubeletFlags false | indent 8 }} +{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags false | indent 8 }} storage: files: @@ -617,13 +621,17 @@ coreos: content: | [Service] 
EnvironmentFile=/etc/kubernetes/nodeip.conf + - name: resolv.conf + content: | + [Service] + Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - name: 40-download.conf content: | [Unit] Requires=download-script.service After=download-script.service content: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .PauseImage .MachineSpec.Taints .ExtraKubeletFlags false | indent 6 }} +{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags false | indent 6 }} - name: apply-sysctl-settings.service enable: true diff --git a/pkg/userdata/flatcar/provider_test.go b/pkg/userdata/flatcar/provider_test.go index 18688dd67..cc82c03a9 100644 --- a/pkg/userdata/flatcar/provider_test.go +++ b/pkg/userdata/flatcar/provider_test.go @@ -121,37 +121,6 @@ func TestUserDataGeneration(t *testing.T) { t.Parallel() tests := []userDataTestCase{ - { - name: "ignition_v1.21.10", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "vsphere", - SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, - CAPublicKey: "ssh-rsa AAABBB", - Network: &providerconfigtypes.NetworkConfig{ - CIDR: "192.168.81.4/24", - Gateway: "192.168.81.1", - DNS: providerconfigtypes.DNSConfig{ - Servers: []string{"8.8.8.8"}, - }, - }, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "v1.21.10", - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "vsphere", - config: "{vsphere-config:true}", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - osConfig: &Config{ - DisableAutoUpdate: true, - ProvisioningUtility: Ignition, - }, - }, { name: "ignition_v1.22.7", providerSpec: &providerconfigtypes.Config{ @@ -276,37 +245,6 @@ func TestUserDataGeneration(t *testing.T) { ProvisioningUtility: Ignition, }, }, - { - name: "cloud-init_v1.21.10", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "anexia", - SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, - CAPublicKey: "ssh-rsa AAABBB", - Network: &providerconfigtypes.NetworkConfig{ - CIDR: "192.168.81.4/24", - Gateway: "192.168.81.1", - DNS: providerconfigtypes.DNSConfig{ - Servers: []string{"8.8.8.8"}, - }, - }, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "v1.21.10", - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "anexia", - config: "{anexia-config:true}", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - osConfig: &Config{ - DisableAutoUpdate: true, - ProvisioningUtility: CloudInit, - }, - }, { name: "cloud-init_v1.22.7", providerSpec: &providerconfigtypes.Config{ @@ -413,7 +351,7 @@ func TestUserDataGeneration(t *testing.T) { Name: "node1", }, Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "v1.21.10", + Kubelet: "v1.24.0", }, }, ccProvider: &fakeCloudConfigProvider{}, diff --git a/pkg/userdata/flatcar/testdata/cloud-init_v1.21.10.yaml b/pkg/userdata/flatcar/testdata/cloud-init_v1.21.10.yaml deleted file mode 100644 index a1b39a561..000000000 --- a/pkg/userdata/flatcar/testdata/cloud-init_v1.21.10.yaml +++ /dev/null @@ -1,478 +0,0 @@ -#cloud-config - -users: -- name: core - ssh_authorized_keys: - - ssh-rsa AAABBB - - ssh-rsa CCCDDD - 
- -coreos: - units: - - name: static-nic.network - content: | - [Match] - # Because of difficulty predicting specific NIC names on different cloud providers, - # we only support static addressing on VSphere. There should be a single NIC attached - # that we will match by name prefix 'en' which denotes ethernet devices. - Name=en* - - [Network] - DHCP=no - Address=192.168.81.4/24 - Gateway=192.168.81.1 - DNS=8.8.8.8 - - - name: update-engine.service - command: stop - mask: true - - name: locksmithd.service - command: stop - mask: true - - name: download-script.service - enable: true - command: start - content: | - [Unit] - Requires=network-online.target - After=network-online.target - [Service] - Type=oneshot - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/download.sh - [Install] - WantedBy=multi-user.target - - - name: kubelet-healthcheck.service - enable: true - command: start - drop-ins: - - name: 40-download.conf - content: | - [Unit] - Requires=download-script.service - After=download-script.service - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - - - name: nodeip.service - enable: true - command: start - content: | - [Unit] - Description=Setup Kubelet Node IP Env - Requires=network-online.target - After=network-online.target - - [Service] - ExecStart=/opt/bin/setup_net_env.sh - RemainAfterExit=yes - Type=oneshot - [Install] - WantedBy=multi-user.target - - - name: kubelet.service - enable: true - command: start - drop-ins: - - name: 10-nodeip.conf - content: | - [Service] - EnvironmentFile=/etc/kubernetes/nodeip.conf - - name: 40-download.conf - content: | - [Unit] - Requires=download-script.service - After=download-script.service - content: | - [Unit] - After=docker.service - Requires=docker.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=anexia \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=docker \ - --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ - --network-plugin=cni \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - - - name: apply-sysctl-settings.service - enable: true - command: start - content: | - [Unit] - Requires=network-online.target - After=network-online.target - [Service] - Type=oneshot - ExecStart=/opt/bin/apply_sysctl_settings.sh - [Install] - WantedBy=multi-user.target - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - permissions: "0644" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/etc/kubernetes/kubelet.conf" - permissions: "0644" - content: | - 
apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /opt/load-kernel-modules.sh - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: /etc/sysctl.d/k8s.conf - permissions: "0644" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: /etc/kubernetes/bootstrap-kubelet.conf - permissions: "0400" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: /etc/kubernetes/cloud-config - permissions: "0400" - content: | - {anexia-config:true} - -- path: /etc/kubernetes/pki/ca.crt - permissions: "0644" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - 
ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - - -- path: /etc/hostname - permissions: "0600" - content: 'node1' - -- path: /etc/ssh/sshd_config - permissions: "0600" - user: root - content: | - # Use most defaults for sshd configuration. - Subsystem sftp internal-sftp - ClientAliveInterval 180 - UseDNS no - UsePAM yes - PrintLastLog no # handled by PAM - PrintMotd no # handled by PAM - PasswordAuthentication no - ChallengeResponseAuthentication no - -- path: /opt/bin/download.sh - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - opt_bin=/opt/bin - usr_local_bin=/usr/local/bin - cni_bin_dir=/opt/cni/bin - mkdir -p /etc/cni/net.d /etc/kubernetes/dynamic-config-dir /etc/kubernetes/manifests "$opt_bin" "$cni_bin_dir" - arch=${HOST_ARCH-} - if [ -z "$arch" ] - then - case $(uname -m) in - x86_64) - arch="amd64" - ;; - aarch64) - arch="arm64" - ;; - *) - echo "unsupported CPU architecture, exiting" - exit 1 - ;; - esac - fi - CNI_VERSION="${CNI_VERSION:-v0.8.7}" - cni_base_url="https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION" - cni_filename="cni-plugins-linux-$arch-$CNI_VERSION.tgz" - curl -Lfo "$cni_bin_dir/$cni_filename" "$cni_base_url/$cni_filename" - cni_sum=$(curl -Lf "$cni_base_url/$cni_filename.sha256") - cd "$cni_bin_dir" - sha256sum -c <<<"$cni_sum" - tar xvf "$cni_filename" - rm -f "$cni_filename" - cd - - CRI_TOOLS_RELEASE="${CRI_TOOLS_RELEASE:-v1.22.0}" - cri_tools_base_url="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRI_TOOLS_RELEASE}" - cri_tools_filename="crictl-${CRI_TOOLS_RELEASE}-linux-${arch}.tar.gz" - curl -Lfo "$opt_bin/$cri_tools_filename" "$cri_tools_base_url/$cri_tools_filename" - cri_tools_sum=$(curl -Lf "$cri_tools_base_url/$cri_tools_filename.sha256" | sed 's/\*\///') - cd "$opt_bin" - sha256sum -c <<<"$cri_tools_sum" - tar xvf "$cri_tools_filename" - rm -f "$cri_tools_filename" - ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" - cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" - kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" - kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" - kube_sum_file="$kube_dir/sha256" - mkdir -p "$kube_dir" - : >"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" 
"$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - - systemctl daemon-reload - systemctl enable --now docker - - systemctl disable download-script.service - -- path: /opt/bin/apply_sysctl_settings.sh - permissions: "0755" - user: root - content: | - #!/bin/bash - set -xeuo pipefail - sysctl --system - systemctl disable apply-sysctl-settings.service - -- path: "/etc/ssh/trusted-user-ca-keys.pem" - content: | - ssh-rsa AAABBB - -- path: "/etc/ssh/sshd_config" - content: | - TrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem - CASignatureAlgorithms ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,rsa-sha2-512,rsa-sha2-256,ssh-rsa - append: true - -- path: /etc/docker/daemon.json - permissions: "0644" - user: root - content: | - {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"}} - -- path: /etc/crictl.yaml - permissions: "0644" - user: root - content: | - runtime-endpoint: unix:///run/containerd/containerd.sock diff --git a/pkg/userdata/flatcar/testdata/cloud-init_v1.22.7.yaml b/pkg/userdata/flatcar/testdata/cloud-init_v1.22.7.yaml index 33c02cf25..e899ddd45 100644 --- a/pkg/userdata/flatcar/testdata/cloud-init_v1.22.7.yaml +++ b/pkg/userdata/flatcar/testdata/cloud-init_v1.22.7.yaml @@ -88,6 +88,10 @@ coreos: content: | [Service] EnvironmentFile=/etc/kubernetes/nodeip.conf + - name: resolv.conf + content: | + [Service] + Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - name: 40-download.conf content: | [Unit] diff --git a/pkg/userdata/flatcar/testdata/cloud-init_v1.23.5.yaml b/pkg/userdata/flatcar/testdata/cloud-init_v1.23.5.yaml index fabfc11fc..a222e8af1 100644 --- a/pkg/userdata/flatcar/testdata/cloud-init_v1.23.5.yaml +++ b/pkg/userdata/flatcar/testdata/cloud-init_v1.23.5.yaml @@ -88,6 +88,10 @@ coreos: content: | [Service] EnvironmentFile=/etc/kubernetes/nodeip.conf + - name: resolv.conf + content: | + [Service] + Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - name: 40-download.conf content: | [Unit] diff --git a/pkg/userdata/flatcar/testdata/cloud-init_v1.24.0.yaml b/pkg/userdata/flatcar/testdata/cloud-init_v1.24.0.yaml index 941d78668..b1593ad03 100644 --- a/pkg/userdata/flatcar/testdata/cloud-init_v1.24.0.yaml +++ b/pkg/userdata/flatcar/testdata/cloud-init_v1.24.0.yaml @@ -88,6 +88,10 @@ coreos: content: | [Service] EnvironmentFile=/etc/kubernetes/nodeip.conf + - name: resolv.conf + content: | + [Service] + Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - name: 40-download.conf content: | [Unit] diff --git a/pkg/userdata/flatcar/testdata/containerd.yaml b/pkg/userdata/flatcar/testdata/containerd.yaml index 3f98342f8..577086ee0 100644 --- a/pkg/userdata/flatcar/testdata/containerd.yaml +++ b/pkg/userdata/flatcar/testdata/containerd.yaml @@ -73,6 +73,10 @@ coreos: content: | [Service] EnvironmentFile=/etc/kubernetes/nodeip.conf + - name: resolv.conf + content: | + [Service] + Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - name: 40-download.conf content: | [Unit] @@ -110,9 +114,6 @@ coreos: --lock-file=/tmp/kubelet.lock \ --container-runtime=remote \ 
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ - --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} [Install] @@ -399,7 +400,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" + KUBE_VERSION="${KUBE_VERSION:-v1.24.0}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" diff --git a/pkg/userdata/flatcar/testdata/ignition_v1.21.10.json b/pkg/userdata/flatcar/testdata/ignition_v1.21.10.json deleted file mode 100644 index b40d43f74..000000000 --- a/pkg/userdata/flatcar/testdata/ignition_v1.21.10.json +++ /dev/null @@ -1 +0,0 @@ -{"ignition":{"config":{},"security":{"tls":{}},"timeouts":{},"version":"2.2.0"},"networkd":{"units":[{"contents":"[Match]\n# Because of difficulty predicting specific NIC names on different cloud providers,\n# we only support static addressing on VSphere. There should be a single NIC attached\n# that we will match by name prefix 'en' which denotes ethernet devices.\nName=en*\n\n[Network]\nDHCP=no\nAddress=192.168.81.4/24\nGateway=192.168.81.1\nDNS=8.8.8.8\n","name":"static-nic.network"}]},"passwd":{"users":[{"name":"core","sshAuthorizedKeys":["ssh-rsa AAABBB","ssh-rsa CCCDDD"]}]},"storage":{"files":[{"filesystem":"root","path":"/etc/systemd/journald.conf.d/max_disk_use.conf","contents":{"source":"data:,%5BJournal%5D%0ASystemMaxUse%3D5G%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/kubernetes/kubelet.conf","contents":{"source":"data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1beta1%0Aauthentication%3A%0A%20%20anonymous%3A%0A%20%20%20%20enabled%3A%20false%0A%20%20webhook%3A%0A%20%20%20%20cacheTTL%3A%200s%0A%20%20%20%20enabled%3A%20true%0A%20%20x509%3A%0A%20%20%20%20clientCAFile%3A%20%2Fetc%2Fkubernetes%2Fpki%2Fca.crt%0Aauthorization%3A%0A%20%20mode%3A%20Webhook%0A%20%20webhook%3A%0A%20%20%20%20cacheAuthorizedTTL%3A%200s%0A%20%20%20%20cacheUnauthorizedTTL%3A%200s%0AcgroupDriver%3A%20systemd%0AclusterDNS%3A%0A-%2010.10.10.10%0AclusterDomain%3A%20cluster.local%0AcontainerLogMaxSize%3A%20100Mi%0AcpuManagerReconcilePeriod%3A%200s%0AevictionHard%3A%0A%20%20imagefs.available%3A%2015%25%0A%20%20memory.available%3A%20100Mi%0A%20%20nodefs.available%3A%2010%25%0A%20%20nodefs.inodesFree%3A%205%25%0AevictionPressureTransitionPeriod%3A%200s%0AfeatureGates%3A%0A%20%20RotateKubeletServerCertificate%3A%20true%0AfileCheckFrequency%3A%200s%0AhttpCheckFrequency%3A%200s%0AimageMinimumGCAge%3A%200s%0Akind%3A%20KubeletConfiguration%0AkubeReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0Alogging%3A%0A%20%20flushFrequency%3A%200%0A%20%20options%3A%0A%20%20%20%20json%3A%0A%20%20%20%20%20%20infoBufferSize%3A%20%220%22%0A%20%20verbosity%3A%200%0AmemorySwap%3A%20%7B%7D%0AnodeStatusReportFrequency%3A%200s%0AnodeStatusUpdateFrequency%3A%200s%0AprotectKernelDefaults%3A%20true%0ArotateCertificates%3A%20true%0AruntimeRequestTimeout%3A%200s%0AserverTLSBootstrap%3A%20true%0AshutdownGracePeriod%3A%200s%0AshutdownGracePeriodCriticalPods%3A%200s%0AstaticPodPath%3A%20%2Fetc%2Fkubernetes%2Fmanifests%0AstreamingConnectionIdleTimeout%3A%200s%0AsyncFrequency%3A%200s%0AsystemReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%
0AtlsCipherSuites%3A%0A-%20TLS_AES_128_GCM_SHA256%0A-%20TLS_AES_256_GCM_SHA384%0A-%20TLS_CHACHA20_POLY1305_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305%0A-%20TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305%0AvolumePluginDir%3A%20%2Fvar%2Flib%2Fkubelet%2Fvolumeplugins%0AvolumeStatsAggPeriod%3A%200s%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/load-kernel-modules.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aset%20-euo%20pipefail%0A%0Amodprobe%20ip_vs%0Amodprobe%20ip_vs_rr%0Amodprobe%20ip_vs_wrr%0Amodprobe%20ip_vs_sh%0A%0Aif%20modinfo%20nf_conntrack_ipv4%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20modprobe%20nf_conntrack_ipv4%0Aelse%0A%20%20modprobe%20nf_conntrack%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/sysctl.d/k8s.conf","contents":{"source":"data:,net.bridge.bridge-nf-call-ip6tables%20%3D%201%0Anet.bridge.bridge-nf-call-iptables%20%3D%201%0Akernel.panic_on_oops%20%3D%201%0Akernel.panic%20%3D%2010%0Anet.ipv4.ip_forward%20%3D%201%0Avm.overcommit_memory%20%3D%201%0Afs.inotify.max_user_watches%20%3D%201048576%0Afs.inotify.max_user_instances%20%3D%208192%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic_on_oops","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic","contents":{"source":"data:,10%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/vm/overcommit_memory","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/bin/setup_net_env.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aechodate()%20%7B%0A%20%20echo%20%22%5B%24(date%20-Is)%5D%22%20%22%24%40%22%0A%7D%0A%0A%23%20get%20the%20default%20interface%20IP%20address%0ADEFAULT_IFC_IP%3D%24(ip%20-o%20%20route%20get%201%20%7C%20grep%20-oP%20%22src%20%5CK%5CS%2B%22)%0A%0A%23%20get%20the%20full%20hostname%0AFULL_HOSTNAME%3D%24(hostname%20-f)%0A%0Aif%20%5B%20-z%20%22%24%7BDEFAULT_IFC_IP%7D%22%20%5D%0Athen%0A%09echodate%20%22Failed%20to%20get%20IP%20address%20for%20the%20default%20route%20interface%22%0A%09exit%201%0Afi%0A%0A%23%20write%20the%20nodeip_env%20file%0A%23%20we%20need%20the%20line%20below%20because%20flatcar%20has%20the%20same%20string%20%22coreos%22%20in%20that%20file%0Aif%20grep%20-q%20coreos%20%2Fetc%2Fos-release%0Athen%0A%20%20echo%20-e%20%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5CnKUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%22%20%3E%20%2Fetc%2Fkubernetes%2Fnodeip.conf%0Aelif%20%5B%20!%20-d%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%20%5D%0Athen%0A%09echodate%20%22Can't%20find%20kubelet%20service%20extras%20directory%22%0A%09exit%201%0Aelse%0A%20%20echo%20-e%20%22%5BService%5D%5CnEnvironment%3D%5C%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5C%22%5CnEnvironment%3D%5C%22KUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%5C%22%22%20%3E%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%2Fnodeip.conf%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/kubernetes/bootstrap-kubelet.conf","contents":{"source":"data:,apiVersion%3A%20v1%0Aclusters%3A%0A-%20cluster%3A%0A%20%20%20%20certificate-authority-data%3A%20LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRV
p5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t%0A%20%20%20%20server%3A%20https%3A%2F%2Fserver%3A443%0A%20%20name%3A%20%22%22%0Acontexts%3A%20null%0Acurrent-context%3A%20%22%22%0Akind%3A%20Config%0Apreferences%3A%20%7B%7D%0Ausers%3A%0A-%20name%3A%20%22%22%0A%20%20user%3A%0A%20%20%20%20token%3A%20my-token%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/cloud-config","contents":{"source":"data:,%7Bvsphere-config%3Atrue%7D%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/pki/ca.crt","contents":{"source":"data:,-----BEGIN%20CERTIFICATE-----%0AMIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV%0ABAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG%0AA1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3%0ADQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0%0ANjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG%0AcmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv%0Ac3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B%0AAQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS%0AR8Od0%2B9Q62Hyny%2BGFwMTb4A%2FKU8mssoHvcceSAAbwfbxFK%2F%2Bs51TobqUnORZrOoT%0AZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk%0AJfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS%2FPlPbUj2q7YnoVLposUBMlgUb%2FCykX3%0AmOoLb4yJJQyA%2FiST6ZxiIEj36D4yWZ5lg7YJl%2BUiiBQHGCnPdGyipqV06ex0heYW%0AcaiW8LWZSUQ93jQ%2BWVCH8hT7DQO1dmsvUmXlq%2FJeAlwQ%2FQIDAQABo4HgMIHdMB0G%0AA1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt%0AhS4P4U7vTfjByC569R7E6KF%2FpH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB%0AMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0e
mluYzES%0AMBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv%0AbYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h%0AU9f9sNH0%2F6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k%2FXkDjQm%2B3lzjT0iGR4IxE%2FAo%0AeU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb%2FLnDUjs5Yj9brP0NWzXfYU4%0AUK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm%2Bje6voD%0A58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj%2Bqvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n%0AsH9BBH38%2FSzUmAN4QHSPy1gjqm00OAE8NaYDkh%2FbzE4d7mLGGMWp%2FWE3KPSu82HF%0AkPe6XoSbiLm%2Fkxk32T0%3D%0A-----END%20CERTIFICATE-----%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/hostname","contents":{"source":"data:,node1","verification":{}},"mode":384},{"filesystem":"root","group":{"id":0},"path":"/etc/ssh/sshd_config","user":{"id":0},"contents":{"source":"data:,%23%20Use%20most%20defaults%20for%20sshd%20configuration.%0ASubsystem%20sftp%20internal-sftp%0AClientAliveInterval%20180%0AUseDNS%20no%0AUsePAM%20yes%0APrintLastLog%20no%20%23%20handled%20by%20PAM%0APrintMotd%20no%20%23%20handled%20by%20PAM%0APasswordAuthentication%20no%0AChallengeResponseAuthentication%20no%0A","verification":{}},"mode":384},{"filesystem":"root","path":"/opt/bin/setup.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0A%23%20We%20stop%20these%20services%20here%20explicitly%20since%20masking%20only%20removes%20the%20symlinks%20for%20these%20services%20so%20that%20they%20can't%20be%20started.%0A%23%20But%20that%20wouldn't%20%22stop%22%20the%20already%20running%20services%20on%20the%20first%20boot.%0Asystemctl%20stop%20update-engine.service%0Asystemctl%20stop%20locksmithd.service%0Asystemctl%20disable%20setup.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/opt/bin/download.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0Aopt_bin%3D%2Fopt%2Fbin%0Ausr_local_bin%3D%2Fusr%2Flocal%2Fbin%0Acni_bin_dir%3D%2Fopt%2Fcni%2Fbin%0Amkdir%20-p%20%2Fetc%2Fcni%2Fnet.d%20%2Fetc%2Fkubernetes%2Fdynamic-config-dir%20%2Fetc%2Fkubernetes%2Fmanifests%20%22%24opt_bin%22%20%22%24cni_bin_dir%22%0Aarch%3D%24%7BHOST_ARCH-%7D%0Aif%20%5B%20-z%20%22%24arch%22%20%5D%0Athen%0Acase%20%24(uname%20-m)%20in%0Ax86_64)%0A%20%20%20%20arch%3D%22amd64%22%0A%20%20%20%20%3B%3B%0Aaarch64)%0A%20%20%20%20arch%3D%22arm64%22%0A%20%20%20%20%3B%3B%0A*)%0A%20%20%20%20echo%20%22unsupported%20CPU%20architecture%2C%20exiting%22%0A%20%20%20%20exit%201%0A%20%20%20%20%3B%3B%0Aesac%0Afi%0ACNI_VERSION%3D%22%24%7BCNI_VERSION%3A-v0.8.7%7D%22%0Acni_base_url%3D%22https%3A%2F%2Fgithub.com%2Fcontainernetworking%2Fplugins%2Freleases%2Fdownload%2F%24CNI_VERSION%22%0Acni_filename%3D%22cni-plugins-linux-%24arch-%24CNI_VERSION.tgz%22%0Acurl%20-Lfo%20%22%24cni_bin_dir%2F%24cni_filename%22%20%22%24cni_base_url%2F%24cni_filename%22%0Acni_sum%3D%24(curl%20-Lf%20%22%24cni_base_url%2F%24cni_filename.sha256%22)%0Acd%20%22%24cni_bin_dir%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cni_sum%22%0Atar%20xvf%20%22%24cni_filename%22%0Arm%20-f%20%22%24cni_filename%22%0Acd%20-%0ACRI_TOOLS_RELEASE%3D%22%24%7BCRI_TOOLS_RELEASE%3A-v1.22.0%7D%22%0Acri_tools_base_url%3D%22https%3A%2F%2Fgithub.com%2Fkubernetes-sigs%2Fcri-tools%2Freleases%2Fdownload%2F%24%7BCRI_TOOLS_RELEASE%7D%22%0Acri_tools_filename%3D%22crictl-%24%7BCRI_TOOLS_RELEASE%7D-linux-%24%7Barch%7D.tar.gz%22%0Acurl%20-Lfo%20%22%24opt_bin%2F%24cri_tools_filename%22%20%22%24cri_tools_base_url%2F%24cri_tools_filename%22%0Acri_tools_sum%3D%24(curl%20-Lf%20%22%24cri_tools_base_url%2F%24cri_tools_filename.sha256%22%20%7C%20sed%20's%2F%5C*%5C%2F
%2F%2F')%0Acd%20%22%24opt_bin%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cri_tools_sum%22%0Atar%20xvf%20%22%24cri_tools_filename%22%0Arm%20-f%20%22%24cri_tools_filename%22%0Aln%20-sf%20%22%24opt_bin%2Fcrictl%22%20%22%24usr_local_bin%22%2Fcrictl%20%7C%7C%20echo%20%22symbolic%20link%20is%20skipped%22%0Acd%20-%0AKUBE_VERSION%3D%22%24%7BKUBE_VERSION%3A-v1.21.10%7D%22%0Akube_dir%3D%22%24opt_bin%2Fkubernetes-%24KUBE_VERSION%22%0Akube_base_url%3D%22https%3A%2F%2Fstorage.googleapis.com%2Fkubernetes-release%2Frelease%2F%24KUBE_VERSION%2Fbin%2Flinux%2F%24arch%22%0Akube_sum_file%3D%22%24kube_dir%2Fsha256%22%0Amkdir%20-p%20%22%24kube_dir%22%0A%3A%20%3E%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20curl%20-Lfo%20%22%24kube_dir%2F%24bin%22%20%22%24kube_base_url%2F%24bin%22%0A%20%20%20%20chmod%20%2Bx%20%22%24kube_dir%2F%24bin%22%0A%20%20%20%20sum%3D%24(curl%20-Lf%20%22%24kube_base_url%2F%24bin.sha256%22)%0A%20%20%20%20echo%20%22%24sum%20%20%24kube_dir%2F%24bin%22%20%3E%3E%22%24kube_sum_file%22%0Adone%0Asha256sum%20-c%20%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20ln%20-sf%20%22%24kube_dir%2F%24bin%22%20%22%24opt_bin%22%2F%24bin%0Adone%0A%0Aif%20%5B%5B%20!%20-x%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20%5D%5D%3B%20then%0A%20%20%20%20curl%20-Lfo%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20https%3A%2F%2Fraw.githubusercontent.com%2Fkubermatic%2Fmachine-controller%2F7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde%2Fpkg%2Fuserdata%2Fscripts%2Fhealth-monitor.sh%0A%20%20%20%20chmod%20%2Bx%20%2Fopt%2Fbin%2Fhealth-monitor.sh%0Afi%0A%0Amkdir%20-p%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%0Acat%20%3C%3CEOF%20%7C%20tee%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%2Fenvironment.conf%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%2Fenvironment.conf%0A%5BService%5D%0ARestart%3Dalways%0AEnvironmentFile%3D-%2Fetc%2Fenvironment%0AEOF%0A%0Asystemctl%20daemon-reload%0Asystemctl%20enable%20--now%20docker%0A%0Asystemctl%20disable%20download-script.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/docker/daemon.json","contents":{"source":"data:,%7B%22exec-opts%22%3A%5B%22native.cgroupdriver%3Dsystemd%22%5D%2C%22storage-driver%22%3A%22overlay2%22%2C%22log-driver%22%3A%22json-file%22%2C%22log-opts%22%3A%7B%22max-file%22%3A%225%22%2C%22max-size%22%3A%22100m%22%7D%7D%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/crictl.yaml","contents":{"source":"data:,runtime-endpoint%3A%20unix%3A%2F%2F%2Frun%2Fcontainerd%2Fcontainerd.sock%0A","verification":{}},"mode":420}]},"systemd":{"units":[{"mask":true,"name":"update-engine.service"},{"mask":true,"name":"locksmithd.service"},{"contents":"[Install]\nWantedBy=multi-user.target\n\n[Unit]\nRequires=network-online.target\nRequires=nodeip.service\nAfter=network-online.target\nAfter=nodeip.service\n\nDescription=Service responsible for configuring the flatcar 
machine\n\n[Service]\nType=oneshot\nRemainAfterExit=true\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/setup.sh\n","enabled":true,"name":"setup.service"},{"contents":"[Unit]\nRequires=network-online.target\nRequires=setup.service\nAfter=network-online.target\nAfter=setup.service\n[Service]\nType=oneshot\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/download.sh\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"download-script.service"},{"contents":"[Unit]\nRequires=kubelet.service\nAfter=kubelet.service\n\n[Service]\nExecStart=/opt/bin/health-monitor.sh kubelet\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet-healthcheck.service"},{"contents":"[Unit]\nDescription=Setup Kubelet Node IP Env\nRequires=network-online.target\nAfter=network-online.target\n\n[Service]\nExecStart=/opt/bin/setup_net_env.sh\nRemainAfterExit=yes\nType=oneshot\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"nodeip.service"},{"contents":"[Unit]\nAfter=docker.service\nRequires=docker.service\n\nDescription=kubelet: The Kubernetes Node Agent\nDocumentation=https://kubernetes.io/docs/home/\n\n[Service]\nUser=root\nRestart=always\nStartLimitInterval=0\nRestartSec=10\nCPUAccounting=true\nMemoryAccounting=true\n\nEnvironment=\"PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/\"\nEnvironmentFile=-/etc/environment\n\nExecStartPre=/bin/bash /opt/load-kernel-modules.sh\n\nExecStartPre=/bin/bash /opt/bin/setup_net_env.sh\nExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --config=/etc/kubernetes/kubelet.conf \\\n --cert-dir=/etc/kubernetes/pki \\\n --cloud-provider=vsphere \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --container-runtime=docker \\\n --container-runtime-endpoint=unix:///var/run/dockershim.sock \\\n --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \\\n --feature-gates=DynamicKubeletConfig=true \\\n --network-plugin=cni \\\n --node-ip ${KUBELET_NODE_IP}\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Service]\nEnvironmentFile=/etc/kubernetes/nodeip.conf\n","name":"10-nodeip.conf"},{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet.service"}]}} \ No newline at end of file diff --git a/pkg/userdata/flatcar/testdata/ignition_v1.22.7.json b/pkg/userdata/flatcar/testdata/ignition_v1.22.7.json index a7d6cc773..34834bf0b 100644 --- a/pkg/userdata/flatcar/testdata/ignition_v1.22.7.json +++ b/pkg/userdata/flatcar/testdata/ignition_v1.22.7.json @@ -1 +1 @@ -{"ignition":{"config":{},"security":{"tls":{}},"timeouts":{},"version":"2.2.0"},"networkd":{"units":[{"contents":"[Match]\n# Because of difficulty predicting specific NIC names on different cloud providers,\n# we only support static addressing on VSphere. 
There should be a single NIC attached\n# that we will match by name prefix 'en' which denotes ethernet devices.\nName=en*\n\n[Network]\nDHCP=no\nAddress=192.168.81.4/24\nGateway=192.168.81.1\nDNS=8.8.8.8\n","name":"static-nic.network"}]},"passwd":{"users":[{"name":"core","sshAuthorizedKeys":["ssh-rsa AAABBB","ssh-rsa CCCDDD"]}]},"storage":{"files":[{"filesystem":"root","path":"/etc/systemd/journald.conf.d/max_disk_use.conf","contents":{"source":"data:,%5BJournal%5D%0ASystemMaxUse%3D5G%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/kubernetes/kubelet.conf","contents":{"source":"data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1beta1%0Aauthentication%3A%0A%20%20anonymous%3A%0A%20%20%20%20enabled%3A%20false%0A%20%20webhook%3A%0A%20%20%20%20cacheTTL%3A%200s%0A%20%20%20%20enabled%3A%20true%0A%20%20x509%3A%0A%20%20%20%20clientCAFile%3A%20%2Fetc%2Fkubernetes%2Fpki%2Fca.crt%0Aauthorization%3A%0A%20%20mode%3A%20Webhook%0A%20%20webhook%3A%0A%20%20%20%20cacheAuthorizedTTL%3A%200s%0A%20%20%20%20cacheUnauthorizedTTL%3A%200s%0AcgroupDriver%3A%20systemd%0AclusterDNS%3A%0A-%2010.10.10.10%0AclusterDomain%3A%20cluster.local%0AcontainerLogMaxSize%3A%20100Mi%0AcpuManagerReconcilePeriod%3A%200s%0AevictionHard%3A%0A%20%20imagefs.available%3A%2015%25%0A%20%20memory.available%3A%20100Mi%0A%20%20nodefs.available%3A%2010%25%0A%20%20nodefs.inodesFree%3A%205%25%0AevictionPressureTransitionPeriod%3A%200s%0AfeatureGates%3A%0A%20%20RotateKubeletServerCertificate%3A%20true%0AfileCheckFrequency%3A%200s%0AhttpCheckFrequency%3A%200s%0AimageMinimumGCAge%3A%200s%0Akind%3A%20KubeletConfiguration%0AkubeReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0Alogging%3A%0A%20%20flushFrequency%3A%200%0A%20%20options%3A%0A%20%20%20%20json%3A%0A%20%20%20%20%20%20infoBufferSize%3A%20%220%22%0A%20%20verbosity%3A%200%0AmemorySwap%3A%20%7B%7D%0AnodeStatusReportFrequency%3A%200s%0AnodeStatusUpdateFrequency%3A%200s%0AprotectKernelDefaults%3A%20true%0ArotateCertificates%3A%20true%0AruntimeRequestTimeout%3A%200s%0AserverTLSBootstrap%3A%20true%0AshutdownGracePeriod%3A%200s%0AshutdownGracePeriodCriticalPods%3A%200s%0AstaticPodPath%3A%20%2Fetc%2Fkubernetes%2Fmanifests%0AstreamingConnectionIdleTimeout%3A%200s%0AsyncFrequency%3A%200s%0AsystemReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0AtlsCipherSuites%3A%0A-%20TLS_AES_128_GCM_SHA256%0A-%20TLS_AES_256_GCM_SHA384%0A-%20TLS_CHACHA20_POLY1305_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305%0A-%20TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305%0AvolumePluginDir%3A%20%2Fvar%2Flib%2Fkubelet%2Fvolumeplugins%0AvolumeStatsAggPeriod%3A%200s%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/load-kernel-modules.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aset%20-euo%20pipefail%0A%0Amodprobe%20ip_vs%0Amodprobe%20ip_vs_rr%0Amodprobe%20ip_vs_wrr%0Amodprobe%20ip_vs_sh%0A%0Aif%20modinfo%20nf_conntrack_ipv4%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20modprobe%20nf_conntrack_ipv4%0Aelse%0A%20%20modprobe%20nf_conntrack%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/sysctl.d/k8s.conf","contents":{"source":"data:,net.bridge.bridge-nf-call-ip6tables%20%3D%201%0Anet.bridge.bridge-nf-call-iptables%20%3D%201%0Akernel.panic_on_oops%20%3D%201%0Akernel.panic%20%3D%2010%0Anet.ipv4.ip
_forward%20%3D%201%0Avm.overcommit_memory%20%3D%201%0Afs.inotify.max_user_watches%20%3D%201048576%0Afs.inotify.max_user_instances%20%3D%208192%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic_on_oops","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic","contents":{"source":"data:,10%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/vm/overcommit_memory","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/bin/setup_net_env.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aechodate()%20%7B%0A%20%20echo%20%22%5B%24(date%20-Is)%5D%22%20%22%24%40%22%0A%7D%0A%0A%23%20get%20the%20default%20interface%20IP%20address%0ADEFAULT_IFC_IP%3D%24(ip%20-o%20%20route%20get%201%20%7C%20grep%20-oP%20%22src%20%5CK%5CS%2B%22)%0A%0A%23%20get%20the%20full%20hostname%0AFULL_HOSTNAME%3D%24(hostname%20-f)%0A%0Aif%20%5B%20-z%20%22%24%7BDEFAULT_IFC_IP%7D%22%20%5D%0Athen%0A%09echodate%20%22Failed%20to%20get%20IP%20address%20for%20the%20default%20route%20interface%22%0A%09exit%201%0Afi%0A%0A%23%20write%20the%20nodeip_env%20file%0A%23%20we%20need%20the%20line%20below%20because%20flatcar%20has%20the%20same%20string%20%22coreos%22%20in%20that%20file%0Aif%20grep%20-q%20coreos%20%2Fetc%2Fos-release%0Athen%0A%20%20echo%20-e%20%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5CnKUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%22%20%3E%20%2Fetc%2Fkubernetes%2Fnodeip.conf%0Aelif%20%5B%20!%20-d%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%20%5D%0Athen%0A%09echodate%20%22Can't%20find%20kubelet%20service%20extras%20directory%22%0A%09exit%201%0Aelse%0A%20%20echo%20-e%20%22%5BService%5D%5CnEnvironment%3D%5C%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5C%22%5CnEnvironment%3D%5C%22KUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%5C%22%22%20%3E%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%2Fnodeip.conf%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/kubernetes/bootstrap-kubelet.conf","contents":{"source":"data:,apiVersion%3A%20v1%0Aclusters%3A%0A-%20cluster%3A%0A%20%20%20%20certificate-authority-data%3A%20LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZ
VzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t%0A%20%20%20%20server%3A%20https%3A%2F%2Fserver%3A443%0A%20%20name%3A%20%22%22%0Acontexts%3A%20null%0Acurrent-context%3A%20%22%22%0Akind%3A%20Config%0Apreferences%3A%20%7B%7D%0Ausers%3A%0A-%20name%3A%20%22%22%0A%20%20user%3A%0A%20%20%20%20token%3A%20my-token%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/cloud-config","contents":{"source":"data:,%7Bvsphere-config%3Atrue%7D%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/pki/ca.crt","contents":{"source":"data:,-----BEGIN%20CERTIFICATE-----%0AMIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV%0ABAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG%0AA1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3%0ADQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0%0ANjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG%0AcmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv%0Ac3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B%0AAQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS%0AR8Od0%2B9Q62Hyny%2BGFwMTb4A%2FKU8mssoHvcceSAAbwfbxFK%2F%2Bs51TobqUnORZrOoT%0AZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk%0AJfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS%2FPlPbUj2q7YnoVLposUBMlgUb%2FCykX3%0AmOoLb4yJJQyA%2FiST6ZxiIEj36D4yWZ5lg7YJl%2BUiiBQHGCnPdGyipqV06ex0heYW%0AcaiW8LWZSUQ93jQ%2BWVCH8hT7DQO1dmsvUmXlq%2FJeAlwQ%2FQIDAQABo4HgMIHdMB0G%0AA1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt%0AhS4P4U7vTfjByC569R7E6KF%2FpH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB%0AMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES%0AMBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv%0AbYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h%0AU9f9sNH0%2F6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k%2FXkDjQm%2B3lzjT0iGR4IxE%2FAo%0AeU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb%2FLnDUjs5Yj9brP0NWzXfYU4%0AUK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm%2Bje6voD%0A58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj%2Bqvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n%0AsH9BBH38%2FSzUmAN4QHSPy1gjqm00OAE8NaYDkh%2FbzE4d7mLGGMWp%2FWE3KPSu82HF%0AkPe6XoSbiLm%2Fkxk32T0%3D%0A-----END%20CERTIFICATE-----%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/hostname","contents":{"source":"data:,node1","verification":{}},"mode":384},{"filesystem":"root","group":{"id":0},"path":"/etc/ssh/sshd_config","user":{"id":0},"contents":{"source":"data:,%23%20Use%20most%20defaults%20for%20sshd%20configuration.%0ASubsystem%20sftp%20internal-sftp%0AClientAliveInterval%20180%0AUseDNS%20no%0AUsePAM%20yes%0APrintLastLog%20no%20%23%20handled%20by%20PAM%0APrintMotd%20no%20%23%20handled%20by%20PAM%0APasswordAuthentication%20no%0AChallengeResponseAuthentication%20no%0A","verification":{}},"mode":384},{"filesystem":"roo
t","path":"/opt/bin/setup.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0A%23%20We%20stop%20these%20services%20here%20explicitly%20since%20masking%20only%20removes%20the%20symlinks%20for%20these%20services%20so%20that%20they%20can't%20be%20started.%0A%23%20But%20that%20wouldn't%20%22stop%22%20the%20already%20running%20services%20on%20the%20first%20boot.%0Asystemctl%20stop%20update-engine.service%0Asystemctl%20stop%20locksmithd.service%0Asystemctl%20disable%20setup.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/opt/bin/download.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0Aopt_bin%3D%2Fopt%2Fbin%0Ausr_local_bin%3D%2Fusr%2Flocal%2Fbin%0Acni_bin_dir%3D%2Fopt%2Fcni%2Fbin%0Amkdir%20-p%20%2Fetc%2Fcni%2Fnet.d%20%2Fetc%2Fkubernetes%2Fdynamic-config-dir%20%2Fetc%2Fkubernetes%2Fmanifests%20%22%24opt_bin%22%20%22%24cni_bin_dir%22%0Aarch%3D%24%7BHOST_ARCH-%7D%0Aif%20%5B%20-z%20%22%24arch%22%20%5D%0Athen%0Acase%20%24(uname%20-m)%20in%0Ax86_64)%0A%20%20%20%20arch%3D%22amd64%22%0A%20%20%20%20%3B%3B%0Aaarch64)%0A%20%20%20%20arch%3D%22arm64%22%0A%20%20%20%20%3B%3B%0A*)%0A%20%20%20%20echo%20%22unsupported%20CPU%20architecture%2C%20exiting%22%0A%20%20%20%20exit%201%0A%20%20%20%20%3B%3B%0Aesac%0Afi%0ACNI_VERSION%3D%22%24%7BCNI_VERSION%3A-v0.8.7%7D%22%0Acni_base_url%3D%22https%3A%2F%2Fgithub.com%2Fcontainernetworking%2Fplugins%2Freleases%2Fdownload%2F%24CNI_VERSION%22%0Acni_filename%3D%22cni-plugins-linux-%24arch-%24CNI_VERSION.tgz%22%0Acurl%20-Lfo%20%22%24cni_bin_dir%2F%24cni_filename%22%20%22%24cni_base_url%2F%24cni_filename%22%0Acni_sum%3D%24(curl%20-Lf%20%22%24cni_base_url%2F%24cni_filename.sha256%22)%0Acd%20%22%24cni_bin_dir%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cni_sum%22%0Atar%20xvf%20%22%24cni_filename%22%0Arm%20-f%20%22%24cni_filename%22%0Acd%20-%0ACRI_TOOLS_RELEASE%3D%22%24%7BCRI_TOOLS_RELEASE%3A-v1.22.0%7D%22%0Acri_tools_base_url%3D%22https%3A%2F%2Fgithub.com%2Fkubernetes-sigs%2Fcri-tools%2Freleases%2Fdownload%2F%24%7BCRI_TOOLS_RELEASE%7D%22%0Acri_tools_filename%3D%22crictl-%24%7BCRI_TOOLS_RELEASE%7D-linux-%24%7Barch%7D.tar.gz%22%0Acurl%20-Lfo%20%22%24opt_bin%2F%24cri_tools_filename%22%20%22%24cri_tools_base_url%2F%24cri_tools_filename%22%0Acri_tools_sum%3D%24(curl%20-Lf%20%22%24cri_tools_base_url%2F%24cri_tools_filename.sha256%22%20%7C%20sed%20's%2F%5C*%5C%2F%2F%2F')%0Acd%20%22%24opt_bin%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cri_tools_sum%22%0Atar%20xvf%20%22%24cri_tools_filename%22%0Arm%20-f%20%22%24cri_tools_filename%22%0Aln%20-sf%20%22%24opt_bin%2Fcrictl%22%20%22%24usr_local_bin%22%2Fcrictl%20%7C%7C%20echo%20%22symbolic%20link%20is%20skipped%22%0Acd%20-%0AKUBE_VERSION%3D%22%24%7BKUBE_VERSION%3A-v1.22.7%7D%22%0Akube_dir%3D%22%24opt_bin%2Fkubernetes-%24KUBE_VERSION%22%0Akube_base_url%3D%22https%3A%2F%2Fstorage.googleapis.com%2Fkubernetes-release%2Frelease%2F%24KUBE_VERSION%2Fbin%2Flinux%2F%24arch%22%0Akube_sum_file%3D%22%24kube_dir%2Fsha256%22%0Amkdir%20-p%20%22%24kube_dir%22%0A%3A%20%3E%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20curl%20-Lfo%20%22%24kube_dir%2F%24bin%22%20%22%24kube_base_url%2F%24bin%22%0A%20%20%20%20chmod%20%2Bx%20%22%24kube_dir%2F%24bin%22%0A%20%20%20%20sum%3D%24(curl%20-Lf%20%22%24kube_base_url%2F%24bin.sha256%22)%0A%20%20%20%20echo%20%22%24sum%20%20%24kube_dir%2F%24bin%22%20%3E%3E%22%24kube_sum_file%22%0Adone%0Asha256sum%20-c%20%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20ln%20-sf%20%22%24k
ube_dir%2F%24bin%22%20%22%24opt_bin%22%2F%24bin%0Adone%0A%0Aif%20%5B%5B%20!%20-x%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20%5D%5D%3B%20then%0A%20%20%20%20curl%20-Lfo%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20https%3A%2F%2Fraw.githubusercontent.com%2Fkubermatic%2Fmachine-controller%2F7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde%2Fpkg%2Fuserdata%2Fscripts%2Fhealth-monitor.sh%0A%20%20%20%20chmod%20%2Bx%20%2Fopt%2Fbin%2Fhealth-monitor.sh%0Afi%0A%0Amkdir%20-p%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%0Acat%20%3C%3CEOF%20%7C%20tee%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%2Fenvironment.conf%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%2Fenvironment.conf%0A%5BService%5D%0ARestart%3Dalways%0AEnvironmentFile%3D-%2Fetc%2Fenvironment%0AEOF%0A%0Asystemctl%20daemon-reload%0Asystemctl%20enable%20--now%20docker%0A%0Asystemctl%20disable%20download-script.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/docker/daemon.json","contents":{"source":"data:,%7B%22exec-opts%22%3A%5B%22native.cgroupdriver%3Dsystemd%22%5D%2C%22storage-driver%22%3A%22overlay2%22%2C%22log-driver%22%3A%22json-file%22%2C%22log-opts%22%3A%7B%22max-file%22%3A%225%22%2C%22max-size%22%3A%22100m%22%7D%7D%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/crictl.yaml","contents":{"source":"data:,runtime-endpoint%3A%20unix%3A%2F%2F%2Frun%2Fcontainerd%2Fcontainerd.sock%0A","verification":{}},"mode":420}]},"systemd":{"units":[{"mask":true,"name":"update-engine.service"},{"mask":true,"name":"locksmithd.service"},{"contents":"[Install]\nWantedBy=multi-user.target\n\n[Unit]\nRequires=network-online.target\nRequires=nodeip.service\nAfter=network-online.target\nAfter=nodeip.service\n\nDescription=Service responsible for configuring the flatcar machine\n\n[Service]\nType=oneshot\nRemainAfterExit=true\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/setup.sh\n","enabled":true,"name":"setup.service"},{"contents":"[Unit]\nRequires=network-online.target\nRequires=setup.service\nAfter=network-online.target\nAfter=setup.service\n[Service]\nType=oneshot\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/download.sh\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"download-script.service"},{"contents":"[Unit]\nRequires=kubelet.service\nAfter=kubelet.service\n\n[Service]\nExecStart=/opt/bin/health-monitor.sh kubelet\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet-healthcheck.service"},{"contents":"[Unit]\nDescription=Setup Kubelet Node IP Env\nRequires=network-online.target\nAfter=network-online.target\n\n[Service]\nExecStart=/opt/bin/setup_net_env.sh\nRemainAfterExit=yes\nType=oneshot\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"nodeip.service"},{"contents":"[Unit]\nAfter=docker.service\nRequires=docker.service\n\nDescription=kubelet: The Kubernetes Node Agent\nDocumentation=https://kubernetes.io/docs/home/\n\n[Service]\nUser=root\nRestart=always\nStartLimitInterval=0\nRestartSec=10\nCPUAccounting=true\nMemoryAccounting=true\n\nEnvironment=\"PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/\"\nEnvironmentFile=-/etc/environment\n\nExecStartPre=/bin/bash /opt/load-kernel-modules.sh\n\nExecStartPre=/bin/bash /opt/bin/setup_net_env.sh\nExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \\\n 
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --config=/etc/kubernetes/kubelet.conf \\\n --cert-dir=/etc/kubernetes/pki \\\n --cloud-provider=vsphere \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --container-runtime=docker \\\n --container-runtime-endpoint=unix:///var/run/dockershim.sock \\\n --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \\\n --feature-gates=DynamicKubeletConfig=true \\\n --network-plugin=cni \\\n --node-ip ${KUBELET_NODE_IP}\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Service]\nEnvironmentFile=/etc/kubernetes/nodeip.conf\n","name":"10-nodeip.conf"},{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet.service"}]}} \ No newline at end of file +{"ignition":{"config":{},"security":{"tls":{}},"timeouts":{},"version":"2.2.0"},"networkd":{"units":[{"contents":"[Match]\n# Because of difficulty predicting specific NIC names on different cloud providers,\n# we only support static addressing on VSphere. There should be a single NIC attached\n# that we will match by name prefix 'en' which denotes ethernet devices.\nName=en*\n\n[Network]\nDHCP=no\nAddress=192.168.81.4/24\nGateway=192.168.81.1\nDNS=8.8.8.8\n","name":"static-nic.network"}]},"passwd":{"users":[{"name":"core","sshAuthorizedKeys":["ssh-rsa AAABBB","ssh-rsa CCCDDD"]}]},"storage":{"files":[{"filesystem":"root","path":"/etc/systemd/journald.conf.d/max_disk_use.conf","contents":{"source":"data:,%5BJournal%5D%0ASystemMaxUse%3D5G%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/kubernetes/kubelet.conf","contents":{"source":"data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1beta1%0Aauthentication%3A%0A%20%20anonymous%3A%0A%20%20%20%20enabled%3A%20false%0A%20%20webhook%3A%0A%20%20%20%20cacheTTL%3A%200s%0A%20%20%20%20enabled%3A%20true%0A%20%20x509%3A%0A%20%20%20%20clientCAFile%3A%20%2Fetc%2Fkubernetes%2Fpki%2Fca.crt%0Aauthorization%3A%0A%20%20mode%3A%20Webhook%0A%20%20webhook%3A%0A%20%20%20%20cacheAuthorizedTTL%3A%200s%0A%20%20%20%20cacheUnauthorizedTTL%3A%200s%0AcgroupDriver%3A%20systemd%0AclusterDNS%3A%0A-%2010.10.10.10%0AclusterDomain%3A%20cluster.local%0AcontainerLogMaxSize%3A%20100Mi%0AcpuManagerReconcilePeriod%3A%200s%0AevictionHard%3A%0A%20%20imagefs.available%3A%2015%25%0A%20%20memory.available%3A%20100Mi%0A%20%20nodefs.available%3A%2010%25%0A%20%20nodefs.inodesFree%3A%205%25%0AevictionPressureTransitionPeriod%3A%200s%0AfeatureGates%3A%0A%20%20RotateKubeletServerCertificate%3A%20true%0AfileCheckFrequency%3A%200s%0AhttpCheckFrequency%3A%200s%0AimageMinimumGCAge%3A%200s%0Akind%3A%20KubeletConfiguration%0AkubeReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0Alogging%3A%0A%20%20flushFrequency%3A%200%0A%20%20options%3A%0A%20%20%20%20json%3A%0A%20%20%20%20%20%20infoBufferSize%3A%20%220%22%0A%20%20verbosity%3A%200%0AmemorySwap%3A%20%7B%7D%0AnodeStatusReportFrequency%3A%200s%0AnodeStatusUpdateFrequency%3A%200s%0AprotectKernelDefaults%3A%20true%0ArotateCertificates%3A%20true%0AruntimeRequestTimeout%3A%200s%0AserverTLSBootstrap%3A%20true%0AshutdownGracePeriod%3A%200s%0AshutdownGracePeriodCriticalPods%3A%200s%0AstaticPodPath%3A%20%2Fetc%2Fkubernetes%2Fmanifests%0AstreamingConnectionIdleTimeout%3A%200s%0AsyncFrequency%3A%200s%0AsystemReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ep
hemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0AtlsCipherSuites%3A%0A-%20TLS_AES_128_GCM_SHA256%0A-%20TLS_AES_256_GCM_SHA384%0A-%20TLS_CHACHA20_POLY1305_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305%0A-%20TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305%0AvolumePluginDir%3A%20%2Fvar%2Flib%2Fkubelet%2Fvolumeplugins%0AvolumeStatsAggPeriod%3A%200s%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/load-kernel-modules.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aset%20-euo%20pipefail%0A%0Amodprobe%20ip_vs%0Amodprobe%20ip_vs_rr%0Amodprobe%20ip_vs_wrr%0Amodprobe%20ip_vs_sh%0A%0Aif%20modinfo%20nf_conntrack_ipv4%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20modprobe%20nf_conntrack_ipv4%0Aelse%0A%20%20modprobe%20nf_conntrack%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/sysctl.d/k8s.conf","contents":{"source":"data:,net.bridge.bridge-nf-call-ip6tables%20%3D%201%0Anet.bridge.bridge-nf-call-iptables%20%3D%201%0Akernel.panic_on_oops%20%3D%201%0Akernel.panic%20%3D%2010%0Anet.ipv4.ip_forward%20%3D%201%0Avm.overcommit_memory%20%3D%201%0Afs.inotify.max_user_watches%20%3D%201048576%0Afs.inotify.max_user_instances%20%3D%208192%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic_on_oops","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic","contents":{"source":"data:,10%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/vm/overcommit_memory","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/bin/setup_net_env.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aechodate()%20%7B%0A%20%20echo%20%22%5B%24(date%20-Is)%5D%22%20%22%24%40%22%0A%7D%0A%0A%23%20get%20the%20default%20interface%20IP%20address%0ADEFAULT_IFC_IP%3D%24(ip%20-o%20%20route%20get%201%20%7C%20grep%20-oP%20%22src%20%5CK%5CS%2B%22)%0A%0A%23%20get%20the%20full%20hostname%0AFULL_HOSTNAME%3D%24(hostname%20-f)%0A%0Aif%20%5B%20-z%20%22%24%7BDEFAULT_IFC_IP%7D%22%20%5D%0Athen%0A%09echodate%20%22Failed%20to%20get%20IP%20address%20for%20the%20default%20route%20interface%22%0A%09exit%201%0Afi%0A%0A%23%20write%20the%20nodeip_env%20file%0A%23%20we%20need%20the%20line%20below%20because%20flatcar%20has%20the%20same%20string%20%22coreos%22%20in%20that%20file%0Aif%20grep%20-q%20coreos%20%2Fetc%2Fos-release%0Athen%0A%20%20echo%20-e%20%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5CnKUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%22%20%3E%20%2Fetc%2Fkubernetes%2Fnodeip.conf%0Aelif%20%5B%20!%20-d%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%20%5D%0Athen%0A%09echodate%20%22Can't%20find%20kubelet%20service%20extras%20directory%22%0A%09exit%201%0Aelse%0A%20%20echo%20-e%20%22%5BService%5D%5CnEnvironment%3D%5C%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5C%22%5CnEnvironment%3D%5C%22KUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%5C%22%22%20%3E%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%2Fnodeip.conf%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/kubernetes/bootstrap-kubelet.conf","contents":{"source":"data:,apiVersion%3A%20v1%0Aclusters%3A%0A-%20cluster%3A%0A%20%20%20%20certificate-authority-data%3A%20LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlR
NUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t%0A%20%20%20%20server%3A%20https%3A%2F%2Fserver%3A443%0A%20%20name%3A%20%22%22%0Acontexts%3A%20null%0Acurrent-context%3A%20%22%22%0Akind%3A%20Config%0Apreferences%3A%20%7B%7D%0Ausers%3A%0A-%20name%3A%20%22%22%0A%20%20user%3A%0A%20%20%20%20token%3A%20my-token%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/cloud-config","contents":{"source":"data:,%7Bvsphere-config%3Atrue%7D%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/pki/ca.crt","contents":{"source":"data:,-----BEGIN%20CERTIFICATE-----%0AMIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV%0ABAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG%0AA1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3%0ADQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0%0ANjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG%0AcmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv%0Ac3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B%0AAQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS%0AR8Od0%2B9Q62Hyny%2BGFwMTb4A%2FKU8mssoHvcceSAAbwfbxFK%2F%2Bs51TobqUnORZrOoT%0AZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk%0AJfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS%2FPlPbUj2q7YnoVLposUBMlgUb%2FCykX3%0AmOoLb4yJJQyA%2FiST6ZxiIEj36D4yWZ5lg7YJl%2BUiiBQHGCnPdGyipqV06ex0heYW%0AcaiW8LWZSUQ93jQ%2BWVCH8hT7DQO1dmsvUmXlq%2FJeAlwQ%2FQIDAQABo4HgMIHdMB0G%0AA1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt%0AhS4P4U7vTfjByC569R7E6KF%2FpH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB%0AMRYwFA
YDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES%0AMBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv%0AbYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h%0AU9f9sNH0%2F6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k%2FXkDjQm%2B3lzjT0iGR4IxE%2FAo%0AeU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb%2FLnDUjs5Yj9brP0NWzXfYU4%0AUK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm%2Bje6voD%0A58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj%2Bqvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n%0AsH9BBH38%2FSzUmAN4QHSPy1gjqm00OAE8NaYDkh%2FbzE4d7mLGGMWp%2FWE3KPSu82HF%0AkPe6XoSbiLm%2Fkxk32T0%3D%0A-----END%20CERTIFICATE-----%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/hostname","contents":{"source":"data:,node1","verification":{}},"mode":384},{"filesystem":"root","group":{"id":0},"path":"/etc/ssh/sshd_config","user":{"id":0},"contents":{"source":"data:,%23%20Use%20most%20defaults%20for%20sshd%20configuration.%0ASubsystem%20sftp%20internal-sftp%0AClientAliveInterval%20180%0AUseDNS%20no%0AUsePAM%20yes%0APrintLastLog%20no%20%23%20handled%20by%20PAM%0APrintMotd%20no%20%23%20handled%20by%20PAM%0APasswordAuthentication%20no%0AChallengeResponseAuthentication%20no%0A","verification":{}},"mode":384},{"filesystem":"root","path":"/opt/bin/setup.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0A%23%20We%20stop%20these%20services%20here%20explicitly%20since%20masking%20only%20removes%20the%20symlinks%20for%20these%20services%20so%20that%20they%20can't%20be%20started.%0A%23%20But%20that%20wouldn't%20%22stop%22%20the%20already%20running%20services%20on%20the%20first%20boot.%0Asystemctl%20stop%20update-engine.service%0Asystemctl%20stop%20locksmithd.service%0Asystemctl%20disable%20setup.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/opt/bin/download.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0Aopt_bin%3D%2Fopt%2Fbin%0Ausr_local_bin%3D%2Fusr%2Flocal%2Fbin%0Acni_bin_dir%3D%2Fopt%2Fcni%2Fbin%0Amkdir%20-p%20%2Fetc%2Fcni%2Fnet.d%20%2Fetc%2Fkubernetes%2Fdynamic-config-dir%20%2Fetc%2Fkubernetes%2Fmanifests%20%22%24opt_bin%22%20%22%24cni_bin_dir%22%0Aarch%3D%24%7BHOST_ARCH-%7D%0Aif%20%5B%20-z%20%22%24arch%22%20%5D%0Athen%0Acase%20%24(uname%20-m)%20in%0Ax86_64)%0A%20%20%20%20arch%3D%22amd64%22%0A%20%20%20%20%3B%3B%0Aaarch64)%0A%20%20%20%20arch%3D%22arm64%22%0A%20%20%20%20%3B%3B%0A*)%0A%20%20%20%20echo%20%22unsupported%20CPU%20architecture%2C%20exiting%22%0A%20%20%20%20exit%201%0A%20%20%20%20%3B%3B%0Aesac%0Afi%0ACNI_VERSION%3D%22%24%7BCNI_VERSION%3A-v0.8.7%7D%22%0Acni_base_url%3D%22https%3A%2F%2Fgithub.com%2Fcontainernetworking%2Fplugins%2Freleases%2Fdownload%2F%24CNI_VERSION%22%0Acni_filename%3D%22cni-plugins-linux-%24arch-%24CNI_VERSION.tgz%22%0Acurl%20-Lfo%20%22%24cni_bin_dir%2F%24cni_filename%22%20%22%24cni_base_url%2F%24cni_filename%22%0Acni_sum%3D%24(curl%20-Lf%20%22%24cni_base_url%2F%24cni_filename.sha256%22)%0Acd%20%22%24cni_bin_dir%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cni_sum%22%0Atar%20xvf%20%22%24cni_filename%22%0Arm%20-f%20%22%24cni_filename%22%0Acd%20-%0ACRI_TOOLS_RELEASE%3D%22%24%7BCRI_TOOLS_RELEASE%3A-v1.22.0%7D%22%0Acri_tools_base_url%3D%22https%3A%2F%2Fgithub.com%2Fkubernetes-sigs%2Fcri-tools%2Freleases%2Fdownload%2F%24%7BCRI_TOOLS_RELEASE%7D%22%0Acri_tools_filename%3D%22crictl-%24%7BCRI_TOOLS_RELEASE%7D-linux-%24%7Barch%7D.tar.gz%22%0Acurl%20-Lfo%20%22%24opt_bin%2F%24cri_tools_filename%22%20%22%24cri_tools_base_url%2F%24cri_tools_filename%22%0Acri_tools_sum%3D%24(curl%20-Lf%20%22%24cri_tools_base_url%2F%24cri_too
ls_filename.sha256%22%20%7C%20sed%20's%2F%5C*%5C%2F%2F%2F')%0Acd%20%22%24opt_bin%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cri_tools_sum%22%0Atar%20xvf%20%22%24cri_tools_filename%22%0Arm%20-f%20%22%24cri_tools_filename%22%0Aln%20-sf%20%22%24opt_bin%2Fcrictl%22%20%22%24usr_local_bin%22%2Fcrictl%20%7C%7C%20echo%20%22symbolic%20link%20is%20skipped%22%0Acd%20-%0AKUBE_VERSION%3D%22%24%7BKUBE_VERSION%3A-v1.22.7%7D%22%0Akube_dir%3D%22%24opt_bin%2Fkubernetes-%24KUBE_VERSION%22%0Akube_base_url%3D%22https%3A%2F%2Fstorage.googleapis.com%2Fkubernetes-release%2Frelease%2F%24KUBE_VERSION%2Fbin%2Flinux%2F%24arch%22%0Akube_sum_file%3D%22%24kube_dir%2Fsha256%22%0Amkdir%20-p%20%22%24kube_dir%22%0A%3A%20%3E%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20curl%20-Lfo%20%22%24kube_dir%2F%24bin%22%20%22%24kube_base_url%2F%24bin%22%0A%20%20%20%20chmod%20%2Bx%20%22%24kube_dir%2F%24bin%22%0A%20%20%20%20sum%3D%24(curl%20-Lf%20%22%24kube_base_url%2F%24bin.sha256%22)%0A%20%20%20%20echo%20%22%24sum%20%20%24kube_dir%2F%24bin%22%20%3E%3E%22%24kube_sum_file%22%0Adone%0Asha256sum%20-c%20%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20ln%20-sf%20%22%24kube_dir%2F%24bin%22%20%22%24opt_bin%22%2F%24bin%0Adone%0A%0Aif%20%5B%5B%20!%20-x%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20%5D%5D%3B%20then%0A%20%20%20%20curl%20-Lfo%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20https%3A%2F%2Fraw.githubusercontent.com%2Fkubermatic%2Fmachine-controller%2F7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde%2Fpkg%2Fuserdata%2Fscripts%2Fhealth-monitor.sh%0A%20%20%20%20chmod%20%2Bx%20%2Fopt%2Fbin%2Fhealth-monitor.sh%0Afi%0A%0Amkdir%20-p%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%0Acat%20%3C%3CEOF%20%7C%20tee%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%2Fenvironment.conf%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%2Fenvironment.conf%0A%5BService%5D%0ARestart%3Dalways%0AEnvironmentFile%3D-%2Fetc%2Fenvironment%0AEOF%0A%0Asystemctl%20daemon-reload%0Asystemctl%20enable%20--now%20docker%0A%0Asystemctl%20disable%20download-script.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/docker/daemon.json","contents":{"source":"data:,%7B%22exec-opts%22%3A%5B%22native.cgroupdriver%3Dsystemd%22%5D%2C%22storage-driver%22%3A%22overlay2%22%2C%22log-driver%22%3A%22json-file%22%2C%22log-opts%22%3A%7B%22max-file%22%3A%225%22%2C%22max-size%22%3A%22100m%22%7D%7D%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/crictl.yaml","contents":{"source":"data:,runtime-endpoint%3A%20unix%3A%2F%2F%2Frun%2Fcontainerd%2Fcontainerd.sock%0A","verification":{}},"mode":420}]},"systemd":{"units":[{"mask":true,"name":"update-engine.service"},{"mask":true,"name":"locksmithd.service"},{"contents":"[Install]\nWantedBy=multi-user.target\n\n[Unit]\nRequires=network-online.target\nRequires=nodeip.service\nAfter=network-online.target\nAfter=nodeip.service\n\nDescription=Service responsible for configuring the flatcar 
machine\n\n[Service]\nType=oneshot\nRemainAfterExit=true\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/setup.sh\n","enabled":true,"name":"setup.service"},{"contents":"[Unit]\nRequires=network-online.target\nRequires=setup.service\nAfter=network-online.target\nAfter=setup.service\n[Service]\nType=oneshot\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/download.sh\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"download-script.service"},{"contents":"[Unit]\nRequires=kubelet.service\nAfter=kubelet.service\n\n[Service]\nExecStart=/opt/bin/health-monitor.sh kubelet\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet-healthcheck.service"},{"contents":"[Unit]\nDescription=Setup Kubelet Node IP Env\nRequires=network-online.target\nAfter=network-online.target\n\n[Service]\nExecStart=/opt/bin/setup_net_env.sh\nRemainAfterExit=yes\nType=oneshot\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"nodeip.service"},{"contents":"[Unit]\nAfter=docker.service\nRequires=docker.service\n\nDescription=kubelet: The Kubernetes Node Agent\nDocumentation=https://kubernetes.io/docs/home/\n\n[Service]\nUser=root\nRestart=always\nStartLimitInterval=0\nRestartSec=10\nCPUAccounting=true\nMemoryAccounting=true\n\nEnvironment=\"PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/\"\nEnvironmentFile=-/etc/environment\n\nExecStartPre=/bin/bash /opt/load-kernel-modules.sh\n\nExecStartPre=/bin/bash /opt/bin/setup_net_env.sh\nExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --config=/etc/kubernetes/kubelet.conf \\\n --cert-dir=/etc/kubernetes/pki \\\n --cloud-provider=vsphere \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --container-runtime=docker \\\n --container-runtime-endpoint=unix:///var/run/dockershim.sock \\\n --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \\\n --feature-gates=DynamicKubeletConfig=true \\\n --network-plugin=cni \\\n --node-ip ${KUBELET_NODE_IP}\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Service]\nEnvironmentFile=/etc/kubernetes/nodeip.conf\n","name":"10-nodeip.conf"},{"contents":"[Service]\nEnvironment=\"KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf\"\n","name":"resolv.conf"},{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet.service"}]}} \ No newline at end of file diff --git a/pkg/userdata/flatcar/testdata/ignition_v1.23.5.json b/pkg/userdata/flatcar/testdata/ignition_v1.23.5.json index 80f86b6a2..806865ad8 100644 --- a/pkg/userdata/flatcar/testdata/ignition_v1.23.5.json +++ b/pkg/userdata/flatcar/testdata/ignition_v1.23.5.json @@ -1 +1 @@ -{"ignition":{"config":{},"security":{"tls":{}},"timeouts":{},"version":"2.2.0"},"networkd":{"units":[{"contents":"[Match]\n# Because of difficulty predicting specific NIC names on different cloud providers,\n# we only support static addressing on VSphere. 
There should be a single NIC attached\n# that we will match by name prefix 'en' which denotes ethernet devices.\nName=en*\n\n[Network]\nDHCP=no\nAddress=192.168.81.4/24\nGateway=192.168.81.1\nDNS=8.8.8.8\n","name":"static-nic.network"}]},"passwd":{"users":[{"name":"core","sshAuthorizedKeys":["ssh-rsa AAABBB","ssh-rsa CCCDDD"]}]},"storage":{"files":[{"filesystem":"root","path":"/etc/systemd/journald.conf.d/max_disk_use.conf","contents":{"source":"data:,%5BJournal%5D%0ASystemMaxUse%3D5G%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/kubernetes/kubelet.conf","contents":{"source":"data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1beta1%0Aauthentication%3A%0A%20%20anonymous%3A%0A%20%20%20%20enabled%3A%20false%0A%20%20webhook%3A%0A%20%20%20%20cacheTTL%3A%200s%0A%20%20%20%20enabled%3A%20true%0A%20%20x509%3A%0A%20%20%20%20clientCAFile%3A%20%2Fetc%2Fkubernetes%2Fpki%2Fca.crt%0Aauthorization%3A%0A%20%20mode%3A%20Webhook%0A%20%20webhook%3A%0A%20%20%20%20cacheAuthorizedTTL%3A%200s%0A%20%20%20%20cacheUnauthorizedTTL%3A%200s%0AcgroupDriver%3A%20systemd%0AclusterDNS%3A%0A-%2010.10.10.10%0AclusterDomain%3A%20cluster.local%0AcontainerLogMaxSize%3A%20100Mi%0AcpuManagerReconcilePeriod%3A%200s%0AevictionHard%3A%0A%20%20imagefs.available%3A%2015%25%0A%20%20memory.available%3A%20100Mi%0A%20%20nodefs.available%3A%2010%25%0A%20%20nodefs.inodesFree%3A%205%25%0AevictionPressureTransitionPeriod%3A%200s%0AfeatureGates%3A%0A%20%20RotateKubeletServerCertificate%3A%20true%0AfileCheckFrequency%3A%200s%0AhttpCheckFrequency%3A%200s%0AimageMinimumGCAge%3A%200s%0Akind%3A%20KubeletConfiguration%0AkubeReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0Alogging%3A%0A%20%20flushFrequency%3A%200%0A%20%20options%3A%0A%20%20%20%20json%3A%0A%20%20%20%20%20%20infoBufferSize%3A%20%220%22%0A%20%20verbosity%3A%200%0AmemorySwap%3A%20%7B%7D%0AnodeStatusReportFrequency%3A%200s%0AnodeStatusUpdateFrequency%3A%200s%0AprotectKernelDefaults%3A%20true%0ArotateCertificates%3A%20true%0AruntimeRequestTimeout%3A%200s%0AserverTLSBootstrap%3A%20true%0AshutdownGracePeriod%3A%200s%0AshutdownGracePeriodCriticalPods%3A%200s%0AstaticPodPath%3A%20%2Fetc%2Fkubernetes%2Fmanifests%0AstreamingConnectionIdleTimeout%3A%200s%0AsyncFrequency%3A%200s%0AsystemReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0AtlsCipherSuites%3A%0A-%20TLS_AES_128_GCM_SHA256%0A-%20TLS_AES_256_GCM_SHA384%0A-%20TLS_CHACHA20_POLY1305_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305%0A-%20TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305%0AvolumePluginDir%3A%20%2Fvar%2Flib%2Fkubelet%2Fvolumeplugins%0AvolumeStatsAggPeriod%3A%200s%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/load-kernel-modules.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aset%20-euo%20pipefail%0A%0Amodprobe%20ip_vs%0Amodprobe%20ip_vs_rr%0Amodprobe%20ip_vs_wrr%0Amodprobe%20ip_vs_sh%0A%0Aif%20modinfo%20nf_conntrack_ipv4%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20modprobe%20nf_conntrack_ipv4%0Aelse%0A%20%20modprobe%20nf_conntrack%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/sysctl.d/k8s.conf","contents":{"source":"data:,net.bridge.bridge-nf-call-ip6tables%20%3D%201%0Anet.bridge.bridge-nf-call-iptables%20%3D%201%0Akernel.panic_on_oops%20%3D%201%0Akernel.panic%20%3D%2010%0Anet.ipv4.ip
_forward%20%3D%201%0Avm.overcommit_memory%20%3D%201%0Afs.inotify.max_user_watches%20%3D%201048576%0Afs.inotify.max_user_instances%20%3D%208192%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic_on_oops","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic","contents":{"source":"data:,10%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/vm/overcommit_memory","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/bin/setup_net_env.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aechodate()%20%7B%0A%20%20echo%20%22%5B%24(date%20-Is)%5D%22%20%22%24%40%22%0A%7D%0A%0A%23%20get%20the%20default%20interface%20IP%20address%0ADEFAULT_IFC_IP%3D%24(ip%20-o%20%20route%20get%201%20%7C%20grep%20-oP%20%22src%20%5CK%5CS%2B%22)%0A%0A%23%20get%20the%20full%20hostname%0AFULL_HOSTNAME%3D%24(hostname%20-f)%0A%0Aif%20%5B%20-z%20%22%24%7BDEFAULT_IFC_IP%7D%22%20%5D%0Athen%0A%09echodate%20%22Failed%20to%20get%20IP%20address%20for%20the%20default%20route%20interface%22%0A%09exit%201%0Afi%0A%0A%23%20write%20the%20nodeip_env%20file%0A%23%20we%20need%20the%20line%20below%20because%20flatcar%20has%20the%20same%20string%20%22coreos%22%20in%20that%20file%0Aif%20grep%20-q%20coreos%20%2Fetc%2Fos-release%0Athen%0A%20%20echo%20-e%20%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5CnKUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%22%20%3E%20%2Fetc%2Fkubernetes%2Fnodeip.conf%0Aelif%20%5B%20!%20-d%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%20%5D%0Athen%0A%09echodate%20%22Can't%20find%20kubelet%20service%20extras%20directory%22%0A%09exit%201%0Aelse%0A%20%20echo%20-e%20%22%5BService%5D%5CnEnvironment%3D%5C%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5C%22%5CnEnvironment%3D%5C%22KUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%5C%22%22%20%3E%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%2Fnodeip.conf%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/kubernetes/bootstrap-kubelet.conf","contents":{"source":"data:,apiVersion%3A%20v1%0Aclusters%3A%0A-%20cluster%3A%0A%20%20%20%20certificate-authority-data%3A%20LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZ
VzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t%0A%20%20%20%20server%3A%20https%3A%2F%2Fserver%3A443%0A%20%20name%3A%20%22%22%0Acontexts%3A%20null%0Acurrent-context%3A%20%22%22%0Akind%3A%20Config%0Apreferences%3A%20%7B%7D%0Ausers%3A%0A-%20name%3A%20%22%22%0A%20%20user%3A%0A%20%20%20%20token%3A%20my-token%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/cloud-config","contents":{"source":"data:,%7Bvsphere-config%3Atrue%7D%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/pki/ca.crt","contents":{"source":"data:,-----BEGIN%20CERTIFICATE-----%0AMIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV%0ABAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG%0AA1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3%0ADQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0%0ANjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG%0AcmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv%0Ac3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B%0AAQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS%0AR8Od0%2B9Q62Hyny%2BGFwMTb4A%2FKU8mssoHvcceSAAbwfbxFK%2F%2Bs51TobqUnORZrOoT%0AZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk%0AJfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS%2FPlPbUj2q7YnoVLposUBMlgUb%2FCykX3%0AmOoLb4yJJQyA%2FiST6ZxiIEj36D4yWZ5lg7YJl%2BUiiBQHGCnPdGyipqV06ex0heYW%0AcaiW8LWZSUQ93jQ%2BWVCH8hT7DQO1dmsvUmXlq%2FJeAlwQ%2FQIDAQABo4HgMIHdMB0G%0AA1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt%0AhS4P4U7vTfjByC569R7E6KF%2FpH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB%0AMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES%0AMBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv%0AbYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h%0AU9f9sNH0%2F6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k%2FXkDjQm%2B3lzjT0iGR4IxE%2FAo%0AeU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb%2FLnDUjs5Yj9brP0NWzXfYU4%0AUK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm%2Bje6voD%0A58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj%2Bqvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n%0AsH9BBH38%2FSzUmAN4QHSPy1gjqm00OAE8NaYDkh%2FbzE4d7mLGGMWp%2FWE3KPSu82HF%0AkPe6XoSbiLm%2Fkxk32T0%3D%0A-----END%20CERTIFICATE-----%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/hostname","contents":{"source":"data:,node1","verification":{}},"mode":384},{"filesystem":"root","group":{"id":0},"path":"/etc/ssh/sshd_config","user":{"id":0},"contents":{"source":"data:,%23%20Use%20most%20defaults%20for%20sshd%20configuration.%0ASubsystem%20sftp%20internal-sftp%0AClientAliveInterval%20180%0AUseDNS%20no%0AUsePAM%20yes%0APrintLastLog%20no%20%23%20handled%20by%20PAM%0APrintMotd%20no%20%23%20handled%20by%20PAM%0APasswordAuthentication%20no%0AChallengeResponseAuthentication%20no%0A","verification":{}},"mode":384},{"filesystem":"roo
t","path":"/opt/bin/setup.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0A%23%20We%20stop%20these%20services%20here%20explicitly%20since%20masking%20only%20removes%20the%20symlinks%20for%20these%20services%20so%20that%20they%20can't%20be%20started.%0A%23%20But%20that%20wouldn't%20%22stop%22%20the%20already%20running%20services%20on%20the%20first%20boot.%0Asystemctl%20stop%20update-engine.service%0Asystemctl%20stop%20locksmithd.service%0Asystemctl%20disable%20setup.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/opt/bin/download.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0Aopt_bin%3D%2Fopt%2Fbin%0Ausr_local_bin%3D%2Fusr%2Flocal%2Fbin%0Acni_bin_dir%3D%2Fopt%2Fcni%2Fbin%0Amkdir%20-p%20%2Fetc%2Fcni%2Fnet.d%20%2Fetc%2Fkubernetes%2Fdynamic-config-dir%20%2Fetc%2Fkubernetes%2Fmanifests%20%22%24opt_bin%22%20%22%24cni_bin_dir%22%0Aarch%3D%24%7BHOST_ARCH-%7D%0Aif%20%5B%20-z%20%22%24arch%22%20%5D%0Athen%0Acase%20%24(uname%20-m)%20in%0Ax86_64)%0A%20%20%20%20arch%3D%22amd64%22%0A%20%20%20%20%3B%3B%0Aaarch64)%0A%20%20%20%20arch%3D%22arm64%22%0A%20%20%20%20%3B%3B%0A*)%0A%20%20%20%20echo%20%22unsupported%20CPU%20architecture%2C%20exiting%22%0A%20%20%20%20exit%201%0A%20%20%20%20%3B%3B%0Aesac%0Afi%0ACNI_VERSION%3D%22%24%7BCNI_VERSION%3A-v0.8.7%7D%22%0Acni_base_url%3D%22https%3A%2F%2Fgithub.com%2Fcontainernetworking%2Fplugins%2Freleases%2Fdownload%2F%24CNI_VERSION%22%0Acni_filename%3D%22cni-plugins-linux-%24arch-%24CNI_VERSION.tgz%22%0Acurl%20-Lfo%20%22%24cni_bin_dir%2F%24cni_filename%22%20%22%24cni_base_url%2F%24cni_filename%22%0Acni_sum%3D%24(curl%20-Lf%20%22%24cni_base_url%2F%24cni_filename.sha256%22)%0Acd%20%22%24cni_bin_dir%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cni_sum%22%0Atar%20xvf%20%22%24cni_filename%22%0Arm%20-f%20%22%24cni_filename%22%0Acd%20-%0ACRI_TOOLS_RELEASE%3D%22%24%7BCRI_TOOLS_RELEASE%3A-v1.22.0%7D%22%0Acri_tools_base_url%3D%22https%3A%2F%2Fgithub.com%2Fkubernetes-sigs%2Fcri-tools%2Freleases%2Fdownload%2F%24%7BCRI_TOOLS_RELEASE%7D%22%0Acri_tools_filename%3D%22crictl-%24%7BCRI_TOOLS_RELEASE%7D-linux-%24%7Barch%7D.tar.gz%22%0Acurl%20-Lfo%20%22%24opt_bin%2F%24cri_tools_filename%22%20%22%24cri_tools_base_url%2F%24cri_tools_filename%22%0Acri_tools_sum%3D%24(curl%20-Lf%20%22%24cri_tools_base_url%2F%24cri_tools_filename.sha256%22%20%7C%20sed%20's%2F%5C*%5C%2F%2F%2F')%0Acd%20%22%24opt_bin%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cri_tools_sum%22%0Atar%20xvf%20%22%24cri_tools_filename%22%0Arm%20-f%20%22%24cri_tools_filename%22%0Aln%20-sf%20%22%24opt_bin%2Fcrictl%22%20%22%24usr_local_bin%22%2Fcrictl%20%7C%7C%20echo%20%22symbolic%20link%20is%20skipped%22%0Acd%20-%0AKUBE_VERSION%3D%22%24%7BKUBE_VERSION%3A-v1.23.5%7D%22%0Akube_dir%3D%22%24opt_bin%2Fkubernetes-%24KUBE_VERSION%22%0Akube_base_url%3D%22https%3A%2F%2Fstorage.googleapis.com%2Fkubernetes-release%2Frelease%2F%24KUBE_VERSION%2Fbin%2Flinux%2F%24arch%22%0Akube_sum_file%3D%22%24kube_dir%2Fsha256%22%0Amkdir%20-p%20%22%24kube_dir%22%0A%3A%20%3E%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20curl%20-Lfo%20%22%24kube_dir%2F%24bin%22%20%22%24kube_base_url%2F%24bin%22%0A%20%20%20%20chmod%20%2Bx%20%22%24kube_dir%2F%24bin%22%0A%20%20%20%20sum%3D%24(curl%20-Lf%20%22%24kube_base_url%2F%24bin.sha256%22)%0A%20%20%20%20echo%20%22%24sum%20%20%24kube_dir%2F%24bin%22%20%3E%3E%22%24kube_sum_file%22%0Adone%0Asha256sum%20-c%20%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20ln%20-sf%20%22%24k
ube_dir%2F%24bin%22%20%22%24opt_bin%22%2F%24bin%0Adone%0A%0Aif%20%5B%5B%20!%20-x%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20%5D%5D%3B%20then%0A%20%20%20%20curl%20-Lfo%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20https%3A%2F%2Fraw.githubusercontent.com%2Fkubermatic%2Fmachine-controller%2F7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde%2Fpkg%2Fuserdata%2Fscripts%2Fhealth-monitor.sh%0A%20%20%20%20chmod%20%2Bx%20%2Fopt%2Fbin%2Fhealth-monitor.sh%0Afi%0A%0Amkdir%20-p%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%0Acat%20%3C%3CEOF%20%7C%20tee%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%2Fenvironment.conf%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%2Fenvironment.conf%0A%5BService%5D%0ARestart%3Dalways%0AEnvironmentFile%3D-%2Fetc%2Fenvironment%0AEOF%0A%0Asystemctl%20daemon-reload%0Asystemctl%20enable%20--now%20docker%0A%0Asystemctl%20disable%20download-script.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/docker/daemon.json","contents":{"source":"data:,%7B%22exec-opts%22%3A%5B%22native.cgroupdriver%3Dsystemd%22%5D%2C%22storage-driver%22%3A%22overlay2%22%2C%22log-driver%22%3A%22json-file%22%2C%22log-opts%22%3A%7B%22max-file%22%3A%225%22%2C%22max-size%22%3A%22100m%22%7D%7D%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/crictl.yaml","contents":{"source":"data:,runtime-endpoint%3A%20unix%3A%2F%2F%2Frun%2Fcontainerd%2Fcontainerd.sock%0A","verification":{}},"mode":420}]},"systemd":{"units":[{"mask":true,"name":"update-engine.service"},{"mask":true,"name":"locksmithd.service"},{"contents":"[Install]\nWantedBy=multi-user.target\n\n[Unit]\nRequires=network-online.target\nRequires=nodeip.service\nAfter=network-online.target\nAfter=nodeip.service\n\nDescription=Service responsible for configuring the flatcar machine\n\n[Service]\nType=oneshot\nRemainAfterExit=true\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/setup.sh\n","enabled":true,"name":"setup.service"},{"contents":"[Unit]\nRequires=network-online.target\nRequires=setup.service\nAfter=network-online.target\nAfter=setup.service\n[Service]\nType=oneshot\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/download.sh\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"download-script.service"},{"contents":"[Unit]\nRequires=kubelet.service\nAfter=kubelet.service\n\n[Service]\nExecStart=/opt/bin/health-monitor.sh kubelet\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet-healthcheck.service"},{"contents":"[Unit]\nDescription=Setup Kubelet Node IP Env\nRequires=network-online.target\nAfter=network-online.target\n\n[Service]\nExecStart=/opt/bin/setup_net_env.sh\nRemainAfterExit=yes\nType=oneshot\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"nodeip.service"},{"contents":"[Unit]\nAfter=docker.service\nRequires=docker.service\n\nDescription=kubelet: The Kubernetes Node Agent\nDocumentation=https://kubernetes.io/docs/home/\n\n[Service]\nUser=root\nRestart=always\nStartLimitInterval=0\nRestartSec=10\nCPUAccounting=true\nMemoryAccounting=true\n\nEnvironment=\"PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/\"\nEnvironmentFile=-/etc/environment\n\nExecStartPre=/bin/bash /opt/load-kernel-modules.sh\n\nExecStartPre=/bin/bash /opt/bin/setup_net_env.sh\nExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \\\n 
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --config=/etc/kubernetes/kubelet.conf \\\n --cert-dir=/etc/kubernetes/pki \\\n --cloud-provider=vsphere \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --container-runtime=docker \\\n --container-runtime-endpoint=unix:///var/run/dockershim.sock \\\n --network-plugin=cni \\\n --node-ip ${KUBELET_NODE_IP}\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Service]\nEnvironmentFile=/etc/kubernetes/nodeip.conf\n","name":"10-nodeip.conf"},{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet.service"}]}} \ No newline at end of file +{"ignition":{"config":{},"security":{"tls":{}},"timeouts":{},"version":"2.2.0"},"networkd":{"units":[{"contents":"[Match]\n# Because of difficulty predicting specific NIC names on different cloud providers,\n# we only support static addressing on VSphere. There should be a single NIC attached\n# that we will match by name prefix 'en' which denotes ethernet devices.\nName=en*\n\n[Network]\nDHCP=no\nAddress=192.168.81.4/24\nGateway=192.168.81.1\nDNS=8.8.8.8\n","name":"static-nic.network"}]},"passwd":{"users":[{"name":"core","sshAuthorizedKeys":["ssh-rsa AAABBB","ssh-rsa CCCDDD"]}]},"storage":{"files":[{"filesystem":"root","path":"/etc/systemd/journald.conf.d/max_disk_use.conf","contents":{"source":"data:,%5BJournal%5D%0ASystemMaxUse%3D5G%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/kubernetes/kubelet.conf","contents":{"source":"data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1beta1%0Aauthentication%3A%0A%20%20anonymous%3A%0A%20%20%20%20enabled%3A%20false%0A%20%20webhook%3A%0A%20%20%20%20cacheTTL%3A%200s%0A%20%20%20%20enabled%3A%20true%0A%20%20x509%3A%0A%20%20%20%20clientCAFile%3A%20%2Fetc%2Fkubernetes%2Fpki%2Fca.crt%0Aauthorization%3A%0A%20%20mode%3A%20Webhook%0A%20%20webhook%3A%0A%20%20%20%20cacheAuthorizedTTL%3A%200s%0A%20%20%20%20cacheUnauthorizedTTL%3A%200s%0AcgroupDriver%3A%20systemd%0AclusterDNS%3A%0A-%2010.10.10.10%0AclusterDomain%3A%20cluster.local%0AcontainerLogMaxSize%3A%20100Mi%0AcpuManagerReconcilePeriod%3A%200s%0AevictionHard%3A%0A%20%20imagefs.available%3A%2015%25%0A%20%20memory.available%3A%20100Mi%0A%20%20nodefs.available%3A%2010%25%0A%20%20nodefs.inodesFree%3A%205%25%0AevictionPressureTransitionPeriod%3A%200s%0AfeatureGates%3A%0A%20%20RotateKubeletServerCertificate%3A%20true%0AfileCheckFrequency%3A%200s%0AhttpCheckFrequency%3A%200s%0AimageMinimumGCAge%3A%200s%0Akind%3A%20KubeletConfiguration%0AkubeReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0Alogging%3A%0A%20%20flushFrequency%3A%200%0A%20%20options%3A%0A%20%20%20%20json%3A%0A%20%20%20%20%20%20infoBufferSize%3A%20%220%22%0A%20%20verbosity%3A%200%0AmemorySwap%3A%20%7B%7D%0AnodeStatusReportFrequency%3A%200s%0AnodeStatusUpdateFrequency%3A%200s%0AprotectKernelDefaults%3A%20true%0ArotateCertificates%3A%20true%0AruntimeRequestTimeout%3A%200s%0AserverTLSBootstrap%3A%20true%0AshutdownGracePeriod%3A%200s%0AshutdownGracePeriodCriticalPods%3A%200s%0AstaticPodPath%3A%20%2Fetc%2Fkubernetes%2Fmanifests%0AstreamingConnectionIdleTimeout%3A%200s%0AsyncFrequency%3A%200s%0AsystemReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0AtlsCipherSuites%3A%0A-%20TLS_AES_128_GCM_SHA256%0A-%20T
LS_AES_256_GCM_SHA384%0A-%20TLS_CHACHA20_POLY1305_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305%0A-%20TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305%0AvolumePluginDir%3A%20%2Fvar%2Flib%2Fkubelet%2Fvolumeplugins%0AvolumeStatsAggPeriod%3A%200s%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/load-kernel-modules.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aset%20-euo%20pipefail%0A%0Amodprobe%20ip_vs%0Amodprobe%20ip_vs_rr%0Amodprobe%20ip_vs_wrr%0Amodprobe%20ip_vs_sh%0A%0Aif%20modinfo%20nf_conntrack_ipv4%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20modprobe%20nf_conntrack_ipv4%0Aelse%0A%20%20modprobe%20nf_conntrack%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/sysctl.d/k8s.conf","contents":{"source":"data:,net.bridge.bridge-nf-call-ip6tables%20%3D%201%0Anet.bridge.bridge-nf-call-iptables%20%3D%201%0Akernel.panic_on_oops%20%3D%201%0Akernel.panic%20%3D%2010%0Anet.ipv4.ip_forward%20%3D%201%0Avm.overcommit_memory%20%3D%201%0Afs.inotify.max_user_watches%20%3D%201048576%0Afs.inotify.max_user_instances%20%3D%208192%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic_on_oops","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic","contents":{"source":"data:,10%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/vm/overcommit_memory","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/bin/setup_net_env.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aechodate()%20%7B%0A%20%20echo%20%22%5B%24(date%20-Is)%5D%22%20%22%24%40%22%0A%7D%0A%0A%23%20get%20the%20default%20interface%20IP%20address%0ADEFAULT_IFC_IP%3D%24(ip%20-o%20%20route%20get%201%20%7C%20grep%20-oP%20%22src%20%5CK%5CS%2B%22)%0A%0A%23%20get%20the%20full%20hostname%0AFULL_HOSTNAME%3D%24(hostname%20-f)%0A%0Aif%20%5B%20-z%20%22%24%7BDEFAULT_IFC_IP%7D%22%20%5D%0Athen%0A%09echodate%20%22Failed%20to%20get%20IP%20address%20for%20the%20default%20route%20interface%22%0A%09exit%201%0Afi%0A%0A%23%20write%20the%20nodeip_env%20file%0A%23%20we%20need%20the%20line%20below%20because%20flatcar%20has%20the%20same%20string%20%22coreos%22%20in%20that%20file%0Aif%20grep%20-q%20coreos%20%2Fetc%2Fos-release%0Athen%0A%20%20echo%20-e%20%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5CnKUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%22%20%3E%20%2Fetc%2Fkubernetes%2Fnodeip.conf%0Aelif%20%5B%20!%20-d%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%20%5D%0Athen%0A%09echodate%20%22Can't%20find%20kubelet%20service%20extras%20directory%22%0A%09exit%201%0Aelse%0A%20%20echo%20-e%20%22%5BService%5D%5CnEnvironment%3D%5C%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5C%22%5CnEnvironment%3D%5C%22KUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%5C%22%22%20%3E%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%2Fnodeip.conf%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/kubernetes/bootstrap-kubelet.conf","contents":{"source":"data:,apiVersion%3A%20v1%0Aclusters%3A%0A-%20cluster%3A%0A%20%20%20%20certificate-authority-data%3A%20LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXh
FakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t%0A%20%20%20%20server%3A%20https%3A%2F%2Fserver%3A443%0A%20%20name%3A%20%22%22%0Acontexts%3A%20null%0Acurrent-context%3A%20%22%22%0Akind%3A%20Config%0Apreferences%3A%20%7B%7D%0Ausers%3A%0A-%20name%3A%20%22%22%0A%20%20user%3A%0A%20%20%20%20token%3A%20my-token%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/cloud-config","contents":{"source":"data:,%7Bvsphere-config%3Atrue%7D%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/pki/ca.crt","contents":{"source":"data:,-----BEGIN%20CERTIFICATE-----%0AMIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV%0ABAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG%0AA1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3%0ADQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0%0ANjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG%0AcmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv%0Ac3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B%0AAQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS%0AR8Od0%2B9Q62Hyny%2BGFwMTb4A%2FKU8mssoHvcceSAAbwfbxFK%2F%2Bs51TobqUnORZrOoT%0AZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk%0AJfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS%2FPlPbUj2q7YnoVLposUBMlgUb%2FCykX3%0AmOoLb4yJJQyA%2FiST6ZxiIEj36D4yWZ5lg7YJl%2BUiiBQHGCnPdGyipqV06ex0heYW%0AcaiW8LWZSUQ93jQ%2BWVCH8hT7DQO1dmsvUmXlq%2FJeAlwQ%2FQIDAQABo4HgMIHdMB0G%0AA1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt%0AhS4P4U7vTfjByC569R7E6KF%2FpH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB%0AMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES%0AMBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5
icmFkQGRhbmdhLmNv%0AbYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h%0AU9f9sNH0%2F6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k%2FXkDjQm%2B3lzjT0iGR4IxE%2FAo%0AeU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb%2FLnDUjs5Yj9brP0NWzXfYU4%0AUK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm%2Bje6voD%0A58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj%2Bqvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n%0AsH9BBH38%2FSzUmAN4QHSPy1gjqm00OAE8NaYDkh%2FbzE4d7mLGGMWp%2FWE3KPSu82HF%0AkPe6XoSbiLm%2Fkxk32T0%3D%0A-----END%20CERTIFICATE-----%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/hostname","contents":{"source":"data:,node1","verification":{}},"mode":384},{"filesystem":"root","group":{"id":0},"path":"/etc/ssh/sshd_config","user":{"id":0},"contents":{"source":"data:,%23%20Use%20most%20defaults%20for%20sshd%20configuration.%0ASubsystem%20sftp%20internal-sftp%0AClientAliveInterval%20180%0AUseDNS%20no%0AUsePAM%20yes%0APrintLastLog%20no%20%23%20handled%20by%20PAM%0APrintMotd%20no%20%23%20handled%20by%20PAM%0APasswordAuthentication%20no%0AChallengeResponseAuthentication%20no%0A","verification":{}},"mode":384},{"filesystem":"root","path":"/opt/bin/setup.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0A%23%20We%20stop%20these%20services%20here%20explicitly%20since%20masking%20only%20removes%20the%20symlinks%20for%20these%20services%20so%20that%20they%20can't%20be%20started.%0A%23%20But%20that%20wouldn't%20%22stop%22%20the%20already%20running%20services%20on%20the%20first%20boot.%0Asystemctl%20stop%20update-engine.service%0Asystemctl%20stop%20locksmithd.service%0Asystemctl%20disable%20setup.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/opt/bin/download.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0Aopt_bin%3D%2Fopt%2Fbin%0Ausr_local_bin%3D%2Fusr%2Flocal%2Fbin%0Acni_bin_dir%3D%2Fopt%2Fcni%2Fbin%0Amkdir%20-p%20%2Fetc%2Fcni%2Fnet.d%20%2Fetc%2Fkubernetes%2Fdynamic-config-dir%20%2Fetc%2Fkubernetes%2Fmanifests%20%22%24opt_bin%22%20%22%24cni_bin_dir%22%0Aarch%3D%24%7BHOST_ARCH-%7D%0Aif%20%5B%20-z%20%22%24arch%22%20%5D%0Athen%0Acase%20%24(uname%20-m)%20in%0Ax86_64)%0A%20%20%20%20arch%3D%22amd64%22%0A%20%20%20%20%3B%3B%0Aaarch64)%0A%20%20%20%20arch%3D%22arm64%22%0A%20%20%20%20%3B%3B%0A*)%0A%20%20%20%20echo%20%22unsupported%20CPU%20architecture%2C%20exiting%22%0A%20%20%20%20exit%201%0A%20%20%20%20%3B%3B%0Aesac%0Afi%0ACNI_VERSION%3D%22%24%7BCNI_VERSION%3A-v0.8.7%7D%22%0Acni_base_url%3D%22https%3A%2F%2Fgithub.com%2Fcontainernetworking%2Fplugins%2Freleases%2Fdownload%2F%24CNI_VERSION%22%0Acni_filename%3D%22cni-plugins-linux-%24arch-%24CNI_VERSION.tgz%22%0Acurl%20-Lfo%20%22%24cni_bin_dir%2F%24cni_filename%22%20%22%24cni_base_url%2F%24cni_filename%22%0Acni_sum%3D%24(curl%20-Lf%20%22%24cni_base_url%2F%24cni_filename.sha256%22)%0Acd%20%22%24cni_bin_dir%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cni_sum%22%0Atar%20xvf%20%22%24cni_filename%22%0Arm%20-f%20%22%24cni_filename%22%0Acd%20-%0ACRI_TOOLS_RELEASE%3D%22%24%7BCRI_TOOLS_RELEASE%3A-v1.22.0%7D%22%0Acri_tools_base_url%3D%22https%3A%2F%2Fgithub.com%2Fkubernetes-sigs%2Fcri-tools%2Freleases%2Fdownload%2F%24%7BCRI_TOOLS_RELEASE%7D%22%0Acri_tools_filename%3D%22crictl-%24%7BCRI_TOOLS_RELEASE%7D-linux-%24%7Barch%7D.tar.gz%22%0Acurl%20-Lfo%20%22%24opt_bin%2F%24cri_tools_filename%22%20%22%24cri_tools_base_url%2F%24cri_tools_filename%22%0Acri_tools_sum%3D%24(curl%20-Lf%20%22%24cri_tools_base_url%2F%24cri_tools_filename.sha256%22%20%7C%20sed%20's%2F%5C*%5C%2F%2F%2F')%0Acd%20%22%24opt_bin%22%0Asha256sum%20-c%20%3C%3
C%3C%22%24cri_tools_sum%22%0Atar%20xvf%20%22%24cri_tools_filename%22%0Arm%20-f%20%22%24cri_tools_filename%22%0Aln%20-sf%20%22%24opt_bin%2Fcrictl%22%20%22%24usr_local_bin%22%2Fcrictl%20%7C%7C%20echo%20%22symbolic%20link%20is%20skipped%22%0Acd%20-%0AKUBE_VERSION%3D%22%24%7BKUBE_VERSION%3A-v1.23.5%7D%22%0Akube_dir%3D%22%24opt_bin%2Fkubernetes-%24KUBE_VERSION%22%0Akube_base_url%3D%22https%3A%2F%2Fstorage.googleapis.com%2Fkubernetes-release%2Frelease%2F%24KUBE_VERSION%2Fbin%2Flinux%2F%24arch%22%0Akube_sum_file%3D%22%24kube_dir%2Fsha256%22%0Amkdir%20-p%20%22%24kube_dir%22%0A%3A%20%3E%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20curl%20-Lfo%20%22%24kube_dir%2F%24bin%22%20%22%24kube_base_url%2F%24bin%22%0A%20%20%20%20chmod%20%2Bx%20%22%24kube_dir%2F%24bin%22%0A%20%20%20%20sum%3D%24(curl%20-Lf%20%22%24kube_base_url%2F%24bin.sha256%22)%0A%20%20%20%20echo%20%22%24sum%20%20%24kube_dir%2F%24bin%22%20%3E%3E%22%24kube_sum_file%22%0Adone%0Asha256sum%20-c%20%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20ln%20-sf%20%22%24kube_dir%2F%24bin%22%20%22%24opt_bin%22%2F%24bin%0Adone%0A%0Aif%20%5B%5B%20!%20-x%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20%5D%5D%3B%20then%0A%20%20%20%20curl%20-Lfo%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20https%3A%2F%2Fraw.githubusercontent.com%2Fkubermatic%2Fmachine-controller%2F7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde%2Fpkg%2Fuserdata%2Fscripts%2Fhealth-monitor.sh%0A%20%20%20%20chmod%20%2Bx%20%2Fopt%2Fbin%2Fhealth-monitor.sh%0Afi%0A%0Amkdir%20-p%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%0Acat%20%3C%3CEOF%20%7C%20tee%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%2Fenvironment.conf%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%2Fenvironment.conf%0A%5BService%5D%0ARestart%3Dalways%0AEnvironmentFile%3D-%2Fetc%2Fenvironment%0AEOF%0A%0Asystemctl%20daemon-reload%0Asystemctl%20enable%20--now%20docker%0A%0Asystemctl%20disable%20download-script.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/docker/daemon.json","contents":{"source":"data:,%7B%22exec-opts%22%3A%5B%22native.cgroupdriver%3Dsystemd%22%5D%2C%22storage-driver%22%3A%22overlay2%22%2C%22log-driver%22%3A%22json-file%22%2C%22log-opts%22%3A%7B%22max-file%22%3A%225%22%2C%22max-size%22%3A%22100m%22%7D%7D%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/crictl.yaml","contents":{"source":"data:,runtime-endpoint%3A%20unix%3A%2F%2F%2Frun%2Fcontainerd%2Fcontainerd.sock%0A","verification":{}},"mode":420}]},"systemd":{"units":[{"mask":true,"name":"update-engine.service"},{"mask":true,"name":"locksmithd.service"},{"contents":"[Install]\nWantedBy=multi-user.target\n\n[Unit]\nRequires=network-online.target\nRequires=nodeip.service\nAfter=network-online.target\nAfter=nodeip.service\n\nDescription=Service responsible for configuring the flatcar machine\n\n[Service]\nType=oneshot\nRemainAfterExit=true\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/setup.sh\n","enabled":true,"name":"setup.service"},{"contents":"[Unit]\nRequires=network-online.target\nRequires=setup.service\nAfter=network-online.target\nAfter=setup.service\n[Service]\nType=oneshot\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/download.sh\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"download-script.service"},{"contents":"[Unit]\nRequires=kubelet.service\nAfter=kubelet.service\n\n[Service]\nExecStart=/opt/bin/health-monitor.sh 
kubelet\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet-healthcheck.service"},{"contents":"[Unit]\nDescription=Setup Kubelet Node IP Env\nRequires=network-online.target\nAfter=network-online.target\n\n[Service]\nExecStart=/opt/bin/setup_net_env.sh\nRemainAfterExit=yes\nType=oneshot\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"nodeip.service"},{"contents":"[Unit]\nAfter=docker.service\nRequires=docker.service\n\nDescription=kubelet: The Kubernetes Node Agent\nDocumentation=https://kubernetes.io/docs/home/\n\n[Service]\nUser=root\nRestart=always\nStartLimitInterval=0\nRestartSec=10\nCPUAccounting=true\nMemoryAccounting=true\n\nEnvironment=\"PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/\"\nEnvironmentFile=-/etc/environment\n\nExecStartPre=/bin/bash /opt/load-kernel-modules.sh\n\nExecStartPre=/bin/bash /opt/bin/setup_net_env.sh\nExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --config=/etc/kubernetes/kubelet.conf \\\n --cert-dir=/etc/kubernetes/pki \\\n --cloud-provider=vsphere \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --container-runtime=docker \\\n --container-runtime-endpoint=unix:///var/run/dockershim.sock \\\n --network-plugin=cni \\\n --node-ip ${KUBELET_NODE_IP}\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Service]\nEnvironmentFile=/etc/kubernetes/nodeip.conf\n","name":"10-nodeip.conf"},{"contents":"[Service]\nEnvironment=\"KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf\"\n","name":"resolv.conf"},{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet.service"}]}} \ No newline at end of file diff --git a/pkg/userdata/flatcar/testdata/ignition_v1.24.0.json b/pkg/userdata/flatcar/testdata/ignition_v1.24.0.json index c678da203..62a14b9df 100644 --- a/pkg/userdata/flatcar/testdata/ignition_v1.24.0.json +++ b/pkg/userdata/flatcar/testdata/ignition_v1.24.0.json @@ -1 +1 @@ -{"ignition":{"config":{},"security":{"tls":{}},"timeouts":{},"version":"2.2.0"},"networkd":{"units":[{"contents":"[Match]\n# Because of difficulty predicting specific NIC names on different cloud providers,\n# we only support static addressing on VSphere. 
There should be a single NIC attached\n# that we will match by name prefix 'en' which denotes ethernet devices.\nName=en*\n\n[Network]\nDHCP=no\nAddress=192.168.81.4/24\nGateway=192.168.81.1\nDNS=8.8.8.8\n","name":"static-nic.network"}]},"passwd":{"users":[{"name":"core","sshAuthorizedKeys":["ssh-rsa AAABBB","ssh-rsa CCCDDD"]}]},"storage":{"files":[{"filesystem":"root","path":"/etc/systemd/journald.conf.d/max_disk_use.conf","contents":{"source":"data:,%5BJournal%5D%0ASystemMaxUse%3D5G%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/kubernetes/kubelet.conf","contents":{"source":"data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1beta1%0Aauthentication%3A%0A%20%20anonymous%3A%0A%20%20%20%20enabled%3A%20false%0A%20%20webhook%3A%0A%20%20%20%20cacheTTL%3A%200s%0A%20%20%20%20enabled%3A%20true%0A%20%20x509%3A%0A%20%20%20%20clientCAFile%3A%20%2Fetc%2Fkubernetes%2Fpki%2Fca.crt%0Aauthorization%3A%0A%20%20mode%3A%20Webhook%0A%20%20webhook%3A%0A%20%20%20%20cacheAuthorizedTTL%3A%200s%0A%20%20%20%20cacheUnauthorizedTTL%3A%200s%0AcgroupDriver%3A%20systemd%0AclusterDNS%3A%0A-%2010.10.10.10%0AclusterDomain%3A%20cluster.local%0AcontainerLogMaxSize%3A%20100Mi%0AcpuManagerReconcilePeriod%3A%200s%0AevictionHard%3A%0A%20%20imagefs.available%3A%2015%25%0A%20%20memory.available%3A%20100Mi%0A%20%20nodefs.available%3A%2010%25%0A%20%20nodefs.inodesFree%3A%205%25%0AevictionPressureTransitionPeriod%3A%200s%0AfeatureGates%3A%0A%20%20RotateKubeletServerCertificate%3A%20true%0AfileCheckFrequency%3A%200s%0AhttpCheckFrequency%3A%200s%0AimageMinimumGCAge%3A%200s%0Akind%3A%20KubeletConfiguration%0AkubeReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0Alogging%3A%0A%20%20flushFrequency%3A%200%0A%20%20options%3A%0A%20%20%20%20json%3A%0A%20%20%20%20%20%20infoBufferSize%3A%20%220%22%0A%20%20verbosity%3A%200%0AmemorySwap%3A%20%7B%7D%0AnodeStatusReportFrequency%3A%200s%0AnodeStatusUpdateFrequency%3A%200s%0AprotectKernelDefaults%3A%20true%0ArotateCertificates%3A%20true%0AruntimeRequestTimeout%3A%200s%0AserverTLSBootstrap%3A%20true%0AshutdownGracePeriod%3A%200s%0AshutdownGracePeriodCriticalPods%3A%200s%0AstaticPodPath%3A%20%2Fetc%2Fkubernetes%2Fmanifests%0AstreamingConnectionIdleTimeout%3A%200s%0AsyncFrequency%3A%200s%0AsystemReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0AtlsCipherSuites%3A%0A-%20TLS_AES_128_GCM_SHA256%0A-%20TLS_AES_256_GCM_SHA384%0A-%20TLS_CHACHA20_POLY1305_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305%0A-%20TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305%0AvolumePluginDir%3A%20%2Fvar%2Flib%2Fkubelet%2Fvolumeplugins%0AvolumeStatsAggPeriod%3A%200s%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/load-kernel-modules.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aset%20-euo%20pipefail%0A%0Amodprobe%20ip_vs%0Amodprobe%20ip_vs_rr%0Amodprobe%20ip_vs_wrr%0Amodprobe%20ip_vs_sh%0A%0Aif%20modinfo%20nf_conntrack_ipv4%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20modprobe%20nf_conntrack_ipv4%0Aelse%0A%20%20modprobe%20nf_conntrack%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/sysctl.d/k8s.conf","contents":{"source":"data:,net.bridge.bridge-nf-call-ip6tables%20%3D%201%0Anet.bridge.bridge-nf-call-iptables%20%3D%201%0Akernel.panic_on_oops%20%3D%201%0Akernel.panic%20%3D%2010%0Anet.ipv4.ip
_forward%20%3D%201%0Avm.overcommit_memory%20%3D%201%0Afs.inotify.max_user_watches%20%3D%201048576%0Afs.inotify.max_user_instances%20%3D%208192%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic_on_oops","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic","contents":{"source":"data:,10%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/vm/overcommit_memory","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/bin/setup_net_env.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aechodate()%20%7B%0A%20%20echo%20%22%5B%24(date%20-Is)%5D%22%20%22%24%40%22%0A%7D%0A%0A%23%20get%20the%20default%20interface%20IP%20address%0ADEFAULT_IFC_IP%3D%24(ip%20-o%20%20route%20get%201%20%7C%20grep%20-oP%20%22src%20%5CK%5CS%2B%22)%0A%0A%23%20get%20the%20full%20hostname%0AFULL_HOSTNAME%3D%24(hostname%20-f)%0A%0Aif%20%5B%20-z%20%22%24%7BDEFAULT_IFC_IP%7D%22%20%5D%0Athen%0A%09echodate%20%22Failed%20to%20get%20IP%20address%20for%20the%20default%20route%20interface%22%0A%09exit%201%0Afi%0A%0A%23%20write%20the%20nodeip_env%20file%0A%23%20we%20need%20the%20line%20below%20because%20flatcar%20has%20the%20same%20string%20%22coreos%22%20in%20that%20file%0Aif%20grep%20-q%20coreos%20%2Fetc%2Fos-release%0Athen%0A%20%20echo%20-e%20%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5CnKUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%22%20%3E%20%2Fetc%2Fkubernetes%2Fnodeip.conf%0Aelif%20%5B%20!%20-d%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%20%5D%0Athen%0A%09echodate%20%22Can't%20find%20kubelet%20service%20extras%20directory%22%0A%09exit%201%0Aelse%0A%20%20echo%20-e%20%22%5BService%5D%5CnEnvironment%3D%5C%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5C%22%5CnEnvironment%3D%5C%22KUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%5C%22%22%20%3E%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%2Fnodeip.conf%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/kubernetes/bootstrap-kubelet.conf","contents":{"source":"data:,apiVersion%3A%20v1%0Aclusters%3A%0A-%20cluster%3A%0A%20%20%20%20certificate-authority-data%3A%20LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZ
VzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t%0A%20%20%20%20server%3A%20https%3A%2F%2Fserver%3A443%0A%20%20name%3A%20%22%22%0Acontexts%3A%20null%0Acurrent-context%3A%20%22%22%0Akind%3A%20Config%0Apreferences%3A%20%7B%7D%0Ausers%3A%0A-%20name%3A%20%22%22%0A%20%20user%3A%0A%20%20%20%20token%3A%20my-token%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/cloud-config","contents":{"source":"data:,%7Bvsphere-config%3Atrue%7D%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/pki/ca.crt","contents":{"source":"data:,-----BEGIN%20CERTIFICATE-----%0AMIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV%0ABAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG%0AA1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3%0ADQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0%0ANjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG%0AcmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv%0Ac3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B%0AAQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS%0AR8Od0%2B9Q62Hyny%2BGFwMTb4A%2FKU8mssoHvcceSAAbwfbxFK%2F%2Bs51TobqUnORZrOoT%0AZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk%0AJfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS%2FPlPbUj2q7YnoVLposUBMlgUb%2FCykX3%0AmOoLb4yJJQyA%2FiST6ZxiIEj36D4yWZ5lg7YJl%2BUiiBQHGCnPdGyipqV06ex0heYW%0AcaiW8LWZSUQ93jQ%2BWVCH8hT7DQO1dmsvUmXlq%2FJeAlwQ%2FQIDAQABo4HgMIHdMB0G%0AA1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt%0AhS4P4U7vTfjByC569R7E6KF%2FpH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB%0AMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES%0AMBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv%0AbYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h%0AU9f9sNH0%2F6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k%2FXkDjQm%2B3lzjT0iGR4IxE%2FAo%0AeU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb%2FLnDUjs5Yj9brP0NWzXfYU4%0AUK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm%2Bje6voD%0A58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj%2Bqvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n%0AsH9BBH38%2FSzUmAN4QHSPy1gjqm00OAE8NaYDkh%2FbzE4d7mLGGMWp%2FWE3KPSu82HF%0AkPe6XoSbiLm%2Fkxk32T0%3D%0A-----END%20CERTIFICATE-----%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/hostname","contents":{"source":"data:,node1","verification":{}},"mode":384},{"filesystem":"root","group":{"id":0},"path":"/etc/ssh/sshd_config","user":{"id":0},"contents":{"source":"data:,%23%20Use%20most%20defaults%20for%20sshd%20configuration.%0ASubsystem%20sftp%20internal-sftp%0AClientAliveInterval%20180%0AUseDNS%20no%0AUsePAM%20yes%0APrintLastLog%20no%20%23%20handled%20by%20PAM%0APrintMotd%20no%20%23%20handled%20by%20PAM%0APasswordAuthentication%20no%0AChallengeResponseAuthentication%20no%0A","verification":{}},"mode":384},{"filesystem":"roo
t","path":"/opt/bin/setup.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0A%23%20We%20stop%20these%20services%20here%20explicitly%20since%20masking%20only%20removes%20the%20symlinks%20for%20these%20services%20so%20that%20they%20can't%20be%20started.%0A%23%20But%20that%20wouldn't%20%22stop%22%20the%20already%20running%20services%20on%20the%20first%20boot.%0Asystemctl%20stop%20update-engine.service%0Asystemctl%20stop%20locksmithd.service%0Asystemctl%20disable%20setup.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/opt/bin/download.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0Aopt_bin%3D%2Fopt%2Fbin%0Ausr_local_bin%3D%2Fusr%2Flocal%2Fbin%0Acni_bin_dir%3D%2Fopt%2Fcni%2Fbin%0Amkdir%20-p%20%2Fetc%2Fcni%2Fnet.d%20%2Fetc%2Fkubernetes%2Fdynamic-config-dir%20%2Fetc%2Fkubernetes%2Fmanifests%20%22%24opt_bin%22%20%22%24cni_bin_dir%22%0Aarch%3D%24%7BHOST_ARCH-%7D%0Aif%20%5B%20-z%20%22%24arch%22%20%5D%0Athen%0Acase%20%24(uname%20-m)%20in%0Ax86_64)%0A%20%20%20%20arch%3D%22amd64%22%0A%20%20%20%20%3B%3B%0Aaarch64)%0A%20%20%20%20arch%3D%22arm64%22%0A%20%20%20%20%3B%3B%0A*)%0A%20%20%20%20echo%20%22unsupported%20CPU%20architecture%2C%20exiting%22%0A%20%20%20%20exit%201%0A%20%20%20%20%3B%3B%0Aesac%0Afi%0ACNI_VERSION%3D%22%24%7BCNI_VERSION%3A-v0.8.7%7D%22%0Acni_base_url%3D%22https%3A%2F%2Fgithub.com%2Fcontainernetworking%2Fplugins%2Freleases%2Fdownload%2F%24CNI_VERSION%22%0Acni_filename%3D%22cni-plugins-linux-%24arch-%24CNI_VERSION.tgz%22%0Acurl%20-Lfo%20%22%24cni_bin_dir%2F%24cni_filename%22%20%22%24cni_base_url%2F%24cni_filename%22%0Acni_sum%3D%24(curl%20-Lf%20%22%24cni_base_url%2F%24cni_filename.sha256%22)%0Acd%20%22%24cni_bin_dir%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cni_sum%22%0Atar%20xvf%20%22%24cni_filename%22%0Arm%20-f%20%22%24cni_filename%22%0Acd%20-%0ACRI_TOOLS_RELEASE%3D%22%24%7BCRI_TOOLS_RELEASE%3A-v1.22.0%7D%22%0Acri_tools_base_url%3D%22https%3A%2F%2Fgithub.com%2Fkubernetes-sigs%2Fcri-tools%2Freleases%2Fdownload%2F%24%7BCRI_TOOLS_RELEASE%7D%22%0Acri_tools_filename%3D%22crictl-%24%7BCRI_TOOLS_RELEASE%7D-linux-%24%7Barch%7D.tar.gz%22%0Acurl%20-Lfo%20%22%24opt_bin%2F%24cri_tools_filename%22%20%22%24cri_tools_base_url%2F%24cri_tools_filename%22%0Acri_tools_sum%3D%24(curl%20-Lf%20%22%24cri_tools_base_url%2F%24cri_tools_filename.sha256%22%20%7C%20sed%20's%2F%5C*%5C%2F%2F%2F')%0Acd%20%22%24opt_bin%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cri_tools_sum%22%0Atar%20xvf%20%22%24cri_tools_filename%22%0Arm%20-f%20%22%24cri_tools_filename%22%0Aln%20-sf%20%22%24opt_bin%2Fcrictl%22%20%22%24usr_local_bin%22%2Fcrictl%20%7C%7C%20echo%20%22symbolic%20link%20is%20skipped%22%0Acd%20-%0AKUBE_VERSION%3D%22%24%7BKUBE_VERSION%3A-v1.24.0%7D%22%0Akube_dir%3D%22%24opt_bin%2Fkubernetes-%24KUBE_VERSION%22%0Akube_base_url%3D%22https%3A%2F%2Fstorage.googleapis.com%2Fkubernetes-release%2Frelease%2F%24KUBE_VERSION%2Fbin%2Flinux%2F%24arch%22%0Akube_sum_file%3D%22%24kube_dir%2Fsha256%22%0Amkdir%20-p%20%22%24kube_dir%22%0A%3A%20%3E%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20curl%20-Lfo%20%22%24kube_dir%2F%24bin%22%20%22%24kube_base_url%2F%24bin%22%0A%20%20%20%20chmod%20%2Bx%20%22%24kube_dir%2F%24bin%22%0A%20%20%20%20sum%3D%24(curl%20-Lf%20%22%24kube_base_url%2F%24bin.sha256%22)%0A%20%20%20%20echo%20%22%24sum%20%20%24kube_dir%2F%24bin%22%20%3E%3E%22%24kube_sum_file%22%0Adone%0Asha256sum%20-c%20%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20ln%20-sf%20%22%24k
ube_dir%2F%24bin%22%20%22%24opt_bin%22%2F%24bin%0Adone%0A%0Aif%20%5B%5B%20!%20-x%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20%5D%5D%3B%20then%0A%20%20%20%20curl%20-Lfo%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20https%3A%2F%2Fraw.githubusercontent.com%2Fkubermatic%2Fmachine-controller%2F7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde%2Fpkg%2Fuserdata%2Fscripts%2Fhealth-monitor.sh%0A%20%20%20%20chmod%20%2Bx%20%2Fopt%2Fbin%2Fhealth-monitor.sh%0Afi%0A%0Amkdir%20-p%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%0Acat%20%3C%3CEOF%20%7C%20tee%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%2Fenvironment.conf%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%2Fenvironment.conf%0A%5BService%5D%0ARestart%3Dalways%0AEnvironmentFile%3D-%2Fetc%2Fenvironment%0AEOF%0A%0Amkdir%20-p%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%0A%0Acat%20%3C%3CEOF%20%7C%20tee%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%2F10-machine-controller.conf%0A%5BService%5D%0ARestart%3Dalways%0AEnvironment%3DCONTAINERD_CONFIG%3D%2Fetc%2Fcontainerd%2Fconfig.toml%0AExecStart%3D%0AExecStart%3D%2Fusr%2Fbin%2Fenv%20PATH%3D%5C%24%7BTORCX_BINDIR%7D%3A%5C%24%7BPATH%7D%20%5C%24%7BTORCX_BINDIR%7D%2Fcontainerd%20--config%20%5C%24%7BCONTAINERD_CONFIG%7D%0AEOF%0A%0Asystemctl%20daemon-reload%0Asystemctl%20enable%20--now%20containerd%0A%0Asystemctl%20disable%20download-script.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/containerd/config.toml","contents":{"source":"data:,version%20%3D%202%0A%0A%5Bmetrics%5D%0Aaddress%20%3D%20%22127.0.0.1%3A1338%22%0A%0A%5Bplugins%5D%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22%5D%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.containerd%5D%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.containerd.runtimes%5D%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.containerd.runtimes.runc%5D%0Aruntime_type%20%3D%20%22io.containerd.runc.v2%22%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.containerd.runtimes.runc.options%5D%0ASystemdCgroup%20%3D%20true%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.registry%5D%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.registry.mirrors%5D%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.registry.mirrors.%22docker.io%22%5D%0Aendpoint%20%3D%20%5B%22https%3A%2F%2Fregistry-1.docker.io%22%5D%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/crictl.yaml","contents":{"source":"data:,runtime-endpoint%3A%20unix%3A%2F%2F%2Frun%2Fcontainerd%2Fcontainerd.sock%0A","verification":{}},"mode":420}]},"systemd":{"units":[{"mask":true,"name":"update-engine.service"},{"mask":true,"name":"locksmithd.service"},{"contents":"[Install]\nWantedBy=multi-user.target\n\n[Unit]\nRequires=network-online.target\nRequires=nodeip.service\nAfter=network-online.target\nAfter=nodeip.service\n\nDescription=Service responsible for configuring the flatcar machine\n\n[Service]\nType=oneshot\nRemainAfterExit=true\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/setup.sh\n","enabled":true,"name":"setup.service"},{"contents":"[Unit]\nRequires=network-online.target\nRequires=setup.service\nAfter=network-online.target\nAfter=setup.service\n[Service]\nType=oneshot\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/download.sh\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"download-script.service"},{"contents":"[Unit]\nRequires=kubelet.service\nAfter=kubelet.service\n\n[Service]\nExecStart=/opt/bin/health-monitor.sh 
kubelet\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet-healthcheck.service"},{"contents":"[Unit]\nDescription=Setup Kubelet Node IP Env\nRequires=network-online.target\nAfter=network-online.target\n\n[Service]\nExecStart=/opt/bin/setup_net_env.sh\nRemainAfterExit=yes\nType=oneshot\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"nodeip.service"},{"contents":"[Unit]\nAfter=containerd.service\nRequires=containerd.service\n\nDescription=kubelet: The Kubernetes Node Agent\nDocumentation=https://kubernetes.io/docs/home/\n\n[Service]\nUser=root\nRestart=always\nStartLimitInterval=0\nRestartSec=10\nCPUAccounting=true\nMemoryAccounting=true\n\nEnvironment=\"PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/\"\nEnvironmentFile=-/etc/environment\n\nExecStartPre=/bin/bash /opt/load-kernel-modules.sh\n\nExecStartPre=/bin/bash /opt/bin/setup_net_env.sh\nExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --config=/etc/kubernetes/kubelet.conf \\\n --cert-dir=/etc/kubernetes/pki \\\n --cloud-provider=vsphere \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --container-runtime=remote \\\n --container-runtime-endpoint=unix:///run/containerd/containerd.sock \\\n --node-ip ${KUBELET_NODE_IP}\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Service]\nEnvironmentFile=/etc/kubernetes/nodeip.conf\n","name":"10-nodeip.conf"},{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet.service"}]}} \ No newline at end of file +{"ignition":{"config":{},"security":{"tls":{}},"timeouts":{},"version":"2.2.0"},"networkd":{"units":[{"contents":"[Match]\n# Because of difficulty predicting specific NIC names on different cloud providers,\n# we only support static addressing on VSphere. 
There should be a single NIC attached\n# that we will match by name prefix 'en' which denotes ethernet devices.\nName=en*\n\n[Network]\nDHCP=no\nAddress=192.168.81.4/24\nGateway=192.168.81.1\nDNS=8.8.8.8\n","name":"static-nic.network"}]},"passwd":{"users":[{"name":"core","sshAuthorizedKeys":["ssh-rsa AAABBB","ssh-rsa CCCDDD"]}]},"storage":{"files":[{"filesystem":"root","path":"/etc/systemd/journald.conf.d/max_disk_use.conf","contents":{"source":"data:,%5BJournal%5D%0ASystemMaxUse%3D5G%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/kubernetes/kubelet.conf","contents":{"source":"data:,apiVersion%3A%20kubelet.config.k8s.io%2Fv1beta1%0Aauthentication%3A%0A%20%20anonymous%3A%0A%20%20%20%20enabled%3A%20false%0A%20%20webhook%3A%0A%20%20%20%20cacheTTL%3A%200s%0A%20%20%20%20enabled%3A%20true%0A%20%20x509%3A%0A%20%20%20%20clientCAFile%3A%20%2Fetc%2Fkubernetes%2Fpki%2Fca.crt%0Aauthorization%3A%0A%20%20mode%3A%20Webhook%0A%20%20webhook%3A%0A%20%20%20%20cacheAuthorizedTTL%3A%200s%0A%20%20%20%20cacheUnauthorizedTTL%3A%200s%0AcgroupDriver%3A%20systemd%0AclusterDNS%3A%0A-%2010.10.10.10%0AclusterDomain%3A%20cluster.local%0AcontainerLogMaxSize%3A%20100Mi%0AcpuManagerReconcilePeriod%3A%200s%0AevictionHard%3A%0A%20%20imagefs.available%3A%2015%25%0A%20%20memory.available%3A%20100Mi%0A%20%20nodefs.available%3A%2010%25%0A%20%20nodefs.inodesFree%3A%205%25%0AevictionPressureTransitionPeriod%3A%200s%0AfeatureGates%3A%0A%20%20RotateKubeletServerCertificate%3A%20true%0AfileCheckFrequency%3A%200s%0AhttpCheckFrequency%3A%200s%0AimageMinimumGCAge%3A%200s%0Akind%3A%20KubeletConfiguration%0AkubeReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0Alogging%3A%0A%20%20flushFrequency%3A%200%0A%20%20options%3A%0A%20%20%20%20json%3A%0A%20%20%20%20%20%20infoBufferSize%3A%20%220%22%0A%20%20verbosity%3A%200%0AmemorySwap%3A%20%7B%7D%0AnodeStatusReportFrequency%3A%200s%0AnodeStatusUpdateFrequency%3A%200s%0AprotectKernelDefaults%3A%20true%0ArotateCertificates%3A%20true%0AruntimeRequestTimeout%3A%200s%0AserverTLSBootstrap%3A%20true%0AshutdownGracePeriod%3A%200s%0AshutdownGracePeriodCriticalPods%3A%200s%0AstaticPodPath%3A%20%2Fetc%2Fkubernetes%2Fmanifests%0AstreamingConnectionIdleTimeout%3A%200s%0AsyncFrequency%3A%200s%0AsystemReserved%3A%0A%20%20cpu%3A%20200m%0A%20%20ephemeral-storage%3A%201Gi%0A%20%20memory%3A%20200Mi%0AtlsCipherSuites%3A%0A-%20TLS_AES_128_GCM_SHA256%0A-%20TLS_AES_256_GCM_SHA384%0A-%20TLS_CHACHA20_POLY1305_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305%0A-%20TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256%0A-%20TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384%0A-%20TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305%0AvolumePluginDir%3A%20%2Fvar%2Flib%2Fkubelet%2Fvolumeplugins%0AvolumeStatsAggPeriod%3A%200s%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/load-kernel-modules.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aset%20-euo%20pipefail%0A%0Amodprobe%20ip_vs%0Amodprobe%20ip_vs_rr%0Amodprobe%20ip_vs_wrr%0Amodprobe%20ip_vs_sh%0A%0Aif%20modinfo%20nf_conntrack_ipv4%20%26%3E%20%2Fdev%2Fnull%3B%20then%0A%20%20modprobe%20nf_conntrack_ipv4%0Aelse%0A%20%20modprobe%20nf_conntrack%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/sysctl.d/k8s.conf","contents":{"source":"data:,net.bridge.bridge-nf-call-ip6tables%20%3D%201%0Anet.bridge.bridge-nf-call-iptables%20%3D%201%0Akernel.panic_on_oops%20%3D%201%0Akernel.panic%20%3D%2010%0Anet.ipv4.ip
_forward%20%3D%201%0Avm.overcommit_memory%20%3D%201%0Afs.inotify.max_user_watches%20%3D%201048576%0Afs.inotify.max_user_instances%20%3D%208192%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic_on_oops","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/kernel/panic","contents":{"source":"data:,10%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/proc/sys/vm/overcommit_memory","contents":{"source":"data:,1%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/opt/bin/setup_net_env.sh","contents":{"source":"data:,%23!%2Fusr%2Fbin%2Fenv%20bash%0Aechodate()%20%7B%0A%20%20echo%20%22%5B%24(date%20-Is)%5D%22%20%22%24%40%22%0A%7D%0A%0A%23%20get%20the%20default%20interface%20IP%20address%0ADEFAULT_IFC_IP%3D%24(ip%20-o%20%20route%20get%201%20%7C%20grep%20-oP%20%22src%20%5CK%5CS%2B%22)%0A%0A%23%20get%20the%20full%20hostname%0AFULL_HOSTNAME%3D%24(hostname%20-f)%0A%0Aif%20%5B%20-z%20%22%24%7BDEFAULT_IFC_IP%7D%22%20%5D%0Athen%0A%09echodate%20%22Failed%20to%20get%20IP%20address%20for%20the%20default%20route%20interface%22%0A%09exit%201%0Afi%0A%0A%23%20write%20the%20nodeip_env%20file%0A%23%20we%20need%20the%20line%20below%20because%20flatcar%20has%20the%20same%20string%20%22coreos%22%20in%20that%20file%0Aif%20grep%20-q%20coreos%20%2Fetc%2Fos-release%0Athen%0A%20%20echo%20-e%20%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5CnKUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%22%20%3E%20%2Fetc%2Fkubernetes%2Fnodeip.conf%0Aelif%20%5B%20!%20-d%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%20%5D%0Athen%0A%09echodate%20%22Can't%20find%20kubelet%20service%20extras%20directory%22%0A%09exit%201%0Aelse%0A%20%20echo%20-e%20%22%5BService%5D%5CnEnvironment%3D%5C%22KUBELET_NODE_IP%3D%24%7BDEFAULT_IFC_IP%7D%5C%22%5CnEnvironment%3D%5C%22KUBELET_HOSTNAME%3D%24%7BFULL_HOSTNAME%7D%5C%22%22%20%3E%20%2Fetc%2Fsystemd%2Fsystem%2Fkubelet.service.d%2Fnodeip.conf%0Afi%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/kubernetes/bootstrap-kubelet.conf","contents":{"source":"data:,apiVersion%3A%20v1%0Aclusters%3A%0A-%20cluster%3A%0A%20%20%20%20certificate-authority-data%3A%20LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZ
VzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t%0A%20%20%20%20server%3A%20https%3A%2F%2Fserver%3A443%0A%20%20name%3A%20%22%22%0Acontexts%3A%20null%0Acurrent-context%3A%20%22%22%0Akind%3A%20Config%0Apreferences%3A%20%7B%7D%0Ausers%3A%0A-%20name%3A%20%22%22%0A%20%20user%3A%0A%20%20%20%20token%3A%20my-token%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/cloud-config","contents":{"source":"data:,%7Bvsphere-config%3Atrue%7D%0A","verification":{}},"mode":256},{"filesystem":"root","path":"/etc/kubernetes/pki/ca.crt","contents":{"source":"data:,-----BEGIN%20CERTIFICATE-----%0AMIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV%0ABAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG%0AA1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3%0ADQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0%0ANjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG%0AcmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv%0Ac3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B%0AAQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS%0AR8Od0%2B9Q62Hyny%2BGFwMTb4A%2FKU8mssoHvcceSAAbwfbxFK%2F%2Bs51TobqUnORZrOoT%0AZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk%0AJfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS%2FPlPbUj2q7YnoVLposUBMlgUb%2FCykX3%0AmOoLb4yJJQyA%2FiST6ZxiIEj36D4yWZ5lg7YJl%2BUiiBQHGCnPdGyipqV06ex0heYW%0AcaiW8LWZSUQ93jQ%2BWVCH8hT7DQO1dmsvUmXlq%2FJeAlwQ%2FQIDAQABo4HgMIHdMB0G%0AA1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt%0AhS4P4U7vTfjByC569R7E6KF%2FpH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB%0AMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES%0AMBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv%0AbYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h%0AU9f9sNH0%2F6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k%2FXkDjQm%2B3lzjT0iGR4IxE%2FAo%0AeU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb%2FLnDUjs5Yj9brP0NWzXfYU4%0AUK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm%2Bje6voD%0A58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj%2Bqvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n%0AsH9BBH38%2FSzUmAN4QHSPy1gjqm00OAE8NaYDkh%2FbzE4d7mLGGMWp%2FWE3KPSu82HF%0AkPe6XoSbiLm%2Fkxk32T0%3D%0A-----END%20CERTIFICATE-----%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/hostname","contents":{"source":"data:,node1","verification":{}},"mode":384},{"filesystem":"root","group":{"id":0},"path":"/etc/ssh/sshd_config","user":{"id":0},"contents":{"source":"data:,%23%20Use%20most%20defaults%20for%20sshd%20configuration.%0ASubsystem%20sftp%20internal-sftp%0AClientAliveInterval%20180%0AUseDNS%20no%0AUsePAM%20yes%0APrintLastLog%20no%20%23%20handled%20by%20PAM%0APrintMotd%20no%20%23%20handled%20by%20PAM%0APasswordAuthentication%20no%0AChallengeResponseAuthentication%20no%0A","verification":{}},"mode":384},{"filesystem":"roo
t","path":"/opt/bin/setup.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0A%23%20We%20stop%20these%20services%20here%20explicitly%20since%20masking%20only%20removes%20the%20symlinks%20for%20these%20services%20so%20that%20they%20can't%20be%20started.%0A%23%20But%20that%20wouldn't%20%22stop%22%20the%20already%20running%20services%20on%20the%20first%20boot.%0Asystemctl%20stop%20update-engine.service%0Asystemctl%20stop%20locksmithd.service%0Asystemctl%20disable%20setup.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/opt/bin/download.sh","contents":{"source":"data:,%23!%2Fbin%2Fbash%0Aset%20-xeuo%20pipefail%0A%0Aopt_bin%3D%2Fopt%2Fbin%0Ausr_local_bin%3D%2Fusr%2Flocal%2Fbin%0Acni_bin_dir%3D%2Fopt%2Fcni%2Fbin%0Amkdir%20-p%20%2Fetc%2Fcni%2Fnet.d%20%2Fetc%2Fkubernetes%2Fdynamic-config-dir%20%2Fetc%2Fkubernetes%2Fmanifests%20%22%24opt_bin%22%20%22%24cni_bin_dir%22%0Aarch%3D%24%7BHOST_ARCH-%7D%0Aif%20%5B%20-z%20%22%24arch%22%20%5D%0Athen%0Acase%20%24(uname%20-m)%20in%0Ax86_64)%0A%20%20%20%20arch%3D%22amd64%22%0A%20%20%20%20%3B%3B%0Aaarch64)%0A%20%20%20%20arch%3D%22arm64%22%0A%20%20%20%20%3B%3B%0A*)%0A%20%20%20%20echo%20%22unsupported%20CPU%20architecture%2C%20exiting%22%0A%20%20%20%20exit%201%0A%20%20%20%20%3B%3B%0Aesac%0Afi%0ACNI_VERSION%3D%22%24%7BCNI_VERSION%3A-v0.8.7%7D%22%0Acni_base_url%3D%22https%3A%2F%2Fgithub.com%2Fcontainernetworking%2Fplugins%2Freleases%2Fdownload%2F%24CNI_VERSION%22%0Acni_filename%3D%22cni-plugins-linux-%24arch-%24CNI_VERSION.tgz%22%0Acurl%20-Lfo%20%22%24cni_bin_dir%2F%24cni_filename%22%20%22%24cni_base_url%2F%24cni_filename%22%0Acni_sum%3D%24(curl%20-Lf%20%22%24cni_base_url%2F%24cni_filename.sha256%22)%0Acd%20%22%24cni_bin_dir%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cni_sum%22%0Atar%20xvf%20%22%24cni_filename%22%0Arm%20-f%20%22%24cni_filename%22%0Acd%20-%0ACRI_TOOLS_RELEASE%3D%22%24%7BCRI_TOOLS_RELEASE%3A-v1.22.0%7D%22%0Acri_tools_base_url%3D%22https%3A%2F%2Fgithub.com%2Fkubernetes-sigs%2Fcri-tools%2Freleases%2Fdownload%2F%24%7BCRI_TOOLS_RELEASE%7D%22%0Acri_tools_filename%3D%22crictl-%24%7BCRI_TOOLS_RELEASE%7D-linux-%24%7Barch%7D.tar.gz%22%0Acurl%20-Lfo%20%22%24opt_bin%2F%24cri_tools_filename%22%20%22%24cri_tools_base_url%2F%24cri_tools_filename%22%0Acri_tools_sum%3D%24(curl%20-Lf%20%22%24cri_tools_base_url%2F%24cri_tools_filename.sha256%22%20%7C%20sed%20's%2F%5C*%5C%2F%2F%2F')%0Acd%20%22%24opt_bin%22%0Asha256sum%20-c%20%3C%3C%3C%22%24cri_tools_sum%22%0Atar%20xvf%20%22%24cri_tools_filename%22%0Arm%20-f%20%22%24cri_tools_filename%22%0Aln%20-sf%20%22%24opt_bin%2Fcrictl%22%20%22%24usr_local_bin%22%2Fcrictl%20%7C%7C%20echo%20%22symbolic%20link%20is%20skipped%22%0Acd%20-%0AKUBE_VERSION%3D%22%24%7BKUBE_VERSION%3A-v1.24.0%7D%22%0Akube_dir%3D%22%24opt_bin%2Fkubernetes-%24KUBE_VERSION%22%0Akube_base_url%3D%22https%3A%2F%2Fstorage.googleapis.com%2Fkubernetes-release%2Frelease%2F%24KUBE_VERSION%2Fbin%2Flinux%2F%24arch%22%0Akube_sum_file%3D%22%24kube_dir%2Fsha256%22%0Amkdir%20-p%20%22%24kube_dir%22%0A%3A%20%3E%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20curl%20-Lfo%20%22%24kube_dir%2F%24bin%22%20%22%24kube_base_url%2F%24bin%22%0A%20%20%20%20chmod%20%2Bx%20%22%24kube_dir%2F%24bin%22%0A%20%20%20%20sum%3D%24(curl%20-Lf%20%22%24kube_base_url%2F%24bin.sha256%22)%0A%20%20%20%20echo%20%22%24sum%20%20%24kube_dir%2F%24bin%22%20%3E%3E%22%24kube_sum_file%22%0Adone%0Asha256sum%20-c%20%22%24kube_sum_file%22%0A%0Afor%20bin%20in%20kubelet%20kubeadm%20kubectl%3B%20do%0A%20%20%20%20ln%20-sf%20%22%24k
ube_dir%2F%24bin%22%20%22%24opt_bin%22%2F%24bin%0Adone%0A%0Aif%20%5B%5B%20!%20-x%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20%5D%5D%3B%20then%0A%20%20%20%20curl%20-Lfo%20%2Fopt%2Fbin%2Fhealth-monitor.sh%20https%3A%2F%2Fraw.githubusercontent.com%2Fkubermatic%2Fmachine-controller%2F7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde%2Fpkg%2Fuserdata%2Fscripts%2Fhealth-monitor.sh%0A%20%20%20%20chmod%20%2Bx%20%2Fopt%2Fbin%2Fhealth-monitor.sh%0Afi%0A%0Amkdir%20-p%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%0Acat%20%3C%3CEOF%20%7C%20tee%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%2Fenvironment.conf%20%2Fetc%2Fsystemd%2Fsystem%2Fdocker.service.d%2Fenvironment.conf%0A%5BService%5D%0ARestart%3Dalways%0AEnvironmentFile%3D-%2Fetc%2Fenvironment%0AEOF%0A%0Amkdir%20-p%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%0A%0Acat%20%3C%3CEOF%20%7C%20tee%20%2Fetc%2Fsystemd%2Fsystem%2Fcontainerd.service.d%2F10-machine-controller.conf%0A%5BService%5D%0ARestart%3Dalways%0AEnvironment%3DCONTAINERD_CONFIG%3D%2Fetc%2Fcontainerd%2Fconfig.toml%0AExecStart%3D%0AExecStart%3D%2Fusr%2Fbin%2Fenv%20PATH%3D%5C%24%7BTORCX_BINDIR%7D%3A%5C%24%7BPATH%7D%20%5C%24%7BTORCX_BINDIR%7D%2Fcontainerd%20--config%20%5C%24%7BCONTAINERD_CONFIG%7D%0AEOF%0A%0Asystemctl%20daemon-reload%0Asystemctl%20enable%20--now%20containerd%0A%0Asystemctl%20disable%20download-script.service%0A","verification":{}},"mode":493},{"filesystem":"root","path":"/etc/containerd/config.toml","contents":{"source":"data:,version%20%3D%202%0A%0A%5Bmetrics%5D%0Aaddress%20%3D%20%22127.0.0.1%3A1338%22%0A%0A%5Bplugins%5D%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22%5D%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.containerd%5D%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.containerd.runtimes%5D%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.containerd.runtimes.runc%5D%0Aruntime_type%20%3D%20%22io.containerd.runc.v2%22%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.containerd.runtimes.runc.options%5D%0ASystemdCgroup%20%3D%20true%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.registry%5D%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.registry.mirrors%5D%0A%5Bplugins.%22io.containerd.grpc.v1.cri%22.registry.mirrors.%22docker.io%22%5D%0Aendpoint%20%3D%20%5B%22https%3A%2F%2Fregistry-1.docker.io%22%5D%0A","verification":{}},"mode":420},{"filesystem":"root","path":"/etc/crictl.yaml","contents":{"source":"data:,runtime-endpoint%3A%20unix%3A%2F%2F%2Frun%2Fcontainerd%2Fcontainerd.sock%0A","verification":{}},"mode":420}]},"systemd":{"units":[{"mask":true,"name":"update-engine.service"},{"mask":true,"name":"locksmithd.service"},{"contents":"[Install]\nWantedBy=multi-user.target\n\n[Unit]\nRequires=network-online.target\nRequires=nodeip.service\nAfter=network-online.target\nAfter=nodeip.service\n\nDescription=Service responsible for configuring the flatcar machine\n\n[Service]\nType=oneshot\nRemainAfterExit=true\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/setup.sh\n","enabled":true,"name":"setup.service"},{"contents":"[Unit]\nRequires=network-online.target\nRequires=setup.service\nAfter=network-online.target\nAfter=setup.service\n[Service]\nType=oneshot\nEnvironmentFile=-/etc/environment\nExecStart=/opt/bin/download.sh\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"download-script.service"},{"contents":"[Unit]\nRequires=kubelet.service\nAfter=kubelet.service\n\n[Service]\nExecStart=/opt/bin/health-monitor.sh 
kubelet\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet-healthcheck.service"},{"contents":"[Unit]\nDescription=Setup Kubelet Node IP Env\nRequires=network-online.target\nAfter=network-online.target\n\n[Service]\nExecStart=/opt/bin/setup_net_env.sh\nRemainAfterExit=yes\nType=oneshot\n[Install]\nWantedBy=multi-user.target\n","enabled":true,"name":"nodeip.service"},{"contents":"[Unit]\nAfter=containerd.service\nRequires=containerd.service\n\nDescription=kubelet: The Kubernetes Node Agent\nDocumentation=https://kubernetes.io/docs/home/\n\n[Service]\nUser=root\nRestart=always\nStartLimitInterval=0\nRestartSec=10\nCPUAccounting=true\nMemoryAccounting=true\n\nEnvironment=\"PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/\"\nEnvironmentFile=-/etc/environment\n\nExecStartPre=/bin/bash /opt/load-kernel-modules.sh\n\nExecStartPre=/bin/bash /opt/bin/setup_net_env.sh\nExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/var/lib/kubelet/kubeconfig \\\n --config=/etc/kubernetes/kubelet.conf \\\n --cert-dir=/etc/kubernetes/pki \\\n --cloud-provider=vsphere \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --container-runtime=remote \\\n --container-runtime-endpoint=unix:///run/containerd/containerd.sock \\\n --node-ip ${KUBELET_NODE_IP}\n\n[Install]\nWantedBy=multi-user.target\n","dropins":[{"contents":"[Service]\nEnvironmentFile=/etc/kubernetes/nodeip.conf\n","name":"10-nodeip.conf"},{"contents":"[Service]\nEnvironment=\"KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf\"\n","name":"resolv.conf"},{"contents":"[Unit]\nRequires=download-script.service\nAfter=download-script.service\n","name":"40-download.conf"}],"enabled":true,"name":"kubelet.service"}]}} \ No newline at end of file diff --git a/pkg/userdata/helper/common_test.go b/pkg/userdata/helper/common_test.go index 1c1d61897..082a49bc6 100644 --- a/pkg/userdata/helper/common_test.go +++ b/pkg/userdata/helper/common_test.go @@ -26,7 +26,6 @@ var update = flag.Bool("update", false, "update testdata files") var ( versions = []*semver.Version{ - semver.MustParse("v1.21.10"), semver.MustParse("v1.22.7"), semver.MustParse("v1.23.5"), semver.MustParse("v1.24.0"), diff --git a/pkg/userdata/helper/helper.go b/pkg/userdata/helper/helper.go index 9ee7ab926..63155f26c 100644 --- a/pkg/userdata/helper/helper.go +++ b/pkg/userdata/helper/helper.go @@ -21,6 +21,8 @@ import ( "fmt" "strings" + "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" + "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) @@ -30,17 +32,6 @@ const ( DefaultDockerContainerLogMaxSize = "100m" ) -func GetServerAddressFromKubeconfig(kubeconfig *clientcmdapi.Config) (string, error) { - if len(kubeconfig.Clusters) != 1 { - return "", fmt.Errorf("kubeconfig does not contain exactly one cluster, can not extract server address") - } - // Clusters is a map so we have to use range here - for _, clusterConfig := range kubeconfig.Clusters { - return strings.Replace(clusterConfig.Server, "https://", "", -1), nil - } - return "", fmt.Errorf("no server address found") -} - func GetCACert(kubeconfig *clientcmdapi.Config) (string, error) { if len(kubeconfig.Clusters) != 1 { return "", 
fmt.Errorf("kubeconfig does not contain exactly one cluster, can not extract server address") @@ -157,14 +148,29 @@ NO_PROXY=%s no_proxy=%s`, proxy, proxy, proxy, proxy, noProxy, noProxy) } -func SetupNodeIPEnvScript() string { +func SetupNodeIPEnvScript(ipFamily util.IPFamily) string { + const defaultIfcIPv4 = `DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+")` + + var defaultIfcIP string + switch ipFamily { + case util.IPv4: + defaultIfcIP = defaultIfcIPv4 + case util.IPv6: + defaultIfcIP = `DEFAULT_IFC_IP=$(ip -o -6 route get 1:: | grep -oP "src \K\S+")` + case util.DualStack: + defaultIfcIP = `DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") +DEFAULT_IFC_IP6=$(ip -o -6 route get 1:: | grep -oP "src \K\S+") +DEFAULT_IFC_IP=$DEFAULT_IFC_IP,$DEFAULT_IFC_IP6` + default: + defaultIfcIP = defaultIfcIPv4 + } return `#!/usr/bin/env bash echodate() { echo "[$(date -Is)]" "$@" } # get the default interface IP address -DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") +` + defaultIfcIP + ` # get the full hostname FULL_HOSTNAME=$(hostname -f) diff --git a/pkg/userdata/helper/kubelet.go b/pkg/userdata/helper/kubelet.go index 07f6691da..2a3ad59d5 100644 --- a/pkg/userdata/helper/kubelet.go +++ b/pkg/userdata/helper/kubelet.go @@ -26,6 +26,7 @@ import ( "github.com/Masterminds/semver/v3" "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" + "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,15 +40,18 @@ const ( defaultKubeletContainerLogMaxSize = "100Mi" ) -const ( - kubeletFlagsTpl = `--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ +func kubeletFlagsTpl(withNodeIP bool) string { + flagsTemplate := `--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ --kubeconfig=/var/lib/kubelet/kubeconfig \ --config=/etc/kubernetes/kubelet.conf \ ---cert-dir=/etc/kubernetes/pki \ +--cert-dir=/etc/kubernetes/pki \` + + flagsTemplate += ` {{- if or (.CloudProvider) (.IsExternal) }} {{ cloudProviderFlags .CloudProvider .IsExternal }} \ -{{- end }} -{{- if and (.Hostname) (ne .CloudProvider "aws") }} +{{- end }}` + + flagsTemplate += `{{- if and (.Hostname) (ne .CloudProvider "aws") }} --hostname-override={{ .Hostname }} \ {{- else if and (eq .CloudProvider "aws") (.IsExternal) }} --hostname-override=${KUBELET_HOSTNAME} \ @@ -62,9 +66,17 @@ const ( {{- end }} {{- range .ExtraKubeletFlags }} {{ . 
}} \ -{{- end }} +{{- end }}` + + if withNodeIP { + flagsTemplate += ` --node-ip ${KUBELET_NODE_IP}` + } + return flagsTemplate +} + +const ( kubeletSystemdUnitTpl = `[Unit] After={{ .ContainerRuntime }}.service Requires={{ .ContainerRuntime }}.service @@ -89,7 +101,7 @@ ExecStartPre=/bin/bash /opt/disable-swap.sh {{ end }} ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ -{{ kubeletFlags .KubeletVersion .CloudProvider .Hostname .ClusterDNSIPs .IsExternal .PauseImage .InitialTaints .ExtraKubeletFlags | indent 2 }} +{{ kubeletFlags .KubeletVersion .CloudProvider .Hostname .ClusterDNSIPs .IsExternal .IPFamily .PauseImage .InitialTaints .ExtraKubeletFlags | indent 2 }} [Install] WantedBy=multi-user.target` @@ -123,20 +135,32 @@ var kubeletTLSCipherSuites = []string{ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", } +func withNodeIPFlag(ipFamily util.IPFamily, cloudProvider string, external bool) bool { + // If external or in-tree CCM is in use we don't need to set --node-ip + // as the cloud provider will know what IPs to return. + if ipFamily == util.DualStack { + if external || cloudProvider != "" { + return false + } + } + return true +} + // CloudProviderFlags returns --cloud-provider and --cloud-config flags. -func CloudProviderFlags(cpName string, external bool) (string, error) { +func CloudProviderFlags(cpName string, external bool) string { if cpName == "" && !external { - return "", nil + return "" } if external { - return "--cloud-provider=external", nil + return `--cloud-provider=external` } - return fmt.Sprintf(cpFlags, cpName), nil + + return fmt.Sprintf(cpFlags, cpName) } // KubeletSystemdUnit returns the systemd unit for the kubelet. -func KubeletSystemdUnit(containerRuntime, kubeletVersion, cloudProvider, hostname string, dnsIPs []net.IP, external bool, pauseImage string, initialTaints []corev1.Taint, extraKubeletFlags []string, disableSwap bool) (string, error) { +func KubeletSystemdUnit(containerRuntime, kubeletVersion, cloudProvider, hostname string, dnsIPs []net.IP, external bool, ipFamily util.IPFamily, pauseImage string, initialTaints []corev1.Taint, extraKubeletFlags []string, disableSwap bool) (string, error) { tmpl, err := template.New("kubelet-systemd-unit").Funcs(TxtFuncMap()).Parse(kubeletSystemdUnitTpl) if err != nil { return "", fmt.Errorf("failed to parse kubelet-systemd-unit template: %w", err) @@ -149,6 +173,7 @@ func KubeletSystemdUnit(containerRuntime, kubeletVersion, cloudProvider, hostnam Hostname string ClusterDNSIPs []net.IP IsExternal bool + IPFamily util.IPFamily PauseImage string InitialTaints []corev1.Taint ExtraKubeletFlags []string @@ -160,6 +185,7 @@ func KubeletSystemdUnit(containerRuntime, kubeletVersion, cloudProvider, hostnam Hostname: hostname, ClusterDNSIPs: dnsIPs, IsExternal: external, + IPFamily: ipFamily, PauseImage: pauseImage, InitialTaints: initialTaints, ExtraKubeletFlags: extraKubeletFlags, @@ -279,8 +305,16 @@ func kubeletConfiguration(clusterDomain string, clusterDNS []net.IP, featureGate } // KubeletFlags returns the kubelet flags. -func KubeletFlags(version, cloudProvider, hostname string, dnsIPs []net.IP, external bool, pauseImage string, initialTaints []corev1.Taint, extraKubeletFlags []string) (string, error) { - tmpl, err := template.New("kubelet-flags").Funcs(TxtFuncMap()).Parse(kubeletFlagsTpl) +// --node-ip and --cloud-provider kubelet flags conflict in the dualstack setup. 
+// In general, it is not expected to need to use --node-ip with external CCMs, +// as the cloud provider is expected to know the correct IPs to return. +// For details read kubernetes/sig-networking channel discussion +// https://kubernetes.slack.com/archives/C09QYUH5W/p1654003958331739 +func KubeletFlags(version, cloudProvider, hostname string, dnsIPs []net.IP, external bool, ipFamily util.IPFamily, pauseImage string, initialTaints []corev1.Taint, extraKubeletFlags []string) (string, error) { + withNodeIPFlag := withNodeIPFlag(ipFamily, cloudProvider, external) + + tmpl, err := template.New("kubelet-flags").Funcs(TxtFuncMap()). + Parse(kubeletFlagsTpl(withNodeIPFlag)) if err != nil { return "", fmt.Errorf("failed to parse kubelet-flags template: %w", err) } @@ -328,6 +362,7 @@ func KubeletFlags(version, cloudProvider, hostname string, dnsIPs []net.IP, exte ClusterDNSIPs []net.IP KubeletVersion string IsExternal bool + IPFamily util.IPFamily PauseImage string InitialTaints string ExtraKubeletFlags []string @@ -337,6 +372,7 @@ func KubeletFlags(version, cloudProvider, hostname string, dnsIPs []net.IP, exte ClusterDNSIPs: dnsIPs, KubeletVersion: version, IsExternal: external, + IPFamily: ipFamily, PauseImage: pauseImage, InitialTaints: strings.Join(initialTaintsArgs, ","), ExtraKubeletFlags: kubeletFlags, diff --git a/pkg/userdata/helper/kubelet_test.go b/pkg/userdata/helper/kubelet_test.go index 5b7326a1c..18b02effc 100644 --- a/pkg/userdata/helper/kubelet_test.go +++ b/pkg/userdata/helper/kubelet_test.go @@ -23,6 +23,7 @@ import ( "github.com/Masterminds/semver/v3" + "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" testhelper "github.com/kubermatic/machine-controller/pkg/test" corev1 "k8s.io/api/core/v1" @@ -36,6 +37,7 @@ type kubeletFlagTestCase struct { hostname string cloudProvider string external bool + ipFamily util.IPFamily pauseImage string initialTaints []corev1.Taint extraFlags []string @@ -117,6 +119,7 @@ func TestKubeletSystemdUnit(t *testing.T) { test.hostname, test.dnsIPs, test.external, + test.ipFamily, test.pauseImage, test.initialTaints, test.extraFlags, diff --git a/pkg/userdata/helper/testdata/download_binaries_v1.21.10.golden b/pkg/userdata/helper/testdata/download_binaries_v1.21.10.golden deleted file mode 100644 index 75ed990ec..000000000 --- a/pkg/userdata/helper/testdata/download_binaries_v1.21.10.golden +++ /dev/null @@ -1,17 +0,0 @@ -mkdir -p /opt/bin/ -mkdir -p /var/lib/calico -mkdir -p /etc/kubernetes/manifests -mkdir -p /etc/cni/net.d -mkdir -p /opt/cni/bin -if [ ! -f /opt/cni/bin/loopback ]; then - curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz | tar -xvzC /opt/cni/bin -f - -fi -if [ ! -f /opt/bin/kubelet ]; then - curl -Lfo /opt/bin/kubelet https://storage.googleapis.com/kubernetes-release/release/v1.21.10/bin/linux/amd64/kubelet - chmod +x /opt/bin/kubelet -fi - -if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh -fi diff --git a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.21.10-external.golden b/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.21.10-external.golden deleted file mode 100644 index 435cefdf8..000000000 --- a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.21.10-external.golden +++ /dev/null @@ -1,39 +0,0 @@ -[Unit] -After=docker.service -Requires=docker.service - -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - -Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - -ExecStartPre=/bin/bash /opt/disable-swap.sh - -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=external \ - --hostname-override=some-test-node \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ - --network-plugin=cni \ - --node-ip ${KUBELET_NODE_IP} - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.21.10.golden b/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.21.10.golden deleted file mode 100644 index e1c0b6225..000000000 --- a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.21.10.golden +++ /dev/null @@ -1,38 +0,0 @@ -[Unit] -After=docker.service -Requires=docker.service - -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - -Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - -ExecStartPre=/bin/bash /opt/disable-swap.sh - -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=some-test-node \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ - --network-plugin=cni \ - --node-ip ${KUBELET_NODE_IP} - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/userdata/rhel/provider.go b/pkg/userdata/rhel/provider.go index b898e3094..46f3b3046 100644 --- a/pkg/userdata/rhel/provider.go +++ b/pkg/userdata/rhel/provider.go @@ -66,11 +66,6 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { return "", fmt.Errorf("failed to parse OperatingSystemSpec: 
%w", err) } - serverAddr, err := userdatahelper.GetServerAddressFromKubeconfig(req.Kubeconfig) - if err != nil { - return "", fmt.Errorf("error extracting server address from kubeconfig: %w", err) - } - kubeconfigString, err := userdatahelper.StringifyKubeconfig(req.Kubeconfig) if err != nil { return "", err @@ -102,7 +97,6 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { ProviderSpec *providerconfigtypes.Config OSConfig *Config KubeletVersion string - ServerAddr string Kubeconfig string KubernetesCACert string NodeIPScript string @@ -118,10 +112,9 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { ProviderSpec: pconfig, OSConfig: rhelConfig, KubeletVersion: kubeletVersion.String(), - ServerAddr: serverAddr, Kubeconfig: kubeconfigString, KubernetesCACert: kubernetesCACert, - NodeIPScript: userdatahelper.SetupNodeIPEnvScript(), + NodeIPScript: userdatahelper.SetupNodeIPEnvScript(pconfig.Network.GetIPFamily()), ExtraKubeletFlags: crEngine.KubeletFlags(), ContainerRuntimeScript: crScript, ContainerRuntimeConfigFileName: crEngine.ConfigFileName(), @@ -280,7 +273,7 @@ write_files: - path: "/etc/systemd/system/kubelet.service" content: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} +{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} - path: "/etc/kubernetes/cloud-config" permissions: "0600" diff --git a/pkg/userdata/rhel/provider_test.go b/pkg/userdata/rhel/provider_test.go index 82c1bcae6..018a06f4d 100644 --- a/pkg/userdata/rhel/provider_test.go +++ b/pkg/userdata/rhel/provider_test.go @@ -99,15 +99,6 @@ func TestUserDataGeneration(t *testing.T) { t.Parallel() tests := []userDataTestCase{ - { - name: "kubelet-v1.21-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.21.10", - }, - }, - }, { name: "kubelet-v1.22-aws", spec: clusterv1alpha1.MachineSpec{ @@ -136,25 +127,6 @@ func TestUserDataGeneration(t *testing.T) { }, externalCloudProvider: true, }, - { - name: "kubelet-v1.24-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.0", - }, - }, - }, - { - name: "kubelet-v1.24-aws-external", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.0", - }, - }, - externalCloudProvider: true, - }, { name: "kubelet-v1.23-vsphere", spec: clusterv1alpha1.MachineSpec{ @@ -193,6 +165,25 @@ func TestUserDataGeneration(t *testing.T) { registryMirrors: "https://registry.docker-cn.com", pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", }, + { + name: "kubelet-v1.24-aws", + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "1.24.0", + }, + }, + }, + { + name: "kubelet-v1.24-aws-external", + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "1.24.0", + }, + }, + externalCloudProvider: true, + }, { name: "kubelet-v1.22-nutanix", spec: 
clusterv1alpha1.MachineSpec{ @@ -241,10 +232,10 @@ func TestUserDataGeneration(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - emtpyProviderSpec := clusterv1alpha1.ProviderSpec{ + emptyProviderSpec := clusterv1alpha1.ProviderSpec{ Value: &runtime.RawExtension{}, } - test.spec.ProviderSpec = emtpyProviderSpec + test.spec.ProviderSpec = emptyProviderSpec var cloudProvider *fakeCloudConfigProvider if test.cloudProviderName != nil { cloudProvider = &fakeCloudConfigProvider{ diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.21-aws.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.21-aws.yaml deleted file mode 100644 index 9568a5550..000000000 --- a/pkg/userdata/rhel/testdata/kubelet-v1.21-aws.yaml +++ /dev/null @@ -1,481 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - echo NETWORKING_IPV6=yes >> /etc/sysconfig/network - echo IPV6INIT=yes >> /etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - echo DHCPV6C=yes >> /etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - ifdown $DEFAULT_IFC_NAME && ifup $DEFAULT_IFC_NAME - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - systemctl disable disable-nm-cloud-setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=docker.service - Requires=docker.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=docker \ - --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ - --network-plugin=cni \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - 
logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/docker/daemon.json - permissions: "0644" - content: | - {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"}} - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -- 
path: "/opt/bin/disable-nm-cloud-setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl status 'nm-cloud-setup.timer' 2> /dev/null | grep -Fq "Active:"; then - systemctl stop nm-cloud-setup.timer - systemctl disable nm-cloud-setup.service - systemctl disable nm-cloud-setup.timer - reboot - fi - -- path: "/etc/systemd/system/disable-nm-cloud-setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/disable-nm-cloud-setup - -rh_subscription: - username: "" - password: "" - auto-attach: false - -runcmd: -- systemctl enable --now setup.service -- systemctl enable --now disable-nm-cloud-setup.service diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.22-aws.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.22-aws.yaml index 81fb687e8..dbdb97a14 100644 --- a/pkg/userdata/rhel/testdata/kubelet-v1.22-aws.yaml +++ b/pkg/userdata/rhel/testdata/kubelet-v1.22-aws.yaml @@ -93,9 +93,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.22-nutanix.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.22-nutanix.yaml index da5d9155c..82ee10299 100644 --- a/pkg/userdata/rhel/testdata/kubelet-v1.22-nutanix.yaml +++ b/pkg/userdata/rhel/testdata/kubelet-v1.22-nutanix.yaml @@ -101,9 +101,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.23-aws-external.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.23-aws-external.yaml index c46206267..98a5f9b61 100644 --- a/pkg/userdata/rhel/testdata/kubelet-v1.23-aws-external.yaml +++ b/pkg/userdata/rhel/testdata/kubelet-v1.23-aws-external.yaml @@ -93,9 +93,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.23-aws.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.23-aws.yaml index 50fd4811f..deeef0ad8 100644 --- a/pkg/userdata/rhel/testdata/kubelet-v1.23-aws.yaml +++ b/pkg/userdata/rhel/testdata/kubelet-v1.23-aws.yaml @@ -93,9 +93,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.23-vsphere-mirrors.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.23-vsphere-mirrors.yaml index 467e63141..749268ce1 100644 --- a/pkg/userdata/rhel/testdata/kubelet-v1.23-vsphere-mirrors.yaml +++ b/pkg/userdata/rhel/testdata/kubelet-v1.23-vsphere-mirrors.yaml @@ -107,9 +107,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.23-vsphere-proxy.yaml 
b/pkg/userdata/rhel/testdata/kubelet-v1.23-vsphere-proxy.yaml index aa9bc6081..1ec73c6a7 100644 --- a/pkg/userdata/rhel/testdata/kubelet-v1.23-vsphere-proxy.yaml +++ b/pkg/userdata/rhel/testdata/kubelet-v1.23-vsphere-proxy.yaml @@ -107,9 +107,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.23-vsphere.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.23-vsphere.yaml index a193c46f3..49a601c3d 100644 --- a/pkg/userdata/rhel/testdata/kubelet-v1.23-vsphere.yaml +++ b/pkg/userdata/rhel/testdata/kubelet-v1.23-vsphere.yaml @@ -99,9 +99,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/rhel/testdata/pod-cidr-azure-rhel.yaml b/pkg/userdata/rhel/testdata/pod-cidr-azure-rhel.yaml index 055f69919..0a661c056 100644 --- a/pkg/userdata/rhel/testdata/pod-cidr-azure-rhel.yaml +++ b/pkg/userdata/rhel/testdata/pod-cidr-azure-rhel.yaml @@ -98,9 +98,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/rockylinux/provider.go b/pkg/userdata/rockylinux/provider.go index c2a130600..930984309 100644 --- a/pkg/userdata/rockylinux/provider.go +++ b/pkg/userdata/rockylinux/provider.go @@ -66,11 +66,6 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { return "", fmt.Errorf("failed to parse OperatingSystemSpec: %w", err) } - serverAddr, err := userdatahelper.GetServerAddressFromKubeconfig(req.Kubeconfig) - if err != nil { - return "", fmt.Errorf("error extracting server address from kubeconfig: %w", err) - } - kubeconfigString, err := userdatahelper.StringifyKubeconfig(req.Kubeconfig) if err != nil { return "", err @@ -102,7 +97,6 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { ProviderSpec *providerconfigtypes.Config OSConfig *Config KubeletVersion string - ServerAddr string Kubeconfig string KubernetesCACert string NodeIPScript string @@ -118,10 +112,9 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { ProviderSpec: pconfig, OSConfig: rockyLinuxConfig, KubeletVersion: kubeletVersion.String(), - ServerAddr: serverAddr, Kubeconfig: kubeconfigString, KubernetesCACert: kubernetesCACert, - NodeIPScript: userdatahelper.SetupNodeIPEnvScript(), + NodeIPScript: userdatahelper.SetupNodeIPEnvScript(pconfig.Network.GetIPFamily()), ExtraKubeletFlags: crEngine.KubeletFlags(), ContainerRuntimeScript: crScript, ContainerRuntimeConfigFileName: crEngine.ConfigFileName(), @@ -271,7 +264,7 @@ write_files: - path: "/etc/systemd/system/kubelet.service" content: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} +{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} - path: "/etc/kubernetes/cloud-config" permissions: "0600" diff 
--git a/pkg/userdata/rockylinux/provider_test.go b/pkg/userdata/rockylinux/provider_test.go index 3927c846f..de1fc01e8 100644 --- a/pkg/userdata/rockylinux/provider_test.go +++ b/pkg/userdata/rockylinux/provider_test.go @@ -100,40 +100,49 @@ func TestUserDataGeneration(t *testing.T) { tests := []userDataTestCase{ { - name: "kubelet-v1.21-aws", + name: "kubelet-v1.22-aws", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.21.10", + Kubelet: "1.22.7", }, }, }, { - name: "kubelet-v1.21-aws-external", + name: "kubelet-v1.23-aws", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.21.10", + Kubelet: "1.23.5", + }, + }, + }, + { + name: "kubelet-v1.23-aws-external", + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "1.23.5", }, }, externalCloudProvider: true, }, { - name: "kubelet-v1.21-vsphere", + name: "kubelet-v1.23-vsphere", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.21.10", + Kubelet: "1.23.5", }, }, cloudProviderName: stringPtr("vsphere"), }, { - name: "kubelet-v1.21-vsphere-proxy", + name: "kubelet-v1.23-vsphere-proxy", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.21.10", + Kubelet: "1.23.5", }, }, cloudProviderName: stringPtr("vsphere"), @@ -143,11 +152,11 @@ func TestUserDataGeneration(t *testing.T) { pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", }, { - name: "kubelet-v1.21-vsphere-mirrors", + name: "kubelet-v1.23-vsphere-mirrors", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.21.10", + Kubelet: "1.23.5", }, }, cloudProviderName: stringPtr("vsphere"), @@ -157,22 +166,14 @@ func TestUserDataGeneration(t *testing.T) { pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", }, { - name: "kubelet-v1.22-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.22.7", - }, - }, - }, - { - name: "kubelet-v1.23-aws", + name: "kubelet-v1.23-nutanix", spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Versions: clusterv1alpha1.MachineVersionInfo{ Kubelet: "1.23.5", }, }, + cloudProviderName: stringPtr("nutanix"), }, { name: "kubelet-v1.24-aws", @@ -183,16 +184,6 @@ func TestUserDataGeneration(t *testing.T) { }, }, }, - { - name: "kubelet-v1.23-nutanix", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.23.5", - }, - }, - cloudProviderName: stringPtr("nutanix"), - }, } defaultCloudProvider := &fakeCloudConfigProvider{ @@ -221,10 +212,10 @@ func TestUserDataGeneration(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - emtpyProviderSpec := clusterv1alpha1.ProviderSpec{ + emptyProviderSpec := clusterv1alpha1.ProviderSpec{ Value: &runtime.RawExtension{}, } - test.spec.ProviderSpec = emtpyProviderSpec + test.spec.ProviderSpec = emptyProviderSpec var cloudProvider *fakeCloudConfigProvider if test.cloudProviderName != nil { cloudProvider = &fakeCloudConfigProvider{ diff --git 
a/pkg/userdata/rockylinux/testdata/kubelet-v1.21-aws.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.21-aws.yaml deleted file mode 100644 index 048093fd3..000000000 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.21-aws.yaml +++ /dev/null @@ -1,441 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - tar \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=docker.service - Requires=docker.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=docker \ - --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ - --network-plugin=cni \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - 
logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/docker/daemon.json - permissions: "0644" - content: | - {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"}} - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - 
-runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.22-aws.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.22-aws.yaml index 85cc06d59..2e8b1aba4 100644 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.22-aws.yaml +++ b/pkg/userdata/rockylinux/testdata/kubelet-v1.22-aws.yaml @@ -94,9 +94,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.21-aws-external.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.23-aws-external.yaml similarity index 98% rename from pkg/userdata/rockylinux/testdata/kubelet-v1.21-aws-external.yaml rename to pkg/userdata/rockylinux/testdata/kubelet-v1.23-aws-external.yaml index ffe97a9d4..1621348e7 100644 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.21-aws-external.yaml +++ b/pkg/userdata/rockylinux/testdata/kubelet-v1.23-aws-external.yaml @@ -94,9 +94,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io @@ -144,7 +144,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -230,8 +230,6 @@ write_files: --lock-file=/tmp/kubelet.lock \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.23-aws.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.23-aws.yaml index 6c8dea179..587dc6d17 100644 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.23-aws.yaml +++ b/pkg/userdata/rockylinux/testdata/kubelet-v1.23-aws.yaml @@ -94,9 +94,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.23-nutanix.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.23-nutanix.yaml index d8b9a49a3..bf9f1047e 100644 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.23-nutanix.yaml +++ b/pkg/userdata/rockylinux/testdata/kubelet-v1.23-nutanix.yaml @@ -101,9 +101,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.21-vsphere-mirrors.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.23-vsphere-mirrors.yaml similarity index 98% rename from pkg/userdata/rockylinux/testdata/kubelet-v1.21-vsphere-mirrors.yaml rename to pkg/userdata/rockylinux/testdata/kubelet-v1.23-vsphere-mirrors.yaml index 66de513af..5524b8cde 100644 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.21-vsphere-mirrors.yaml +++ 
b/pkg/userdata/rockylinux/testdata/kubelet-v1.23-vsphere-mirrors.yaml @@ -107,9 +107,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io @@ -157,7 +157,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -247,8 +247,6 @@ write_files: --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.21-vsphere-proxy.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.23-vsphere-proxy.yaml similarity index 98% rename from pkg/userdata/rockylinux/testdata/kubelet-v1.21-vsphere-proxy.yaml rename to pkg/userdata/rockylinux/testdata/kubelet-v1.23-vsphere-proxy.yaml index 43b97a65e..931fdde4f 100644 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.21-vsphere-proxy.yaml +++ b/pkg/userdata/rockylinux/testdata/kubelet-v1.23-vsphere-proxy.yaml @@ -107,9 +107,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io @@ -157,7 +157,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -247,8 +247,6 @@ write_files: --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.21-vsphere.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.23-vsphere.yaml similarity index 98% rename from pkg/userdata/rockylinux/testdata/kubelet-v1.21-vsphere.yaml rename to pkg/userdata/rockylinux/testdata/kubelet-v1.23-vsphere.yaml index 34da2675a..e701f5963 100644 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.21-vsphere.yaml +++ b/pkg/userdata/rockylinux/testdata/kubelet-v1.23-vsphere.yaml @@ -99,9 +99,9 @@ write_files: EOF yum install -y \ - docker-ce-cli-19.03* \ + docker-ce-cli-20.10* \ containerd.io-1.4* \ - docker-ce-19.03* \ + docker-ce-20.10* \ yum-plugin-versionlock yum versionlock add docker-ce* containerd.io @@ -149,7 +149,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" 
kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -238,8 +238,6 @@ write_files: --lock-file=/tmp/kubelet.lock \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/sles/provider.go b/pkg/userdata/sles/provider.go index 50b14f888..41fbf06a5 100644 --- a/pkg/userdata/sles/provider.go +++ b/pkg/userdata/sles/provider.go @@ -66,11 +66,6 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { return "", fmt.Errorf("failed to get sles config from provider config: %w", err) } - serverAddr, err := userdatahelper.GetServerAddressFromKubeconfig(req.Kubeconfig) - if err != nil { - return "", fmt.Errorf("error extracting server address from kubeconfig: %w", err) - } - kubeconfigString, err := userdatahelper.StringifyKubeconfig(req.Kubeconfig) if err != nil { return "", err @@ -96,7 +91,6 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { plugin.UserDataRequest ProviderSpec *providerconfigtypes.Config OSConfig *Config - ServerAddr string KubeletVersion string Kubeconfig string KubernetesCACert string @@ -111,11 +105,10 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { UserDataRequest: req, ProviderSpec: pconfig, OSConfig: slesConfig, - ServerAddr: serverAddr, KubeletVersion: kubeletVersion.String(), Kubeconfig: kubeconfigString, KubernetesCACert: kubernetesCACert, - NodeIPScript: userdatahelper.SetupNodeIPEnvScript(), + NodeIPScript: userdatahelper.SetupNodeIPEnvScript(pconfig.Network.GetIPFamily()), ExtraKubeletFlags: crEngine.KubeletFlags(), ContainerRuntimeConfigFileName: crEngine.ConfigFileName(), ContainerRuntimeConfig: crConfig, @@ -223,7 +216,7 @@ write_files: - path: "/etc/systemd/system/kubelet.service" content: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} +{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} - path: "/etc/systemd/system/kubelet.service.d/extras.conf" content: | diff --git a/pkg/userdata/sles/provider_test.go b/pkg/userdata/sles/provider_test.go index 302344db9..ba589e583 100644 --- a/pkg/userdata/sles/provider_test.go +++ b/pkg/userdata/sles/provider_test.go @@ -126,7 +126,6 @@ type userDataTestCase struct { func simpleVersionTests() []userDataTestCase { versions := []*semver.Version{ - semver.MustParse("v1.21.10"), semver.MustParse("v1.22.7"), semver.MustParse("v1.23.5"), semver.MustParse("v1.24.0"), diff --git a/pkg/userdata/sles/testdata/version-1.21.10.yaml b/pkg/userdata/sles/testdata/version-1.21.10.yaml deleted file mode 100644 index 2c781afee..000000000 --- a/pkg/userdata/sles/testdata/version-1.21.10.yaml +++ /dev/null @@ -1,425 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - 
permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - systemctl restart systemd-modules-load.service - sysctl --system - - zypper --non-interactive --quiet --color install ebtables \ - ceph-common \ - e2fsprogs \ - jq \ - socat \ - ipvsadm - - opt_bin=/opt/bin - usr_local_bin=/usr/local/bin - cni_bin_dir=/opt/cni/bin - mkdir -p /etc/cni/net.d /etc/kubernetes/dynamic-config-dir /etc/kubernetes/manifests "$opt_bin" "$cni_bin_dir" - arch=${HOST_ARCH-} - if [ -z "$arch" ] - then - case $(uname -m) in - x86_64) - arch="amd64" - ;; - aarch64) - arch="arm64" - ;; - *) - echo "unsupported CPU architecture, exiting" - exit 1 - ;; - esac - fi - CNI_VERSION="${CNI_VERSION:-v0.8.7}" - cni_base_url="https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION" - cni_filename="cni-plugins-linux-$arch-$CNI_VERSION.tgz" - curl -Lfo "$cni_bin_dir/$cni_filename" "$cni_base_url/$cni_filename" - cni_sum=$(curl -Lf "$cni_base_url/$cni_filename.sha256") - cd "$cni_bin_dir" - sha256sum -c <<<"$cni_sum" - tar xvf "$cni_filename" - rm -f "$cni_filename" - cd - - CRI_TOOLS_RELEASE="${CRI_TOOLS_RELEASE:-v1.22.0}" - cri_tools_base_url="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRI_TOOLS_RELEASE}" - cri_tools_filename="crictl-${CRI_TOOLS_RELEASE}-linux-${arch}.tar.gz" - curl -Lfo "$opt_bin/$cri_tools_filename" "$cri_tools_base_url/$cri_tools_filename" - cri_tools_sum=$(curl -Lf "$cri_tools_base_url/$cri_tools_filename.sha256" | sed 's/\*\///') - cd "$opt_bin" - sha256sum -c <<<"$cri_tools_sum" - tar xvf "$cri_tools_filename" - rm -f "$cri_tools_filename" - ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" - cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" - kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" - kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" - kube_sum_file="$kube_dir/sha256" - mkdir -p "$kube_dir" - : >"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now docker - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl enable --now --no-block docker-healthcheck.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - cp /etc/fstab /etc/fstab.orig - cat /etc/fstab.orig | awk '$3 ~ /^swap$/ && $1 !~ /^#/ {$0="# commented out by cloudinit\n#"$0} 1' > /etc/fstab.noswap - mv /etc/fstab.noswap /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=docker.service - Requires=docker.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=docker \ - --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ - --network-plugin=cni \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/var/run/netconfig/resolv.conf" - -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - 
mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/docker/daemon.json - permissions: "0644" - content: | - {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"}} - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - 
[Install] - WantedBy=multi-user.target - - -- path: /etc/systemd/system/docker-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=docker.service - After=docker.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh container-runtime - - [Install] - WantedBy=multi-user.target - -- path: /etc/systemd/system/docker.service.d/environment.conf - permissions: "0644" - content: | - [Service] - EnvironmentFile=-/etc/environment - -runcmd: -- systemctl start setup.service diff --git a/pkg/userdata/ubuntu/provider.go b/pkg/userdata/ubuntu/provider.go index 034f4d88d..5a83a8a1b 100644 --- a/pkg/userdata/ubuntu/provider.go +++ b/pkg/userdata/ubuntu/provider.go @@ -66,11 +66,6 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { return "", fmt.Errorf("failed to get ubuntu config from provider config: %w", err) } - serverAddr, err := userdatahelper.GetServerAddressFromKubeconfig(req.Kubeconfig) - if err != nil { - return "", fmt.Errorf("error extracting server address from kubeconfig: %w", err) - } - kubeconfigString, err := userdatahelper.StringifyKubeconfig(req.Kubeconfig) if err != nil { return "", err @@ -101,7 +96,6 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { plugin.UserDataRequest ProviderSpec *providerconfigtypes.Config OSConfig *Config - ServerAddr string KubeletVersion string Kubeconfig string KubernetesCACert string @@ -117,11 +111,10 @@ func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { UserDataRequest: req, ProviderSpec: pconfig, OSConfig: ubuntuConfig, - ServerAddr: serverAddr, KubeletVersion: kubeletVersion.String(), Kubeconfig: kubeconfigString, KubernetesCACert: kubernetesCACert, - NodeIPScript: userdatahelper.SetupNodeIPEnvScript(), + NodeIPScript: userdatahelper.SetupNodeIPEnvScript(pconfig.Network.GetIPFamily()), ExtraKubeletFlags: crEngine.KubeletFlags(), ContainerRuntimeScript: crScript, ContainerRuntimeConfigFileName: crEngine.ConfigFileName(), @@ -270,7 +263,7 @@ write_files: - path: "/etc/systemd/system/kubelet.service" content: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} +{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} - path: "/etc/systemd/system/kubelet.service.d/extras.conf" content: | diff --git a/pkg/userdata/ubuntu/provider_test.go b/pkg/userdata/ubuntu/provider_test.go index 47914c90b..14c230c61 100644 --- a/pkg/userdata/ubuntu/provider_test.go +++ b/pkg/userdata/ubuntu/provider_test.go @@ -31,6 +31,7 @@ import ( clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" "github.com/kubermatic/machine-controller/pkg/apis/plugin" + "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" "github.com/kubermatic/machine-controller/pkg/containerruntime" providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" testhelper "github.com/kubermatic/machine-controller/pkg/test" @@ -127,7 +128,6 @@ type userDataTestCase struct { func simpleVersionTests() []userDataTestCase { versions := []*semver.Version{ - semver.MustParse("v1.21.10"), semver.MustParse("v1.22.7"), semver.MustParse("v1.23.5"), semver.MustParse("v1.24.0"), @@ -297,6 +297,62 @@ func 
TestUserDataGeneration(t *testing.T) { DistUpgradeOnBoot: false, }, }, + { + name: "openstack-dualstack", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "openstack", + SSHPublicKeys: []string{"ssh-rsa AAABBB"}, + Network: &providerconfigtypes.NetworkConfig{ + IPFamily: util.DualStack, + }, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: defaultVersion, + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "openstack", + config: "{openstack-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10"), net.ParseIP("10.10.10.11"), net.ParseIP("10.10.10.12")}, + kubernetesCACert: "CACert", + osConfig: &Config{ + DistUpgradeOnBoot: false, + }, + externalCloudProvider: true, + }, + { + name: "digitalocean-dualstack", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "digitalocean", + SSHPublicKeys: []string{"ssh-rsa AAABBB"}, + Network: &providerconfigtypes.NetworkConfig{ + IPFamily: util.DualStack, + }, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: defaultVersion, + }, + }, + ccProvider: &fakeCloudConfigProvider{ + config: "{digitalocean-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10"), net.ParseIP("10.10.10.11"), net.ParseIP("10.10.10.12")}, + kubernetesCACert: "CACert", + osConfig: &Config{ + DistUpgradeOnBoot: false, + }, + }, { name: "openstack-overwrite-cloud-config", providerSpec: &providerconfigtypes.Config{ @@ -489,7 +545,7 @@ func TestUserDataGeneration(t *testing.T) { Name: "node1", }, Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.21.10", + Kubelet: "1.23.5", }, }, ccProvider: &fakeCloudConfigProvider{ diff --git a/pkg/userdata/ubuntu/testdata/version-1.21.10.yaml b/pkg/userdata/ubuntu/testdata/digitalocean-dualstack.yaml similarity index 97% rename from pkg/userdata/ubuntu/testdata/version-1.21.10.yaml rename to pkg/userdata/ubuntu/testdata/digitalocean-dualstack.yaml index f4d99bd0b..d76ac8ad9 100644 --- a/pkg/userdata/ubuntu/testdata/version-1.21.10.yaml +++ b/pkg/userdata/ubuntu/testdata/digitalocean-dualstack.yaml @@ -100,8 +100,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload @@ -149,7 +149,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" + KUBE_VERSION="${KUBE_VERSION:-v1.22.7}" kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -246,7 +246,7 @@ write_files: - path: "/etc/kubernetes/cloud-config" permissions: "0600" content: | - + {digitalocean-config:true} - path: "/opt/bin/setup_net_env.sh" permissions: "0755" @@ -257,7 +257,9 @@ write_files: } # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") + DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") + DEFAULT_IFC_IP6=$(ip -o -6 route get 1:: | grep -oP "src \K\S+") + DEFAULT_IFC_IP=$DEFAULT_IFC_IP,$DEFAULT_IFC_IP6 # get the full hostname FULL_HOSTNAME=$(hostname -f) @@ -375,6 +377,8 @@ write_files: cgroupDriver: systemd 
clusterDNS: - 10.10.10.10 + - 10.10.10.11 + - 10.10.10.12 clusterDomain: cluster.local containerLogMaxSize: 100Mi cpuManagerReconcilePeriod: 0s diff --git a/pkg/userdata/ubuntu/testdata/dist-upgrade-on-boot.yaml b/pkg/userdata/ubuntu/testdata/dist-upgrade-on-boot.yaml index a84ab4bc3..72876104d 100644 --- a/pkg/userdata/ubuntu/testdata/dist-upgrade-on-boot.yaml +++ b/pkg/userdata/ubuntu/testdata/dist-upgrade-on-boot.yaml @@ -102,8 +102,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload diff --git a/pkg/userdata/ubuntu/testdata/docker.yaml b/pkg/userdata/ubuntu/testdata/docker.yaml index a8193e4d4..0f2ee1524 100644 --- a/pkg/userdata/ubuntu/testdata/docker.yaml +++ b/pkg/userdata/ubuntu/testdata/docker.yaml @@ -102,8 +102,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload diff --git a/pkg/userdata/ubuntu/testdata/kubelet-version-without-v-prefix.yaml b/pkg/userdata/ubuntu/testdata/kubelet-version-without-v-prefix.yaml index b5460cb68..77428740e 100644 --- a/pkg/userdata/ubuntu/testdata/kubelet-version-without-v-prefix.yaml +++ b/pkg/userdata/ubuntu/testdata/kubelet-version-without-v-prefix.yaml @@ -100,8 +100,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload diff --git a/pkg/userdata/ubuntu/testdata/multiple-dns-servers.yaml b/pkg/userdata/ubuntu/testdata/multiple-dns-servers.yaml index 8592ce601..f0d5f55f0 100644 --- a/pkg/userdata/ubuntu/testdata/multiple-dns-servers.yaml +++ b/pkg/userdata/ubuntu/testdata/multiple-dns-servers.yaml @@ -100,8 +100,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload diff --git a/pkg/userdata/ubuntu/testdata/multiple-ssh-keys.yaml b/pkg/userdata/ubuntu/testdata/multiple-ssh-keys.yaml index fb5098971..dc18f0a19 100644 --- a/pkg/userdata/ubuntu/testdata/multiple-ssh-keys.yaml +++ b/pkg/userdata/ubuntu/testdata/multiple-ssh-keys.yaml @@ -102,8 +102,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload diff --git a/pkg/userdata/ubuntu/testdata/nutanix.yaml b/pkg/userdata/ubuntu/testdata/nutanix.yaml index 261f731bb..58c16ae1f 100644 --- a/pkg/userdata/ubuntu/testdata/nutanix.yaml +++ b/pkg/userdata/ubuntu/testdata/nutanix.yaml @@ -103,8 +103,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload @@ -152,7 +152,7 @@ write_files: rm -f "$cri_tools_filename" ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" cd - - KUBE_VERSION="${KUBE_VERSION:-v1.21.10}" + KUBE_VERSION="${KUBE_VERSION:-v1.23.5}" 
kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" kube_sum_file="$kube_dir/sha256" @@ -235,8 +235,6 @@ write_files: --lock-file=/tmp/kubelet.lock \ --container-runtime=docker \ --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ - --feature-gates=DynamicKubeletConfig=true \ --network-plugin=cni \ --node-ip ${KUBELET_NODE_IP} diff --git a/pkg/userdata/ubuntu/testdata/openstack-dualstack.yaml b/pkg/userdata/ubuntu/testdata/openstack-dualstack.yaml new file mode 100644 index 000000000..77904a297 --- /dev/null +++ b/pkg/userdata/ubuntu/testdata/openstack-dualstack.yaml @@ -0,0 +1,452 @@ +#cloud-config + +hostname: node1 + + +ssh_pwauth: false +ssh_authorized_keys: +- "ssh-rsa AAABBB" + +write_files: + +- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + content: | + [Journal] + SystemMaxUse=5G + + +- path: "/opt/load-kernel-modules.sh" + permissions: "0755" + content: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + +- path: "/etc/sysctl.d/k8s.conf" + content: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + fs.inotify.max_user_instances = 8192 + + +- path: "/etc/default/grub.d/60-swap-accounting.cfg" + content: | + # Added by kubermatic machine-controller + # Enable cgroups memory and swap accounting + GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" + +- path: "/opt/bin/setup" + permissions: "0755" + content: | + #!/bin/bash + set -xeuo pipefail + if systemctl is-active ufw; then systemctl stop ufw; fi + systemctl mask ufw + systemctl restart systemd-modules-load.service + sysctl --system + apt-get update + + DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ + curl \ + ca-certificates \ + ceph-common \ + cifs-utils \ + conntrack \ + e2fsprogs \ + ebtables \ + ethtool \ + glusterfs-client \ + iptables \ + jq \ + kmod \ + openssh-client \ + nfs-common \ + socat \ + util-linux \ + ipvsadm + + # Update grub to include kernel command options to enable swap accounting. + # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 + + + apt-get update + apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - + add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" + + mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d + + cat <"$kube_sum_file" + + for bin in kubelet kubeadm kubectl; do + curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" + chmod +x "$kube_dir/$bin" + sum=$(curl -Lf "$kube_base_url/$bin.sha256") + echo "$sum $kube_dir/$bin" >>"$kube_sum_file" + done + sha256sum -c "$kube_sum_file" + + for bin in kubelet kubeadm kubectl; do + ln -sf "$kube_dir/$bin" "$opt_bin"/$bin + done + + if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi + + # set kubelet nodeip environment variable + /opt/bin/setup_net_env.sh + + systemctl enable --now kubelet + systemctl enable --now --no-block kubelet-healthcheck.service + systemctl disable setup.service + +- path: "/opt/bin/supervise.sh" + permissions: "0755" + content: | + #!/bin/bash + set -xeuo pipefail + while ! "$@"; do + sleep 1 + done + +- path: "/opt/disable-swap.sh" + permissions: "0755" + content: | + sed -i.orig '/.*swap.*/d' /etc/fstab + swapoff -a + +- path: "/etc/systemd/system/kubelet.service" + content: | + [Unit] + After=docker.service + Requires=docker.service + + Description=kubelet: The Kubernetes Node Agent + Documentation=https://kubernetes.io/docs/home/ + + [Service] + User=root + Restart=always + StartLimitInterval=0 + RestartSec=10 + CPUAccounting=true + MemoryAccounting=true + + Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" + EnvironmentFile=-/etc/environment + + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + + ExecStartPre=/bin/bash /opt/disable-swap.sh + + ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh + ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=external \ + --hostname-override=node1 \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --container-runtime=docker \ + --container-runtime-endpoint=unix:///var/run/dockershim.sock \ + --dynamic-config-dir=/etc/kubernetes/dynamic-config-dir \ + --feature-gates=DynamicKubeletConfig=true \ + --network-plugin=cni \ + + [Install] + WantedBy=multi-user.target + +- path: "/etc/systemd/system/kubelet.service.d/extras.conf" + content: | + [Service] + Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" + +- path: "/etc/kubernetes/cloud-config" + permissions: "0600" + content: | + {openstack-config:true} + +- path: "/opt/bin/setup_net_env.sh" + permissions: "0755" + content: | + #!/usr/bin/env bash + echodate() { + echo "[$(date -Is)]" "$@" + } + + # get the default interface IP address + DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") + DEFAULT_IFC_IP6=$(ip -o -6 route get 1:: | grep -oP "src \K\S+") + DEFAULT_IFC_IP=$DEFAULT_IFC_IP,$DEFAULT_IFC_IP6 + + # get the full hostname + FULL_HOSTNAME=$(hostname -f) + + if [ -z "${DEFAULT_IFC_IP}" ] + then + echodate "Failed to get IP address for the default route interface" + exit 1 + fi + + # write the nodeip_env file + # we need the line below because flatcar has the same string "coreos" in that file + if grep -q coreos /etc/os-release + then + echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf + elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] + then + echodate "Can't find kubelet service extras directory" + exit 1 + else + echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf + fi + + +- path: "/etc/kubernetes/bootstrap-kubelet.conf" + permissions: "0600" + content: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: null + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + +- path: "/etc/kubernetes/pki/ca.crt" + content: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + 
mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + +- path: "/etc/systemd/system/setup.service" + permissions: "0644" + content: | + [Install] + WantedBy=multi-user.target + + [Unit] + Requires=network-online.target + After=network-online.target + + [Service] + Type=oneshot + RemainAfterExit=true + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/supervise.sh /opt/bin/setup + +- path: "/etc/profile.d/opt-bin-path.sh" + permissions: "0644" + content: | + export PATH="/opt/bin:$PATH" + +- path: /etc/docker/daemon.json + permissions: "0644" + content: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"}} + +- path: "/etc/kubernetes/kubelet.conf" + content: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + - 10.10.10.11 + - 10.10.10.12 + clusterDomain: cluster.local + containerLogMaxSize: 100Mi + cpuManagerReconcilePeriod: 0s + evictionHard: + imagefs.available: 15% + memory.available: 100Mi + nodefs.available: 10% + nodefs.inodesFree: 5% + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + kubeReserved: + cpu: 200m + ephemeral-storage: 1Gi + memory: 200Mi + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + serverTLSBootstrap: true + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + systemReserved: + cpu: 200m + ephemeral-storage: 1Gi + memory: 200Mi + tlsCipherSuites: + - TLS_AES_128_GCM_SHA256 + - TLS_AES_256_GCM_SHA384 + - TLS_CHACHA20_POLY1305_SHA256 + - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 + volumePluginDir: /var/lib/kubelet/volumeplugins + volumeStatsAggPeriod: 0s + + +- path: /etc/systemd/system/kubelet-healthcheck.service + permissions: "0644" + content: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + 
ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + +runcmd: +- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/openstack-overwrite-cloud-config.yaml b/pkg/userdata/ubuntu/testdata/openstack-overwrite-cloud-config.yaml index a263c4a23..3866be991 100644 --- a/pkg/userdata/ubuntu/testdata/openstack-overwrite-cloud-config.yaml +++ b/pkg/userdata/ubuntu/testdata/openstack-overwrite-cloud-config.yaml @@ -100,8 +100,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload diff --git a/pkg/userdata/ubuntu/testdata/openstack.yaml b/pkg/userdata/ubuntu/testdata/openstack.yaml index 8492a8e59..6725d1b9b 100644 --- a/pkg/userdata/ubuntu/testdata/openstack.yaml +++ b/pkg/userdata/ubuntu/testdata/openstack.yaml @@ -100,8 +100,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload diff --git a/pkg/userdata/ubuntu/testdata/version-1.22.7.yaml b/pkg/userdata/ubuntu/testdata/version-1.22.7.yaml index b5460cb68..77428740e 100644 --- a/pkg/userdata/ubuntu/testdata/version-1.22.7.yaml +++ b/pkg/userdata/ubuntu/testdata/version-1.22.7.yaml @@ -100,8 +100,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload diff --git a/pkg/userdata/ubuntu/testdata/version-1.23.5.yaml b/pkg/userdata/ubuntu/testdata/version-1.23.5.yaml index 934498758..d05672a00 100644 --- a/pkg/userdata/ubuntu/testdata/version-1.23.5.yaml +++ b/pkg/userdata/ubuntu/testdata/version-1.23.5.yaml @@ -100,8 +100,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload diff --git a/pkg/userdata/ubuntu/testdata/vsphere-mirrors.yaml b/pkg/userdata/ubuntu/testdata/vsphere-mirrors.yaml index c2f9843a1..be7f75f56 100644 --- a/pkg/userdata/ubuntu/testdata/vsphere-mirrors.yaml +++ b/pkg/userdata/ubuntu/testdata/vsphere-mirrors.yaml @@ -110,8 +110,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload diff --git a/pkg/userdata/ubuntu/testdata/vsphere-proxy.yaml b/pkg/userdata/ubuntu/testdata/vsphere-proxy.yaml index c89efe60a..7dfa8d510 100644 --- a/pkg/userdata/ubuntu/testdata/vsphere-proxy.yaml +++ b/pkg/userdata/ubuntu/testdata/vsphere-proxy.yaml @@ -110,8 +110,8 @@ write_files: apt-get install --allow-downgrades -y \ containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload diff --git a/pkg/userdata/ubuntu/testdata/vsphere.yaml b/pkg/userdata/ubuntu/testdata/vsphere.yaml index 6c49a76d4..f73e59b71 100644 --- a/pkg/userdata/ubuntu/testdata/vsphere.yaml +++ b/pkg/userdata/ubuntu/testdata/vsphere.yaml @@ -101,8 +101,8 @@ write_files: apt-get install --allow-downgrades -y \ 
containerd.io=1.4* \ - docker-ce-cli=5:19.03* \ - docker-ce=5:19.03* + docker-ce-cli=5:20.10* \ + docker-ce=5:20.10* apt-mark hold docker-ce* containerd.io systemctl daemon-reload diff --git a/test/e2e/provisioning/all_e2e_test.go b/test/e2e/provisioning/all_e2e_test.go index 2728ed658..c19c979c6 100644 --- a/test/e2e/provisioning/all_e2e_test.go +++ b/test/e2e/provisioning/all_e2e_test.go @@ -79,6 +79,8 @@ const ( nutanixManifest = "./testdata/machinedeployment-nutanix.yaml" ) +const defaultKubernetesVersion = "1.23.5" + var testRunIdentifier = flag.String("identifier", "local", "The unique identifier for this test run") func TestInvalidObjectsGetRejected(t *testing.T) { @@ -115,7 +117,7 @@ func TestCustomCAsAreApplied(t *testing.T) { osNetwork := os.Getenv("OS_NETWORK_NAME") if osAuthURL == "" || osUsername == "" || osPassword == "" || osDomain == "" || osRegion == "" || osTenant == "" { - t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSOWRD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") + t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") } params := []string{ @@ -309,7 +311,7 @@ func TestOpenstackProvisioningE2E(t *testing.T) { osNetwork := os.Getenv("OS_NETWORK_NAME") if osAuthURL == "" || osUsername == "" || osPassword == "" || osDomain == "" || osRegion == "" || osTenant == "" { - t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSOWRD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") + t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") } params := []string{ @@ -322,7 +324,7 @@ func TestOpenstackProvisioningE2E(t *testing.T) { fmt.Sprintf("<< NETWORK_NAME >>=%s", osNetwork), } - selector := Not(OsSelector("sles", "rhel", "amzn2")) + selector := Not(OsSelector("sles", "amzn2")) runScenarios(t, selector, params, OSManifest, fmt.Sprintf("os-%s", *testRunIdentifier)) } @@ -340,7 +342,7 @@ func TestOpenstackProjectAuthProvisioningE2E(t *testing.T) { osNetwork := os.Getenv("OS_NETWORK_NAME") if osAuthURL == "" || osUsername == "" || osPassword == "" || osDomain == "" || osRegion == "" || osProject == "" { - t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSOWRD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") + t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") } params := []string{ @@ -357,7 +359,7 @@ func TestOpenstackProjectAuthProvisioningE2E(t *testing.T) { name: "MachineDeploy with project auth vars", osName: "ubuntu", containerRuntime: "containerd", - kubernetesVersion: "1.21.8", + kubernetesVersion: defaultKubernetesVersion, executor: verifyCreateAndDelete, } testScenario(t, scenario, *testRunIdentifier, params, OSManifestProjectAuth, false) @@ -388,6 +390,12 @@ func TestDigitalOceanProvisioningE2E(t *testing.T) { func TestAWSProvisioningE2E(t *testing.T) { t.Parallel() + provisioningUtility := flatcar.Ignition + // `OPERATING_SYSTEM_MANAGER` will be false when legacy machine-controller userdata should be used for E2E tests. 
+ if v := os.Getenv("OPERATING_SYSTEM_MANAGER"); v == "false" { + provisioningUtility = flatcar.CloudInit + } + // test data awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID") awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET") @@ -400,8 +408,9 @@ func TestAWSProvisioningE2E(t *testing.T) { // act params := []string{fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID), fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret), - fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.CloudInit), + fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", provisioningUtility), } + runScenarios(t, selector, params, AWSManifest, fmt.Sprintf("aws-%s", *testRunIdentifier)) } @@ -422,14 +431,14 @@ func TestAWSAssumeRoleProvisioningE2E(t *testing.T) { // act params := []string{fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID), fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret), - fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.CloudInit), + fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.Ignition), } scenario := scenario{ name: "AWS with AssumeRole", osName: "ubuntu", containerRuntime: "docker", - kubernetesVersion: "1.22.5", + kubernetesVersion: defaultKubernetesVersion, executor: verifyCreateAndDelete, } testScenario(t, scenario, *testRunIdentifier, params, AWSManifest, false) @@ -450,7 +459,7 @@ func TestAWSSpotInstanceProvisioningE2E(t *testing.T) { // act params := []string{fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID), fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret), - fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.CloudInit), + fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.Ignition), } runScenarios(t, selector, params, AWSSpotInstanceManifest, fmt.Sprintf("aws-%s", *testRunIdentifier)) } @@ -531,14 +540,14 @@ func TestAWSFlatcarContainerdProvisioningE2E(t *testing.T) { params := []string{ fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID), fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret), - fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.CloudInit), + fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.Ignition), } scenario := scenario{ name: "flatcar with containerd in AWS", osName: "flatcar", containerRuntime: "containerd", - kubernetesVersion: "1.22.5", + kubernetesVersion: defaultKubernetesVersion, executor: verifyCreateAndDelete, } testScenario(t, scenario, *testRunIdentifier, params, AWSManifest, false) @@ -588,7 +597,7 @@ func TestAWSEbsEncryptionEnabledProvisioningE2E(t *testing.T) { name: "AWS with ebs encryption enabled", osName: "ubuntu", containerRuntime: "containerd", - kubernetesVersion: "v1.21.8", + kubernetesVersion: defaultKubernetesVersion, executor: verifyCreateAndDelete, } testScenario(t, scenario, fmt.Sprintf("aws-%s", *testRunIdentifier), params, AWSEBSEncryptedManifest, false) @@ -608,7 +617,7 @@ func TestAzureProvisioningE2E(t *testing.T) { t.Fatal("unable to run the test suite, AZURE_TENANT_ID, AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables cannot be empty") } - selector := Not(OsSelector("sles", "amzn2")) + selector := Not(OsSelector("sles", "amzn2", "rhel", "rockylinux", "flatcar")) // act params := []string{ fmt.Sprintf("<< AZURE_TENANT_ID >>=%s", azureTenantID), @@ -677,7 +686,7 @@ func TestAzureRedhatSatelliteProvisioningE2E(t *testing.T) { name: "Azure redhat satellite server subscription", osName: "rhel", containerRuntime: "docker", - kubernetesVersion: "1.21.8", + kubernetesVersion: defaultKubernetesVersion, executor: verifyCreateAndDelete, } @@ -738,7 +747,7 @@ func 
TestEquinixMetalProvisioningE2E(t *testing.T) { t.Fatal("unable to run the test suite, METAL_PROJECT_ID environment variable cannot be empty") } - selector := Not(OsSelector("sles", "rhel", "amzn2", "rockylinux")) + selector := And(OsSelector("ubuntu", "centos", "rockylinux", "flatcar"), Not(NameSelector("migrateUID"))) // act params := []string{ @@ -877,7 +886,7 @@ func TestVsphereResourcePoolProvisioningE2E(t *testing.T) { name: "vSphere resource pool provisioning", osName: "flatcar", containerRuntime: "docker", - kubernetesVersion: "1.22.5", + kubernetesVersion: defaultKubernetesVersion, executor: verifyCreateAndDelete, } @@ -976,7 +985,7 @@ func TestUbuntuProvisioningWithUpgradeE2E(t *testing.T) { osNetwork := os.Getenv("OS_NETWORK_NAME") if osAuthURL == "" || osUsername == "" || osPassword == "" || osDomain == "" || osRegion == "" || osTenant == "" { - t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSOWRD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") + t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") } params := []string{ @@ -993,7 +1002,7 @@ func TestUbuntuProvisioningWithUpgradeE2E(t *testing.T) { name: "Ubuntu upgrade", osName: "ubuntu", containerRuntime: "docker", - kubernetesVersion: "1.22.5", + kubernetesVersion: defaultKubernetesVersion, executor: verifyCreateAndDelete, } @@ -1018,7 +1027,7 @@ func TestDeploymentControllerUpgradesMachineE2E(t *testing.T) { name: "MachineDeployment upgrade", osName: "ubuntu", containerRuntime: "docker", - kubernetesVersion: "1.21.8", + kubernetesVersion: defaultKubernetesVersion, executor: verifyCreateUpdateAndDelete, } testScenario(t, scenario, *testRunIdentifier, params, HZManifest, false) @@ -1028,13 +1037,20 @@ func TestAnexiaProvisioningE2E(t *testing.T) { t.Parallel() token := os.Getenv("ANEXIA_TOKEN") - if token == "" { - t.Fatal("unable to run the test suite, ANEXIA_TOKEN environment variable cannot be empty") + vlanID := os.Getenv("ANEXIA_VLAN_ID") + templateID := os.Getenv("ANEXIA_TEMPLATE_ID") + locationID := os.Getenv("ANEXIA_LOCATION_ID") + + if token == "" || vlanID == "" || templateID == "" || locationID == "" { + t.Fatal("unable to run test suite, all of ANEXIA_TOKEN, ANEXIA_VLAN_ID, ANEXIA_TEMPLATE_ID, and ANEXIA_LOCATION_ID must be set!") } selector := OsSelector("flatcar") params := []string{ fmt.Sprintf("<< ANEXIA_TOKEN >>=%s", token), + fmt.Sprintf("<< ANEXIA_VLAN_ID >>=%s", vlanID), + fmt.Sprintf("<< ANEXIA_TEMPLATE_ID >>=%s", templateID), + fmt.Sprintf("<< ANEXIA_LOCATION_ID >>=%s", locationID), } runScenarios(t, selector, params, anexiaManifest, fmt.Sprintf("anexia-%s", *testRunIdentifier)) diff --git a/test/e2e/provisioning/deploymentscenario.go b/test/e2e/provisioning/deploymentscenario.go index 0ccec451e..825f81f15 100644 --- a/test/e2e/provisioning/deploymentscenario.go +++ b/test/e2e/provisioning/deploymentscenario.go @@ -51,7 +51,7 @@ func verifyCreateUpdateAndDelete(kubeConfig, manifestPath string, parameters []s klog.Infof("Waiting for second MachineSet to appear after updating MachineDeployment %s", machineDeployment.Name) var machineSets []clusterv1alpha1.MachineSet if err := wait.Poll(5*time.Second, timeout, func() (bool, error) { - machineSets, err = getMachingMachineSets(machineDeployment, client) + machineSets, err = getMatchingMachineSets(machineDeployment, client) if err != nil { return false, err } diff --git a/test/e2e/provisioning/helper.go b/test/e2e/provisioning/helper.go index 
2592657aa..cefad450f 100644 --- a/test/e2e/provisioning/helper.go +++ b/test/e2e/provisioning/helper.go @@ -33,7 +33,6 @@ var ( scenarios = buildScenarios() versions = []*semver.Version{ - semver.MustParse("v1.21.10"), semver.MustParse("v1.22.7"), semver.MustParse("v1.23.5"), semver.MustParse("v1.24.0"), @@ -56,6 +55,14 @@ var ( string(providerconfigtypes.OperatingSystemFlatcar): "machine-controller-e2e-flatcar-stable-2983", string(providerconfigtypes.OperatingSystemRockyLinux): "machine-controller-e2e-rockylinux", } + + vSphereOSImageTemplates = map[string]string{ + string(providerconfigtypes.OperatingSystemCentOS): "machine-controller-e2e-centos", + string(providerconfigtypes.OperatingSystemFlatcar): "machine-controller-e2e-flatcar", + string(providerconfigtypes.OperatingSystemRHEL): "machine-controller-e2e-rhel", + string(providerconfigtypes.OperatingSystemRockyLinux): "machine-controller-e2e-rockylinux", + string(providerconfigtypes.OperatingSystemUbuntu): "machine-controller-e2e-ubuntu", + } ) type scenario struct { @@ -194,29 +201,32 @@ func testScenario(t *testing.T, testCase scenario, cloudProvider string, testPar scenarioParams = append(scenarioParams, fmt.Sprintf("<< CUSTOM-IMAGE >>=%v", "rhel-8-1-custom")) scenarioParams = append(scenarioParams, fmt.Sprintf("<< AMI >>=%s", "ami-08c04369895785ac4")) scenarioParams = append(scenarioParams, fmt.Sprintf("<< MAX_PRICE >>=%s", "0.08")) - } else if testCase.osName == string(providerconfigtypes.OperatingSystemUbuntu) { - // TODO: Remove this when https://github.com/kubermatic/kubermatic/issues/10022 is marked as resolved. - scenarioParams = append(scenarioParams, fmt.Sprintf("<< OS_DISK_SIZE >>=%v", 30)) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< DATA_DISK_SIZE >>=%v", 30)) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< AMI >>=%s", "ami-092f628832a8d22a5")) // ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220523 - scenarioParams = append(scenarioParams, fmt.Sprintf("<< DISK_SIZE >>=%v", 25)) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< CUSTOM-IMAGE >>=%v", "")) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< RHEL_SUBSCRIPTION_MANAGER_USER >>=%s", "")) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< RHEL_SUBSCRIPTION_MANAGER_PASSWORD >>=%s", "")) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>=%s", "")) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< MAX_PRICE >>=%s", "0.03")) } else { scenarioParams = append(scenarioParams, fmt.Sprintf("<< OS_DISK_SIZE >>=%v", 30)) scenarioParams = append(scenarioParams, fmt.Sprintf("<< DATA_DISK_SIZE >>=%v", 30)) scenarioParams = append(scenarioParams, fmt.Sprintf("<< AMI >>=%s", "")) scenarioParams = append(scenarioParams, fmt.Sprintf("<< DISK_SIZE >>=%v", 25)) scenarioParams = append(scenarioParams, fmt.Sprintf("<< CUSTOM-IMAGE >>=%v", "")) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< RHEL_SUBSCRIPTION_MANAGER_USER >>=%s", "")) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< RHEL_SUBSCRIPTION_MANAGER_PASSWORD >>=%s", "")) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>=%s", "")) scenarioParams = append(scenarioParams, fmt.Sprintf("<< MAX_PRICE >>=%s", "0.03")) } + if strings.Contains(cloudProvider, string(providerconfigtypes.CloudProviderEquinixMetal)) { + switch testCase.osName { + case string(providerconfigtypes.OperatingSystemCentOS): + scenarioParams = append(scenarioParams, fmt.Sprintf("<< 
INSTANCE_TYPE >>=%s", "m3.small.x86")) + scenarioParams = append(scenarioParams, fmt.Sprintf("<< METRO_CODE >>=%s", "AM")) + case string(providerconfigtypes.OperatingSystemFlatcar): + scenarioParams = append(scenarioParams, fmt.Sprintf("<< INSTANCE_TYPE >>=%s", "c3.small.x86")) + scenarioParams = append(scenarioParams, fmt.Sprintf("<< METRO_CODE >>=%s", "NY")) + case string(providerconfigtypes.OperatingSystemRockyLinux): + scenarioParams = append(scenarioParams, fmt.Sprintf("<< INSTANCE_TYPE >>=%s", "m3.small.x86")) + scenarioParams = append(scenarioParams, fmt.Sprintf("<< METRO_CODE >>=%s", "AM")) + case string(providerconfigtypes.OperatingSystemUbuntu): + scenarioParams = append(scenarioParams, fmt.Sprintf("<< INSTANCE_TYPE >>=%s", "m3.small.x86")) + scenarioParams = append(scenarioParams, fmt.Sprintf("<< METRO_CODE >>=%s", "TY")) + } + } + // only used by assume role scenario, otherwise empty (disabled) scenarioParams = append(scenarioParams, fmt.Sprintf("<< AWS_ASSUME_ROLE_ARN >>=%s", os.Getenv("AWS_ASSUME_ROLE_ARN"))) scenarioParams = append(scenarioParams, fmt.Sprintf("<< AWS_ASSUME_ROLE_EXTERNAL_ID >>=%s", os.Getenv("AWS_ASSUME_ROLE_EXTERNAL_ID"))) @@ -224,6 +234,9 @@ func testScenario(t *testing.T, testCase scenario, cloudProvider string, testPar // only used by OpenStack scenarios scenarioParams = append(scenarioParams, fmt.Sprintf("<< OS_IMAGE >>=%s", openStackImages[testCase.osName])) + // only use by vSphere scenarios + scenarioParams = append(scenarioParams, fmt.Sprintf("<< OS_Image_Template >>=%s", vSphereOSImageTemplates[testCase.osName])) + // default kubeconfig to the hardcoded path at which `make e2e-cluster` creates its new kubeconfig gopath := os.Getenv("GOPATH") projectDir := filepath.Join(gopath, "src/github.com/kubermatic/machine-controller") @@ -270,6 +283,5 @@ func buildScenarios() []scenario { osName: "ubuntu", executor: verifyMigrateUID, }) - return all } diff --git a/test/e2e/provisioning/testdata/machinedeployment-anexia.yaml b/test/e2e/provisioning/testdata/machinedeployment-anexia.yaml index 64fcc7aa3..cca5f3dce 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-anexia.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-anexia.yaml @@ -25,9 +25,9 @@ spec: cloudProvider: anexia cloudProviderSpec: token: "<< ANEXIA_TOKEN >>" - vlanID: "e37d7134ab934f5683fabcc72d28e036" - templateID: "12c28aa7-604d-47e9-83fb-5f1d1f1837b3" - locationID: "52b5f6b2fd3a4a7eaaedf1a7c019e9ea" + vlanID: "<< ANEXIA_VLAN_ID >>" + templateID: "<< ANEXIA_TEMPLATE_ID >>" + locationID: "<< ANEXIA_LOCATION_ID >>" cpus: 2 memory: 2048 diskSize: 60 diff --git a/test/e2e/provisioning/testdata/machinedeployment-aws-arm-machines.yaml b/test/e2e/provisioning/testdata/machinedeployment-aws-arm-machines.yaml index bc81a8a15..e4c0d6375 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-aws-arm-machines.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-aws-arm-machines.yaml @@ -34,8 +34,7 @@ spec: diskSize: 50 diskType: "gp2" ebsVolumeEncrypted: false - # TODO: Revert this to "<< AMI >>" when https://github.com/kubermatic/kubermatic/issues/10022 is marked as resolved. 
- ami: "ami-07d0e9bbaa6dad756" + ami: "<< AMI >>" securityGroupIDs: - "sg-a2c195ca" tags: diff --git a/test/e2e/provisioning/testdata/machinedeployment-aws-ebs-encryption-enabled.yaml b/test/e2e/provisioning/testdata/machinedeployment-aws-ebs-encryption-enabled.yaml index a1bd27bed..ba06debe1 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-aws-ebs-encryption-enabled.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-aws-ebs-encryption-enabled.yaml @@ -34,7 +34,6 @@ spec: diskSize: 50 diskType: "gp2" ebsVolumeEncrypted: true - ami: "<< AMI >>" securityGroupIDs: - "sg-a2c195ca" tags: diff --git a/test/e2e/provisioning/testdata/machinedeployment-equinixmetal.yaml b/test/e2e/provisioning/testdata/machinedeployment-equinixmetal.yaml index 28e52c4fa..52ecd2f2f 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-equinixmetal.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-equinixmetal.yaml @@ -26,9 +26,8 @@ spec: cloudProviderSpec: token: << METAL_AUTH_TOKEN >> projectID: << METAL_PROJECT_ID >> - instanceType: "c1.small.x86" - facilities: - - "ams1" + instanceType: << INSTANCE_TYPE >> + metro: << METRO_CODE >> operatingSystem: "<< OS_NAME >>" operatingSystemSpec: distUpgradeOnBoot: false diff --git a/test/e2e/provisioning/testdata/machinedeployment-gce.yaml b/test/e2e/provisioning/testdata/machinedeployment-gce.yaml index 3548f02c5..a2d9eb4d3 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-gce.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-gce.yaml @@ -29,7 +29,7 @@ spec: # See https://cloud.google.com/compute/docs/regions-zones/ zone: "europe-west3-a" # See https://cloud.google.com/compute/docs/machine-types - machineType: "n1-standard-2" + machineType: "n1-standard-1" # In GB diskSize: 25 # Can be 'pd-standard' or 'pd-ssd' diff --git a/test/e2e/provisioning/testdata/machinedeployment-vsphere-datastore-cluster.yaml b/test/e2e/provisioning/testdata/machinedeployment-vsphere-datastore-cluster.yaml index e22c05001..3242f90e0 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-vsphere-datastore-cluster.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-vsphere-datastore-cluster.yaml @@ -24,7 +24,7 @@ spec: - "<< YOUR_PUBLIC_KEY >>" cloudProvider: "vsphere" cloudProviderSpec: - templateVMName: 'machine-controller-e2e-<< OS_NAME >>' + templateVMName: '<< OS_Image_Template >>' username: '<< VSPHERE_USERNAME >>' vsphereURL: '<< VSPHERE_ADDRESS >>' datacenter: 'dc-1' diff --git a/test/e2e/provisioning/testdata/machinedeployment-vsphere-resource-pool.yaml b/test/e2e/provisioning/testdata/machinedeployment-vsphere-resource-pool.yaml index c2b42657f..a54021366 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-vsphere-resource-pool.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-vsphere-resource-pool.yaml @@ -24,13 +24,12 @@ spec: - "<< YOUR_PUBLIC_KEY >>" cloudProvider: "vsphere" cloudProviderSpec: - templateVMName: 'machine-controller-e2e-<< OS_NAME >>' + templateVMName: '<< OS_Image_Template >>' username: '<< VSPHERE_USERNAME >>' vsphereURL: '<< VSPHERE_ADDRESS >>' datacenter: 'dc-1' folder: '/dc-1/vm/e2e-tests' password: << VSPHERE_PASSWORD >> - # example: 'https://your-vcenter:8443'. 
         datastoreCluster: 'dsc-1'
         resourcePool: 'e2e-resource-pool'
         cpus: 2
diff --git a/test/e2e/provisioning/testdata/machinedeployment-vsphere-static-ip.yaml b/test/e2e/provisioning/testdata/machinedeployment-vsphere-static-ip.yaml
index e46cafe48..eac0bdcfa 100644
--- a/test/e2e/provisioning/testdata/machinedeployment-vsphere-static-ip.yaml
+++ b/test/e2e/provisioning/testdata/machinedeployment-vsphere-static-ip.yaml
@@ -24,14 +24,14 @@ spec:
         - "<< YOUR_PUBLIC_KEY >>"
       cloudProvider: "vsphere"
       cloudProviderSpec:
-        templateVMName: '<< OS_NAME >>-template'
+        templateVMName: '<< OS_Image_Template >>'
         username: '<< VSPHERE_USERNAME >>'
         vsphereURL: '<< VSPHERE_ADDRESS >>'
-        datacenter: 'Customer-A'
-        folder: '/Customer-A/vm/e2e-tests'
+        datacenter: 'dc-1'
+        folder: '/dc-1/vm/e2e-tests'
         password: << VSPHERE_PASSWORD >>
         # example: 'https://your-vcenter:8443'. '/sdk' gets appended automatically
-        datastore: datastore1
+        datastore: HS-FreeNAS
         allowInsecure: true
         cpus: 2
         MemoryMB: 2048
diff --git a/test/e2e/provisioning/testdata/machinedeployment-vsphere.yaml b/test/e2e/provisioning/testdata/machinedeployment-vsphere.yaml
index 49101efc5..921d00669 100644
--- a/test/e2e/provisioning/testdata/machinedeployment-vsphere.yaml
+++ b/test/e2e/provisioning/testdata/machinedeployment-vsphere.yaml
@@ -24,7 +24,7 @@ spec:
         - "<< YOUR_PUBLIC_KEY >>"
       cloudProvider: "vsphere"
      cloudProviderSpec:
-        templateVMName: 'machine-controller-e2e-<< OS_NAME >>'
+        templateVMName: '<< OS_Image_Template >>'
         username: '<< VSPHERE_USERNAME >>'
         vsphereURL: '<< VSPHERE_ADDRESS >>'
         datacenter: 'dc-1'
diff --git a/test/e2e/provisioning/verify.go b/test/e2e/provisioning/verify.go
index 6a2f323b2..a474d37e4 100644
--- a/test/e2e/provisioning/verify.go
+++ b/test/e2e/provisioning/verify.go
@@ -272,7 +272,7 @@ func assureNodeForMachineDeployment(machineDeployment *clusterv1alpha1.MachineDe
     }
     for _, machine := range machines {
-        // Azure doesn't seem to easely expose the private IP address, there is only a PublicIPAddressClient in the sdk
+        // Azure doesn't seem to easily expose the private IP address; there is only a PublicIPAddressClient in the SDK
         providerConfig, err := providerconfigtypes.GetConfig(machine.Spec.ProviderSpec)
         if err != nil {
             return fmt.Errorf("failed to get provider config: %w", err)
         }
@@ -338,7 +338,7 @@ func readAndModifyManifest(pathToManifest string, keyValuePairs []string) (strin

 // getMatchingMachines returns all machines that are owned by the passed machineDeployment.
 func getMatchingMachines(machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Client) ([]clusterv1alpha1.Machine, error) {
-    matchingMachineSets, err := getMachingMachineSets(machineDeployment, client)
+    matchingMachineSets, err := getMatchingMachineSets(machineDeployment, client)
     if err != nil {
         return nil, err
     }
@@ -369,8 +369,8 @@ func getMatchingMachinesForMachineset(machineSet *clusterv1alpha1.MachineSet, cl
     return matchingMachines, nil
 }

-// getMachingMachineSets returns all machineSets that are owned by the passed machineDeployment.
-func getMachingMachineSets(machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Reader) ([]clusterv1alpha1.MachineSet, error) {
+// getMatchingMachineSets returns all machineSets that are owned by the passed machineDeployment.
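+// Ownership is determined from the MachineSets' owner references, so the MachineDeployment must have its UID set (i.e. be fetched from the KubeAPI).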
+func getMatchingMachineSets(machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Reader) ([]clusterv1alpha1.MachineSet, error) {
     // Ensure we actually have an object from the KubeAPI and not just the result of the yaml parsing, as the latter
     // can not be the owner of anything due to missing UID.
     if machineDeployment.ResourceVersion == "" {