From bd474f3cddb924827ee045f686c754af2ca290b8 Mon Sep 17 00:00:00 2001
From: Leilei Hu
Date: Thu, 18 Nov 2021 21:37:20 +0800
Subject: [PATCH] enhancement to clean up the control plane (#75)

Signed-off-by: Leilei Hu
---
 SECURITY.md              |   1 +
 cmd/clusteradm.go        |   2 +
 pkg/cmd/clean/cmd.go     |  51 +++++++++++++++
 pkg/cmd/clean/exec.go    | 137 +++++++++++++++++++++++++++++++++++++++
 pkg/cmd/clean/options.go |  41 ++++++++++++
 5 files changed, 232 insertions(+)
 create mode 100644 pkg/cmd/clean/cmd.go
 create mode 100644 pkg/cmd/clean/exec.go
 create mode 100644 pkg/cmd/clean/options.go

diff --git a/SECURITY.md b/SECURITY.md
index 13f6a1e10..84ca333bc 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -1 +1,2 @@
+[comment]: # ( Copyright Contributors to the Open Cluster Management project )
 Refer to our [Community Security Response](https://github.com/open-cluster-management-io/community/blob/main/SECURITY.md).
diff --git a/cmd/clusteradm.go b/cmd/clusteradm.go
index b3dab46e9..9d2ab642b 100644
--- a/cmd/clusteradm.go
+++ b/cmd/clusteradm.go
@@ -21,6 +21,7 @@ import (
 	"open-cluster-management.io/clusteradm/pkg/cmd/version"
 
 	acceptclusters "open-cluster-management.io/clusteradm/pkg/cmd/accept"
+	clean "open-cluster-management.io/clusteradm/pkg/cmd/clean"
 	deletecmd "open-cluster-management.io/clusteradm/pkg/cmd/delete"
 	enable "open-cluster-management.io/clusteradm/pkg/cmd/enable"
 	"open-cluster-management.io/clusteradm/pkg/cmd/get"
@@ -76,6 +77,7 @@ func main() {
 		Commands: []*cobra.Command{
 			get.NewCmd(clusteradmFlags, streams),
 			deletecmd.NewCmd(clusteradmFlags, streams),
+			clean.NewCmd(clusteradmFlags, streams),
 			inithub.NewCmd(clusteradmFlags, streams),
 			joinhub.NewCmd(clusteradmFlags, streams),
 			unjoin.NewCmd(clusteradmFlags, streams),
diff --git a/pkg/cmd/clean/cmd.go b/pkg/cmd/clean/cmd.go
new file mode 100644
index 000000000..90ad6dce7
--- /dev/null
+++ b/pkg/cmd/clean/cmd.go
@@ -0,0 +1,51 @@
+// Copyright Contributors to the Open Cluster Management project
+
+package init
+
+import (
+	"fmt"
+
+	"open-cluster-management.io/clusteradm/pkg/helpers"
+
+	"github.com/spf13/cobra"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	genericclioptionsclusteradm "open-cluster-management.io/clusteradm/pkg/genericclioptions"
+)
+
+var example = `
+# Clean up the resources from the init stage
+%[1]s clean
+`
+
+// NewCmd ...
+func NewCmd(clusteradmFlags *genericclioptionsclusteradm.ClusteradmFlags, streams genericclioptions.IOStreams) *cobra.Command {
+	o := newOptions(clusteradmFlags, streams)
+
+	cmd := &cobra.Command{
+		Use:          "clean",
+		Short:        "clean the hub",
+		Example:      fmt.Sprintf(example, helpers.GetExampleHeader()),
+		SilenceUsage: true,
+		PreRun: func(c *cobra.Command, args []string) {
+			helpers.DryRunMessage(o.ClusteradmFlags.DryRun)
+		},
+		RunE: func(c *cobra.Command, args []string) error {
+			if err := o.complete(c, args); err != nil {
+				return err
+			}
+			if err := o.validate(); err != nil {
+				return err
+			}
+			if err := o.run(); err != nil {
+				return err
+			}
+
+			return nil
+		},
+	}
+
+	cmd.Flags().StringVar(&o.clusterManageName, "name", "cluster-manager", "The name of the cluster manager resource")
+	cmd.Flags().StringVar(&o.outputFile, "output-file", "", "The generated resources will be copied to the specified file")
+	cmd.Flags().BoolVar(&o.useBootstrapToken, "use-bootstrap-token", false, "If set, the bootstrap token will be used instead of a service account token")
+	return cmd
+}
diff --git a/pkg/cmd/clean/exec.go b/pkg/cmd/clean/exec.go
new file mode 100644
index 000000000..ac8b009b3
--- /dev/null
+++ b/pkg/cmd/clean/exec.go
@@ -0,0 +1,137 @@
+// Copyright Contributors to the Open Cluster Management project
+
+package init
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"time"
+
+	clustermanagerclient "open-cluster-management.io/api/client/operator/clientset/versioned"
+	"open-cluster-management.io/clusteradm/pkg/helpers"
+	"open-cluster-management.io/clusteradm/pkg/helpers/apply"
+
+	"github.com/spf13/cobra"
+
+	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/util/retry"
+	"k8s.io/klog/v2"
+)
+
+func (o *Options) complete(cmd *cobra.Command, args []string) (err error) {
+	klog.V(1).InfoS("clean options:", "dry-run", o.ClusteradmFlags.DryRun, "output-file", o.outputFile)
+	o.values = Values{
+		Hub: Hub{
+			TokenID:     helpers.RandStringRunes_az09(6),
+			TokenSecret: helpers.RandStringRunes_az09(16),
+		},
+	}
+	return nil
+}
+
+func (o *Options) validate() error {
+	restConfig, err := o.ClusteradmFlags.KubectlFactory.ToRESTConfig()
+	if err != nil {
+		return err
+	}
+	apiExtensionsClient, err := apiextensionsclient.NewForConfig(restConfig)
+	if err != nil {
+		return err
+	}
+	installed, err := helpers.IsClusterManagerInstalled(apiExtensionsClient)
+	if err != nil {
+		return err
+	}
+	if !installed {
+		return fmt.Errorf("the hub has not been initialized")
+	}
+	return nil
+}
+
+func (o *Options) run() error {
+	output := make([]string, 0)
+
+	//Clean up the ClusterManager CR resource first
+	f := o.ClusteradmFlags.KubectlFactory
+	config, err := f.ToRESTConfig()
+	if err != nil {
+		log.Fatal(err)
+	}
+	clusterManagerClient, err := clustermanagerclient.NewForConfig(config)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if IsClusterManagerExist(clusterManagerClient) {
+		err = clusterManagerClient.OperatorV1().ClusterManagers().Delete(context.Background(), o.clusterManageName, metav1.DeleteOptions{})
+		if err != nil {
+			log.Fatal(err)
+		}
+		if !o.ClusteradmFlags.DryRun {
+			b := retry.DefaultBackoff
+			b.Duration = 1 * time.Minute
+
+			err = WaitResourceToBeDelete(context.Background(), clusterManagerClient, o.clusterManageName, b)
+			if err != nil {
+				log.Fatal("The ClusterManager resource must be deleted first.")
+			}
+		}
+	}
+	//Clean up the other resources
+	kubeClient, apiExtensionsClient, _, err := helpers.GetClients(f)
+	if err != nil {
+		return err
+	}
+	kubeClient.AppsV1().Deployments("open-cluster-management").Delete(context.Background(), "cluster-manager", metav1.DeleteOptions{})
+	apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), "clustermanagers.operator.open-cluster-management.io", metav1.DeleteOptions{})
+	kubeClient.RbacV1().ClusterRoles().Delete(context.Background(), "cluster-manager", metav1.DeleteOptions{})
+	kubeClient.RbacV1().ClusterRoleBindings().Delete(context.Background(), "cluster-manager", metav1.DeleteOptions{})
+	kubeClient.CoreV1().ServiceAccounts("open-cluster-management").Delete(context.Background(), "cluster-manager", metav1.DeleteOptions{})
+
+	if o.useBootstrapToken {
+		kubeClient.RbacV1().ClusterRoles().Delete(context.Background(), "system:open-cluster-management:bootstrap", metav1.DeleteOptions{})
+		kubeClient.RbacV1().ClusterRoleBindings().Delete(context.Background(), "cluster-bootstrap", metav1.DeleteOptions{})
+		kubeClient.CoreV1().Secrets("kube-system").Delete(context.Background(), "bootstrap-token-"+o.values.Hub.TokenID, metav1.DeleteOptions{})
+	} else {
+		kubeClient.RbacV1().ClusterRoles().Delete(context.Background(), "system:open-cluster-management:bootstrap", metav1.DeleteOptions{})
+		kubeClient.RbacV1().ClusterRoleBindings().Delete(context.Background(), "cluster-bootstrap-sa", metav1.DeleteOptions{})
+		kubeClient.CoreV1().ServiceAccounts("open-cluster-management").Delete(context.Background(), "cluster-bootstrap", metav1.DeleteOptions{})
+
+	}
+	kubeClient.CoreV1().Namespaces().Delete(context.Background(), "open-cluster-management", metav1.DeleteOptions{})
+	fmt.Println("The multicluster hub control plane has been cleaned up successfully!")
+
+	return apply.WriteOutput(o.outputFile, output)
+}
+func WaitResourceToBeDelete(context context.Context, client clustermanagerclient.Interface, name string, b wait.Backoff) error {
+
+	errGet := retry.OnError(b, func(err error) bool {
+		if !errors.IsNotFound(err) {
+			log.Printf("Waiting for the cluster manager resource %s to be deleted.\n", name)
+			return true
+		}
+		return false
+	}, func() error {
+		_, err := client.OperatorV1().ClusterManagers().Get(context, name, metav1.GetOptions{})
+		if err == nil {
+			return fmt.Errorf("ClusterManager still exists")
+		}
+		return err
+	})
+	return errGet
+
+}
+func IsClusterManagerExist(client clustermanagerclient.Interface) bool {
+	obj, err := client.OperatorV1().ClusterManagers().List(context.Background(), metav1.ListOptions{})
+	if err != nil {
+		log.Fatal(err)
+	}
+	if len(obj.Items) > 0 {
+		return true
+	}
+	return false
+}
diff --git a/pkg/cmd/clean/options.go b/pkg/cmd/clean/options.go
new file mode 100644
index 000000000..6b04daeb1
--- /dev/null
+++ b/pkg/cmd/clean/options.go
@@ -0,0 +1,41 @@
+// Copyright Contributors to the Open Cluster Management project
+
+package init
+
+import (
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	genericclioptionsclusteradm "open-cluster-management.io/clusteradm/pkg/genericclioptions"
+)
+
+//Options: The structure holding all the command-line options
+type Options struct {
+	//ClusteradmFlags: The generic options from the clusteradm cli-runtime.
+	ClusteradmFlags *genericclioptionsclusteradm.ClusteradmFlags
+	values          Values
+	//The cluster manager resource name
+	clusterManageName string
+	//The file to which the generated resources will be written
+	outputFile string
+	//If true, the bootstrap token will be used instead of the service account token
+	useBootstrapToken bool
+}
+
+//Values: The values used in the template
+type Values struct {
+	//The values related to the hub
+	Hub Hub `json:"hub"`
+}
+
+//Hub: The hub values for the template
+type Hub struct {
+	//TokenID: A token ID allowing the cluster to connect back to the hub
+	TokenID string `json:"tokenID"`
+	//TokenSecret: A token secret allowing the cluster to connect back to the hub
+	TokenSecret string `json:"tokenSecret"`
+}
+
+func newOptions(clusteradmFlags *genericclioptionsclusteradm.ClusteradmFlags, streams genericclioptions.IOStreams) *Options {
+	return &Options{
+		ClusteradmFlags: clusteradmFlags,
+	}
+}
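
Usage note (editor's sketch, not part of the committed patch): with the command registered in cmd/clusteradm.go, the clean-up above would be driven from the clusteradm binary using the flags defined in pkg/cmd/clean/cmd.go. The second example assumes the hub was set up with a bootstrap token (the path this patch mirrors with --use-bootstrap-token); a global --dry-run flag backed by ClusteradmFlags.DryRun is also assumed to exist, as suggested by the PreRun hook.

  # Clean up a hub control plane created by "clusteradm init"
  clusteradm clean

  # Clean up a hub that was initialized with a bootstrap token
  clusteradm clean --use-bootstrap-token

The command deletes the ClusterManager CR first, waits with a bounded retry backoff for it to disappear, then removes the operator Deployment, the clustermanagers CRD, the related RBAC objects and service accounts, the bootstrap secret or bootstrap service account, and finally the open-cluster-management namespace.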