Skip to content

Commit

Permalink
enhancement to clean up the control plane (#75)
Browse files Browse the repository at this point in the history
Signed-off-by: Leilei Hu <[email protected]>
  • Loading branch information
xauthulei authored Nov 18, 2021
1 parent ad6d4de commit bd474f3
Show file tree
Hide file tree
Showing 5 changed files with 232 additions and 0 deletions.
1 change: 1 addition & 0 deletions SECURITY.md
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
[comment]: # ( Copyright Contributors to the Open Cluster Management project )
Refer to our [Community Security Response](https://github.com/open-cluster-management-io/community/blob/main/SECURITY.md).
2 changes: 2 additions & 0 deletions cmd/clusteradm.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import (
"open-cluster-management.io/clusteradm/pkg/cmd/version"

acceptclusters "open-cluster-management.io/clusteradm/pkg/cmd/accept"
clean "open-cluster-management.io/clusteradm/pkg/cmd/clean"
deletecmd "open-cluster-management.io/clusteradm/pkg/cmd/delete"
enable "open-cluster-management.io/clusteradm/pkg/cmd/enable"
"open-cluster-management.io/clusteradm/pkg/cmd/get"
Expand Down Expand Up @@ -76,6 +77,7 @@ func main() {
Commands: []*cobra.Command{
get.NewCmd(clusteradmFlags, streams),
deletecmd.NewCmd(clusteradmFlags, streams),
clean.NewCmd(clusteradmFlags, streams),
inithub.NewCmd(clusteradmFlags, streams),
joinhub.NewCmd(clusteradmFlags, streams),
unjoin.NewCmd(clusteradmFlags, streams),
Expand Down
51 changes: 51 additions & 0 deletions pkg/cmd/clean/cmd.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
// Copyright Contributors to the Open Cluster Management project

package init

import (
"fmt"

"open-cluster-management.io/clusteradm/pkg/helpers"

"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
genericclioptionsclusteradm "open-cluster-management.io/clusteradm/pkg/genericclioptions"
)

// example is the help text template for the clean command; %[1]s is
// substituted with the clusteradm example header (see helpers.GetExampleHeader).
var example = `
# Clean up the resource from the init stage
%[1]s clean
`

// NewCmd ...
// NewCmd creates the "clean" sub-command, which removes the hub control
// plane resources that were created by "init".
func NewCmd(clusteradmFlags *genericclioptionsclusteradm.ClusteradmFlags, streams genericclioptions.IOStreams) *cobra.Command {
	o := newOptions(clusteradmFlags, streams)

	cmd := &cobra.Command{
		Use:          "clean",
		Short:        "clean the hub",
		Example:      fmt.Sprintf(example, helpers.GetExampleHeader()),
		SilenceUsage: true,
		PreRun: func(c *cobra.Command, args []string) {
			// Announce dry-run mode to the user before anything happens.
			helpers.DryRunMessage(o.ClusteradmFlags.DryRun)
		},
		RunE: func(c *cobra.Command, args []string) error {
			if err := o.complete(c, args); err != nil {
				return err
			}
			if err := o.validate(); err != nil {
				return err
			}
			return o.run()
		},
	}

	cmd.Flags().StringVar(&o.clusterManageName, "name", "cluster-manager", "The name of the cluster manager resource")
	cmd.Flags().StringVar(&o.outputFile, "output-file", "", "The generated resources will be copied in the specified file")
	// Usage string fixed: original read "boostrap token will used".
	cmd.Flags().BoolVar(&o.useBootstrapToken, "use-bootstrap-token", false, "If set then the bootstrap token will be used instead of a service account token")
	return cmd
}
137 changes: 137 additions & 0 deletions pkg/cmd/clean/exec.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,137 @@
// Copyright Contributors to the Open Cluster Management project

package init

import (
"context"
"fmt"
"log"
"time"

clustermanagerclient "open-cluster-management.io/api/client/operator/clientset/versioned"
"open-cluster-management.io/clusteradm/pkg/helpers"
"open-cluster-management.io/clusteradm/pkg/helpers/apply"

"github.com/spf13/cobra"

apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
)

// complete fills in the runtime values of the command before validation.
//
// NOTE(review): TokenID and TokenSecret are generated here with *fresh*
// random values, yet run() later deletes the secret named
// "bootstrap-token-"+TokenID — that name can never match the token created
// at init time. This looks copied from the init command; confirm whether
// the existing bootstrap token should be looked up instead.
func (o *Options) complete(cmd *cobra.Command, args []string) (err error) {
	klog.V(1).InfoS("clean options:", "dry-run", o.ClusteradmFlags.DryRun, "output-file", o.outputFile)
	o.values = Values{
		Hub: Hub{
			TokenID:     helpers.RandStringRunes_az09(6),
			TokenSecret: helpers.RandStringRunes_az09(16),
		},
	}
	return nil
}

// validate verifies that a cluster manager is actually installed on the
// target hub before any cleanup is attempted.
func (o *Options) validate() error {
	restConfig, err := o.ClusteradmFlags.KubectlFactory.ToRESTConfig()
	if err != nil {
		return err
	}
	apiExtensionsClient, err := apiextensionsclient.NewForConfig(restConfig)
	if err != nil {
		return err
	}
	installed, err := helpers.IsClusterManagerInstalled(apiExtensionsClient)
	if err != nil {
		return err
	}
	if !installed {
		// Message grammar fixed (was "hub not be initialized").
		return fmt.Errorf("hub has not been initialized")
	}
	return nil
}

// run deletes the hub control plane resources created by "init": the
// ClusterManager CR first (waiting for the operator to finish tearing its
// operands down), then the operator deployment, CRD, RBAC objects, service
// accounts, bootstrap artifacts, and finally the open-cluster-management
// namespace.
//
// Fixes over the original: errors are returned (not log.Fatal'ed) so cobra
// can report them, and *no* resource is deleted in --dry-run mode (the
// original issued every Delete even with --dry-run set).
func (o *Options) run() error {
	output := make([]string, 0)

	f := o.ClusteradmFlags.KubectlFactory
	config, err := f.ToRESTConfig()
	if err != nil {
		return err
	}
	clusterManagerClient, err := clustermanagerclient.NewForConfig(config)
	if err != nil {
		return err
	}

	// Remove the ClusterManager CR first so the operator tears down its
	// operands before the operator itself is deleted below.
	if IsClusterManagerExist(clusterManagerClient) && !o.ClusteradmFlags.DryRun {
		err = clusterManagerClient.OperatorV1().ClusterManagers().Delete(context.Background(), o.clusterManageName, metav1.DeleteOptions{})
		if err != nil {
			return err
		}
		b := retry.DefaultBackoff
		b.Duration = 1 * time.Minute

		// A NotFound from the wait means the resource is already gone,
		// which is the success case.
		if err := WaitResourceToBeDelete(context.Background(), clusterManagerClient, o.clusterManageName, b); err != nil && !errors.IsNotFound(err) {
			return fmt.Errorf("the cluster manager resource must be deleted before cleaning the other resources: %w", err)
		}
	}

	// Clean the remaining resources best-effort: deletion errors (e.g.
	// NotFound for resources already removed) are deliberately ignored so
	// that clean stays idempotent.
	kubeClient, apiExtensionsClient, _, err := helpers.GetClients(f)
	if err != nil {
		return err
	}
	if !o.ClusteradmFlags.DryRun {
		ctx := context.Background()
		_ = kubeClient.AppsV1().Deployments("open-cluster-management").Delete(ctx, "cluster-manager", metav1.DeleteOptions{})
		_ = apiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Delete(ctx, "clustermanagers.operator.open-cluster-management.io", metav1.DeleteOptions{})
		_ = kubeClient.RbacV1().ClusterRoles().Delete(ctx, "cluster-manager", metav1.DeleteOptions{})
		_ = kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, "cluster-manager", metav1.DeleteOptions{})
		_ = kubeClient.CoreV1().ServiceAccounts("open-cluster-management").Delete(ctx, "cluster-manager", metav1.DeleteOptions{})

		// This cluster role is created in both bootstrap modes.
		_ = kubeClient.RbacV1().ClusterRoles().Delete(ctx, "system:open-cluster-management:bootstrap", metav1.DeleteOptions{})
		if o.useBootstrapToken {
			_ = kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, "cluster-bootstrap", metav1.DeleteOptions{})
			// NOTE(review): o.values.Hub.TokenID is freshly randomized in
			// complete(), so this name will not match the secret created at
			// init time — confirm the intended lookup.
			_ = kubeClient.CoreV1().Secrets("kube-system").Delete(ctx, "bootstrap-token-"+o.values.Hub.TokenID, metav1.DeleteOptions{})
		} else {
			_ = kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, "cluster-bootstrap-sa", metav1.DeleteOptions{})
			_ = kubeClient.CoreV1().ServiceAccounts("open-cluster-management").Delete(ctx, "cluster-bootstrap", metav1.DeleteOptions{})
		}
		_ = kubeClient.CoreV1().Namespaces().Delete(ctx, "open-cluster-management", metav1.DeleteOptions{})
	}
	// Message grammar fixed (was "has been clean up").
	fmt.Println("The multicluster hub control plane has been cleaned up successfully!")

	return apply.WriteOutput(o.outputFile, output)
}
// WaitResourceToBeDelete polls, with backoff b, until the named
// ClusterManager resource is gone and returns nil once it is. If the
// backoff is exhausted first, the last error is returned.
//
// Fixes over the original: (1) a NotFound from the Get now maps to nil —
// previously the NotFound error was propagated by retry.OnError (its
// retriable func returned false for it), so the function returned a
// non-nil error even on successful deletion and the caller always aborted;
// (2) the ctx parameter no longer shadows the context package.
func WaitResourceToBeDelete(ctx context.Context, client clustermanagerclient.Interface, name string, b wait.Backoff) error {
	return retry.OnError(b, func(err error) bool {
		// Any error other than NotFound means the resource is still
		// present (or the API is flaky): keep retrying.
		if !errors.IsNotFound(err) {
			log.Printf("Wait to delete cluster manager resource: %s.\n", name)
			return true
		}
		return false
	}, func() error {
		_, err := client.OperatorV1().ClusterManagers().Get(ctx, name, metav1.GetOptions{})
		if errors.IsNotFound(err) {
			// Gone — deletion completed.
			return nil
		}
		if err == nil {
			// Message grammar fixed (was "ClusterManager is still exist").
			return fmt.Errorf("ClusterManager still exists")
		}
		return err
	})
}
// IsClusterManagerExist reports whether any ClusterManager resource exists
// on the hub. A failed List aborts the process via log.Fatal.
func IsClusterManagerExist(client clustermanagerclient.Interface) bool {
	list, err := client.OperatorV1().ClusterManagers().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	return len(list.Items) > 0
}
41 changes: 41 additions & 0 deletions pkg/cmd/clean/options.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
// Copyright Contributors to the Open Cluster Management project

package init

import (
"k8s.io/cli-runtime/pkg/genericclioptions"
genericclioptionsclusteradm "open-cluster-management.io/clusteradm/pkg/genericclioptions"
)

// Options holds all the command-line options for the clean command.
type Options struct {
	// ClusteradmFlags: the generic options from the clusteradm cli-runtime.
	ClusteradmFlags *genericclioptionsclusteradm.ClusteradmFlags
	// values: runtime values (bootstrap token parts) filled in by complete().
	values Values
	// clusterManageName: name of the cluster manager resource to delete (--name).
	clusterManageName string
	// outputFile: if non-empty, generated resources are also written to this file (--output-file).
	outputFile string
	// useBootstrapToken: if true the bootstrap token is used instead of the
	// service account token (--use-bootstrap-token).
	useBootstrapToken bool
}

// Values holds the values used in the template.
type Values struct {
	// Hub: the values related to the hub.
	Hub Hub `json:"hub"`
}

// Hub holds the hub values for the template.
type Hub struct {
	// TokenID: a token id allowing the cluster to connect back to the hub.
	TokenID string `json:"tokenID"`
	// TokenSecret: a token secret allowing the cluster to connect back to the hub.
	TokenSecret string `json:"tokenSecret"`
}

// newOptions builds an Options carrying the shared clusteradm flags.
// NOTE(review): streams is accepted but not stored — presumably kept for
// signature parity with the other sub-commands; confirm before removing.
func newOptions(clusteradmFlags *genericclioptionsclusteradm.ClusteradmFlags, streams genericclioptions.IOStreams) *Options {
	return &Options{
		ClusteradmFlags: clusteradmFlags,
	}
}

0 comments on commit bd474f3

Please sign in to comment.