Commit ae2d6ae

push throttling logic and how to fix
1 parent efad894 commit ae2d6ae

File tree

1 file changed (+24, -0)

lib/deployment/deployment.go

Lines changed: 24 additions & 0 deletions
@@ -73,19 +73,40 @@ func ApplyManifest(kubeconfig *rest.Config, kubeclientset *kubernetes.Clientset,
 	}
 
 	if _, err := dri.Create(context.Background(), unstructuredObj, metav1.CreateOptions{}); err != nil {
+		// If creation fails, the resource probably already exists (or some other error occurred).
 		if _, err := dri.Update(context.Background(), unstructuredObj, metav1.UpdateOptions{}); err != nil {
+			// We try updating it, in case it already exists and is updatable.
+
+			// Sometimes the update doesn't go through either, e.g. for PVCs, because node affinity
+			// is immutable (happens when the cluster is restarted after a pause and old PVCs remain).
+
+			// This check shouldn't be needed if we delete the namespace first; for now, just delete
+			// the namespace during infra setup (revisit the no-delete case later). We generally do
+			// delete, but PVC deletion has issues, so for now we skip PVCs instead of deleting them.
 			if unstructuredObj.GetObjectKind().GroupVersionKind().Kind == "PersistentVolumeClaim" {
 				// Skip PVCs
 				continue
 				// TODO: Handle PVCs; currently, on deletion of PVCs, the cluster gets stuck in a loop
 			}
+
 			_ = dri.Delete(context.Background(), unstructuredObj.GetName(), metav1.DeleteOptions{})
+
+			// Here the PV gets deleted, but the watch never fires (we don't get a watcher ping),
+			// so we get stuck in this loop. Reapplying works: after 3-4 attempts everything has
+			// been deleted and this path is no longer hit.
+
+			// THE WATCHER CODE DOESN'T WORK RIGHT NOW; doing a fix below to bypass it.
 			watcher, err := dri.Watch(context.Background(), metav1.ListOptions{
 				FieldSelector: fmt.Sprintf("metadata.name=%s", unstructuredObj.GetName()),
 			})
+
 			if err != nil {
 				return err
 			}
+
+			// TODO: set a 30s timeout, then check whether the resource still exists; it should
+			// be deleted by then. If it was deleted, continue; otherwise return a useful error
+			// naming the resource that didn't get deleted.
+
 			defer watcher.Stop()
 			for event := range watcher.ResultChan() {
 				if event.Type == watch.Deleted {
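
The TODO at the end of this hunk describes the intended bypass for the broken watcher: instead of blocking on watch events, poll until the resource is gone or a timeout expires. A minimal sketch of that approach, assuming the same dynamic-client types used in the diff (the helper name waitForDeletion is hypothetical):

package deployment // assumption: lives alongside lib/deployment/deployment.go

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/dynamic"
)

// waitForDeletion polls the resource until it is gone or the timeout expires,
// instead of waiting on a watch that never delivers the Deleted event.
func waitForDeletion(dri dynamic.ResourceInterface, obj *unstructured.Unstructured, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	for {
		if _, err := dri.Get(ctx, obj.GetName(), metav1.GetOptions{}); apierrors.IsNotFound(err) {
			return nil // deleted; the caller can continue and re-create the resource
		}
		if ctx.Err() != nil {
			// A useful error naming the resource that didn't get deleted.
			return fmt.Errorf("%s %q was not deleted within %s",
				obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), timeout)
		}
		time.Sleep(time.Second)
	}
}

Calling waitForDeletion(dri, unstructuredObj, 30*time.Second) right after the Delete call would replace the watcher loop entirely.
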
@@ -119,6 +140,8 @@ func DeployCluster(kubeconfig *rest.Config, kubeclientset *kubernetes.Clientset)
 	if err != nil {
 		log.Println(err)
 	}
+
+	// TOREAD: check whether we actually need to run on a single node, and what this is for.
 	deploymentConfig.NodeAffinityValue = nodes[0].Name
 
 	for _, m := range clusterConfig.TemplatedManifests {
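
For context on the TOREAD note: the diff doesn't show where nodes comes from, but with client-go it is presumably a node list. A hedged sketch of that lookup (the helper firstNodeName is hypothetical, and the error handling mirrors the diff's log-and-continue style):

package deployment // assumption: same package as the diff

import (
	"context"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// firstNodeName lists the cluster nodes and returns the name of the first one,
// which is what NodeAffinityValue is pinned to in the hunk above.
func firstNodeName(kubeclientset *kubernetes.Clientset) string {
	nodeList, err := kubeclientset.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		log.Println(err)
		return ""
	}
	if len(nodeList.Items) == 0 {
		return ""
	}
	// Pinning everything to one node makes the deployment effectively single-node,
	// which is what the TOREAD above is questioning.
	return nodeList.Items[0].Name
}
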
@@ -128,6 +151,7 @@ func DeployCluster(kubeconfig *rest.Config, kubeclientset *kubernetes.Clientset)
 	if err != nil {
 		return err
 	}
+	// Dynamically fill the templated manifest with values from the deployment config.
 	if err = tmpl.Execute(manifest, deploymentConfig); err != nil {
 		return err
 	}
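
The new comment marks where the templated manifest is filled in. A self-contained sketch of that "dynamic update" step with text/template, under the assumption that deploymentConfig carries fields like NodeAffinityValue and the manifests use a standard nodeAffinity block (the struct and template text below are illustrative, not taken from the repo):

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// DeploymentConfig is an illustrative stand-in for the repo's deployment config.
type DeploymentConfig struct {
	NodeAffinityValue string
}

const manifestTemplate = `apiVersion: v1
kind: Pod
metadata:
  name: example
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: kubernetes.io/hostname
                operator: In
                values:
                  - {{ .NodeAffinityValue }}
`

func main() {
	tmpl, err := template.New("manifest").Parse(manifestTemplate)
	if err != nil {
		panic(err)
	}

	// Execute substitutes the config values into the manifest, the same role
	// tmpl.Execute(manifest, deploymentConfig) plays in the diff.
	var manifest bytes.Buffer
	if err := tmpl.Execute(&manifest, DeploymentConfig{NodeAffinityValue: "node-1"}); err != nil {
		panic(err)
	}
	fmt.Println(manifest.String()) // rendered YAML, ready to parse and apply
}
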
