🌱 Preserve finalizers during MS/Machine reconciliation #10694
Changes to the e2e `MachineDeploymentScaleSpec`:

```diff
@@ -25,6 +25,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"

 	"sigs.k8s.io/cluster-api/test/framework"
@@ -123,6 +124,17 @@ func MachineDeploymentScaleSpec(ctx context.Context, inputGetter func() MachineD
 		Replicas:                  1,
 		WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
 	})
+
+	By("Deleting the MachineDeployment with foreground deletion")
+	foreground := metav1.DeletePropagationForeground
+	framework.DeleteAndWaitMachineDeployment(ctx, framework.DeleteAndWaitMachineDeploymentInput{
+		ClusterProxy:              input.BootstrapClusterProxy,
+		Cluster:                   clusterResources.Cluster,
+		MachineDeployment:         clusterResources.MachineDeployments[0],
+		WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
+		DeletePropagationPolicy:   &foreground,
+	})
+
 	By("PASSED!")
 })
```

Review thread on `By("Deleting the MachineDeployment with foreground deletion")`:

- Just added this test to verify that foreground deletion works at the moment. Long-term we would like to have the same behavior as with the Cluster (that the MachineDeployment is deleted "in the foreground" independent of whether foreground or background deletion is used).
- Do we have an issue to discuss "Implement forced foreground deletion"?
- I would create one.
- xref: #10710
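For context on what the new `DeletePropagationPolicy` field changes, here is a short sketch (not part of this PR; the function name and the assumption that a `client.Client` and a MachineDeployment are in scope are mine) of how the two propagation modes are requested through a controller-runtime client. With foreground propagation the kube-apiserver marks the object with the `foregroundDeletion` finalizer and only removes it after its dependents are gone; with the background default the owner disappears immediately and the garbage collector cleans up MachineSets and Machines afterwards.

```go
// Sketch only (not from this PR): requesting a deletion propagation policy
// with a controller-runtime client, roughly what the new helper does.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteMachineDeployment issues the delete, optionally with an explicit
// propagation policy.
func deleteMachineDeployment(ctx context.Context, c client.Client, md *clusterv1.MachineDeployment, policy *metav1.DeletionPropagation) error {
	opts := []client.DeleteOption{}
	if policy != nil {
		// Foreground (metav1.DeletePropagationForeground): the kube-apiserver
		// sets the deletionTimestamp and adds the "foregroundDeletion"
		// finalizer, so the MachineDeployment only goes away after its owned
		// MachineSets and Machines have been deleted. Without an explicit
		// policy the server default (background) applies and the garbage
		// collector deletes the dependents asynchronously.
		opts = append(opts, client.PropagationPolicy(*policy))
	}
	return c.Delete(ctx, md, opts...)
}
```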
Changes to the test framework (new `DeleteAndWaitMachineDeployment` helper):

```diff
@@ -25,6 +25,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"github.com/pkg/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -448,6 +449,46 @@ func machineDeploymentOptions(deployment clusterv1.MachineDeployment) []client.L
 	}
 }

+// DeleteAndWaitMachineDeploymentInput is the input for DeleteAndWaitMachineDeployment.
+type DeleteAndWaitMachineDeploymentInput struct {
+	ClusterProxy              ClusterProxy
+	Cluster                   *clusterv1.Cluster
+	MachineDeployment         *clusterv1.MachineDeployment
+	WaitForMachineDeployments []interface{}
+	DeletePropagationPolicy   *metav1.DeletionPropagation
+}
+
+// DeleteAndWaitMachineDeployment deletes MachineDeployment and waits until it is gone.
+func DeleteAndWaitMachineDeployment(ctx context.Context, input DeleteAndWaitMachineDeploymentInput) {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for DeleteAndWaitMachineDeployment")
+	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling DeleteAndWaitMachineDeployment")
+	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DeleteAndWaitMachineDeployment")
+	Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. input.MachineDeployment can't be nil when calling DeleteAndWaitMachineDeployment")
+
+	log.Logf("Deleting MachineDeployment %s", klog.KObj(input.MachineDeployment))
+	Expect(input.ClusterProxy.GetClient().Delete(ctx, input.MachineDeployment, &client.DeleteOptions{PropagationPolicy: input.DeletePropagationPolicy})).To(Succeed())
+
+	log.Logf("Waiting for MD to be deleted")
+	Eventually(func(g Gomega) {
+		selectorMap, err := metav1.LabelSelectorAsMap(&input.MachineDeployment.Spec.Selector)
+		g.Expect(err).ToNot(HaveOccurred())
+
+		machines := &clusterv1.MachineList{}
+		err = input.ClusterProxy.GetClient().List(ctx, machines, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels(selectorMap))
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(machines.Items).To(BeEmpty())
+
+		ms := &clusterv1.MachineSetList{}
+		err = input.ClusterProxy.GetClient().List(ctx, ms, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels(selectorMap))
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(ms.Items).To(BeEmpty())
+
+		md := &clusterv1.MachineDeployment{}
+		err = input.ClusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(input.MachineDeployment), md)
+		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
+	}, input.WaitForMachineDeployments...).Should(Succeed(), "Timed out waiting for Machine Deployment deletion")
+}
+
 // ScaleAndWaitMachineDeploymentInput is the input for ScaleAndWaitMachineDeployment.
 type ScaleAndWaitMachineDeploymentInput struct {
 	ClusterProxy              ClusterProxy
```

Review thread on the `Eventually` block:

- q: should we also check that all the machines are gone? (so we check one layer more of the hierarchy)
- Done
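A possible call site for the new helper, shown as an illustrative fragment (it reuses the `input`, `clusterResources`, and `specName` fixtures from the e2e spec above and assumes the same imports): leaving `DeletePropagationPolicy` nil should send the DELETE without an explicit propagation policy, so the kube-apiserver default (background) applies, whereas the e2e change above passes `DeletePropagationForeground`.

```go
// Illustrative fragment: deleting with the API-server default (background)
// propagation by leaving DeletePropagationPolicy unset.
framework.DeleteAndWaitMachineDeployment(ctx, framework.DeleteAndWaitMachineDeploymentInput{
	ClusterProxy:              input.BootstrapClusterProxy,
	Cluster:                   clusterResources.Cluster,
	MachineDeployment:         clusterResources.MachineDeployments[0],
	WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
	// DeletePropagationPolicy omitted: no policy is set on the DELETE request.
})
```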
Review thread on the code at l. 234:

- I think this code is basically unreachable, because we never create a MachineSet after the deletionTimestamp is already set, and the finalizer and the deletionTimestamp seem to be set by the kube-apiserver through the delete call at the same time. Technically someone could set the foregroundDeletion finalizer already earlier, but I think in any case the finalizer would be propagated down (see e2e test coverage).
- Done
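To illustrate the claim that the delete call sets the deletionTimestamp and the `foregroundDeletion` finalizer at the same time, a check along these lines could be used. This is an illustrative Gomega fragment, not code from the PR: it assumes a client `c`, a Gomega `g`, a MachineDeployment `md` that still owns MachineSets (otherwise the object may already be gone by the time of the Get), and the imports already shown in the framework diff. `metav1.FinalizerDeleteDependents` is the apimachinery constant for `foregroundDeletion`.

```go
// Illustrative fragment: after a foreground delete, the MachineDeployment is
// still readable, with deletionTimestamp set and the foregroundDeletion
// finalizer added by the kube-apiserver.
foreground := metav1.DeletePropagationForeground
g.Expect(c.Delete(ctx, md, client.PropagationPolicy(foreground))).To(Succeed())

got := &clusterv1.MachineDeployment{}
g.Expect(c.Get(ctx, client.ObjectKeyFromObject(md), got)).To(Succeed())
g.Expect(got.DeletionTimestamp.IsZero()).To(BeFalse())
g.Expect(got.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents))
```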
Follow-up question and reply in the same thread:

- Wouldn't this bit of code ensure that, when you are removing a MachineDeployment with foreground deletion (via the client), foreground would also be honoured between the owned MachineSet deletion and their child Machines?
- I think it doesn't change that behavior. I did the following test with this PR: […] Then I: […] Please note that computeDesiredMachineSet is not executed anymore after the deletionTimestamp has been set on the MachineDeployment (because we run reconcileDelete instead). So I think this is: […]
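To make the last point concrete, below is the usual shape of the reconcile entry point being described. This is a generic sketch of the controller-runtime deletion pattern with placeholder type and method names, not the actual cluster-api MachineDeployment controller: once the deletionTimestamp is set, reconciliation branches into `reconcileDelete`, so the normal path that would compute the desired MachineSet is no longer reached.

```go
// Generic sketch of the deletion branch in a reconciler (placeholder names).
package example

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// Reconciler is a placeholder, not the real MachineDeployment controller.
type Reconciler struct{}

func (r *Reconciler) reconcile(ctx context.Context, md *clusterv1.MachineDeployment) (ctrl.Result, error) {
	// Once the kube-apiserver has set the deletionTimestamp (for example via a
	// foreground or background delete), reconciliation takes the delete path...
	if !md.DeletionTimestamp.IsZero() {
		return r.reconcileDelete(ctx, md)
	}
	// ...so nothing on the normal path (such as computing the desired
	// MachineSet) runs for an object that is already being deleted.
	return r.reconcileNormal(ctx, md)
}

func (r *Reconciler) reconcileDelete(_ context.Context, _ *clusterv1.MachineDeployment) (ctrl.Result, error) {
	// Teardown and finalizer handling would live here.
	return ctrl.Result{}, nil
}

func (r *Reconciler) reconcileNormal(_ context.Context, _ *clusterv1.MachineDeployment) (ctrl.Result, error) {
	// Desired-state computation would live here.
	return ctrl.Result{}, nil
}
```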