🐛 MHC: fix flaky test #11471

Merged 1 commit on Nov 29, 2024
@@ -405,14 +405,26 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) {
 		)
 		defer cleanup2()
 		// Unhealthy nodes and machines but already in deletion.
+		// Note: deletionTimestamp gets set by deletion below which also removes the skip remediation annotation.
 		_, unhealthyMachinesDeleting, cleanup3 := createMachinesWithNodes(g, cluster,
 			count(1),
 			createNodeRefForMachine(true),
 			nodeStatus(corev1.ConditionUnknown),
 			machineLabels(mhc.Spec.Selector.MatchLabels),
-			machineDeleting(),
+			machineFinalizers("test.cluster.io/delete-protection"),
+			machineAnnotations(map[string]string{clusterv1.MachineSkipRemediationAnnotation: ""}),
 		)
 		defer cleanup3()
+		// Mark machines for deletion and drop skip remediation annotation
+		// Note: without the skip remediation annotation the MHC controller might already reconcile the condition leading to a flaky test.
+		for _, m := range unhealthyMachinesDeleting {
+			g.Expect(env.Delete(ctx, m)).To(Succeed())
+			g.Expect(env.Get(ctx, client.ObjectKeyFromObject(m), m)).To(Succeed())
+			deletingMachinePatchHelper, err := patch.NewHelper(m, env.GetClient())
+			g.Expect(err).ToNot(HaveOccurred())
+			m.Annotations = map[string]string{}
+			g.Expect(deletingMachinePatchHelper.Patch(ctx, m)).To(Succeed())
+		}
 		machines = append(append(machines, unhealthyMachines...), unhealthyMachinesDeleting...)
 		targetMachines := make([]string, len(machines))
 		for i, m := range machines {
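
The ordering in this hunk is the substance of the fix: the Machine is created with the skip-remediation annotation so the MachineHealthCheck controller ignores it, then deleted so the API server sets deletionTimestamp (the finalizer keeps the object alive), and only then stripped of the annotation. A minimal sketch of that sequence as a standalone helper, assuming cluster-api's util/patch package and a controller-runtime client; the function name and the targeted removal of the single annotation are illustrative, not taken from the PR:

package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/patch"
)

// markDeletingAndUnblockRemediation (hypothetical name) mirrors the test's
// delete-then-patch sequence for a Machine that carries a finalizer and the
// skip-remediation annotation.
func markDeletingAndUnblockRemediation(ctx context.Context, c client.Client, m *clusterv1.Machine) error {
	// Delete sets metadata.deletionTimestamp; the finalizer added at creation
	// keeps the object around so the test can still observe it.
	if err := c.Delete(ctx, m); err != nil {
		return err
	}
	// Re-read the Machine so the patch helper's base object includes the
	// deletionTimestamp set by the API server.
	if err := c.Get(ctx, client.ObjectKeyFromObject(m), m); err != nil {
		return err
	}
	helper, err := patch.NewHelper(m, c)
	if err != nil {
		return err
	}
	// Only now drop the skip-remediation annotation: any MHC reconcile from
	// here on sees a machine that is already in deletion. (The PR clears the
	// whole annotations map; removing just this key has the same effect here.)
	delete(m.Annotations, clusterv1.MachineSkipRemediationAnnotation)
	return helper.Patch(ctx, m)
}

Because the annotation is cleared only after Delete succeeds, every reconcile the MHC performs against this machine observes a non-nil deletionTimestamp, which is exactly the state the test asserts on.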
@@ -2456,11 +2468,11 @@ type machinesWithNodes struct {
 	nodeStatus                 corev1.ConditionStatus
 	createNodeRefForMachine    bool
 	firstMachineAsControlPlane bool
+	annotations                map[string]string
 	labels                     map[string]string
 	failureReason              string
 	failureMessage             string
 	finalizers                 []string
-	deleted                    bool
 }
 
 type machineWithNodesOption func(m *machinesWithNodes)
@@ -2507,10 +2519,15 @@ func machineFailureMessage(s string) machineWithNodesOption {
 	}
 }
 
-func machineDeleting() machineWithNodesOption {
+func machineAnnotations(a map[string]string) machineWithNodesOption {
 	return func(m *machinesWithNodes) {
-		m.finalizers = append(m.finalizers, "test.cluster.io/deleting")
-		m.deleted = true
+		m.annotations = a
 	}
 }
+
+func machineFinalizers(f ...string) machineWithNodesOption {
+	return func(m *machinesWithNodes) {
+		m.finalizers = append(m.finalizers, f...)
+	}
+}

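Design-wise, this hunk replaces the single-purpose machineDeleting() option, which bundled a hard-coded finalizer with a deleted flag, with two orthogonal options, machineFinalizers and machineAnnotations, and moves the actual delete to the call site. The option-application loop itself is not shown in the diff; a plausible minimal shape of the helper's option handling, stated as an assumption:

// Assumed shape of how createMachinesWithNodes consumes its options; the
// real helper does more (it also creates Nodes and a bootstrap secret), but
// the pattern is the standard functional-options one.
func buildOptions(opts ...machineWithNodesOption) *machinesWithNodes {
	o := &machinesWithNodes{}
	for _, opt := range opts {
		opt(o) // each option mutates the settings struct in place
	}
	return o
}

Splitting the options this way lets a test compose any mix of finalizers and annotations without the helper deciding when deletion happens, which is what made the race fixable at the call site.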
@@ -2556,15 +2573,12 @@ func createMachinesWithNodes(
 		if len(o.finalizers) > 0 {
 			machine.Finalizers = o.finalizers
 		}
+		if o.annotations != nil {
+			machine.ObjectMeta.Annotations = o.annotations
+		}
 		g.Expect(env.Create(ctx, machine)).To(Succeed())
 		fmt.Printf("machine created: %s\n", machine.GetName())
 
-		// Set deletiontimestamp before updating status to ensure its not reconciled
-		// without having the deletionTimestamp set.
-		if o.deleted {
-			g.Expect(env.Delete(ctx, machine)).To(Succeed())
-		}
-
 		// Before moving on we want to ensure that the machine has a valid
 		// status. That is, LastUpdated should not be nil.
 		g.Eventually(func() *metav1.Time {

internal/controllers/machinehealthcheck/suite_test.go (2 additions, 13 deletions)
@@ -25,14 +25,10 @@ import (
 
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
-	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/api/v1beta1/index"
 	"sigs.k8s.io/cluster-api/controllers/clustercache"
 	"sigs.k8s.io/cluster-api/controllers/remote"
@@ -48,17 +44,10 @@ const (
 )
 
 var (
-	env        *envtest.Environment
-	ctx        = ctrl.SetupSignalHandler()
-	fakeScheme = runtime.NewScheme()
+	env *envtest.Environment
+	ctx = ctrl.SetupSignalHandler()
 )
 
-func init() {
-	_ = clientgoscheme.AddToScheme(fakeScheme)
-	_ = clusterv1.AddToScheme(fakeScheme)
-	_ = apiextensionsv1.AddToScheme(fakeScheme)
-}
-
 func TestMain(m *testing.M) {
 	setupIndexes := func(ctx context.Context, mgr ctrl.Manager) {
 		if err := index.AddDefaultIndexes(ctx, mgr); err != nil {