Skip to content

Commit

Permalink
serial: enhance MCP update logs using identifier
Browse files Browse the repository at this point in the history
Since GinkgoHelper() was introduced (f56658a), it has been hard to follow
the logs and understand which step failed while waiting for an MCP update.
Adding a unique identifier to each call marks the start and end of that
call in the logs, making it easier to track down the root cause. The
default identifier is `time.Now()` rendered as a string.
Additionally report config values and expectations of the call.

Signed-off-by: Shereen Haj <shajmakh@redhat.com>
  • Loading branch information
shajmakh committed Feb 19, 2025
1 parent eca7687 commit 1048379
Show file tree
Hide file tree
Showing 2 changed files with 17 additions and 6 deletions.
19 changes: 15 additions & 4 deletions test/e2e/serial/tests/tolerations.go
Original file line number Diff line number Diff line change
Expand Up @@ -841,11 +841,18 @@ func sriovToleration() corev1.Toleration {
}

// waitForMcpUpdate blocks until every MachineConfigPool described in mcpsInfo
// reports the expected update state. It delegates to waitForMcpUpdateWithID,
// generating a default call identifier from the current timestamp so that the
// start/end log lines of concurrent or sequential waits can be told apart.
func waitForMcpUpdate(cli client.Client, ctx context.Context, mcpsInfo []mcpInfo, updateType MCPUpdateType) {
	defaultID := time.Now().String()
	waitForMcpUpdateWithID(cli, ctx, mcpsInfo, updateType, defaultID)
}

func waitForMcpUpdateWithID(cli client.Client, ctx context.Context, mcpsInfo []mcpInfo, updateType MCPUpdateType, id string) {
klog.InfoS("waitForMcpUpdate START", "ID", id)
defer klog.InfoS("waitForMcpUpdate END", "ID", id)

mcps := make([]*machineconfigv1.MachineConfigPool, 0, len(mcpsInfo))
for _, info := range mcpsInfo {
mcps = append(mcps, info.mcpObj)
}
Expect(deploy.WaitForMCPsCondition(cli, ctx, mcps, machineconfigv1.MachineConfigPoolUpdated)).To(Succeed())
Expect(deploy.WaitForMCPsCondition(cli, ctx, mcps, machineconfigv1.MachineConfigPoolUpdated)).To(Succeed(), "failed to have the condistion updated; ID %q", id)

for _, info := range mcpsInfo {
// check the sample node is updated with new config in its annotations, both for desired and current, is a must
Expand All @@ -856,16 +863,20 @@ func waitForMcpUpdate(cli client.Client, ctx context.Context, mcpsInfo []mcpInfo
// thus associated to different MC, it could be because new nodes are joining the pool so the MC update is
// happening on those nodes
updatedConfig := updatedMcp.Status.Configuration.Name
if updateType == MachineConfig {
Expect(updatedConfig).ToNot(Equal(info.initialConfig))
}
initialConfig := info.initialConfig
expectedUpdate := (updateType == MachineConfig)
klog.InfoS("config values", "old", initialConfig, "new", updatedConfig, "expectedConfigUpdate", expectedUpdate)

if expectedUpdate {
Expect(updatedConfig).ToNot(Equal(initialConfig), "waitForMcpUpdate ID %s: config was not updated", id)
}
// MachineConfig update type will also update the node currentConfig so check that anyway
klog.Info("verify mcp config is updated by ensuring the sample node has updated MC")
ok, err := verifyUpdatedMCOnNodes(cli, ctx, info.sampleNode, updatedConfig)
Expect(err).ToNot(HaveOccurred())
Expect(ok).To(BeTrue())
}

}

func verifyUpdatedMCOnNodes(cli client.Client, ctx context.Context, node corev1.Node, desired string) (bool, error) {
Expand Down
4 changes: 2 additions & 2 deletions test/internal/deploy/deploy.go
Original file line number Diff line number Diff line change
Expand Up @@ -155,9 +155,9 @@ func WaitForMCPsCondition(cli client.Client, ctx context.Context, mcps []*machin
defer GinkgoRecover()
ts := time.Now()
err := wait.With(cli).Interval(interval).Timeout(timeout).ForMachineConfigPoolCondition(ctx, mcp, condition)
klog.Infof("MCP %q condition=%s err=%v after %v", mcp.Name, condition, err, time.Since(ts))
klog.InfoS("wait.ForMachineConfigPoolCondition result", "MCP", mcp.Name, "condition", condition, "err", err, "after", time.Since(ts))
return err
})
}
return eg.Wait()
return eg.Wait() // will block the flow until goroutines are finished or return on first error
}

0 comments on commit 1048379

Please sign in to comment.