From bf8c4e328029431aafbee1651f9204d006e65b55 Mon Sep 17 00:00:00 2001
From: Priya Wadhwa
Date: Mon, 18 May 2020 16:11:26 -0700
Subject: [PATCH 1/4] Add --wait=all flag to soft start

---
 test/integration/functional_test.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go
index 60b30113772f..02b6e6690e3a 100644
--- a/test/integration/functional_test.go
+++ b/test/integration/functional_test.go
@@ -83,7 +83,7 @@ func TestFunctional(t *testing.T) {
 		{"StartWithProxy", validateStartWithProxy}, // Set everything else up for success
 		{"SoftStart", validateSoftStart},           // do a soft start. ensure config didnt change.
 		{"KubeContext", validateKubeContext},       // Racy: must come immediately after "minikube start"
-		{"KubectlGetPods", validateKubectlGetPods}, // Make sure apiserver is up
+		{"KubectlGetPods", validateKubectlGetPods}, // Make sure kubectl is returning pods
 		{"CacheCmd", validateCacheCmd},             // Caches images needed for subsequent tests because of proxy
 		{"MinikubeKubectlCmd", validateMinikubeKubectl}, // Make sure `minikube kubectl` works
 	}
@@ -229,7 +229,7 @@ func validateSoftStart(ctx context.Context, t *testing.T, profile string) {
 		t.Errorf("expected cluster config node port before soft start to be %d but got %d", apiPortTest, beforeCfg.Config.KubernetesConfig.NodePort)
 	}
 
-	softStartArgs := []string{"start", "-p", profile}
+	softStartArgs := []string{"start", "-p", profile, "--wait=all"}
 	c := exec.CommandContext(ctx, Target(), softStartArgs...)
 	rr, err := Run(t, c)
 	if err != nil {

From cc40795719d98479237ad0af2d25379af21290fe Mon Sep 17 00:00:00 2001
From: Priya Wadhwa
Date: Mon, 18 May 2020 16:51:47 -0700
Subject: [PATCH 2/4] Move status command to TestFunctional/serial

I noticed that TestComponentHealth/parallel/ComponentHealth was failing with this error:

```
Error apiserver status: https://172.17.0.3:8441/healthz returned error 500:
[-]etcd failed: reason withheld
```

but by the time post mortem logs were printed the etcd container was up and running.

I think this test occasionally fails because apiserver healthz is not yet returning a 200 status when we run the test. We wait for healthz to return 200 on regular start, but not on soft start, which we run in `TestFunctional`.

This PR adds a retry, which should give the apiserver time to become healthy.
---
 test/integration/functional_test.go | 89 ++++++++++++++++-------------
 1 file changed, 50 insertions(+), 39 deletions(-)

diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go
index 02b6e6690e3a..afb2954c1f12 100644
--- a/test/integration/functional_test.go
+++ b/test/integration/functional_test.go
@@ -38,6 +38,7 @@ import (
 
 	"github.com/google/go-cmp/cmp"
 
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/localpath"
 	"k8s.io/minikube/pkg/util/retry"
@@ -83,7 +84,7 @@ func TestFunctional(t *testing.T) {
 		{"StartWithProxy", validateStartWithProxy}, // Set everything else up for success
 		{"SoftStart", validateSoftStart},           // do a soft start. ensure config didnt change.
 		{"KubeContext", validateKubeContext},       // Racy: must come immediately after "minikube start"
-		{"KubectlGetPods", validateKubectlGetPods}, // Make sure kubectl is returning pods
+		{"KubectlGetPods", validateKubectlGetPods}, // Make sure apiserver is up
 		{"CacheCmd", validateCacheCmd},             // Caches images needed for subsequent tests because of proxy
 		{"MinikubeKubectlCmd", validateMinikubeKubectl}, // Make sure `minikube kubectl` works
 	}
@@ -105,25 +106,25 @@ func TestFunctional(t *testing.T) {
 		validator validateFunc
 	}{
 		{"ComponentHealth", validateComponentHealth},
-		{"ConfigCmd", validateConfigCmd},
-		{"DashboardCmd", validateDashboardCmd},
-		{"DNS", validateDNS},
-		{"DryRun", validateDryRun},
-		{"StatusCmd", validateStatusCmd},
-		{"LogsCmd", validateLogsCmd},
-		{"MountCmd", validateMountCmd},
-		{"ProfileCmd", validateProfileCmd},
-		{"ServiceCmd", validateServiceCmd},
-		{"AddonsCmd", validateAddonsCmd},
-		{"PersistentVolumeClaim", validatePersistentVolumeClaim},
-		{"TunnelCmd", validateTunnelCmd},
-		{"SSHCmd", validateSSHCmd},
-		{"MySQL", validateMySQL},
-		{"FileSync", validateFileSync},
-		{"CertSync", validateCertSync},
-		{"UpdateContextCmd", validateUpdateContextCmd},
-		{"DockerEnv", validateDockerEnv},
-		{"NodeLabels", validateNodeLabels},
+		// {"ConfigCmd", validateConfigCmd},
+		// {"DashboardCmd", validateDashboardCmd},
+		// {"DNS", validateDNS},
+		// {"DryRun", validateDryRun},
+		// {"StatusCmd", validateStatusCmd},
+		// {"LogsCmd", validateLogsCmd},
+		// {"MountCmd", validateMountCmd},
+		// {"ProfileCmd", validateProfileCmd},
+		// {"ServiceCmd", validateServiceCmd},
+		// {"AddonsCmd", validateAddonsCmd},
+		// {"PersistentVolumeClaim", validatePersistentVolumeClaim},
+		// {"TunnelCmd", validateTunnelCmd},
+		// {"SSHCmd", validateSSHCmd},
+		// {"MySQL", validateMySQL},
+		// {"FileSync", validateFileSync},
+		// {"CertSync", validateCertSync},
+		// {"UpdateContextCmd", validateUpdateContextCmd},
+		// {"DockerEnv", validateDockerEnv},
+		// {"NodeLabels", validateNodeLabels},
 	}
 	for _, tc := range tests {
 		tc := tc
@@ -229,7 +230,7 @@ func validateSoftStart(ctx context.Context, t *testing.T, profile string) {
 		t.Errorf("expected cluster config node port before soft start to be %d but got %d", apiPortTest, beforeCfg.Config.KubernetesConfig.NodePort)
 	}
 
-	softStartArgs := []string{"start", "-p", profile, "--wait=all"}
+	softStartArgs := []string{"start", "-p", profile}
 	c := exec.CommandContext(ctx, Target(), softStartArgs...)
 	rr, err := Run(t, c)
 	if err != nil {
@@ -293,27 +294,37 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string)
 func validateComponentHealth(ctx context.Context, t *testing.T, profile string) {
 	defer PostMortemLogs(t, profile)
 
-	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json"))
-	if err != nil {
-		t.Fatalf("failed to get components. args %q: %v", rr.Command(), err)
-	}
-	cs := api.ComponentStatusList{}
-	d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes()))
-	if err := d.Decode(&cs); err != nil {
-		t.Fatalf("failed to decode kubectl json output: args %q : %v", rr.Command(), err)
-	}
+	f := func() (bool, error) {
+		rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json"))
+		if err != nil {
+			t.Logf("failed to get components. args %q: %v", rr.Command(), err)
+			return false, nil
+		}
+		cs := api.ComponentStatusList{}
+		d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes()))
+		if err := d.Decode(&cs); err != nil {
+			t.Logf("failed to decode kubectl json output: args %q : %v", rr.Command(), err)
+			return false, nil
+		}
 
-	for _, i := range cs.Items {
-		status := api.ConditionFalse
-		for _, c := range i.Conditions {
-			if c.Type != api.ComponentHealthy {
-				continue
+		for _, i := range cs.Items {
+			status := api.ConditionFalse
+			for _, c := range i.Conditions {
+				if c.Type != api.ComponentHealthy {
+					continue
+				}
+				status = c.Status
+			}
+			if status != api.ConditionTrue {
+				t.Logf("unexpected status: %v - item: %+v", status, i)
+				return false, nil
 			}
-			status = c.Status
-		}
-		if status != api.ConditionTrue {
-			t.Errorf("unexpected status: %v - item: %+v", status, i)
 		}
+		return true, nil
+	}
+
+	if err := wait.PollImmediate(10*time.Second, 40*time.Second, f); err != nil {
+		t.Fatalf("error: %v", err)
 	}
 }

From 755c924626e1a1108228856fc49aa0ba9e7185fe Mon Sep 17 00:00:00 2001
From: Priya Wadhwa
Date: Mon, 18 May 2020 17:06:16 -0700
Subject: [PATCH 3/4] remove comments

---
 test/integration/functional_test.go | 38 +++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go
index afb2954c1f12..85e7386af8fe 100644
--- a/test/integration/functional_test.go
+++ b/test/integration/functional_test.go
@@ -106,25 +106,25 @@ func TestFunctional(t *testing.T) {
 		validator validateFunc
 	}{
 		{"ComponentHealth", validateComponentHealth},
-		// {"ConfigCmd", validateConfigCmd},
-		// {"DashboardCmd", validateDashboardCmd},
-		// {"DNS", validateDNS},
-		// {"DryRun", validateDryRun},
-		// {"StatusCmd", validateStatusCmd},
-		// {"LogsCmd", validateLogsCmd},
-		// {"MountCmd", validateMountCmd},
-		// {"ProfileCmd", validateProfileCmd},
-		// {"ServiceCmd", validateServiceCmd},
-		// {"AddonsCmd", validateAddonsCmd},
-		// {"PersistentVolumeClaim", validatePersistentVolumeClaim},
-		// {"TunnelCmd", validateTunnelCmd},
-		// {"SSHCmd", validateSSHCmd},
-		// {"MySQL", validateMySQL},
-		// {"FileSync", validateFileSync},
-		// {"CertSync", validateCertSync},
-		// {"UpdateContextCmd", validateUpdateContextCmd},
-		// {"DockerEnv", validateDockerEnv},
-		// {"NodeLabels", validateNodeLabels},
+		{"ConfigCmd", validateConfigCmd},
+		{"DashboardCmd", validateDashboardCmd},
+		{"DNS", validateDNS},
+		{"DryRun", validateDryRun},
+		{"StatusCmd", validateStatusCmd},
+		{"LogsCmd", validateLogsCmd},
+		{"MountCmd", validateMountCmd},
+		{"ProfileCmd", validateProfileCmd},
+		{"ServiceCmd", validateServiceCmd},
+		{"AddonsCmd", validateAddonsCmd},
+		{"PersistentVolumeClaim", validatePersistentVolumeClaim},
+		{"TunnelCmd", validateTunnelCmd},
+		{"SSHCmd", validateSSHCmd},
+		{"MySQL", validateMySQL},
+		{"FileSync", validateFileSync},
+		{"CertSync", validateCertSync},
+		{"UpdateContextCmd", validateUpdateContextCmd},
+		{"DockerEnv", validateDockerEnv},
+		{"NodeLabels", validateNodeLabels},
 	}
 	for _, tc := range tests {
 		tc := tc

From 64e44ec02f9760a844389d86c235d753bec1ddce Mon Sep 17 00:00:00 2001
From: Priya Wadhwa
Date: Tue, 19 May 2020 10:31:28 -0700
Subject: [PATCH 4/4] Respect --wait flag on soft start

---
 pkg/minikube/node/start.go          |  7 ++---
 test/integration/functional_test.go | 47 +++++++++++------------------
 2 files changed, 20 insertions(+), 34 deletions(-)

diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go
index e8b90a66f007..0601629b3263 100644
--- a/pkg/minikube/node/start.go
+++ b/pkg/minikube/node/start.go
@@ -153,11 +153,8 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
 			prepareNone()
 		}
 
-		// TODO: existing cluster should wait for health #7597
-		if !starter.PreExists {
-			if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil {
-				return nil, errors.Wrap(err, "Wait failed")
-			}
+		if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil {
+			return nil, errors.Wrap(err, "Wait failed")
 		}
 	} else {
 		if err := bs.UpdateNode(*starter.Cfg, *starter.Node, cr); err != nil {
diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go
index 85e7386af8fe..60b30113772f 100644
--- a/test/integration/functional_test.go
+++ b/test/integration/functional_test.go
@@ -38,7 +38,6 @@ import (
 
 	"github.com/google/go-cmp/cmp"
 
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/localpath"
 	"k8s.io/minikube/pkg/util/retry"
@@ -294,37 +293,27 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string)
 func validateComponentHealth(ctx context.Context, t *testing.T, profile string) {
 	defer PostMortemLogs(t, profile)
 
-	f := func() (bool, error) {
-		rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json"))
-		if err != nil {
-			t.Logf("failed to get components. args %q: %v", rr.Command(), err)
-			return false, nil
-		}
-		cs := api.ComponentStatusList{}
-		d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes()))
-		if err := d.Decode(&cs); err != nil {
-			t.Logf("failed to decode kubectl json output: args %q : %v", rr.Command(), err)
-			return false, nil
-		}
+	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json"))
+	if err != nil {
+		t.Fatalf("failed to get components. args %q: %v", rr.Command(), err)
+	}
+	cs := api.ComponentStatusList{}
+	d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes()))
+	if err := d.Decode(&cs); err != nil {
+		t.Fatalf("failed to decode kubectl json output: args %q : %v", rr.Command(), err)
+	}
 
-		for _, i := range cs.Items {
-			status := api.ConditionFalse
-			for _, c := range i.Conditions {
-				if c.Type != api.ComponentHealthy {
-					continue
-				}
-				status = c.Status
-			}
-			if status != api.ConditionTrue {
-				t.Logf("unexpected status: %v - item: %+v", status, i)
-				return false, nil
+	for _, i := range cs.Items {
+		status := api.ConditionFalse
+		for _, c := range i.Conditions {
+			if c.Type != api.ComponentHealthy {
+				continue
 			}
+			status = c.Status
+		}
+		if status != api.ConditionTrue {
+			t.Errorf("unexpected status: %v - item: %+v", status, i)
 		}
-		return true, nil
-	}
-
-	if err := wait.PollImmediate(10*time.Second, 40*time.Second, f); err != nil {
-		t.Fatalf("error: %v", err)
 	}
 }
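
For reference, the polling helper that PATCH 2/4 wraps around validateComponentHealth, `wait.PollImmediate` from k8s.io/apimachinery, has the shape sketched below. This is a minimal standalone sketch, not minikube code: the `healthy` condition and its attempt counter are made-up stand-ins for the "kubectl get cs" check, while the 10-second interval and 40-second timeout match what the patch passes.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0

	// Condition in the same shape as the `f` closure added in PATCH 2/4:
	// (false, nil) means "not ready yet, keep polling", (true, nil) means done,
	// and a non-nil error aborts the poll immediately.
	healthy := func() (bool, error) {
		attempts++
		// Stand-in for "kubectl get cs" reporting every component healthy.
		return attempts >= 3, nil
	}

	// Check right away, then every 10s, giving up after 40s.
	if err := wait.PollImmediate(10*time.Second, 40*time.Second, healthy); err != nil {
		fmt.Println("components never became healthy:", err)
		return
	}
	fmt.Printf("components healthy after %d attempts\n", attempts)
}
```

PATCH 4/4 then drops this test-side retry again: once soft start respects --wait, bs.WaitForNode covers the same startup window before the test runs.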