diff --git a/client/daemon/daemon.go b/client/daemon/daemon.go
index 704652fe31a..2841c985953 100644
--- a/client/daemon/daemon.go
+++ b/client/daemon/daemon.go
@@ -128,7 +128,7 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) {
 	}

 	host := &schedulerv1.PeerHost{
-		Id:       idgen.HostIDV2(opt.Host.AdvertiseIP.String(), opt.Host.Hostname),
+		Id:       idgen.HostIDV2(opt.Host.AdvertiseIP.String(), opt.Host.Hostname, opt.Scheduler.Manager.SeedPeer.Enable),
 		Ip:       opt.Host.AdvertiseIP.String(),
 		RpcPort:  int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start),
 		DownPort: 0,
diff --git a/manager/job/sync_peers.go b/manager/job/sync_peers.go
index 5f4fb494a7e..e99f56404dc 100644
--- a/manager/job/sync_peers.go
+++ b/manager/job/sync_peers.go
@@ -33,6 +33,7 @@ import (
 	"d7y.io/dragonfly/v2/manager/config"
 	"d7y.io/dragonfly/v2/manager/models"
 	"d7y.io/dragonfly/v2/pkg/idgen"
+	"d7y.io/dragonfly/v2/pkg/types"
 	resource "d7y.io/dragonfly/v2/scheduler/resource/standard"
 )
@@ -196,7 +197,11 @@ func (s *syncPeers) mergePeers(ctx context.Context, scheduler models.Scheduler,
 		// If the peer exists in the sync peer results, update the peer data in the database with
 		// the sync peer results and delete the sync peer from the sync peers map.
-		id := idgen.HostIDV2(peer.IP, peer.Hostname)
+		isSeedPeer := false
+		if types.ParseHostType(peer.Type) != types.HostTypeNormal {
+			isSeedPeer = true
+		}
+		id := idgen.HostIDV2(peer.IP, peer.Hostname, isSeedPeer)
 		if syncPeer, ok := syncPeers[id]; ok {
 			if err := s.db.WithContext(ctx).First(&models.Peer{}, peer.ID).Updates(models.Peer{
 				Type: syncPeer.Type.Name(),
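The mergePeers hunk derives the new isSeedPeer argument from the peer's stored type: any host type other than normal counts as a seed peer. A minimal standalone sketch of that rule, with the HostType values assumed as stand-ins for pkg/types:

package main

import "fmt"

// HostType is an assumed stand-in for pkg/types.HostType; only the
// normal-vs-seed distinction matters for host ID generation.
type HostType int

const (
	HostTypeNormal HostType = iota
	HostTypeSuperSeed
	HostTypeStrongSeed
	HostTypeWeakSeed
)

func main() {
	for _, hostType := range []HostType{HostTypeNormal, HostTypeWeakSeed} {
		// Equivalent to the four-line derivation added in mergePeers above.
		isSeedPeer := hostType != HostTypeNormal
		fmt.Printf("type=%d isSeedPeer=%t\n", hostType, isSeedPeer)
	}
}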
diff --git a/pkg/idgen/host_id.go b/pkg/idgen/host_id.go
index 0c0861fe086..4a2eb389cc7 100644
--- a/pkg/idgen/host_id.go
+++ b/pkg/idgen/host_id.go
@@ -18,8 +18,6 @@ package idgen

 import (
 	"fmt"
-
-	"d7y.io/dragonfly/v2/pkg/digest"
 )

 // HostIDV1 generates v1 version of host id.
@@ -28,6 +26,9 @@ func HostIDV1(hostname string, port int32) string {
 }

 // HostIDV2 generates v2 version of host id.
-func HostIDV2(ip, hostname string) string {
-	return digest.SHA256FromStrings(ip, hostname)
+func HostIDV2(ip, hostname string, isSeedPeer bool) string {
+	if isSeedPeer {
+		return fmt.Sprintf("%s-%s-seed", ip, hostname)
+	}
+	return fmt.Sprintf("%s-%s", ip, hostname)
 }
diff --git a/pkg/idgen/host_id_test.go b/pkg/idgen/host_id_test.go
index af500904923..2f7a6f10cbe 100644
--- a/pkg/idgen/host_id_test.go
+++ b/pkg/idgen/host_id_test.go
@@ -67,43 +67,57 @@ func TestHostIDV1(t *testing.T) {
 func TestHostIDV2(t *testing.T) {
 	tests := []struct {
-		name     string
-		ip       string
-		hostname string
-		expect   func(t *testing.T, d string)
+		name       string
+		ip         string
+		hostname   string
+		isSeedPeer bool
+		expect     func(t *testing.T, d string)
 	}{
 		{
-			name:     "generate HostID",
-			ip:       "127.0.0.1",
-			hostname: "foo",
+			name:       "generate HostID for peer",
+			ip:         "127.0.0.1",
+			hostname:   "foo",
+			isSeedPeer: false,
 			expect: func(t *testing.T, d string) {
 				assert := assert.New(t)
-				assert.Equal(d, "52727e8408e0ee1f999086f241ec43d5b3dbda666f1a06ef1fcbe75b4e90fa17")
+				assert.Equal(d, "127.0.0.1-foo")
 			},
 		},
 		{
-			name:     "generate HostID with empty ip",
-			ip:       "",
-			hostname: "foo",
+			name:       "generate HostID for seed peer",
+			ip:         "127.0.0.1",
+			hostname:   "foo",
+			isSeedPeer: true,
 			expect: func(t *testing.T, d string) {
 				assert := assert.New(t)
-				assert.Equal(d, "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae")
+				assert.Equal(d, "127.0.0.1-foo-seed")
 			},
 		},
 		{
-			name:     "generate HostID with empty host",
-			ip:       "127.0.0.1",
-			hostname: "",
+			name:       "generate HostID with empty ip for seed peer",
+			ip:         "",
+			hostname:   "foo",
+			isSeedPeer: true,
+			expect: func(t *testing.T, d string) {
+				assert := assert.New(t)
+				assert.Equal(d, "-foo-seed")
+			},
+		},
+		{
+			name:       "generate HostID with empty host for seed peer",
+			ip:         "127.0.0.1",
+			hostname:   "",
+			isSeedPeer: true,
 			expect: func(t *testing.T, d string) {
 				assert := assert.New(t)
-				assert.Equal(d, "12ca17b49af2289436f303e0166030a21e525d266e209267433801a8fd4071a0")
+				assert.Equal(d, "127.0.0.1--seed")
 			},
 		},
 	}

 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			tc.expect(t, HostIDV2(tc.ip, tc.hostname))
+			tc.expect(t, HostIDV2(tc.ip, tc.hostname, tc.isSeedPeer))
 		})
 	}
 }
diff --git a/scheduler/resource/standard/host.go b/scheduler/resource/standard/host.go
index a17746106a5..515329f50ac 100644
--- a/scheduler/resource/standard/host.go
+++ b/scheduler/resource/standard/host.go
@@ -451,3 +451,7 @@ func (h *Host) LeavePeers() {
 func (h *Host) FreeUploadCount() int32 {
 	return h.ConcurrentUploadLimit.Load() - h.ConcurrentUploadCount.Load()
 }
+
+func (h *Host) IsSeedPeer() bool {
+	return h.Type == types.HostTypeSuperSeed || h.Type == types.HostTypeStrongSeed || h.Type == types.HostTypeWeakSeed
+}
diff --git a/scheduler/resource/standard/host_test.go b/scheduler/resource/standard/host_test.go
index e9b5810ec1f..d2edc49c2c2 100644
--- a/scheduler/resource/standard/host_test.go
+++ b/scheduler/resource/standard/host_test.go
@@ -132,8 +132,8 @@ var (
 	mockAnnounceInterval = 5 * time.Minute

-	mockHostID     = idgen.HostIDV2("127.0.0.1", "foo")
-	mockSeedHostID = idgen.HostIDV2("127.0.0.1", "bar")
+	mockHostID       = idgen.HostIDV2("127.0.0.1", "foo", false)
+	mockSeedHostID   = idgen.HostIDV2("127.0.0.1", "bar", true)
 	mockHostLocation = "baz"
 	mockHostIDC      = "bas"
 )
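Taken together, the idgen and host changes replace the opaque SHA-256 host ID with a readable "ip-hostname" string, where the "-seed" suffix keeps a seed peer from colliding with a normal peer on the same machine. A self-contained sketch of the new scheme (a local copy for illustration; the real function is pkg/idgen.HostIDV2 above):

package main

import "fmt"

// hostIDV2 mirrors the patched HostIDV2: readable IDs with a "-seed"
// suffix for seed peers, instead of a SHA-256 digest of ip and hostname.
func hostIDV2(ip, hostname string, isSeedPeer bool) string {
	if isSeedPeer {
		return fmt.Sprintf("%s-%s-seed", ip, hostname)
	}
	return fmt.Sprintf("%s-%s", ip, hostname)
}

func main() {
	fmt.Println(hostIDV2("127.0.0.1", "foo", false)) // 127.0.0.1-foo
	fmt.Println(hostIDV2("127.0.0.1", "foo", true))  // 127.0.0.1-foo-seed
}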
diff --git a/scheduler/resource/standard/seed_peer_client.go b/scheduler/resource/standard/seed_peer_client.go
index 0d53f98fd14..ee5d662d234 100644
--- a/scheduler/resource/standard/seed_peer_client.go
+++ b/scheduler/resource/standard/seed_peer_client.go
@@ -21,7 +21,7 @@ package standard
 import (
 	"context"
 	"fmt"
-	reflect "reflect"
+	"reflect"

 	"github.com/hashicorp/go-multierror"
 	"google.golang.org/grpc"
@@ -156,7 +156,7 @@ func (sc *seedPeerClient) updateSeedPeersForHostManager(seedPeers []*managerv2.S
 		concurrentUploadLimit = int32(config.LoadLimit)
 	}

-	id := idgen.HostIDV2(seedPeer.Ip, seedPeer.Hostname)
+	id := idgen.HostIDV2(seedPeer.Ip, seedPeer.Hostname, true)
 	seedPeerHost, loaded := sc.hostManager.Load(id)
 	if !loaded {
 		options := []HostOption{WithNetwork(Network{
diff --git a/scheduler/scheduling/evaluator/evaluator_base_test.go b/scheduler/scheduling/evaluator/evaluator_base_test.go
index 0fc7ab0a51f..7d7264d74b7 100644
--- a/scheduler/scheduling/evaluator/evaluator_base_test.go
+++ b/scheduler/scheduling/evaluator/evaluator_base_test.go
@@ -141,8 +141,8 @@ var (
 	mockTaskFilteredQueryParams       = []string{"bar"}
 	mockTaskHeader                    = map[string]string{"content-length": "100"}
 	mockTaskPieceLength         int32 = 2048
-	mockHostID                        = idgen.HostIDV2("127.0.0.1", "foo")
-	mockSeedHostID                    = idgen.HostIDV2("127.0.0.1", "bar")
+	mockHostID                        = idgen.HostIDV2("127.0.0.1", "foo", false)
+	mockSeedHostID                    = idgen.HostIDV2("127.0.0.1", "bar", true)
 	mockHostLocation                  = "bas"
 	mockHostIDC                       = "baz"
 	mockPeerID                        = idgen.PeerIDV2()
diff --git a/scheduler/scheduling/scheduling_test.go b/scheduler/scheduling/scheduling_test.go
index 140add6f187..01668709bb0 100644
--- a/scheduler/scheduling/scheduling_test.go
+++ b/scheduler/scheduling/scheduling_test.go
@@ -170,8 +170,8 @@ var (
 	mockTaskFilteredQueryParams       = []string{"bar"}
 	mockTaskHeader                    = map[string]string{"content-length": "100"}
 	mockTaskPieceLength         int32 = 2048
-	mockHostID                        = idgen.HostIDV2("127.0.0.1", "foo")
-	mockSeedHostID                    = idgen.HostIDV2("127.0.0.1", "bar")
+	mockHostID                        = idgen.HostIDV2("127.0.0.1", "foo", false)
+	mockSeedHostID                    = idgen.HostIDV2("127.0.0.1", "bar", true)
 	mockHostLocation                  = "baz"
 	mockHostIDC                       = "bas"
 	mockPeerID                        = idgen.PeerIDV2()
@@ -1040,7 +1040,7 @@ func TestScheduling_FindCandidateParents(t *testing.T) {
 			var mockPeers []*resource.Peer
 			for i := 0; i < 11; i++ {
 				mockHost := resource.NewHost(
-					idgen.HostIDV2("127.0.0.1", uuid.New().String()), mockRawHost.IP, mockRawHost.Hostname,
+					idgen.HostIDV2("127.0.0.1", uuid.New().String(), false), mockRawHost.IP, mockRawHost.Hostname,
 					mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
 				peer := resource.NewPeer(idgen.PeerIDV1(fmt.Sprintf("127.0.0.%d", i)), mockTask, mockHost)
 				mockPeers = append(mockPeers, peer)
@@ -1357,7 +1357,7 @@ func TestScheduling_FindParentAndCandidateParents(t *testing.T) {
 			var mockPeers []*resource.Peer
 			for i := 0; i < 11; i++ {
 				mockHost := resource.NewHost(
-					idgen.HostIDV2("127.0.0.1", uuid.New().String()), mockRawHost.IP, mockRawHost.Hostname,
+					idgen.HostIDV2("127.0.0.1", uuid.New().String(), false), mockRawHost.IP, mockRawHost.Hostname,
 					mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
 				peer := resource.NewPeer(idgen.PeerIDV1(fmt.Sprintf("127.0.0.%d", i)), mockTask, mockHost)
 				mockPeers = append(mockPeers, peer)
@@ -1618,7 +1618,7 @@ func TestScheduling_FindSuccessParent(t *testing.T) {
 			var mockPeers []*resource.Peer
 			for i := 0; i < 11; i++ {
 				mockHost := resource.NewHost(
-					idgen.HostIDV2("127.0.0.1", uuid.New().String()), mockRawHost.IP, mockRawHost.Hostname,
+					idgen.HostIDV2("127.0.0.1", uuid.New().String(), false), mockRawHost.IP, mockRawHost.Hostname,
 					mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
 				peer := resource.NewPeer(idgen.PeerIDV1(fmt.Sprintf("127.0.0.%d", i)), mockTask, mockHost)
 				mockPeers = append(mockPeers, peer)
diff --git a/scheduler/service/service_v1_test.go b/scheduler/service/service_v1_test.go
index 85a4651639a..3d3884aa63a 100644
--- a/scheduler/service/service_v1_test.go
+++ b/scheduler/service/service_v1_test.go
@@ -192,8 +192,8 @@ var (
 	mockTaskFilteredQueryParams       = []string{"bar"}
 	mockTaskHeader                    = map[string]string{"Content-Length": "100", "Range": "bytes=0-99"}
 	mockTaskPieceLength         int32 = 2048
-	mockHostID                        = idgen.HostIDV2("127.0.0.1", "foo")
-	mockSeedHostID                    = idgen.HostIDV2("127.0.0.1", "bar")
+	mockHostID                        = idgen.HostIDV2("127.0.0.1", "foo", false)
+	mockSeedHostID                    = idgen.HostIDV2("127.0.0.1", "bar", true)
 	mockHostLocation                  = "bas"
 	mockHostIDC                       = "baz"
 	mockPeerID                        = idgen.PeerIDV2()
@@ -2559,7 +2559,7 @@ func TestServiceV1_LeaveHost(t *testing.T) {
 			tc.mock(host, mockPeer, hostManager, scheduling.EXPECT(), res.EXPECT(), hostManager.EXPECT())

 			tc.expect(t, mockPeer, svc.LeaveHost(context.Background(), &schedulerv1.LeaveHostRequest{
-				Id: idgen.HostIDV2(host.IP, host.Hostname),
+				Id: idgen.HostIDV2(host.IP, host.Hostname, true),
 			}))
 		})
 	}
diff --git a/scheduler/service/service_v2.go b/scheduler/service/service_v2.go
index aa086fa1ea3..e96bdddcc02 100644
--- a/scheduler/service/service_v2.go
+++ b/scheduler/service/service_v2.go
@@ -789,6 +789,7 @@ func (v *V2) DeleteHost(ctx context.Context, req *schedulerv2.DeleteHostRequest)

 	// Leave peers in host.
 	host.LeavePeers()
+	v.resource.HostManager().Delete(req.GetHostId())

 	return nil
 }
diff --git a/scheduler/service/service_v2_test.go b/scheduler/service/service_v2_test.go
index 337b1976770..82d9235dc68 100644
--- a/scheduler/service/service_v2_test.go
+++ b/scheduler/service/service_v2_test.go
@@ -1011,6 +1011,8 @@ func TestServiceV2_DeleteHost(t *testing.T) {
 				gomock.InOrder(
 					mr.HostManager().Return(hostManager).Times(1),
 					mh.Load(gomock.Any()).Return(host, true).Times(1),
+					mr.HostManager().Return(hostManager).Times(1),
+					mh.Delete(gomock.Any()).Times(1),
 				)
 			},
 			expect: func(t *testing.T, peer *resource.Peer, err error) {
@@ -1026,6 +1028,8 @@
 				gomock.InOrder(
 					mr.HostManager().Return(hostManager).Times(1),
 					mh.Load(gomock.Any()).Return(host, true).Times(1),
+					mr.HostManager().Return(hostManager).Times(1),
+					mh.Delete(gomock.Any()).Times(1),
 				)
 			},
 			expect: func(t *testing.T, peer *resource.Peer, err error) {
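The service_v2.go hunk is the behavioral fix on the scheduler side: DeleteHost previously only detached peers via host.LeavePeers(), leaving the host itself registered until GC. A toy sketch of why the explicit Delete matters, using a map-backed stand-in rather than the real HostManager API:

package main

import (
	"fmt"
	"sync"
)

// hostManager is a map-backed stand-in for the scheduler's HostManager;
// Load and Delete are assumed to behave like a concurrent map.
type hostManager struct {
	mu    sync.Mutex
	hosts map[string]struct{}
}

func (m *hostManager) Load(id string) bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	_, ok := m.hosts[id]
	return ok
}

func (m *hostManager) Delete(id string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.hosts, id)
}

func main() {
	m := &hostManager{hosts: map[string]struct{}{"127.0.0.1-foo": {}}}

	// Mirrors the patched DeleteHost: after peers leave, drop the host
	// entry itself so ListHosts no longer reports it.
	m.Delete("127.0.0.1-foo")
	fmt.Println("still registered:", m.Load("127.0.0.1-foo")) // false
}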
diff --git a/test/e2e/v2/containerd_test.go b/test/e2e/v2/containerd_test.go
index c6a623e68e5..08e1e4d30ba 100644
--- a/test/e2e/v2/containerd_test.go
+++ b/test/e2e/v2/containerd_test.go
@@ -60,11 +60,11 @@ var _ = Describe("Containerd with CRI support", func() {
 				},
 			}
-			clientPods, err := util.ClientExecAll()
+			clientPod, err := util.ClientExec()
 			fmt.Println(err)
 			Expect(err).NotTo(HaveOccurred())

 			for _, taskMetadata := range taskMetadatas {
-				sha256sum, err := util.CalculateSha256ByTaskID(clientPods, taskMetadata.ID)
+				sha256sum, err := util.CalculateSha256ByTaskID([]*util.PodExec{clientPod}, taskMetadata.ID)
 				Expect(err).NotTo(HaveOccurred())
 				Expect(taskMetadata.Sha256).To(Equal(sha256sum))
 			}
@@ -115,12 +115,12 @@ var _ = Describe("Containerd with CRI support", func() {
 				},
 			}

-			clientPods, err := util.ClientExecAll()
+			clientPod, err := util.ClientExec()
 			fmt.Println(err)
 			Expect(err).NotTo(HaveOccurred())

 			for _, taskMetadata := range taskMetadatas {
-				sha256sum, err := util.CalculateSha256ByTaskID(clientPods, taskMetadata.ID)
+				sha256sum, err := util.CalculateSha256ByTaskID([]*util.PodExec{clientPod}, taskMetadata.ID)
 				Expect(err).NotTo(HaveOccurred())
 				Expect(taskMetadata.Sha256).To(Equal(sha256sum))
 			}
@@ -179,12 +179,12 @@ var _ = Describe("Containerd with CRI support", func() {
 				},
 			}

-			clientPods, err := util.ClientExecAll()
+			clientPod, err := util.ClientExec()
 			fmt.Println(err)
 			Expect(err).NotTo(HaveOccurred())

 			for _, taskMetadata := range taskMetadatas {
-				sha256sum, err := util.CalculateSha256ByTaskID(clientPods, taskMetadata.ID)
+				sha256sum, err := util.CalculateSha256ByTaskID([]*util.PodExec{clientPod}, taskMetadata.ID)
 				Expect(err).NotTo(HaveOccurred())
 				Expect(taskMetadata.Sha256).To(Equal(sha256sum))
 			}
@@ -231,12 +231,12 @@ var _ = Describe("Containerd with CRI support", func() {
 				},
 			}

-			clientPods, err := util.ClientExecAll()
+			clientPod, err := util.ClientExec()
 			fmt.Println(err)
 			Expect(err).NotTo(HaveOccurred())

 			for _, taskMetadata := range taskMetadatas {
-				sha256sum, err := util.CalculateSha256ByTaskID(clientPods, taskMetadata.ID)
+				sha256sum, err := util.CalculateSha256ByTaskID([]*util.PodExec{clientPod}, taskMetadata.ID)
 				Expect(err).NotTo(HaveOccurred())
 				Expect(taskMetadata.Sha256).To(Equal(sha256sum))
 			}
diff --git a/test/e2e/v2/e2e_test.go b/test/e2e/v2/e2e_test.go
index 23e08eb59cf..67f8b422846 100644
--- a/test/e2e/v2/e2e_test.go
+++ b/test/e2e/v2/e2e_test.go
@@ -21,6 +21,7 @@ import (
 	"strconv"
 	"strings"
 	"testing"
+	"time"

 	. "github.com/onsi/ginkgo/v2" //nolint
 	. "github.com/onsi/gomega"    //nolint
@@ -86,6 +87,8 @@ var _ = BeforeSuite(func() {
 	Expect(err).NotTo(HaveOccurred())
 	gitCommit := strings.Fields(string(rawGitCommit))[0]
 	fmt.Printf("git commit: %s\n", gitCommit)
+	// Wait for peers to start and announce.
+	time.Sleep(5 * time.Minute)
 })

 // TestE2E is the root of e2e test function
diff --git a/test/e2e/v2/leave_host_test.go b/test/e2e/v2/leave_host_test.go
index 31ada58ca24..4f7b3850761 100644
--- a/test/e2e/v2/leave_host_test.go
+++ b/test/e2e/v2/leave_host_test.go
@@ -19,7 +19,6 @@ package e2e
 import (
 	"context"
 	"fmt"
-	"strings"
 	"time"

 	. "github.com/onsi/ginkgo/v2" //nolint
@@ -28,66 +27,97 @@ import (
 	"google.golang.org/grpc/credentials/insecure"

 	schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client"
+	"d7y.io/dragonfly/v2/pkg/types"
 	"d7y.io/dragonfly/v2/test/e2e/v2/util"
 )

 var _ = Describe("Clients Leaving", func() {
-	Context("normally", func() {
+	Context("graceful exit", func() {
 		It("number of hosts should be ok", Label("host", "leave"), func() {
-			grpcCredentials := insecure.NewCredentials()
-			schedulerClient, err := schedulerclient.GetV2ByAddr(context.Background(), ":8002", grpc.WithTransportCredentials(grpcCredentials))
+			// Create scheduler GRPC client.
+			schedulerClient, err := schedulerclient.GetV2ByAddr(context.Background(), ":8002", grpc.WithTransportCredentials(insecure.NewCredentials()))
 			Expect(err).NotTo(HaveOccurred())

-			hostCount := util.Servers[util.SeedClientServerName].Replicas + util.Servers[util.ClientServerName].Replicas
-			time.Sleep(10 * time.Minute)
-			Expect(getHostCountFromScheduler(schedulerClient)).To(Equal(hostCount))
+			// Get host count.
+			hostCount := util.Servers[util.ClientServerName].Replicas
+			Expect(calculateHostCountFromScheduler(schedulerClient)).To(Equal(hostCount))

+			// Get client pod name in master node.
 			podName, err := util.GetClientPodNameInMaster()
 			Expect(err).NotTo(HaveOccurred())

-			out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "delete", "pod", podName).CombinedOutput()
+			// Taint master node.
+			out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "master:NoSchedule").CombinedOutput()
 			fmt.Println(string(out))
 			Expect(err).NotTo(HaveOccurred())

-			// wait fot the client to leave gracefully
+			// Delete client pod in master, client will leave gracefully with cleanup.
+			out, err = util.KubeCtlCommand("-n", util.DragonflyNamespace, "delete", "pod", podName, "--grace-period=30").CombinedOutput()
+			fmt.Println(string(out))
+			Expect(err).NotTo(HaveOccurred())
+
+			// Wait for the client to leave gracefully.
+			time.Sleep(1 * time.Minute)
+			Expect(calculateHostCountFromScheduler(schedulerClient)).To(Equal(hostCount - 1))
+
+			// Remove taint in master node.
+			out, err = util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "master:NoSchedule-").CombinedOutput()
+			fmt.Println(string(out))
+			Expect(err).NotTo(HaveOccurred())
+
+			// Wait for the client to start again.
 			time.Sleep(1 * time.Minute)
-			Expect(getHostCountFromScheduler(schedulerClient)).To(Equal(hostCount))
 		})
 	})

-	Context("abnormally", func() {
+	Context("force delete", func() {
 		It("number of hosts should be ok", Label("host", "leave"), func() {
-			grpcCredentials := insecure.NewCredentials()
-			schedulerClient, err := schedulerclient.GetV2ByAddr(context.Background(), ":8002", grpc.WithTransportCredentials(grpcCredentials))
+			// Create scheduler GRPC client.
+			schedulerClient, err := schedulerclient.GetV2ByAddr(context.Background(), ":8002", grpc.WithTransportCredentials(insecure.NewCredentials()))
 			Expect(err).NotTo(HaveOccurred())

-			hostCount := util.Servers[util.SeedClientServerName].Replicas + util.Servers[util.ClientServerName].Replicas
-			Expect(getHostCountFromScheduler(schedulerClient)).To(Equal(hostCount))
+			// Get host count.
+			hostCount := util.Servers[util.ClientServerName].Replicas
+			Expect(calculateHostCountFromScheduler(schedulerClient)).To(Equal(hostCount))

+			// Get client pod name in master node.
 			podName, err := util.GetClientPodNameInMaster()
 			Expect(err).NotTo(HaveOccurred())

-			out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "delete", "pod", podName, "--force", "--grace-period=0").CombinedOutput()
+			// Taint master node.
+			out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "master:NoSchedule").CombinedOutput()
 			fmt.Println(string(out))
 			Expect(err).NotTo(HaveOccurred())

-			// wait for host gc
-			time.Sleep(6 * time.Minute)
-			Expect(getHostCountFromScheduler(schedulerClient)).To(Equal(hostCount))
+			// Force delete client pod in master, client will leave without cleanup.
+			out, err = util.KubeCtlCommand("-n", util.DragonflyNamespace, "delete", "pod", podName, "--force", "--grace-period=0").CombinedOutput()
+			fmt.Println(string(out))
+			Expect(err).NotTo(HaveOccurred())
+
+			// Wait for host gc.
+			time.Sleep(2 * time.Minute)
+			Expect(calculateHostCountFromScheduler(schedulerClient)).To(Equal(hostCount - 1))
+
+			// Remove taint in master node.
+			out, err = util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "master:NoSchedule-").CombinedOutput()
+			fmt.Println(string(out))
+			Expect(err).NotTo(HaveOccurred())
+
+			// Wait for the client to start again.
+			time.Sleep(1 * time.Minute)
 		})
 	})
 })

-func getHostCountFromScheduler(schedulerClient schedulerclient.V2) (hostCount int) {
+func calculateHostCountFromScheduler(schedulerClient schedulerclient.V2) (hostCount int) {
 	response, err := schedulerClient.ListHosts(context.Background(), "")
 	fmt.Println(response, err)
 	Expect(err).NotTo(HaveOccurred())

 	hosts := response.Hosts
 	for _, host := range hosts {
-		// HostID: "10.244.0.13-dragonfly-seed-client-0-seed"
-		// PeerHostID: "3dba4916d8271d6b71bb20e95a0b5494c9a941ab7ef3567f805abca8614dc128"
-		if strings.Contains(host.Id, "-") {
+		hostType := types.HostType(host.Type)
+		if hostType != types.HostTypeSuperSeed && hostType != types.HostTypeStrongSeed && hostType != types.HostTypeWeakSeed {
 			hostCount++
 		}
 	}
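Because every v2 host ID now contains "-", the old strings.Contains(host.Id, "-") heuristic can no longer separate seed peers from normal peers, so the test filters on the reported host type instead. A condensed sketch of the new filter, with the HostType values assumed as stand-ins for pkg/types:

package main

import "fmt"

type HostType int

const (
	HostTypeNormal HostType = iota
	HostTypeSuperSeed
	HostTypeStrongSeed
	HostTypeWeakSeed
)

// countNormalHosts mirrors calculateHostCountFromScheduler: count every
// host whose type is not one of the three seed variants.
func countNormalHosts(hostTypes []HostType) (hostCount int) {
	for _, hostType := range hostTypes {
		if hostType != HostTypeSuperSeed && hostType != HostTypeStrongSeed && hostType != HostTypeWeakSeed {
			hostCount++
		}
	}
	return hostCount
}

func main() {
	fmt.Println(countNormalHosts([]HostType{HostTypeNormal, HostTypeSuperSeed, HostTypeNormal})) // 2
}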
diff --git a/test/e2e/v2/util/exec.go b/test/e2e/v2/util/exec.go
index f76b0f85e48..37c67b3bbb0 100644
--- a/test/e2e/v2/util/exec.go
+++ b/test/e2e/v2/util/exec.go
@@ -24,7 +24,7 @@ import (
 )

 const (
-	kindDockerContainer = "kind-control-plane"
+	kindDockerContainer = "kind-worker"
 )

 func DockerCommand(arg ...string) *exec.Cmd {
@@ -110,18 +110,6 @@ func ClientExec() (*PodExec, error) {
 	return NewPodExec(DragonflyNamespace, podName, "client"), nil
 }

-func ClientExecAll() ([]*PodExec, error) {
-	podInWorker, err := GetClientPodNameInWorker()
-	if err != nil {
-		return nil, err
-	}
-	podInMaster, err := GetClientPodNameInMaster()
-	if err != nil {
-		return nil, err
-	}
-	return []*PodExec{NewPodExec(DragonflyNamespace, podInWorker, "client"), NewPodExec(DragonflyNamespace, podInMaster, "client")}, nil
-}
-
 func SeedClientExec(n int) (*PodExec, error) {
 	podName, err := GetSeedClientPodName(n)
 	if err != nil {
diff --git a/test/testdata/charts/config-v2.yaml b/test/testdata/charts/config-v2.yaml
index 8b83078ef06..4b54a62492e 100644
--- a/test/testdata/charts/config-v2.yaml
+++ b/test/testdata/charts/config-v2.yaml
@@ -62,6 +62,9 @@ scheduler:
     enableHost: true
   config:
     verbose: true
+    scheduler:
+      gc:
+        hostGCInterval: 2m

 seedClient:
   enable: true