From 430ff6171038bbd082fb2b1bb5d6fc2389320cd2 Mon Sep 17 00:00:00 2001
From: Lionel Villard
Date: Wed, 11 Jan 2023 10:05:23 -0500
Subject: [PATCH] fix unit test

---
 pkg/cliplugins/workload/plugin/sync_test.go | 16 ++++++++++++++++
 pkg/syncer/spec/dns/dns_process.go          | 12 +++++-------
 pkg/syncer/spec/dns/dns_process_test.go     | 16 ++++++++++++++--
 pkg/syncer/syncer.go                        |  5 +----
 4 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/pkg/cliplugins/workload/plugin/sync_test.go b/pkg/cliplugins/workload/plugin/sync_test.go
index bc9d0c0016b8..fd27ad1b1bf7 100644
--- a/pkg/cliplugins/workload/plugin/sync_test.go
+++ b/pkg/cliplugins/workload/plugin/sync_test.go
@@ -68,6 +68,14 @@ rules:
   - "get"
   - "watch"
   - "list"
+- apiGroups:
+  - "networking.k8s.io"
+  resources:
+  - networkpolicies
+  verbs:
+  - "create"
+  - "list"
+  - "watch"
 - apiGroups:
   - ""
   resources:
@@ -298,6 +306,14 @@ rules:
   - "get"
   - "watch"
   - "list"
+- apiGroups:
+  - "networking.k8s.io"
+  resources:
+  - networkpolicies
+  verbs:
+  - "create"
+  - "list"
+  - "watch"
 - apiGroups:
   - ""
   resources:
diff --git a/pkg/syncer/spec/dns/dns_process.go b/pkg/syncer/spec/dns/dns_process.go
index 0390ab594e5b..7a4cc6b83fb0 100644
--- a/pkg/syncer/spec/dns/dns_process.go
+++ b/pkg/syncer/spec/dns/dns_process.go
@@ -21,6 +21,7 @@ import (
 	"sync"
 
 	"github.com/kcp-dev/logicalcluster/v3"
 
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -100,14 +101,13 @@ func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logica
 	dnsID := shared.GetDNSID(workspace, d.syncTargetUID, d.syncTargetName)
 	logger = logger.WithValues("name", dnsID, "namespace", d.dnsNamespace)
 
-	logger.Info("checking if all dns objects exist and are up-to-date")
+	logger.V(4).Info("checking if all dns objects exist and are up-to-date")
 	ctx = klog.NewContext(ctx, logger)
 
 	// Try updating resources if not done already
 	if initialized, ok := d.initialized.Load(dnsID); !ok || !initialized.(bool) {
 		updated, err := d.lockMayUpdate(ctx, dnsID)
 		if updated {
-			logger.Info("dns updated")
 			return false, err
 		}
 	}
@@ -120,16 +120,11 @@ func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logica
 	}
 
 	if !apierrors.IsNotFound(err) {
-
 		return false, err
 	}
 
 	// No Endpoints resource was found: try to create all the DNS-related resources
-	if err := d.processNetworkPolicy(ctx, dnsID); err != nil {
-		return false, err
-	}
 	if err := d.processServiceAccount(ctx, dnsID); err != nil {
-		logger.Info("sa not ready", "dnsID", dnsID)
 		return false, err
 	}
 	if err := d.processRole(ctx, dnsID); err != nil {
@@ -144,6 +139,9 @@ func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logica
 	if err := d.processService(ctx, dnsID); err != nil {
 		return false, err
 	}
+	if err := d.processNetworkPolicy(ctx, dnsID); err != nil {
+		return false, err
+	}
 
 	// Since the Endpoints resource was not found, the DNS is not yet ready,
 	// even though all the required resources have been created
diff --git a/pkg/syncer/spec/dns/dns_process_test.go b/pkg/syncer/spec/dns/dns_process_test.go
index f73489f19354..1f0961786394 100644
--- a/pkg/syncer/spec/dns/dns_process_test.go
+++ b/pkg/syncer/spec/dns/dns_process_test.go
@@ -35,6 +35,7 @@ import (
 	kubefake "k8s.io/client-go/kubernetes/fake"
 	clienttesting "k8s.io/client-go/testing"
 
+	workloadv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/workload/v1alpha1"
 	"github.com/kcp-dev/kcp/pkg/syncer/shared"
 )
 
@@ -45,6 +46,7 @@ var (
 	roleBindingGVR   = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "rolebindings"}
 	serviceGVR       = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}
 	deploymentGVR    = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
+	networkPolicyGVR = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "networkpolicies"}
 )
 
 func init() {
@@ -57,6 +59,7 @@ func TestDNSProcess(t *testing.T) {
 	clusterName := logicalcluster.Name("root")
 	syncTargetUID := types.UID("targetuid")
 	syncTargetName := "targetname"
+	syncTargetKey := workloadv1alpha1.ToSyncTargetKey(clusterName, syncTargetName)
 	dnsID := shared.GetDNSID(clusterName, syncTargetUID, syncTargetName)
 	dnsns := "dnsns"
 
@@ -93,6 +96,7 @@ func TestDNSProcess(t *testing.T) {
 				MakeService(dnsID, dnsns),
 				MakeDeployment(dnsID, dnsns, "dnsimage"),
 				endpoints(dnsID, dnsns, "8.8.8.8"),
+				MakeNetworkPolicy(dnsID, dnsns, syncTargetKey),
 			},
 			expectReady:   true,
 			expectActions: []clienttesting.Action{},
@@ -107,6 +111,7 @@ func TestDNSProcess(t *testing.T) {
 				MakeService(dnsID, dnsns),
 				MakeDeployment(dnsID, dnsns, "dnsimage"),
 				endpoints(dnsID, dnsns, "8.8.8.8"),
+				MakeNetworkPolicy(dnsID, dnsns, syncTargetKey),
 			},
 			expectReady: false,
 			expectActions: []clienttesting.Action{
@@ -124,6 +129,7 @@ func TestDNSProcess(t *testing.T) {
 				clienttesting.NewCreateAction(roleBindingGVR, dnsns, MakeRoleBinding(dnsID, dnsns)),
 				clienttesting.NewCreateAction(deploymentGVR, dnsns, MakeDeployment(dnsID, dnsns, "dnsimage")),
 				clienttesting.NewCreateAction(serviceGVR, dnsns, MakeService(dnsID, dnsns)),
+				clienttesting.NewCreateAction(networkPolicyGVR, dnsns, MakeNetworkPolicy(dnsID, dnsns, syncTargetKey)),
 			},
 			initialized: true,
 			dnsImage:    "dnsimage",
@@ -135,6 +141,7 @@ func TestDNSProcess(t *testing.T) {
 				MakeRoleBinding(dnsID, dnsns),
 				MakeService(dnsID, dnsns),
 				MakeDeployment(dnsID, dnsns, "dnsimage"),
+				MakeNetworkPolicy(dnsID, dnsns, syncTargetKey),
 			},
 			expectReady:   false,
 			expectActions: []clienttesting.Action{},
@@ -148,6 +155,7 @@ func TestDNSProcess(t *testing.T) {
 				MakeRoleBinding(dnsID, dnsns),
 				MakeService(dnsID, dnsns),
 				MakeDeployment(dnsID, dnsns, "dnsimage"),
+				MakeNetworkPolicy(dnsID, dnsns, syncTargetKey),
 			},
 			expectReady:   false,
 			expectActions: []clienttesting.Action{},
@@ -161,6 +169,7 @@ func TestDNSProcess(t *testing.T) {
 				MakeRoleBinding(dnsID, dnsns),
 				MakeService(dnsID, dnsns),
 				MakeDeployment(dnsID, dnsns, "dnsimage"),
+				MakeNetworkPolicy(dnsID, dnsns, syncTargetKey),
 			},
 			expectReady: false,
 			expectActions: []clienttesting.Action{
@@ -185,9 +194,10 @@ func TestDNSProcess(t *testing.T) {
 			deploymentLister := informerFactory.Apps().V1().Deployments().Lister()
 			serviceLister := informerFactory.Core().V1().Services().Lister()
 			endpointLister := informerFactory.Core().V1().Endpoints().Lister()
+			networkPolicyLister := informerFactory.Networking().V1().NetworkPolicies().Lister()
 
 			controller := NewDNSProcessor(kubeClient, serviceAccountLister, roleLister, roleBindingLister,
-				deploymentLister, serviceLister, endpointLister, syncTargetName, syncTargetUID,
+				deploymentLister, serviceLister, endpointLister, networkPolicyLister, syncTargetUID, syncTargetName, syncTargetKey,
 				dnsns, tc.dnsImage)
 
 			controller.initialized.Store(dnsID, tc.initialized)
@@ -209,6 +219,7 @@ func TestDNSProcess(t *testing.T) {
 func TestMultipleDNSInitialization(t *testing.T) {
 	syncTargetUID := types.UID("targetuid")
 	syncTargetName := "targetname"
+	syncTargetKey := workloadv1alpha1.ToSyncTargetKey("root", syncTargetName)
 	dnsns := "dnsns"
 
 	clusterName1 := logicalcluster.Name("root1")
@@ -232,9 +243,10 @@ func TestMultipleDNSInitialization(t *testing.T) {
 	deploymentLister := informerFactory.Apps().V1().Deployments().Lister()
 	serviceLister := informerFactory.Core().V1().Services().Lister()
 	endpointLister := informerFactory.Core().V1().Endpoints().Lister()
+	networkPolicyLister := informerFactory.Networking().V1().NetworkPolicies().Lister()
 
 	controller := NewDNSProcessor(kubeClient, serviceAccountLister, roleLister, roleBindingLister,
-		deploymentLister, serviceLister, endpointLister, syncTargetName, syncTargetUID,
+		deploymentLister, serviceLister, endpointLister, networkPolicyLister, syncTargetUID, syncTargetName, syncTargetKey,
 		dnsns, "animage")
 
 	informerFactory.Start(ctx.Done())
diff --git a/pkg/syncer/syncer.go b/pkg/syncer/syncer.go
index a75b6a0cbdd8..d7425e1d9e61 100644
--- a/pkg/syncer/syncer.go
+++ b/pkg/syncer/syncer.go
@@ -203,9 +203,6 @@ func StartSyncer(ctx context.Context, cfg *SyncerConfig, numSyncerThreads int, i
 		return err
 	}
 
-	// downstreamInformerFactory to watch some DNS-related resources in the dns namespace
-	downstreamInformerFactory := kubernetesinformers.NewSharedInformerFactoryWithOptions(downstreamKubeClient, resyncPeriod, kubernetesinformers.WithNamespace(syncerNamespace))
-
 	syncTargetGVRSource, err := resourcesync.NewSyncTargetGVRSource(
 		logger,
 		upstreamSyncerDiscoveryClient.DiscoveryInterface,
@@ -274,7 +271,7 @@ func StartSyncer(ctx context.Context, cfg *SyncerConfig, numSyncerThreads int, i
 	specSyncer, err := spec.NewSpecSyncer(logger, logicalcluster.From(syncTarget), cfg.SyncTargetName, syncTargetKey, upstreamURL, advancedSchedulingEnabled,
 		upstreamSyncerClusterClient, downstreamDynamicClient, downstreamKubeClient, ddsifForUpstreamSyncer, ddsifForDownstream,
 		downstreamNamespaceController, syncTarget.GetUID(),
-		syncerNamespace, downstreamInformerFactory, cfg.DNSImage)
+		syncerNamespace, syncerNamespaceInformerFactory, cfg.DNSImage)
 	if err != nil {
 		return err
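
Note on the test fixture: the updated unit tests pass a MakeNetworkPolicy(dnsID, dnsns, syncTargetKey) object alongside the existing ServiceAccount/Role/RoleBinding/Service/Deployment fixtures, and expect a corresponding create action against networkPolicyGVR. That helper is not part of this patch; purely as an illustration, a minimal sketch of a builder with that shape might look like the following (package placement, label keys, and the DNS port are assumptions, not taken from this commit):

package dns

import (
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// makeNetworkPolicySketch is an illustrative stand-in for the MakeNetworkPolicy
// helper referenced by the tests above; the real helper is defined elsewhere in
// the kcp tree. Label keys, selectors, and the port are assumptions.
func makeNetworkPolicySketch(name, namespace, syncTargetKey string) *networkingv1.NetworkPolicy {
	udp := corev1.ProtocolUDP
	dnsPort := intstr.FromInt(5353)
	return &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: networkingv1.NetworkPolicySpec{
			// Select the DNS pods deployed for this sync target (assumed "app" label).
			PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"app": name}},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			Ingress: []networkingv1.NetworkPolicyIngressRule{{
				// Allow DNS queries only from pods belonging to this sync target
				// (hypothetical label key carrying syncTargetKey).
				From: []networkingv1.NetworkPolicyPeer{{
					PodSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"workload.kcp.io/sync-target-key": syncTargetKey},
					},
				}},
				Ports: []networkingv1.NetworkPolicyPort{{Protocol: &udp, Port: &dnsPort}},
			}},
		},
	}
}

Threading syncTargetKey through NewDNSProcessor presumably lets the generated policy restrict DNS traffic to workloads synced for that particular sync target, which is also why the expected cluster role in sync_test.go now includes create/list/watch on networkpolicies.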