From df2c102d7b00a2c5ccd8f52cf9746f3a2932e721 Mon Sep 17 00:00:00 2001 From: Lionel Villard Date: Mon, 28 Nov 2022 18:43:01 -0300 Subject: [PATCH 01/12] Adding dns network policies - Part 1 --- pkg/cliplugins/workload/plugin/syncer.yaml | 8 ++++ pkg/syncer/spec/dns/dns_process.go | 46 ++++++++++++++++++---- pkg/syncer/spec/dns/networkpolicy_dns.yaml | 33 ++++++++++++++++ pkg/syncer/spec/dns/resources.go | 16 ++++++++ pkg/syncer/spec/spec_controller.go | 3 +- pkg/syncer/syncer.go | 7 +++- 6 files changed, 104 insertions(+), 9 deletions(-) create mode 100644 pkg/syncer/spec/dns/networkpolicy_dns.yaml diff --git a/pkg/cliplugins/workload/plugin/syncer.yaml b/pkg/cliplugins/workload/plugin/syncer.yaml index 239867ae44e..25bd500416b 100644 --- a/pkg/cliplugins/workload/plugin/syncer.yaml +++ b/pkg/cliplugins/workload/plugin/syncer.yaml @@ -42,6 +42,14 @@ rules: - "get" - "watch" - "list" +- apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - "create" + - "list" + - "watch" {{- range $groupMapping := .GroupMappings}} - apiGroups: - "{{$groupMapping.APIGroup}}" diff --git a/pkg/syncer/spec/dns/dns_process.go b/pkg/syncer/spec/dns/dns_process.go index 541c0158f3d..0390ab594e5 100644 --- a/pkg/syncer/spec/dns/dns_process.go +++ b/pkg/syncer/spec/dns/dns_process.go @@ -21,7 +21,6 @@ import ( "sync" "github.com/kcp-dev/logicalcluster/v3" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -30,6 +29,7 @@ import ( "k8s.io/client-go/kubernetes" listersappsv1 "k8s.io/client-go/listers/apps/v1" listerscorev1 "k8s.io/client-go/listers/core/v1" + listersnetworkingv1 "k8s.io/client-go/listers/networking/v1" listersrbacv1 "k8s.io/client-go/listers/rbac/v1" "k8s.io/klog/v2" @@ -45,9 +45,11 @@ type DNSProcessor struct { deploymentLister listersappsv1.DeploymentLister serviceLister listerscorev1.ServiceLister endpointLister listerscorev1.EndpointsLister + networkPolicyLister listersnetworkingv1.NetworkPolicyLister - syncTargetName string syncTargetUID types.UID + syncTargetName string + syncTargetKey string dnsNamespace string // namespace containing all DNS objects dnsImage string @@ -63,8 +65,10 @@ func NewDNSProcessor( deploymentLister listersappsv1.DeploymentLister, serviceLister listerscorev1.ServiceLister, endpointLister listerscorev1.EndpointsLister, - syncTargetName string, + networkPolicyLister listersnetworkingv1.NetworkPolicyLister, syncTargetUID types.UID, + syncTargetName string, + syncTargetKey string, dnsNamespace string, dnsImage string) *DNSProcessor { return &DNSProcessor{ @@ -75,8 +79,10 @@ func NewDNSProcessor( deploymentLister: deploymentLister, serviceLister: serviceLister, endpointLister: endpointLister, - syncTargetName: syncTargetName, + networkPolicyLister: networkPolicyLister, syncTargetUID: syncTargetUID, + syncTargetName: syncTargetName, + syncTargetKey: syncTargetKey, dnsNamespace: dnsNamespace, dnsImage: dnsImage, } @@ -89,18 +95,19 @@ func NewDNSProcessor( // during the check or creation of the DNS-related resources. 
func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logicalcluster.Name) (bool, error) { logger := klog.FromContext(ctx) - logger.WithName("dns") + logger = logger.WithName("dns") dnsID := shared.GetDNSID(workspace, d.syncTargetUID, d.syncTargetName) - logger.WithValues("name", dnsID, "namespace", d.dnsNamespace) + logger = logger.WithValues("name", dnsID, "namespace", d.dnsNamespace) - logger.V(4).Info("checking if all dns objects exist and are up-to-date") + logger.Info("checking if all dns objects exist and are up-to-date") ctx = klog.NewContext(ctx, logger) // Try updating resources if not done already if initialized, ok := d.initialized.Load(dnsID); !ok || !initialized.(bool) { updated, err := d.lockMayUpdate(ctx, dnsID) if updated { + logger.Info("dns updated") return false, err } } @@ -113,11 +120,16 @@ func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logica } if !apierrors.IsNotFound(err) { + return false, err } // No Endpoints resource was found: try to create all the DNS-related resources + if err := d.processNetworkPolicy(ctx, dnsID); err != nil { + return false, err + } if err := d.processServiceAccount(ctx, dnsID); err != nil { + logger.Info("sa not ready", "dnsID", dnsID) return false, err } if err := d.processRole(ctx, dnsID); err != nil { @@ -132,6 +144,7 @@ func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logica if err := d.processService(ctx, dnsID); err != nil { return false, err } + // Since the Endpoints resource was not found, the DNS is not yet ready, // even though all the required resources have been created // (deployment still needs to start). @@ -233,6 +246,25 @@ func (d *DNSProcessor) processService(ctx context.Context, name string) error { return nil } +func (d *DNSProcessor) processNetworkPolicy(ctx context.Context, name string) error { + logger := klog.FromContext(ctx) + + _, err := d.networkPolicyLister.NetworkPolicies(d.dnsNamespace).Get(name) + if apierrors.IsNotFound(err) { + expected := MakeNetworkPolicy(name, d.dnsNamespace, d.syncTargetKey) + _, err = d.downstreamKubeClient.NetworkingV1().NetworkPolicies(d.dnsNamespace).Create(ctx, expected, metav1.CreateOptions{}) + if err == nil { + logger.Info("NetworkPolicy created") + } + } + if err != nil && !apierrors.IsAlreadyExists(err) { + logger.Error(err, "failed to get NetworkPolicy (retrying)") + return err + } + + return nil +} + func hasAtLeastOneReadyAddress(endpoints *corev1.Endpoints) bool { for _, s := range endpoints.Subsets { if len(s.Addresses) > 0 && s.Addresses[0].IP != "" { diff --git a/pkg/syncer/spec/dns/networkpolicy_dns.yaml b/pkg/syncer/spec/dns/networkpolicy_dns.yaml new file mode 100644 index 00000000000..34e1cd06cc3 --- /dev/null +++ b/pkg/syncer/spec/dns/networkpolicy_dns.yaml @@ -0,0 +1,33 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: Name + namespace: Namespace +spec: + podSelector: + matchLabels: + app: Name + policyTypes: + - Ingress + - Egress + ingress: + - from: + - namespaceSelector: + matchLabels: + internal.workload.kcp.dev/cluster: Cluster + ports: + - protocol: TCP + port: 5353 + - protocol: UDP + port: 5353 + egress: + # Only give access to coredns in kube-system + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + ports: + - protocol: TCP + port: 53 + - protocol: UDP + port: 53 diff --git a/pkg/syncer/spec/dns/resources.go b/pkg/syncer/spec/dns/resources.go index 327abf2a0ca..a0ad83d7cec 100644 --- 
a/pkg/syncer/spec/dns/resources.go +++ b/pkg/syncer/spec/dns/resources.go @@ -23,10 +23,13 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/yaml" + + workloadv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/workload/v1alpha1" ) //go:embed *.yaml @@ -38,6 +41,7 @@ var ( roleBindingTemplate rbacv1.RoleBinding deploymentTemplate appsv1.Deployment serviceTemplate corev1.Service + networkPolicyTemplate networkingv1.NetworkPolicy ) func init() { @@ -46,6 +50,7 @@ func init() { loadTemplateOrDie("rolebinding_dns.yaml", &roleBindingTemplate) loadTemplateOrDie("deployment_dns.yaml", &deploymentTemplate) loadTemplateOrDie("service_dns.yaml", &serviceTemplate) + loadTemplateOrDie("networkpolicy_dns.yaml", &networkPolicyTemplate) } func MakeServiceAccount(name, namespace string) *corev1.ServiceAccount { @@ -104,6 +109,17 @@ func MakeService(name, namespace string) *corev1.Service { return service } +func MakeNetworkPolicy(name, namespace, cluster string) *networkingv1.NetworkPolicy { + np := networkPolicyTemplate.DeepCopy() + + np.Name = name + np.Namespace = namespace + np.Spec.PodSelector.MatchLabels["app"] = name + np.Spec.Ingress[0].From[0].NamespaceSelector.MatchLabels[workloadv1alpha1.InternalDownstreamClusterLabel] = cluster + + return np +} + // load a YAML resource into a typed kubernetes object. func loadTemplateOrDie(filename string, obj interface{}) { raw, err := dnsFiles.ReadFile(filename) diff --git a/pkg/syncer/spec/spec_controller.go b/pkg/syncer/spec/spec_controller.go index 33bf5641702..ce811a7a28d 100644 --- a/pkg/syncer/spec/spec_controller.go +++ b/pkg/syncer/spec/spec_controller.go @@ -269,7 +269,8 @@ func NewSpecSyncer(syncerLogger logr.Logger, syncTargetClusterName logicalcluste syncerNamespaceInformerFactory.Apps().V1().Deployments().Lister(), dnsServiceLister, syncerNamespaceInformerFactory.Core().V1().Endpoints().Lister(), - syncTargetName, syncTargetUID, dnsNamespace, dnsImage) + syncerNamespaceInformerFactory.Networking().V1().NetworkPolicies().Lister(), + syncTargetUID, syncTargetName, syncTargetKey, dnsNamespace, dnsImage) return &c, nil } diff --git a/pkg/syncer/syncer.go b/pkg/syncer/syncer.go index 6f4f341fe17..8826397f9c0 100644 --- a/pkg/syncer/syncer.go +++ b/pkg/syncer/syncer.go @@ -199,7 +199,11 @@ func StartSyncer(ctx context.Context, cfg *SyncerConfig, numSyncerThreads int, i // syncerNamespaceInformerFactory to watch some DNS-related resources in the dns namespace syncerNamespaceInformerFactory := kubernetesinformers.NewSharedInformerFactoryWithOptions(downstreamKubeClient, resyncPeriod, kubernetesinformers.WithNamespace(syncerNamespace)) + // downstreamInformerFactory to watch some DNS-related resources in the dns namespace + downstreamInformerFactory := kubernetesinformers.NewSharedInformerFactoryWithOptions(downstreamKubeClient, resyncPeriod, kubernetesinformers.WithNamespace(syncerNamespace)) + downstreamSyncerDiscoveryClient := discovery.NewDiscoveryClient(downstreamKubeClient.RESTClient()) + syncTargetGVRSource, err := resourcesync.NewSyncTargetGVRSource( logger, downstreamSyncerDiscoveryClient, @@ -279,7 +283,8 @@ func StartSyncer(ctx context.Context, cfg *SyncerConfig, numSyncerThreads int, i specSyncer, err := spec.NewSpecSyncer(logger, logicalcluster.From(syncTarget), cfg.SyncTargetName, syncTargetKey, upstreamURL, advancedSchedulingEnabled, 
upstreamSyncerClusterClient, downstreamDynamicClient, downstreamKubeClient, ddsifForUpstreamSyncer, ddsifForDownstream, downstreamNamespaceController, syncTarget.GetUID(), - syncerNamespace, syncerNamespaceInformerFactory, cfg.DNSImage) + syncerNamespace, downstreamInformerFactory, cfg.DNSImage) + if err != nil { return err } From e5623101a7d5058a1cee3ede3c9cb7cfa0afdaa1 Mon Sep 17 00:00:00 2001 From: Lionel Villard Date: Wed, 11 Jan 2023 10:05:23 -0500 Subject: [PATCH 02/12] fix unit test --- pkg/cliplugins/workload/plugin/sync_test.go | 16 ++++++++++++++++ pkg/syncer/spec/dns/dns_process.go | 12 +++++------- pkg/syncer/spec/dns/dns_process_test.go | 16 ++++++++++++++-- pkg/syncer/syncer.go | 5 +---- 4 files changed, 36 insertions(+), 13 deletions(-) diff --git a/pkg/cliplugins/workload/plugin/sync_test.go b/pkg/cliplugins/workload/plugin/sync_test.go index bc9d0c0016b..fd27ad1b1bf 100644 --- a/pkg/cliplugins/workload/plugin/sync_test.go +++ b/pkg/cliplugins/workload/plugin/sync_test.go @@ -68,6 +68,14 @@ rules: - "get" - "watch" - "list" +- apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - "create" + - "list" + - "watch" - apiGroups: - "" resources: @@ -298,6 +306,14 @@ rules: - "get" - "watch" - "list" +- apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - "create" + - "list" + - "watch" - apiGroups: - "" resources: diff --git a/pkg/syncer/spec/dns/dns_process.go b/pkg/syncer/spec/dns/dns_process.go index 0390ab594e5..7a4cc6b83fb 100644 --- a/pkg/syncer/spec/dns/dns_process.go +++ b/pkg/syncer/spec/dns/dns_process.go @@ -21,6 +21,7 @@ import ( "sync" "github.com/kcp-dev/logicalcluster/v3" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -100,14 +101,13 @@ func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logica dnsID := shared.GetDNSID(workspace, d.syncTargetUID, d.syncTargetName) logger = logger.WithValues("name", dnsID, "namespace", d.dnsNamespace) - logger.Info("checking if all dns objects exist and are up-to-date") + logger.V(4).Info("checking if all dns objects exist and are up-to-date") ctx = klog.NewContext(ctx, logger) // Try updating resources if not done already if initialized, ok := d.initialized.Load(dnsID); !ok || !initialized.(bool) { updated, err := d.lockMayUpdate(ctx, dnsID) if updated { - logger.Info("dns updated") return false, err } } @@ -120,16 +120,11 @@ func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logica } if !apierrors.IsNotFound(err) { - return false, err } // No Endpoints resource was found: try to create all the DNS-related resources - if err := d.processNetworkPolicy(ctx, dnsID); err != nil { - return false, err - } if err := d.processServiceAccount(ctx, dnsID); err != nil { - logger.Info("sa not ready", "dnsID", dnsID) return false, err } if err := d.processRole(ctx, dnsID); err != nil { @@ -144,6 +139,9 @@ func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logica if err := d.processService(ctx, dnsID); err != nil { return false, err } + if err := d.processNetworkPolicy(ctx, dnsID); err != nil { + return false, err + } // Since the Endpoints resource was not found, the DNS is not yet ready, // even though all the required resources have been created diff --git a/pkg/syncer/spec/dns/dns_process_test.go b/pkg/syncer/spec/dns/dns_process_test.go index f73489f1935..1f096178639 100644 --- a/pkg/syncer/spec/dns/dns_process_test.go +++ 
b/pkg/syncer/spec/dns/dns_process_test.go @@ -35,6 +35,7 @@ import ( kubefake "k8s.io/client-go/kubernetes/fake" clienttesting "k8s.io/client-go/testing" + workloadv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/workload/v1alpha1" "github.com/kcp-dev/kcp/pkg/syncer/shared" ) @@ -45,6 +46,7 @@ var ( roleBindingGVR = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "rolebindings"} serviceGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"} deploymentGVR = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} + networkPolicyGVR = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "networkpolicies"} ) func init() { @@ -57,6 +59,7 @@ func TestDNSProcess(t *testing.T) { clusterName := logicalcluster.Name("root") syncTargetUID := types.UID("targetuid") syncTargetName := "targetname" + syncTargetKey := workloadv1alpha1.ToSyncTargetKey(clusterName, syncTargetName) dnsID := shared.GetDNSID(clusterName, syncTargetUID, syncTargetName) dnsns := "dnsns" @@ -93,6 +96,7 @@ func TestDNSProcess(t *testing.T) { MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), endpoints(dnsID, dnsns, "8.8.8.8"), + MakeNetworkPolicy(dnsID, dnsns, syncTargetKey), }, expectReady: true, expectActions: []clienttesting.Action{}, @@ -107,6 +111,7 @@ func TestDNSProcess(t *testing.T) { MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), endpoints(dnsID, dnsns, "8.8.8.8"), + MakeNetworkPolicy(dnsID, dnsns, syncTargetKey), }, expectReady: false, expectActions: []clienttesting.Action{ @@ -124,6 +129,7 @@ func TestDNSProcess(t *testing.T) { clienttesting.NewCreateAction(roleBindingGVR, dnsns, MakeRoleBinding(dnsID, dnsns)), clienttesting.NewCreateAction(deploymentGVR, dnsns, MakeDeployment(dnsID, dnsns, "dnsimage")), clienttesting.NewCreateAction(serviceGVR, dnsns, MakeService(dnsID, dnsns)), + clienttesting.NewCreateAction(networkPolicyGVR, dnsns, MakeNetworkPolicy(dnsID, dnsns, syncTargetKey)), }, initialized: true, dnsImage: "dnsimage", @@ -135,6 +141,7 @@ func TestDNSProcess(t *testing.T) { MakeRoleBinding(dnsID, dnsns), MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), + MakeNetworkPolicy(dnsID, dnsns, syncTargetKey), }, expectReady: false, expectActions: []clienttesting.Action{}, @@ -148,6 +155,7 @@ func TestDNSProcess(t *testing.T) { MakeRoleBinding(dnsID, dnsns), MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), + MakeNetworkPolicy(dnsID, dnsns, syncTargetKey), }, expectReady: false, expectActions: []clienttesting.Action{}, @@ -161,6 +169,7 @@ func TestDNSProcess(t *testing.T) { MakeRoleBinding(dnsID, dnsns), MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), + MakeNetworkPolicy(dnsID, dnsns, syncTargetKey), }, expectReady: false, expectActions: []clienttesting.Action{ @@ -185,9 +194,10 @@ func TestDNSProcess(t *testing.T) { deploymentLister := informerFactory.Apps().V1().Deployments().Lister() serviceLister := informerFactory.Core().V1().Services().Lister() endpointLister := informerFactory.Core().V1().Endpoints().Lister() + networkPolicyLister := informerFactory.Networking().V1().NetworkPolicies().Lister() controller := NewDNSProcessor(kubeClient, serviceAccountLister, roleLister, roleBindingLister, - deploymentLister, serviceLister, endpointLister, syncTargetName, syncTargetUID, + deploymentLister, serviceLister, endpointLister, networkPolicyLister, syncTargetUID, syncTargetName, syncTargetKey, dnsns, tc.dnsImage) 
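The expectActions lists in these test cases are checked against the calls recorded by client-go's fake clientset, which remembers every API request it serves. As a minimal standalone sketch of that mechanism (names, namespace, and the TestFakeClientRecordsActions function are arbitrary illustrations, not part of this patch):

package dns_test

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubefake "k8s.io/client-go/kubernetes/fake"
)

// Sketch: the fake clientset records every call it serves, so a test can
// assert on the exact sequence of API actions instead of on cluster state.
func TestFakeClientRecordsActions(t *testing.T) {
	kubeClient := kubefake.NewSimpleClientset() // no pre-loaded objects

	sa := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{Name: "kcp-dns-example", Namespace: "dnsns"},
	}
	if _, err := kubeClient.CoreV1().ServiceAccounts("dnsns").
		Create(context.Background(), sa, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	// kubeClient.Actions() returns everything invoked on the fake client; the
	// DNS tests compare such a recording against expectActions built with
	// clienttesting.NewCreateAction / NewGetAction.
	actions := kubeClient.Actions()
	if len(actions) != 1 || !actions[0].Matches("create", "serviceaccounts") {
		t.Fatalf("expected a single create of serviceaccounts, got %#v", actions)
	}
}
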
controller.initialized.Store(dnsID, tc.initialized) @@ -209,6 +219,7 @@ func TestDNSProcess(t *testing.T) { func TestMultipleDNSInitialization(t *testing.T) { syncTargetUID := types.UID("targetuid") syncTargetName := "targetname" + syncTargetKey := workloadv1alpha1.ToSyncTargetKey("root", syncTargetName) dnsns := "dnsns" clusterName1 := logicalcluster.Name("root1") @@ -232,9 +243,10 @@ func TestMultipleDNSInitialization(t *testing.T) { deploymentLister := informerFactory.Apps().V1().Deployments().Lister() serviceLister := informerFactory.Core().V1().Services().Lister() endpointLister := informerFactory.Core().V1().Endpoints().Lister() + networkPolicyLister := informerFactory.Networking().V1().NetworkPolicies().Lister() controller := NewDNSProcessor(kubeClient, serviceAccountLister, roleLister, roleBindingLister, - deploymentLister, serviceLister, endpointLister, syncTargetName, syncTargetUID, + deploymentLister, serviceLister, endpointLister, networkPolicyLister, syncTargetUID, syncTargetName, syncTargetKey, dnsns, "animage") informerFactory.Start(ctx.Done()) diff --git a/pkg/syncer/syncer.go b/pkg/syncer/syncer.go index 8826397f9c0..c336e7778f4 100644 --- a/pkg/syncer/syncer.go +++ b/pkg/syncer/syncer.go @@ -199,9 +199,6 @@ func StartSyncer(ctx context.Context, cfg *SyncerConfig, numSyncerThreads int, i // syncerNamespaceInformerFactory to watch some DNS-related resources in the dns namespace syncerNamespaceInformerFactory := kubernetesinformers.NewSharedInformerFactoryWithOptions(downstreamKubeClient, resyncPeriod, kubernetesinformers.WithNamespace(syncerNamespace)) - // downstreamInformerFactory to watch some DNS-related resources in the dns namespace - downstreamInformerFactory := kubernetesinformers.NewSharedInformerFactoryWithOptions(downstreamKubeClient, resyncPeriod, kubernetesinformers.WithNamespace(syncerNamespace)) - downstreamSyncerDiscoveryClient := discovery.NewDiscoveryClient(downstreamKubeClient.RESTClient()) syncTargetGVRSource, err := resourcesync.NewSyncTargetGVRSource( @@ -283,7 +280,7 @@ func StartSyncer(ctx context.Context, cfg *SyncerConfig, numSyncerThreads int, i specSyncer, err := spec.NewSpecSyncer(logger, logicalcluster.From(syncTarget), cfg.SyncTargetName, syncTargetKey, upstreamURL, advancedSchedulingEnabled, upstreamSyncerClusterClient, downstreamDynamicClient, downstreamKubeClient, ddsifForUpstreamSyncer, ddsifForDownstream, downstreamNamespaceController, syncTarget.GetUID(), - syncerNamespace, downstreamInformerFactory, cfg.DNSImage) + syncerNamespace, syncerNamespaceInformerFactory, cfg.DNSImage) if err != nil { return err From 5d7672c0a6803e1570faa868a349426d9e1de615 Mon Sep 17 00:00:00 2001 From: Lionel Villard Date: Wed, 11 Jan 2023 11:42:29 -0500 Subject: [PATCH 03/12] add networkpolicies in the fake pcluster --- .../networking.k8s.io_networkpolicies.yaml | 545 ++++++++++++++++++ test/e2e/framework/kcp.go | 1 + 2 files changed, 546 insertions(+) create mode 100755 test/e2e/fixtures/kube/networking.k8s.io_networkpolicies.yaml diff --git a/test/e2e/fixtures/kube/networking.k8s.io_networkpolicies.yaml b/test/e2e/fixtures/kube/networking.k8s.io_networkpolicies.yaml new file mode 100755 index 00000000000..21cde0bf6d4 --- /dev/null +++ b/test/e2e/fixtures/kube/networking.k8s.io_networkpolicies.yaml @@ -0,0 +1,545 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: /~https://github.com/kcp-dev/kubernetes/pull/4 + creationTimestamp: null + name: networkpolicies.networking.k8s.io +spec: 
+ conversion: + strategy: None + group: networking.k8s.io + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + shortNames: + - netpol + singular: networkpolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkPolicy describes what network traffic is allowed for a + set of Pods + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of the desired behavior for this NetworkPolicy. + properties: + egress: + description: List of egress rules to be applied to the selected pods. + Outgoing traffic is allowed if there are no NetworkPolicies selecting + the pod (and cluster policy otherwise allows the traffic), OR if + the traffic matches at least one egress rule across all of the NetworkPolicy + objects whose podSelector matches the pod. If this field is empty + then this NetworkPolicy limits all outgoing traffic (and serves + solely to ensure that the pods it selects are isolated by default). + This field is beta-level in 1.8 + items: + description: NetworkPolicyEgressRule describes a particular set + of traffic that is allowed out of pods matched by a NetworkPolicySpec's + podSelector. The traffic must match both ports and to. This type + is beta-level in 1.8 + properties: + ports: + description: List of destination ports for outgoing traffic. + Each item in this list is combined using a logical OR. If + this field is empty or missing, this rule matches all ports + (traffic not restricted by port). If this field is present + and contains at least one item, then this rule allows traffic + only if the traffic matches at least one port in the list. + items: + description: NetworkPolicyPort describes a port to allow traffic + on + properties: + endPort: + description: If set, indicates that the range of ports + from port to endPort, inclusive, should be allowed by + the policy. This field cannot be defined if the port + field is not defined or if the port field is defined + as a named (string) port. The endPort must be equal + or greater than port. + format: int32 + type: integer + port: + anyOf: + - type: integer + - type: string + description: The port on the given protocol. This can + either be a numerical or named port on a pod. If this + field is not provided, this matches all port names and + numbers. If present, only traffic on the specified protocol + AND port will be matched. + x-kubernetes-int-or-string: true + protocol: + description: The protocol (TCP, UDP, or SCTP) which traffic + must match. If not specified, this field defaults to + TCP. + type: string + type: object + type: array + to: + description: List of destinations for outgoing traffic of pods + selected for this rule. Items in this list are combined using + a logical OR operation. 
If this field is empty or missing, + this rule matches all destinations (traffic not restricted + by destination). If this field is present and contains at + least one item, this rule allows traffic only if the traffic + matches at least one item in the to list. + items: + description: NetworkPolicyPeer describes a peer to allow traffic + to/from. Only certain combinations of fields are allowed + properties: + ipBlock: + description: IPBlock defines policy on a particular IPBlock. + If this field is set then neither of the other fields + can be. + properties: + cidr: + description: CIDR is a string representing the IP + Block Valid examples are "192.168.1.1/24" or "2001:db9::/64" + type: string + except: + description: Except is a slice of CIDRs that should + not be included within an IP Block Valid examples + are "192.168.1.1/24" or "2001:db9::/64" Except values + will be rejected if they are outside the CIDR range + items: + type: string + type: array + required: + - cidr + type: object + namespaceSelector: + description: |- + Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces. + + If PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + podSelector: + description: |- + This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods. + + If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. 
+ type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + type: object + type: array + type: object + type: array + ingress: + description: List of ingress rules to be applied to the selected pods. + Traffic is allowed to a pod if there are no NetworkPolicies selecting + the pod (and cluster policy otherwise allows the traffic), OR if + the traffic source is the pod's local node, OR if the traffic matches + at least one ingress rule across all of the NetworkPolicy objects + whose podSelector matches the pod. If this field is empty then this + NetworkPolicy does not allow any traffic (and serves solely to ensure + that the pods it selects are isolated by default) + items: + description: NetworkPolicyIngressRule describes a particular set + of traffic that is allowed to the pods matched by a NetworkPolicySpec's + podSelector. The traffic must match both ports and from. + properties: + from: + description: List of sources which should be able to access + the pods selected for this rule. Items in this list are combined + using a logical OR operation. If this field is empty or missing, + this rule matches all sources (traffic not restricted by source). + If this field is present and contains at least one item, this + rule allows traffic only if the traffic matches at least one + item in the from list. + items: + description: NetworkPolicyPeer describes a peer to allow traffic + to/from. Only certain combinations of fields are allowed + properties: + ipBlock: + description: IPBlock defines policy on a particular IPBlock. + If this field is set then neither of the other fields + can be. + properties: + cidr: + description: CIDR is a string representing the IP + Block Valid examples are "192.168.1.1/24" or "2001:db9::/64" + type: string + except: + description: Except is a slice of CIDRs that should + not be included within an IP Block Valid examples + are "192.168.1.1/24" or "2001:db9::/64" Except values + will be rejected if they are outside the CIDR range + items: + type: string + type: array + required: + - cidr + type: object + namespaceSelector: + description: |- + Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces. + + If PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + podSelector: + description: |- + This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods. + + If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + type: object + type: array + ports: + description: List of ports which should be made accessible on + the pods selected for this rule. Each item in this list is + combined using a logical OR. If this field is empty or missing, + this rule matches all ports (traffic not restricted by port). + If this field is present and contains at least one item, then + this rule allows traffic only if the traffic matches at least + one port in the list. + items: + description: NetworkPolicyPort describes a port to allow traffic + on + properties: + endPort: + description: If set, indicates that the range of ports + from port to endPort, inclusive, should be allowed by + the policy. 
This field cannot be defined if the port + field is not defined or if the port field is defined + as a named (string) port. The endPort must be equal + or greater than port. + format: int32 + type: integer + port: + anyOf: + - type: integer + - type: string + description: The port on the given protocol. This can + either be a numerical or named port on a pod. If this + field is not provided, this matches all port names and + numbers. If present, only traffic on the specified protocol + AND port will be matched. + x-kubernetes-int-or-string: true + protocol: + description: The protocol (TCP, UDP, or SCTP) which traffic + must match. If not specified, this field defaults to + TCP. + type: string + type: object + type: array + type: object + type: array + podSelector: + description: Selects the pods to which this NetworkPolicy object applies. + The array of ingress rules is applied to any pods selected by this + field. Multiple network policies can select the same set of pods. + In this case, the ingress rules for each are combined additively. + This field is NOT optional and follows standard label selector semantics. + An empty podSelector matches all pods in this namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + policyTypes: + description: List of rule types that the NetworkPolicy relates to. + Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. + If this field is not specified, it will default based on the existence + of Ingress or Egress rules; policies that contain an Egress section + are assumed to affect Egress, and all policies (whether or not they + contain an Ingress section) are assumed to affect Ingress. If you + want to write an egress-only policy, you must explicitly specify + policyTypes [ "Egress" ]. Likewise, if you want to write a policy + that specifies that no egress is allowed, you must specify a policyTypes + value that include "Egress" (since such a policy would not include + an Egress section and would otherwise default to just [ "Ingress" + ]). This field is beta-level in 1.8 + items: + type: string + type: array + required: + - podSelector + type: object + status: + description: 'Status is the current state of the NetworkPolicy. 
More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + conditions: + description: Conditions holds an array of metav1.Condition that describe + the state of the NetworkPolicy. Current service state + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: + - v1 diff --git a/test/e2e/framework/kcp.go b/test/e2e/framework/kcp.go index df95c32f8bc..24bc970e739 100644 --- a/test/e2e/framework/kcp.go +++ b/test/e2e/framework/kcp.go @@ -884,6 +884,7 @@ func NewFakeWorkloadServer(t *testing.T, server RunningServer, org logicalcluste metav1.GroupResource{Group: "core.k8s.io", Resource: "endpoints"}, metav1.GroupResource{Group: "core.k8s.io", Resource: "pods"}, metav1.GroupResource{Group: "networking.k8s.io", Resource: "ingresses"}, + metav1.GroupResource{Group: "networking.k8s.io", Resource: "networkpolicies"}, ) // Wait for the required crds to become ready From 4f500f82526d7c5d96dd3a5bfa0c2cd137b31e16 Mon Sep 17 00:00:00 2001 From: Lionel Villard Date: Thu, 12 Jan 2023 16:00:15 -0500 Subject: [PATCH 04/12] add e2e test --- pkg/cliplugins/workload/plugin/sync_test.go | 6 +++ pkg/cliplugins/workload/plugin/syncer.yaml | 6 +++ pkg/syncer/spec/dns/dns_process.go | 12 +++++- pkg/syncer/spec/dns/dns_process_test.go | 30 ++++++++----- pkg/syncer/spec/dns/networkpolicy_dns.yaml | 14 +++++- pkg/syncer/spec/dns/resources.go | 22 +++++++++- test/e2e/syncer/dns/dns_test.go | 48 +++++++++++++++++++++ 7 files changed, 124 insertions(+), 14 deletions(-) diff --git a/pkg/cliplugins/workload/plugin/sync_test.go b/pkg/cliplugins/workload/plugin/sync_test.go index fd27ad1b1bf..f5f09f6a5d6 100644 --- a/pkg/cliplugins/workload/plugin/sync_test.go +++ 
b/pkg/cliplugins/workload/plugin/sync_test.go @@ -60,6 +60,12 @@ rules: - "list" - "watch" - "delete" +- apiGroups: + - "" + resources: + - endpoints + verbs: + - "get" - apiGroups: - "apiextensions.k8s.io" resources: diff --git a/pkg/cliplugins/workload/plugin/syncer.yaml b/pkg/cliplugins/workload/plugin/syncer.yaml index 25bd500416b..1eca47fe619 100644 --- a/pkg/cliplugins/workload/plugin/syncer.yaml +++ b/pkg/cliplugins/workload/plugin/syncer.yaml @@ -34,6 +34,12 @@ rules: - "list" - "watch" - "delete" +- apiGroups: + - "" + resources: + - endpoints + verbs: + - "get" - apiGroups: - "apiextensions.k8s.io" resources: diff --git a/pkg/syncer/spec/dns/dns_process.go b/pkg/syncer/spec/dns/dns_process.go index 7a4cc6b83fb..4eab7e7f863 100644 --- a/pkg/syncer/spec/dns/dns_process.go +++ b/pkg/syncer/spec/dns/dns_process.go @@ -18,6 +18,7 @@ package dns import ( "context" + "errors" "sync" "github.com/kcp-dev/logicalcluster/v3" @@ -247,9 +248,18 @@ func (d *DNSProcessor) processService(ctx context.Context, name string) error { func (d *DNSProcessor) processNetworkPolicy(ctx context.Context, name string) error { logger := klog.FromContext(ctx) + var kubeEndpoints *corev1.Endpoints _, err := d.networkPolicyLister.NetworkPolicies(d.dnsNamespace).Get(name) if apierrors.IsNotFound(err) { - expected := MakeNetworkPolicy(name, d.dnsNamespace, d.syncTargetKey) + kubeEndpoints, err = d.downstreamKubeClient.CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{}) + if err != nil { + return err + } + if len(kubeEndpoints.Subsets) == 0 || len(kubeEndpoints.Subsets[0].Addresses) == 0 { + return errors.New("missing kubernetes API endpoints") + } + + expected := MakeNetworkPolicy(name, d.dnsNamespace, d.syncTargetKey, &kubeEndpoints.Subsets[0]) _, err = d.downstreamKubeClient.NetworkingV1().NetworkPolicies(d.dnsNamespace).Create(ctx, expected, metav1.CreateOptions{}) if err == nil { logger.Info("NetworkPolicy created") diff --git a/pkg/syncer/spec/dns/dns_process_test.go b/pkg/syncer/spec/dns/dns_process_test.go index 1f096178639..139b15300ed 100644 --- a/pkg/syncer/spec/dns/dns_process_test.go +++ b/pkg/syncer/spec/dns/dns_process_test.go @@ -46,6 +46,7 @@ var ( roleBindingGVR = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "rolebindings"} serviceGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"} deploymentGVR = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} + endpointGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"} networkPolicyGVR = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "networkpolicies"} ) @@ -96,7 +97,7 @@ func TestDNSProcess(t *testing.T) { MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), endpoints(dnsID, dnsns, "8.8.8.8"), - MakeNetworkPolicy(dnsID, dnsns, syncTargetKey), + MakeNetworkPolicy(dnsID, dnsns, syncTargetKey, &corev1.EndpointSubset{}), }, expectReady: true, expectActions: []clienttesting.Action{}, @@ -111,7 +112,7 @@ func TestDNSProcess(t *testing.T) { MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), endpoints(dnsID, dnsns, "8.8.8.8"), - MakeNetworkPolicy(dnsID, dnsns, syncTargetKey), + MakeNetworkPolicy(dnsID, dnsns, syncTargetKey, &corev1.EndpointSubset{}), }, expectReady: false, expectActions: []clienttesting.Action{ @@ -121,7 +122,9 @@ func TestDNSProcess(t *testing.T) { dnsImage: "newdnsimage", }, "endpoint does not exist, no DNS objects": { - 
resources: []runtime.Object{}, + resources: []runtime.Object{ + endpoints("kubernetes", "default", "10.0.0.0"), + }, expectReady: false, expectActions: []clienttesting.Action{ clienttesting.NewCreateAction(serviceAccountGVR, dnsns, MakeServiceAccount(dnsID, dnsns)), @@ -129,7 +132,10 @@ func TestDNSProcess(t *testing.T) { clienttesting.NewCreateAction(roleBindingGVR, dnsns, MakeRoleBinding(dnsID, dnsns)), clienttesting.NewCreateAction(deploymentGVR, dnsns, MakeDeployment(dnsID, dnsns, "dnsimage")), clienttesting.NewCreateAction(serviceGVR, dnsns, MakeService(dnsID, dnsns)), - clienttesting.NewCreateAction(networkPolicyGVR, dnsns, MakeNetworkPolicy(dnsID, dnsns, syncTargetKey)), + clienttesting.NewGetAction(endpointGVR, "default", "kubernetes"), + clienttesting.NewCreateAction(networkPolicyGVR, dnsns, MakeNetworkPolicy(dnsID, dnsns, syncTargetKey, &corev1.EndpointSubset{ + Addresses: []corev1.EndpointAddress{{IP: "10.0.0.0"}}, + })), }, initialized: true, dnsImage: "dnsimage", @@ -141,7 +147,7 @@ func TestDNSProcess(t *testing.T) { MakeRoleBinding(dnsID, dnsns), MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), - MakeNetworkPolicy(dnsID, dnsns, syncTargetKey), + MakeNetworkPolicy(dnsID, dnsns, syncTargetKey, &corev1.EndpointSubset{}), }, expectReady: false, expectActions: []clienttesting.Action{}, @@ -155,7 +161,7 @@ func TestDNSProcess(t *testing.T) { MakeRoleBinding(dnsID, dnsns), MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), - MakeNetworkPolicy(dnsID, dnsns, syncTargetKey), + MakeNetworkPolicy(dnsID, dnsns, syncTargetKey, &corev1.EndpointSubset{}), }, expectReady: false, expectActions: []clienttesting.Action{}, @@ -169,7 +175,7 @@ func TestDNSProcess(t *testing.T) { MakeRoleBinding(dnsID, dnsns), MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), - MakeNetworkPolicy(dnsID, dnsns, syncTargetKey), + MakeNetworkPolicy(dnsID, dnsns, syncTargetKey, &corev1.EndpointSubset{}), }, expectReady: false, expectActions: []clienttesting.Action{ @@ -278,10 +284,12 @@ func endpoints(name, namespace, ip string) *corev1.Endpoints { } if ip != "" { endpoint.Subsets = []corev1.EndpointSubset{ - {Addresses: []corev1.EndpointAddress{ - { - IP: ip, - }}}, + { + Addresses: []corev1.EndpointAddress{ + { + IP: ip, + }}, + }, } } return endpoint diff --git a/pkg/syncer/spec/dns/networkpolicy_dns.yaml b/pkg/syncer/spec/dns/networkpolicy_dns.yaml index 34e1cd06cc3..cb5a5db38a5 100644 --- a/pkg/syncer/spec/dns/networkpolicy_dns.yaml +++ b/pkg/syncer/spec/dns/networkpolicy_dns.yaml @@ -14,7 +14,7 @@ spec: - from: - namespaceSelector: matchLabels: - internal.workload.kcp.dev/cluster: Cluster + internal.workload.kcp.io/cluster: Cluster ports: - protocol: TCP port: 5353 @@ -26,8 +26,20 @@ spec: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: kube-system + - podSelector: + matchLabels: + k8s-app: kube-dns ports: - protocol: TCP port: 53 - protocol: UDP port: 53 + # Give access to the API server to watch its associated configmap + - to: + # one ipBlock per IP (dynamically filled) + - ipBlock: + cidr: APIServerIP + ports: + - protocol: TCP + port: 6443 + diff --git a/pkg/syncer/spec/dns/resources.go b/pkg/syncer/spec/dns/resources.go index a0ad83d7cec..e77b8d5f609 100644 --- a/pkg/syncer/spec/dns/resources.go +++ b/pkg/syncer/spec/dns/resources.go @@ -27,6 +27,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" 
"k8s.io/apimachinery/pkg/util/yaml" workloadv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/workload/v1alpha1" @@ -109,7 +110,7 @@ func MakeService(name, namespace string) *corev1.Service { return service } -func MakeNetworkPolicy(name, namespace, cluster string) *networkingv1.NetworkPolicy { +func MakeNetworkPolicy(name, namespace, cluster string, kubeEndpoints *corev1.EndpointSubset) *networkingv1.NetworkPolicy { np := networkPolicyTemplate.DeepCopy() np.Name = name @@ -117,6 +118,25 @@ func MakeNetworkPolicy(name, namespace, cluster string) *networkingv1.NetworkPol np.Spec.PodSelector.MatchLabels["app"] = name np.Spec.Ingress[0].From[0].NamespaceSelector.MatchLabels[workloadv1alpha1.InternalDownstreamClusterLabel] = cluster + to := make([]networkingv1.NetworkPolicyPeer, len(kubeEndpoints.Addresses)) + for i, endpoint := range kubeEndpoints.Addresses { + to[i] = networkingv1.NetworkPolicyPeer{ + IPBlock: &networkingv1.IPBlock{ + CIDR: endpoint.IP + "/32", + }, + } + } + np.Spec.Egress[1].To = to + + ports := make([]networkingv1.NetworkPolicyPort, len(kubeEndpoints.Ports)) + for i, port := range kubeEndpoints.Ports { + pport := intstr.FromInt(int(port.Port)) + ports[i].Port = &pport + pprotocol := port.Protocol + ports[i].Protocol = &pprotocol + } + np.Spec.Egress[1].Ports = ports + return np } diff --git a/test/e2e/syncer/dns/dns_test.go b/test/e2e/syncer/dns/dns_test.go index 6dfae34aa0f..81669a018fe 100644 --- a/test/e2e/syncer/dns/dns_test.go +++ b/test/e2e/syncer/dns/dns_test.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/retry" "github.com/kcp-dev/kcp/test/e2e/framework" "github.com/kcp-dev/kcp/test/e2e/syncer/dns/workspace1" @@ -91,6 +92,18 @@ func TestDNSResolution(t *testing.T) { downstreamWS2NS1 := syncer.DownstreamNamespaceFor(t, logicalcluster.Name(workloadWorkspace2.Spec.Cluster), "dns-ws2-ns1") t.Logf("Downstream namespace 1 in workspace 2 is %s", downstreamWS2NS1) + t.Log("Checking network policies have been created") + framework.Eventually(t, func() (success bool, reason string) { + np, err := downstreamKubeClient.NetworkingV1().NetworkPolicies(syncer.SyncerID).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, fmt.Sprintf("error while getting network policies: %v\n", err) + } + if len(np.Items) != 2 { + return false, fmt.Sprintf("expecting 2 network policies, got: %d\n", len(np.Items)) + } + return true, "" + }, wait.ForeverTestTimeout, time.Millisecond*500, "Network policies haven't been created") + t.Log("Checking fully qualified DNS name resolves") framework.Eventually(t, checkLogs(ctx, t, downstreamKubeClient, downstreamWS1NS1, "ping-fully-qualified", "PING svc.dns-ws1-ns1.svc.cluster.local ("), wait.ForeverTestTimeout, time.Millisecond*500, "Service name was not resolved") @@ -106,6 +119,41 @@ func TestDNSResolution(t *testing.T) { t.Log("Checking DNS name does not resolve across workspaces") framework.Eventually(t, checkLogs(ctx, t, downstreamKubeClient, downstreamWS2NS1, "ping-fully-qualified-fail", "ping: bad"), wait.ForeverTestTimeout, time.Millisecond*500, "Service name was resolved") + + t.Log("Change ping-fully-qualified deployment DNS config to use workspace 2 nameserver") + dnsServices, err := downstreamKubeClient.CoreV1().Services(syncer.SyncerID).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + require.True(t, len(dnsServices.Items) >= 2) + + deployment, err := 
downstreamKubeClient.AppsV1().Deployments(downstreamWS1NS1).Get(ctx, "ping-fully-qualified", metav1.GetOptions{}) + require.NoError(t, err) + + existingDNSIP := deployment.Spec.Template.Spec.DNSConfig.Nameservers[0] + newDNSIP := "" + for _, svc := range dnsServices.Items { + if strings.HasPrefix(svc.Name, "kcp-dns-") { + if svc.Spec.ClusterIP != existingDNSIP { + newDNSIP = svc.Spec.ClusterIP + break + } + } + } + require.NotEmpty(t, newDNSIP, "could not find another DNS service") + deployment.Spec.Template.Spec.DNSConfig.Nameservers[0] = newDNSIP + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + deployment, err := downstreamKubeClient.AppsV1().Deployments(downstreamWS1NS1).Get(ctx, "ping-fully-qualified", metav1.GetOptions{}) + if err != nil { + return err + } + + deployment.Spec.Template.Spec.DNSConfig.Nameservers[0] = newDNSIP + _, err = downstreamKubeClient.AppsV1().Deployments(downstreamWS1NS1).Update(ctx, deployment, metav1.UpdateOptions{}) + return err + }) + require.NoError(t, err) + + framework.Eventually(t, checkLogs(ctx, t, downstreamKubeClient, downstreamWS1NS1, "ping-fully-qualified", "ping: bad"), + wait.ForeverTestTimeout, time.Millisecond*500, "Service name was still not resolved") } func checkLogs(ctx context.Context, t *testing.T, downstreamKubeClient *kubernetes.Clientset, downstreamNamespace, containerName, expectedPrefix string) func() (success bool, reason string) { From 8a824ebccc7389629cfb952392a80c02f9880183 Mon Sep 17 00:00:00 2001 From: Lionel Villard Date: Fri, 13 Jan 2023 12:48:44 -0500 Subject: [PATCH 05/12] cluster test requires kind --- pkg/cliplugins/workload/plugin/sync_test.go | 6 ++++++ test/e2e/reconciler/cluster/controller_test.go | 6 +++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/pkg/cliplugins/workload/plugin/sync_test.go b/pkg/cliplugins/workload/plugin/sync_test.go index f5f09f6a5d6..7f027a441b1 100644 --- a/pkg/cliplugins/workload/plugin/sync_test.go +++ b/pkg/cliplugins/workload/plugin/sync_test.go @@ -304,6 +304,12 @@ rules: - "list" - "watch" - "delete" +- apiGroups: + - "" + resources: + - endpoints + verbs: + - "get" - apiGroups: - "apiextensions.k8s.io" resources: diff --git a/test/e2e/reconciler/cluster/controller_test.go b/test/e2e/reconciler/cluster/controller_test.go index d3b119b1773..ce7716fae65 100644 --- a/test/e2e/reconciler/cluster/controller_test.go +++ b/test/e2e/reconciler/cluster/controller_test.go @@ -53,7 +53,11 @@ const sourceClusterName, sinkClusterName = "source", "sink" func TestClusterController(t *testing.T) { t.Parallel() - framework.Suite(t, "transparent-multi-cluster") + framework.Suite(t, "transparent-multi-cluster:requires-kind") + + if len(framework.TestConfig.PClusterKubeconfig()) == 0 { + t.Skip("Test requires a pcluster") + } type runningServer struct { client wildwestv1alpha1client.WildwestV1alpha1Interface From 6016ada785896912e9b6da6ae35c2d0926d9c935 Mon Sep 17 00:00:00 2001 From: Lionel Villard Date: Thu, 19 Jan 2023 11:29:30 -0500 Subject: [PATCH 06/12] add tenant-id label and use it in network policies --- pkg/syncer/shared/helpers.go | 24 +++++++++++++++ pkg/syncer/shared/namespace.go | 1 + pkg/syncer/spec/dns/dns_process.go | 20 ++++++------ pkg/syncer/spec/dns/dns_process_test.go | 36 +++++++++++++--------- pkg/syncer/spec/dns/networkpolicy_dns.yaml | 2 +- pkg/syncer/spec/dns/resources.go | 6 ++-- pkg/syncer/spec/spec_controller.go | 2 +- pkg/syncer/spec/spec_process.go | 8 ++++- pkg/syncer/spec/spec_process_test.go | 4 +++ 9 files changed, 73 insertions(+), 30 
deletions(-) diff --git a/pkg/syncer/shared/helpers.go b/pkg/syncer/shared/helpers.go index 79aafe80bff..c3eed6488e1 100644 --- a/pkg/syncer/shared/helpers.go +++ b/pkg/syncer/shared/helpers.go @@ -18,7 +18,9 @@ package shared import ( "crypto/sha256" + "encoding/json" "fmt" + "math/big" "strings" "github.com/kcp-dev/logicalcluster/v3" @@ -72,6 +74,28 @@ func GetDNSID(clusterName logicalcluster.Name, syncTargetUID types.UID, syncTarg return fmt.Sprintf("kcp-dns-%s-%s-%s", syncTargetName, uid36hash[:8], workspace36hash[:8]) } +// GetTenantID encodes the KCP tenant to which the namespace designated by the given +// NamespaceLocator belongs. It is based on the NamespaceLocator, but with an empty +// namespace value. The value will be the same for all downstream namespaces originating +// from the same KCP workspace / SyncTarget. +// The encoding is repeatable. +func GetTenantID(l NamespaceLocator) (string, error) { + clusterWideLocator := NamespaceLocator{ + SyncTarget: l.SyncTarget, + ClusterName: l.ClusterName, + } + + b, err := json.Marshal(clusterWideLocator) + if err != nil { + return "", err + } + + hash := sha256.Sum224(b) + var i big.Int + i.SetBytes(hash[:]) + return i.Text(62), nil +} + func ContainsGVR(gvrs []schema.GroupVersionResource, gvr schema.GroupVersionResource) bool { for _, item := range gvrs { if gvr == item { diff --git a/pkg/syncer/shared/namespace.go b/pkg/syncer/shared/namespace.go index 03450507b4a..acc114a46b7 100644 --- a/pkg/syncer/shared/namespace.go +++ b/pkg/syncer/shared/namespace.go @@ -30,6 +30,7 @@ import ( const ( NamespaceLocatorAnnotation = "kcp.io/namespace-locator" + TenantIDLabel = "kcp.io/tenant-id" ) // NamespaceLocator stores a logical cluster and namespace and is used diff --git a/pkg/syncer/spec/dns/dns_process.go b/pkg/syncer/spec/dns/dns_process.go index 4eab7e7f863..a68df27aeed 100644 --- a/pkg/syncer/spec/dns/dns_process.go +++ b/pkg/syncer/spec/dns/dns_process.go @@ -21,8 +21,6 @@ import ( "errors" "sync" - "github.com/kcp-dev/logicalcluster/v3" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -51,7 +49,6 @@ type DNSProcessor struct { syncTargetUID types.UID syncTargetName string - syncTargetKey string dnsNamespace string // namespace containing all DNS objects dnsImage string @@ -70,7 +67,6 @@ func NewDNSProcessor( networkPolicyLister listersnetworkingv1.NetworkPolicyLister, syncTargetUID types.UID, syncTargetName string, - syncTargetKey string, dnsNamespace string, dnsImage string) *DNSProcessor { return &DNSProcessor{ @@ -84,7 +80,6 @@ func NewDNSProcessor( networkPolicyLister: networkPolicyLister, syncTargetUID: syncTargetUID, syncTargetName: syncTargetName, - syncTargetKey: syncTargetKey, dnsNamespace: dnsNamespace, dnsImage: dnsImage, } @@ -95,11 +90,11 @@ func NewDNSProcessor( // are effectively reachable through the Service. // It returns true if the DNS is setup and reachable, and returns an error if there was an error // during the check or creation of the DNS-related resources. 
-func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logicalcluster.Name) (bool, error) { +func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, namespaceLocator shared.NamespaceLocator) (bool, error) { logger := klog.FromContext(ctx) logger = logger.WithName("dns") - dnsID := shared.GetDNSID(workspace, d.syncTargetUID, d.syncTargetName) + dnsID := shared.GetDNSID(namespaceLocator.ClusterName, d.syncTargetUID, d.syncTargetName) logger = logger.WithValues("name", dnsID, "namespace", d.dnsNamespace) logger.V(4).Info("checking if all dns objects exist and are up-to-date") @@ -140,7 +135,7 @@ func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, workspace logica if err := d.processService(ctx, dnsID); err != nil { return false, err } - if err := d.processNetworkPolicy(ctx, dnsID); err != nil { + if err := d.processNetworkPolicy(ctx, dnsID, namespaceLocator); err != nil { return false, err } @@ -245,7 +240,7 @@ func (d *DNSProcessor) processService(ctx context.Context, name string) error { return nil } -func (d *DNSProcessor) processNetworkPolicy(ctx context.Context, name string) error { +func (d *DNSProcessor) processNetworkPolicy(ctx context.Context, name string, namespaceLocator shared.NamespaceLocator) error { logger := klog.FromContext(ctx) var kubeEndpoints *corev1.Endpoints @@ -259,7 +254,12 @@ func (d *DNSProcessor) processNetworkPolicy(ctx context.Context, name string) er return errors.New("missing kubernetes API endpoints") } - expected := MakeNetworkPolicy(name, d.dnsNamespace, d.syncTargetKey, &kubeEndpoints.Subsets[0]) + tenantID, err := shared.GetTenantID(namespaceLocator) + if err != nil { + return err + } + + expected := MakeNetworkPolicy(name, d.dnsNamespace, tenantID, &kubeEndpoints.Subsets[0]) _, err = d.downstreamKubeClient.NetworkingV1().NetworkPolicies(d.dnsNamespace).Create(ctx, expected, metav1.CreateOptions{}) if err == nil { logger.Info("NetworkPolicy created") diff --git a/pkg/syncer/spec/dns/dns_process_test.go b/pkg/syncer/spec/dns/dns_process_test.go index 139b15300ed..29516a47704 100644 --- a/pkg/syncer/spec/dns/dns_process_test.go +++ b/pkg/syncer/spec/dns/dns_process_test.go @@ -24,6 +24,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/kcp-dev/logicalcluster/v3" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -35,7 +36,6 @@ import ( kubefake "k8s.io/client-go/kubernetes/fake" clienttesting "k8s.io/client-go/testing" - workloadv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/workload/v1alpha1" "github.com/kcp-dev/kcp/pkg/syncer/shared" ) @@ -58,9 +58,14 @@ func init() { func TestDNSProcess(t *testing.T) { clusterName := logicalcluster.Name("root") + syncTargetClusterName := logicalcluster.Name("targetclustername") syncTargetUID := types.UID("targetuid") syncTargetName := "targetname" - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(clusterName, syncTargetName) + + locator := shared.NewNamespaceLocator(clusterName, syncTargetClusterName, syncTargetUID, syncTargetName, "") + tenantID, err := shared.GetTenantID(locator) + require.NoError(t, err) + dnsID := shared.GetDNSID(clusterName, syncTargetUID, syncTargetName) dnsns := "dnsns" @@ -97,7 +102,7 @@ func TestDNSProcess(t *testing.T) { MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), endpoints(dnsID, dnsns, "8.8.8.8"), - MakeNetworkPolicy(dnsID, dnsns, syncTargetKey, &corev1.EndpointSubset{}), + MakeNetworkPolicy(dnsID, dnsns, tenantID, &corev1.EndpointSubset{}), 
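			// The expected NetworkPolicy fixture is keyed by the tenant ID computed above via
			// shared.GetTenantID(locator), rather than by the sync target key.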
}, expectReady: true, expectActions: []clienttesting.Action{}, @@ -112,7 +117,7 @@ func TestDNSProcess(t *testing.T) { MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), endpoints(dnsID, dnsns, "8.8.8.8"), - MakeNetworkPolicy(dnsID, dnsns, syncTargetKey, &corev1.EndpointSubset{}), + MakeNetworkPolicy(dnsID, dnsns, tenantID, &corev1.EndpointSubset{}), }, expectReady: false, expectActions: []clienttesting.Action{ @@ -133,7 +138,7 @@ func TestDNSProcess(t *testing.T) { clienttesting.NewCreateAction(deploymentGVR, dnsns, MakeDeployment(dnsID, dnsns, "dnsimage")), clienttesting.NewCreateAction(serviceGVR, dnsns, MakeService(dnsID, dnsns)), clienttesting.NewGetAction(endpointGVR, "default", "kubernetes"), - clienttesting.NewCreateAction(networkPolicyGVR, dnsns, MakeNetworkPolicy(dnsID, dnsns, syncTargetKey, &corev1.EndpointSubset{ + clienttesting.NewCreateAction(networkPolicyGVR, dnsns, MakeNetworkPolicy(dnsID, dnsns, tenantID, &corev1.EndpointSubset{ Addresses: []corev1.EndpointAddress{{IP: "10.0.0.0"}}, })), }, @@ -147,7 +152,7 @@ func TestDNSProcess(t *testing.T) { MakeRoleBinding(dnsID, dnsns), MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), - MakeNetworkPolicy(dnsID, dnsns, syncTargetKey, &corev1.EndpointSubset{}), + MakeNetworkPolicy(dnsID, dnsns, tenantID, &corev1.EndpointSubset{}), }, expectReady: false, expectActions: []clienttesting.Action{}, @@ -161,7 +166,7 @@ func TestDNSProcess(t *testing.T) { MakeRoleBinding(dnsID, dnsns), MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), - MakeNetworkPolicy(dnsID, dnsns, syncTargetKey, &corev1.EndpointSubset{}), + MakeNetworkPolicy(dnsID, dnsns, tenantID, &corev1.EndpointSubset{}), }, expectReady: false, expectActions: []clienttesting.Action{}, @@ -175,7 +180,7 @@ func TestDNSProcess(t *testing.T) { MakeRoleBinding(dnsID, dnsns), MakeService(dnsID, dnsns), MakeDeployment(dnsID, dnsns, "dnsimage"), - MakeNetworkPolicy(dnsID, dnsns, syncTargetKey, &corev1.EndpointSubset{}), + MakeNetworkPolicy(dnsID, dnsns, tenantID, &corev1.EndpointSubset{}), }, expectReady: false, expectActions: []clienttesting.Action{ @@ -203,7 +208,7 @@ func TestDNSProcess(t *testing.T) { networkPolicyLister := informerFactory.Networking().V1().NetworkPolicies().Lister() controller := NewDNSProcessor(kubeClient, serviceAccountLister, roleLister, roleBindingLister, - deploymentLister, serviceLister, endpointLister, networkPolicyLister, syncTargetUID, syncTargetName, syncTargetKey, + deploymentLister, serviceLister, endpointLister, networkPolicyLister, syncTargetUID, syncTargetName, dnsns, tc.dnsImage) controller.initialized.Store(dnsID, tc.initialized) @@ -213,7 +218,7 @@ func TestDNSProcess(t *testing.T) { kubeClient.ClearActions() - ready, err := controller.EnsureDNSUpAndReady(ctx, clusterName) + ready, err := controller.EnsureDNSUpAndReady(ctx, locator) assert.NoError(t, err) assert.Empty(t, cmp.Diff(tc.expectReady, ready)) @@ -223,14 +228,17 @@ func TestDNSProcess(t *testing.T) { } func TestMultipleDNSInitialization(t *testing.T) { + syncTargetClusterName := logicalcluster.Name("targetclustername") syncTargetUID := types.UID("targetuid") syncTargetName := "targetname" - syncTargetKey := workloadv1alpha1.ToSyncTargetKey("root", syncTargetName) dnsns := "dnsns" clusterName1 := logicalcluster.Name("root1") clusterName2 := logicalcluster.Name("root2") + locator1 := shared.NewNamespaceLocator(clusterName1, syncTargetClusterName, syncTargetUID, syncTargetName, "") + locator2 := shared.NewNamespaceLocator(clusterName2, 
syncTargetClusterName, syncTargetUID, syncTargetName, "") + dnsID1 := shared.GetDNSID(clusterName1, syncTargetUID, syncTargetName) dnsID2 := shared.GetDNSID(clusterName2, syncTargetUID, syncTargetName) @@ -252,13 +260,13 @@ func TestMultipleDNSInitialization(t *testing.T) { networkPolicyLister := informerFactory.Networking().V1().NetworkPolicies().Lister() controller := NewDNSProcessor(kubeClient, serviceAccountLister, roleLister, roleBindingLister, - deploymentLister, serviceLister, endpointLister, networkPolicyLister, syncTargetUID, syncTargetName, syncTargetKey, + deploymentLister, serviceLister, endpointLister, networkPolicyLister, syncTargetUID, syncTargetName, dnsns, "animage") informerFactory.Start(ctx.Done()) informerFactory.WaitForCacheSync(ctx.Done()) - ready, err := controller.EnsureDNSUpAndReady(ctx, clusterName1) + ready, err := controller.EnsureDNSUpAndReady(ctx, locator1) assert.NoError(t, err) assert.True(t, ready) init1, _ := controller.initialized.Load(dnsID1) @@ -266,7 +274,7 @@ func TestMultipleDNSInitialization(t *testing.T) { init2, _ := controller.initialized.Load(dnsID2) assert.Nil(t, init2) - ready, err = controller.EnsureDNSUpAndReady(ctx, clusterName2) + ready, err = controller.EnsureDNSUpAndReady(ctx, locator2) assert.NoError(t, err) assert.True(t, ready) init1, _ = controller.initialized.Load(dnsID1) diff --git a/pkg/syncer/spec/dns/networkpolicy_dns.yaml b/pkg/syncer/spec/dns/networkpolicy_dns.yaml index cb5a5db38a5..a388e383ad4 100644 --- a/pkg/syncer/spec/dns/networkpolicy_dns.yaml +++ b/pkg/syncer/spec/dns/networkpolicy_dns.yaml @@ -14,7 +14,7 @@ spec: - from: - namespaceSelector: matchLabels: - internal.workload.kcp.io/cluster: Cluster + kcp.io/tenant-id: TenantID ports: - protocol: TCP port: 5353 diff --git a/pkg/syncer/spec/dns/resources.go b/pkg/syncer/spec/dns/resources.go index e77b8d5f609..c191b1588cd 100644 --- a/pkg/syncer/spec/dns/resources.go +++ b/pkg/syncer/spec/dns/resources.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/yaml" - workloadv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/workload/v1alpha1" + "github.com/kcp-dev/kcp/pkg/syncer/shared" ) //go:embed *.yaml @@ -110,13 +110,13 @@ func MakeService(name, namespace string) *corev1.Service { return service } -func MakeNetworkPolicy(name, namespace, cluster string, kubeEndpoints *corev1.EndpointSubset) *networkingv1.NetworkPolicy { +func MakeNetworkPolicy(name, namespace, tenantID string, kubeEndpoints *corev1.EndpointSubset) *networkingv1.NetworkPolicy { np := networkPolicyTemplate.DeepCopy() np.Name = name np.Namespace = namespace np.Spec.PodSelector.MatchLabels["app"] = name - np.Spec.Ingress[0].From[0].NamespaceSelector.MatchLabels[workloadv1alpha1.InternalDownstreamClusterLabel] = cluster + np.Spec.Ingress[0].From[0].NamespaceSelector.MatchLabels[shared.TenantIDLabel] = tenantID to := make([]networkingv1.NetworkPolicyPeer, len(kubeEndpoints.Addresses)) for i, endpoint := range kubeEndpoints.Addresses { diff --git a/pkg/syncer/spec/spec_controller.go b/pkg/syncer/spec/spec_controller.go index ce811a7a28d..3e3c8a47ce1 100644 --- a/pkg/syncer/spec/spec_controller.go +++ b/pkg/syncer/spec/spec_controller.go @@ -270,7 +270,7 @@ func NewSpecSyncer(syncerLogger logr.Logger, syncTargetClusterName logicalcluste dnsServiceLister, syncerNamespaceInformerFactory.Core().V1().Endpoints().Lister(), syncerNamespaceInformerFactory.Networking().V1().NetworkPolicies().Lister(), - syncTargetUID, syncTargetName, syncTargetKey, dnsNamespace, dnsImage) + 
syncTargetUID, syncTargetName, dnsNamespace, dnsImage) return &c, nil } diff --git a/pkg/syncer/spec/spec_process.go b/pkg/syncer/spec/spec_process.go index a7b52b7a724..1211b432f25 100644 --- a/pkg/syncer/spec/spec_process.go +++ b/pkg/syncer/spec/spec_process.go @@ -206,7 +206,7 @@ func (c *Controller) process(ctx context.Context, gvr schema.GroupVersionResourc } // Make sure the DNS nameserver for the current workspace is up and running - if dnsUpAndReady, err := c.dnsProcessor.EnsureDNSUpAndReady(ctx, clusterName); err != nil { + if dnsUpAndReady, err := c.dnsProcessor.EnsureDNSUpAndReady(ctx, desiredNSLocator); err != nil { logger.Error(err, "failed to check DNS nameserver is up and running (retrying)") return nil, err } else if !dnsUpAndReady && gvr == deploymentGVR { @@ -257,10 +257,16 @@ func (c *Controller) ensureDownstreamNamespaceExists(ctx context.Context, downst shared.NamespaceLocatorAnnotation: string(b), }) + desiredTenantID, err := shared.GetTenantID(desiredNSLocator) + if err != nil { + return err + } + if upstreamObj.GetLabels() != nil { newNamespace.SetLabels(map[string]string{ // TODO: this should be set once at syncer startup and propagated around everywhere. workloadv1alpha1.InternalDownstreamClusterLabel: c.syncTargetKey, + shared.TenantIDLabel: desiredTenantID, }) } diff --git a/pkg/syncer/spec/spec_process_test.go b/pkg/syncer/spec/spec_process_test.go index 4de023e2ce7..07eb51857eb 100644 --- a/pkg/syncer/spec/spec_process_test.go +++ b/pkg/syncer/spec/spec_process_test.go @@ -600,6 +600,7 @@ func TestSpecSyncerProcess(t *testing.T) { toUnstructured(t, namespace("kcp-33jbiactwhg0", "", map[string]string{ "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", + "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", }, map[string]string{ "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, @@ -647,6 +648,7 @@ func TestSpecSyncerProcess(t *testing.T) { toUnstructured(t, namespace("kcp-33jbiactwhg0", "", map[string]string{ "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", + "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", }, map[string]string{ "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, @@ -940,6 +942,7 @@ func TestSpecSyncerProcess(t *testing.T) { toUnstructured(t, namespace("kcp-33jbiactwhg0", "", map[string]string{ "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", + "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", }, map[string]string{ "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, @@ -1049,6 +1052,7 @@ func TestSpecSyncerProcess(t *testing.T) { toResources: []runtime.Object{ namespace("kcp-01c0zzvlqsi7n", "", map[string]string{ "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", + "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", }, map[string]string{ "kcp.io/namespace-locator": `{"syncTarget":{"path":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, From 5d0246d258b737f1a843875d22da85f08e838d7b Mon Sep 17 00:00:00 2001 From: Lionel Villard Date: Mon, 30 Jan 2023 14:57:49 -0500 Subject: [PATCH 07/12] check tenantid is set and correct (when upgrading 
kcp) --- pkg/syncer/spec/spec_process.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/syncer/spec/spec_process.go b/pkg/syncer/spec/spec_process.go index 1211b432f25..54c8faa8028 100644 --- a/pkg/syncer/spec/spec_process.go +++ b/pkg/syncer/spec/spec_process.go @@ -304,6 +304,13 @@ func (c *Controller) ensureDownstreamNamespaceExists(ctx context.Context, downst return fmt.Errorf("(namespace collision) namespace %s already exists, but has a different namespace locator annotation: %+v vs %+v", newNamespace.GetName(), nsLocator, desiredNSLocator) } + // Handle kcp upgrades by checking the tenant ID is set and correct + if tenantID, ok := unstrNamespace.GetLabels()[shared.TenantIDLabel]; !ok || tenantID != desiredTenantID { + unstrNamespace.GetLabels()[shared.TenantIDLabel] = desiredTenantID + _, err := namespaces.Update(ctx, unstrNamespace, metav1.UpdateOptions{}) + return err + } + return nil } From b8c26170cae45a2e5f42a8145f82535d6863abcc Mon Sep 17 00:00:00 2001 From: Lionel Villard Date: Mon, 30 Jan 2023 16:35:31 -0500 Subject: [PATCH 08/12] reenable test against fake cluster --- test/e2e/reconciler/cluster/controller_test.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/test/e2e/reconciler/cluster/controller_test.go b/test/e2e/reconciler/cluster/controller_test.go index ce7716fae65..d3b119b1773 100644 --- a/test/e2e/reconciler/cluster/controller_test.go +++ b/test/e2e/reconciler/cluster/controller_test.go @@ -53,11 +53,7 @@ const sourceClusterName, sinkClusterName = "source", "sink" func TestClusterController(t *testing.T) { t.Parallel() - framework.Suite(t, "transparent-multi-cluster:requires-kind") - - if len(framework.TestConfig.PClusterKubeconfig()) == 0 { - t.Skip("Test requires a pcluster") - } + framework.Suite(t, "transparent-multi-cluster") type runningServer struct { client wildwestv1alpha1client.WildwestV1alpha1Interface From fc683ba22936c8474aaa56ef49e19eba3c20c1f8 Mon Sep 17 00:00:00 2001 From: Lionel Villard Date: Mon, 30 Jan 2023 16:47:51 -0500 Subject: [PATCH 09/12] better error message --- test/e2e/syncer/dns/dns_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/syncer/dns/dns_test.go b/test/e2e/syncer/dns/dns_test.go index 81669a018fe..af1a5be1a75 100644 --- a/test/e2e/syncer/dns/dns_test.go +++ b/test/e2e/syncer/dns/dns_test.go @@ -120,7 +120,7 @@ func TestDNSResolution(t *testing.T) { framework.Eventually(t, checkLogs(ctx, t, downstreamKubeClient, downstreamWS2NS1, "ping-fully-qualified-fail", "ping: bad"), wait.ForeverTestTimeout, time.Millisecond*500, "Service name was resolved") - t.Log("Change ping-fully-qualified deployment DNS config to use workspace 2 nameserver") + t.Log("Change ping-fully-qualified deployment DNS config to use workspace 2 nameserver and check the DNS name does not resolve") dnsServices, err := downstreamKubeClient.CoreV1().Services(syncer.SyncerID).List(ctx, metav1.ListOptions{}) require.NoError(t, err) require.True(t, len(dnsServices.Items) >= 2) @@ -153,7 +153,7 @@ func TestDNSResolution(t *testing.T) { require.NoError(t, err) framework.Eventually(t, checkLogs(ctx, t, downstreamKubeClient, downstreamWS1NS1, "ping-fully-qualified", "ping: bad"), - wait.ForeverTestTimeout, time.Millisecond*500, "Service name was still not resolved") + wait.ForeverTestTimeout, time.Millisecond*500, "Service name was resolved") } func checkLogs(ctx context.Context, t *testing.T, downstreamKubeClient *kubernetes.Clientset, downstreamNamespace, containerName, expectedPrefix string) 
func() (success bool, reason string) { From 1e47c2bcbed5390dff8da5c3fba4eac61cfe64e1 Mon Sep 17 00:00:00 2001 From: Lionel Villard Date: Wed, 1 Feb 2023 11:58:13 -0500 Subject: [PATCH 10/12] create kubernetes endpoint in fake cluster. --- test/e2e/framework/kcp.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/test/e2e/framework/kcp.go b/test/e2e/framework/kcp.go index 24bc970e739..aad1b9cb62f 100644 --- a/test/e2e/framework/kcp.go +++ b/test/e2e/framework/kcp.go @@ -40,6 +40,7 @@ import ( "github.com/spf13/pflag" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -916,6 +917,23 @@ func NewFakeWorkloadServer(t *testing.T, server RunningServer, org logicalcluste return true }, wait.ForeverTestTimeout, time.Millisecond*100) + // Install the kubernetes endpoint in the default namespace. The DNS network policies reference this endpoint. + require.Eventually(t, func() bool { + _, err = kubeClient.CoreV1().Endpoints("default").Create(ctx, &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubernetes", + }, + Subsets: []corev1.EndpointSubset{{ + Addresses: []corev1.EndpointAddress{{IP: "172.19.0.2:6443"}}, + }}, + }, metav1.CreateOptions{}) + if err != nil { + t.Logf("failed to create the kubernetes endpoint: %v", err) + return false + } + return true + }, wait.ForeverTestTimeout, time.Millisecond*100) + return fakeServer } From 81870cb405c9e92a32521de5c8e5ced903dde634 Mon Sep 17 00:00:00 2001 From: Lionel Villard Date: Wed, 1 Feb 2023 14:03:17 -0500 Subject: [PATCH 11/12] fix unit test --- pkg/syncer/spec/spec_process_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/syncer/spec/spec_process_test.go b/pkg/syncer/spec/spec_process_test.go index 07eb51857eb..ed08aa9127a 100644 --- a/pkg/syncer/spec/spec_process_test.go +++ b/pkg/syncer/spec/spec_process_test.go @@ -726,6 +726,7 @@ func TestSpecSyncerProcess(t *testing.T) { toResources: []runtime.Object{ namespace("kcp-33jbiactwhg0", "", map[string]string{ "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", + "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", }, map[string]string{ "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, @@ -775,6 +776,7 @@ func TestSpecSyncerProcess(t *testing.T) { toResources: []runtime.Object{ namespace("kcp-33jbiactwhg0", "", map[string]string{ "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", + "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", }, map[string]string{ "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, @@ -833,6 +835,7 @@ func TestSpecSyncerProcess(t *testing.T) { toResources: []runtime.Object{ namespace("kcp-33jbiactwhg0", "", map[string]string{ "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", + "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", }, map[string]string{ "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, From 656ad4db746bca128e6223adffe0b384f3f7e8b0 Mon Sep 17 00:00:00 2001 From: Lionel Villard Date: Wed, 8 Feb 2023 10:31:33 -0500 Subject: 
[PATCH 12/12] changes after review --- pkg/syncer/spec/spec_process.go | 16 ++--- pkg/syncer/spec/spec_process_test.go | 100 +++++++++++++++++++++++---- pkg/syncer/syncer.go | 1 - 3 files changed, 95 insertions(+), 22 deletions(-) diff --git a/pkg/syncer/spec/spec_process.go b/pkg/syncer/spec/spec_process.go index 54c8faa8028..15b789ce7de 100644 --- a/pkg/syncer/spec/spec_process.go +++ b/pkg/syncer/spec/spec_process.go @@ -262,13 +262,11 @@ func (c *Controller) ensureDownstreamNamespaceExists(ctx context.Context, downst return err } - if upstreamObj.GetLabels() != nil { - newNamespace.SetLabels(map[string]string{ - // TODO: this should be set once at syncer startup and propagated around everywhere. - workloadv1alpha1.InternalDownstreamClusterLabel: c.syncTargetKey, - shared.TenantIDLabel: desiredTenantID, - }) - } + newNamespace.SetLabels(map[string]string{ + // TODO: this should be set once at syncer startup and propagated around everywhere. + workloadv1alpha1.InternalDownstreamClusterLabel: c.syncTargetKey, + shared.TenantIDLabel: desiredTenantID, + }) namespaceLister, err := c.getDownstreamLister(namespaceGVR) if err != nil { @@ -306,7 +304,9 @@ func (c *Controller) ensureDownstreamNamespaceExists(ctx context.Context, downst // Handle kcp upgrades by checking the tenant ID is set and correct if tenantID, ok := unstrNamespace.GetLabels()[shared.TenantIDLabel]; !ok || tenantID != desiredTenantID { - unstrNamespace.GetLabels()[shared.TenantIDLabel] = desiredTenantID + labels := unstrNamespace.GetLabels() + labels[shared.TenantIDLabel] = desiredTenantID + unstrNamespace.SetLabels(labels) _, err := namespaces.Update(ctx, unstrNamespace, metav1.UpdateOptions{}) return err } diff --git a/pkg/syncer/spec/spec_process_test.go b/pkg/syncer/spec/spec_process_test.go index ed08aa9127a..da95a855708 100644 --- a/pkg/syncer/spec/spec_process_test.go +++ b/pkg/syncer/spec/spec_process_test.go @@ -1052,11 +1052,21 @@ func TestSpecSyncerProcess(t *testing.T) { "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", }, nil), gvr: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, + fromResources: []runtime.Object{ + secretWithFinalizers("foo", "test", "root:org:ws", + map[string]string{ + "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", + "something": "else"}, + nil, + []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}, + map[string][]byte{ + "a": []byte("b"), + }), + }, toResources: []runtime.Object{ namespace("kcp-01c0zzvlqsi7n", "", map[string]string{ "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", - }, + "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7"}, map[string]string{ "kcp.io/namespace-locator": `{"syncTarget":{"path":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, }), @@ -1073,17 +1083,6 @@ func TestSpecSyncerProcess(t *testing.T) { service("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), endpoints("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), }, - fromResources: []runtime.Object{ - secretWithFinalizers("foo", "test", "root:org:ws", - map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - "something": "else"}, - nil, - []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}, - map[string][]byte{ - "a": []byte("b"), - }), - }, resourceToProcessLogicalClusterName: 
"root:org:ws", resourceToProcessName: "foo", syncTargetName: "us-west1", @@ -1098,6 +1097,66 @@ func TestSpecSyncerProcess(t *testing.T) { ), }, }, + "tenant label is added to existing downstream namespaces": { + upstreamLogicalCluster: "root:org:ws", + fromNamespace: namespace("test", "root:org:ws", map[string]string{ + "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", + }, nil), + gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, + fromResources: []runtime.Object{ + secret("default-token-abc", "test", "root:org:ws", + map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, + map[string]string{"kubernetes.io/service-account.name": "default"}, + map[string][]byte{ + "token": []byte("token"), + "namespace": []byte("namespace"), + }), + deployment("theDeployment", "test", "root:org:ws", map[string]string{ + "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", + }, nil, []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), + }, + toResources: []runtime.Object{ + namespace("kcp-01c0zzvlqsi7n", "", map[string]string{ + "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}, + map[string]string{ + "kcp.io/namespace-locator": `{"syncTarget":{"path":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, + }), + dns.MakeServiceAccount("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), + dns.MakeRole("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), + dns.MakeRoleBinding("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), + dns.MakeDeployment("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n", "dnsimage"), + service("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), + endpoints("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), + }, + resourceToProcessLogicalClusterName: "root:org:ws", + resourceToProcessName: "theDeployment", + syncTargetName: "us-west1", + + expectActionsOnFrom: []kcptesting.Action{}, + expectActionsOnTo: []clienttesting.Action{ + updateNamespaceAction("kcp-01c0zzvlqsi7n", + toUnstructured(t, namespace("kcp-01c0zzvlqsi7n", "", map[string]string{ + "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", + "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7"}, + map[string]string{ + "kcp.io/namespace-locator": `{"syncTarget":{"path":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, + }))), + patchDeploymentSingleClusterAction( + "theDeployment", + "kcp-01c0zzvlqsi7n", + types.ApplyPatchType, + toJson(t, + changeUnstructured( + toUnstructured(t, deployment("theDeployment", "kcp-01c0zzvlqsi7n", "", map[string]string{ + "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", + }, nil, nil)), + setNestedField(map[string]interface{}{}, "status"), + setPodSpec("spec", "template", "spec"), + ), + ), + ), + }, + }, } for name, tc := range tests { @@ -1489,3 +1548,18 @@ func patchSecretSingleClusterAction(name, namespace string, patchType types.Patc Patch: patch, } } + +func namespaceAction(verb, namespace string, subresources ...string) clienttesting.ActionImpl { + return clienttesting.ActionImpl{ + Verb: verb, + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}, + Subresource: strings.Join(subresources, "/"), + } +} + +func updateNamespaceAction(namespace string, object runtime.Object, subresources 
...string) clienttesting.UpdateActionImpl { + return clienttesting.UpdateActionImpl{ + ActionImpl: namespaceAction("update", namespace, subresources...), + Object: object, + } +} diff --git a/pkg/syncer/syncer.go b/pkg/syncer/syncer.go index c336e7778f4..4085d4e54c8 100644 --- a/pkg/syncer/syncer.go +++ b/pkg/syncer/syncer.go @@ -200,7 +200,6 @@ func StartSyncer(ctx context.Context, cfg *SyncerConfig, numSyncerThreads int, i syncerNamespaceInformerFactory := kubernetesinformers.NewSharedInformerFactoryWithOptions(downstreamKubeClient, resyncPeriod, kubernetesinformers.WithNamespace(syncerNamespace)) downstreamSyncerDiscoveryClient := discovery.NewDiscoveryClient(downstreamKubeClient.RESTClient()) - syncTargetGVRSource, err := resourcesync.NewSyncTargetGVRSource( logger, downstreamSyncerDiscoveryClient,
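// Illustrative sketch only, not part of the patch series: a minimal, self-contained
// re-derivation of the tenant ID added in pkg/syncer/shared/helpers.go, showing why every
// downstream namespace that originates from the same workspace and SyncTarget ends up with
// the same kcp.io/tenant-id label, which is what the DNS NetworkPolicy selects on.
// The locator struct is a simplified stand-in for shared.NamespaceLocator; the names
// tenantID and locator are hypothetical.
package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"math/big"
)

type locator struct {
	SyncTarget string `json:"syncTarget"`
	Cluster    string `json:"cluster"`
	Namespace  string `json:"namespace,omitempty"`
}

// tenantID drops the namespace so the hash only covers cluster-wide fields, then encodes
// the SHA-224 digest of the JSON form in base 62, mirroring what GetTenantID does.
func tenantID(l locator) (string, error) {
	l.Namespace = ""
	b, err := json.Marshal(l)
	if err != nil {
		return "", err
	}
	h := sha256.Sum224(b)
	var i big.Int
	i.SetBytes(h[:])
	return i.Text(62), nil
}

func main() {
	a, _ := tenantID(locator{SyncTarget: "us-west1", Cluster: "root:org:ws", Namespace: "ns-a"})
	b, _ := tenantID(locator{SyncTarget: "us-west1", Cluster: "root:org:ws", Namespace: "ns-b"})
	// Both namespaces map to the same tenant, so a single namespaceSelector on the
	// kcp.io/tenant-id label admits all of them to the per-workspace DNS nameserver.
	fmt.Println(a == b)
}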