Skip to content

Server Side Apply - PodSelector empty dict not applied #92913

@skuffe

Description

@skuffe

What happened:
Applying (using SSA) a networking.k8s.io/v1 NetworkPolicy with an empty PodSelector, the empty PodSelector does not get applied and the managedFields contains no field reference for the podSelector field to indicate that my controller owns the field.

This happens when using a typed client from controller-runtime (operator-sdk), a dynamic client from client-go but does not seem to happen with kubectl apply --server-side

This poses a problem with a NetworkPolicy, where an empty PodSelector means "all pods". So it appears there is no way for my controller to enforce this field using SSA as it simply gets discarded.

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  creationTimestamp: "2020-07-08T13:27:22Z"
  generation: 3
  managedFields:
  - apiVersion: networking.k8s.io/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:ownerReferences:
          k:{"uid":"fbcf0aeb-cb55-4c33-a2d8-f8bc4d757b6a"}:
            .: {}
            f:apiVersion: {}
            f:blockOwnerDeletion: {}
            f:controller: {}
            f:kind: {}
            f:name: {}
            f:uid: {}
      f:spec:
        f:policyTypes: {}
    manager: sample-controller
    operation: Apply
    time: "2020-07-08T13:49:51Z"
  - apiVersion: networking.k8s.io/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:ownerReferences:
          k:{"uid":"fbcf0aeb-cb55-4c33-a2d8-f8bc4d757b6a"}:
            .: {}
            f:apiVersion: {}
            f:blockOwnerDeletion: {}
            f:controller: {}
            f:kind: {}
            f:name: {}
            f:uid: {}
      f:spec:
        f:policyTypes: {}
    manager: kubectl
    operation: Apply
    time: "2020-07-08T14:01:34Z"
  name: default-deny
  namespace: example-dcsystem-example-dcapplication-build
  ownerReferences:
  - apiVersion: dc-operator.dac.local/v1alpha1
    blockOwnerDeletion: true
    controller: true
    kind: DCApplication
    name: example-dcapplication
    uid: fbcf0aeb-cb55-4c33-a2d8-f8bc4d757b6a
  resourceVersion: "91074"
  selfLink: /apis/networking.k8s.io/v1/namespaces/example-dcsystem-example-dcapplication-build/networkpolicies/default-deny
  uid: b88591a3-a2b6-4cd1-9277-e504fc92c127
spec:
  podSelector: {}
  policyTypes:
  - Ingress

To elaborate a bit further, say I change (using kubectl) the spec.podSelector to narrow the policy using e.g. matchLabels, and then let my controller reconcile the NetworkPolicy, the podSelector remains unchanged.

If I do the same change again, and then using kubectl apply --server-side apply a patch, it does seem to correctly change the podSelector to an empty dict as expected.

What you expected to happen:
I expect the podSelector to get applied

How to reproduce it (as minimally and precisely as possible):
go mod init
go get k8s.io/[email protected]
create main.go with the following contents

package main

import (
	// NOTE(review): bufio and os are never referenced in this file; without
	// the blank identifier the program fails to compile with
	// "imported and not used". Blank-imported rather than deleted so the
	// original import set is preserved.
	_ "bufio"
	"context"
	"encoding/json"
	"flag"
	"fmt"
	_ "os"
	"path/filepath"

	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/discovery"
	memory "k8s.io/client-go/discovery/cached"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

// main reproduces the issue: it server-side-applies (SSA) a NetworkPolicy
// whose podSelector is the empty selector, via the dynamic client, so the
// resulting managedFields can be inspected for an f:podSelector entry.
func main() {
	// Resolve the kubeconfig path, defaulting to ~/.kube/config when a
	// home directory is available.
	var kubeconfig *string
	if home := homedir.HomeDir(); home != "" {
		kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	} else {
		kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	}
	flag.Parse()

	namespace := "default"

	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err)
	}

	// The policy under test: an empty PodSelector plus the Ingress policy
	// type — i.e. the standard "default deny ingress for all pods" shape.
	np := &networkingv1.NetworkPolicy{
		TypeMeta: metav1.TypeMeta{
			Kind:       "NetworkPolicy",
			APIVersion: "networking.k8s.io/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "default-deny",
			Namespace: namespace},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
		},
	}

	// Convert the typed object to unstructured form for the dynamic client.
	// Named obj so it does not shadow the imported unstructured package.
	obj := &unstructured.Unstructured{}
	if err := scheme.Scheme.Convert(np, obj, nil); err != nil {
		panic(err)
	}

	// 1. Prepare a RESTMapper to find the GVR.
	dc, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		panic(err)
	}
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(dc))

	// 2. Prepare the dynamic client.
	dyn, err := dynamic.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// 3. Find the GVR for the object's GVK.
	gvk := np.GroupVersionKind()
	mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
	if err != nil {
		panic(err)
	}

	// 4. Obtain the REST interface for the GVR.
	var dr dynamic.ResourceInterface
	if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
		// namespaced resources should specify the namespace
		dr = dyn.Resource(mapping.Resource).Namespace(obj.GetNamespace())
	} else {
		// for cluster-wide resources
		dr = dyn.Resource(mapping.Resource)
	}

	// 5. Marshal the object into JSON for the apply patch body.
	data, err := json.Marshal(obj)
	if err != nil {
		panic(err)
	}

	fmt.Println(string(data))

	// 6. Create or update the object with SSA.
	//    types.ApplyPatchType indicates SSA; FieldManager identifies the
	//    field owner.
	applied, err := dr.Patch(context.TODO(), obj.GetName(), types.ApplyPatchType, data, metav1.PatchOptions{
		FieldManager: "sample-sample-123", Force: newTrue(),
	})
	// BUG FIX: the original discarded this error and went on to marshal the
	// (nil) result; a failed apply must be surfaced before using it.
	if err != nil {
		panic(err)
	}

	// Print the server's view of the applied object so managedFields can be
	// inspected for the podSelector entry.
	data, err = json.Marshal(applied)
	if err != nil {
		panic(err)
	}

	fmt.Println(string(data))

}

// newTrue returns a pointer to a freshly allocated bool holding true,
// since Go does not permit taking the address of a literal.
func newTrue() *bool {
	v := true
	return &v
}

go run main.go // this should create the networkpolicy
now edit the networkpolicy kubectl edit networkpolicy default-deny and set the podSelector to something else like;
podSelector:
  matchLabels:
    test: "123"

now go run main.go again, and observe no change in podSelector

Environment:

  • Kubernetes version (use kubectl version):
    Client Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.3", GitCommit:"2e7996e3e2712684bc73f0dec0200d64eec7fe40", GitTreeState:"clean", BuildDate:"2020-05-20T12:52:00Z", GoVersion:"go1.13.9", Compiler:"gc", Platform:"linux/amd64"}
    Server Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.2", GitCommit:"52c56ce7a8272c798dbc29846288d7cd9fbae032", GitTreeState:"clean", BuildDate:"2020-04-30T20:19:45Z", GoVersion:"go1.13.9", Compiler:"gc", Platform:"linux/amd64"}
  • Cloud provider or hardware configuration:
    WSL2, KIND single node cluster
  • OS (e.g: cat /etc/os-release):
    Ubuntu 20.04 LTS
  • Kernel (e.g. uname -a):
    Linux 4.19.84-microsoft-standard

Metadata

Metadata

Labels

kind/bug — Categorizes issue or PR as related to a bug. wg/api-expression — Categorizes an issue or PR as relevant to WG API Expression.

Type

No type

Projects

No projects

Milestone

No milestone

Relationships

None yet

Development

No branches or pull requests

Issue actions