
Commit 9f8e886

UPSTREAM: <drop>: use global pullsecret for image pulls
Signed-off-by: Ankita Thomas <[email protected]>
1 parent 0f9d642 commit 9f8e886

9 files changed: +73 / -49 lines


cmd/manager/main.go

Lines changed: 48 additions & 17 deletions
@@ -23,13 +23,17 @@ import (
 	"net/http"
 	"os"
 	"path/filepath"
+	"strings"
 	"time"
 
 	"github.com/spf13/pflag"
 	"go.uber.org/zap/zapcore"
+	corev1 "k8s.io/api/core/v1"
 	apiextensionsv1client "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
 	k8slabels "k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/selection"
+	k8stypes "k8s.io/apimachinery/pkg/types"
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -52,7 +56,6 @@ import (
 	"github.com/operator-framework/operator-controller/internal/contentmanager"
 	"github.com/operator-framework/operator-controller/internal/controllers"
 	"github.com/operator-framework/operator-controller/internal/httputil"
-	"github.com/operator-framework/operator-controller/internal/labels"
 	"github.com/operator-framework/operator-controller/internal/resolve"
 	"github.com/operator-framework/operator-controller/internal/rukpak/preflights/crdupgradesafety"
 	"github.com/operator-framework/operator-controller/internal/rukpak/source"
@@ -87,6 +90,7 @@ func main() {
 		operatorControllerVersion bool
 		systemNamespace           string
 		caCertDir                 string
+		globalPullSecret          string
 	)
 	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
 	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
@@ -97,6 +101,7 @@ func main() {
 	flag.StringVar(&cachePath, "cache-path", "/var/cache", "The local directory path used for filesystem based caching")
 	flag.BoolVar(&operatorControllerVersion, "version", false, "Prints operator-controller version information")
 	flag.StringVar(&systemNamespace, "system-namespace", "", "Configures the namespace that gets used to deploy system resources.")
+	flag.StringVar(&globalPullSecret, "global-pull-secret", "", "The <namespace>/<name> of the global pull secret that is going to be used to pull bundle images.")
 	opts := zap.Options{
 		Development: true,
 		TimeEncoder: zapcore.RFC3339NanoTimeEncoder,
@@ -115,16 +120,42 @@ func main() {
 	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts), zap.StacktraceLevel(zapcore.DPanicLevel)))
 	setupLog.Info("starting up the controller", "version info", version.String())
 
+	var globalPullSecretKey *k8stypes.NamespacedName
+	if globalPullSecret != "" {
+		secretParts := strings.Split(globalPullSecret, "/")
+		if len(secretParts) != 2 {
+			setupLog.Error(fmt.Errorf("incorrect number of components"), "value of global-pull-secret should be of the format <namespace>/<name>")
+			os.Exit(1)
+		}
+		globalPullSecretKey = &k8stypes.NamespacedName{Name: secretParts[1], Namespace: secretParts[0]}
+	}
+
 	if systemNamespace == "" {
 		systemNamespace = podNamespace()
 	}
 
-	dependentRequirement, err := k8slabels.NewRequirement(labels.OwnerKindKey, selection.In, []string{ocv1alpha1.ClusterExtensionKind})
-	if err != nil {
-		setupLog.Error(err, "unable to create dependent label selector for cache")
-		os.Exit(1)
+	cacheOptions := crcache.Options{
+		ByObject: map[client.Object]crcache.ByObject{
+			&ocv1alpha1.ClusterExtension{}: {Label: k8slabels.Everything()},
+			&catalogd.ClusterCatalog{}:     {Label: k8slabels.Everything()},
+		},
+		DefaultNamespaces: map[string]crcache.Config{
+			systemNamespace: {LabelSelector: k8slabels.Everything()},
+		},
+		DefaultLabelSelector: k8slabels.Nothing(),
+	}
+	if globalPullSecretKey != nil {
+		cacheOptions.ByObject[&corev1.Secret{}] = crcache.ByObject{
+			Namespaces: map[string]crcache.Config{
+				globalPullSecretKey.Namespace: {
+					LabelSelector: k8slabels.Everything(),
+					FieldSelector: fields.SelectorFromSet(map[string]string{
+						"metadata.name": globalPullSecretKey.Name,
+					}),
+				},
+			},
+		}
 	}
-	dependentSelector := k8slabels.NewSelector().Add(*dependentRequirement)
 
 	setupLog.Info("set up manager")
 	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
@@ -133,16 +164,7 @@ func main() {
 		HealthProbeBindAddress: probeAddr,
 		LeaderElection:         enableLeaderElection,
 		LeaderElectionID:       "9c4404e7.operatorframework.io",
-		Cache: crcache.Options{
-			ByObject: map[client.Object]crcache.ByObject{
-				&ocv1alpha1.ClusterExtension{}: {Label: k8slabels.Everything()},
-				&catalogd.ClusterCatalog{}:     {Label: k8slabels.Everything()},
-			},
-			DefaultNamespaces: map[string]crcache.Config{
-				systemNamespace: {LabelSelector: k8slabels.Everything()},
-			},
-			DefaultLabelSelector: dependentSelector,
-		},
+		Cache: cacheOptions,
 		// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
 		// when the Manager ends. This requires the binary to immediately end when the
 		// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
@@ -200,6 +222,15 @@ func main() {
 		AuthNamespace:   systemNamespace,
 		CertPoolWatcher: certPoolWatcher,
 	}
+	if globalPullSecretKey != nil {
+		unpacker.PullSecretFetcher = func(ctx context.Context) ([]corev1.Secret, error) {
+			pullSecret, err := coreClient.Secrets(globalPullSecretKey.Namespace).Get(ctx, globalPullSecretKey.Name, metav1.GetOptions{})
+			if err != nil {
+				return nil, err
+			}
+			return []corev1.Secret{*pullSecret}, err
+		}
+	}
 
 	clusterExtensionFinalizers := crfinalizer.NewFinalizers()
 	domain := ocv1alpha1.GroupVersion.Group
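
For context, a minimal standalone sketch (not part of this commit) of how the new flag is consumed: the <namespace>/<name> value is split into a NamespacedName, and the PullSecretFetcher closure simply reads that one Secret on demand. The value openshift-config/pull-secret, the registry, and the auth entry are placeholder assumptions, and a fake clientset stands in for the live corev1client used by the controller.

// Sketch only: flag-parsing rule and PullSecretFetcher closure exercised
// against a fake clientset. All concrete values below are placeholders.
package main

import (
    "context"
    "fmt"
    "strings"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    k8stypes "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes/fake"
)

func main() {
    // Same rule as main.go: the flag value must be <namespace>/<name>.
    flagValue := "openshift-config/pull-secret" // placeholder example
    parts := strings.Split(flagValue, "/")
    if len(parts) != 2 {
        panic("value of global-pull-secret should be of the format <namespace>/<name>")
    }
    key := k8stypes.NamespacedName{Namespace: parts[0], Name: parts[1]}

    // A fake clientset seeded with a dockerconfigjson Secret stands in for the
    // live cluster; the controller itself uses a corev1client against the API.
    coreClient := fake.NewSimpleClientset(&corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name},
        Type:       corev1.SecretTypeDockerConfigJson,
        Data: map[string][]byte{
            corev1.DockerConfigJsonKey: []byte(`{"auths":{"registry.example.com":{"auth":"dXNlcjpwYXNz"}}}`),
        },
    }).CoreV1()

    // Same closure shape the commit assigns to unpacker.PullSecretFetcher.
    fetcher := func(ctx context.Context) ([]corev1.Secret, error) {
        s, err := coreClient.Secrets(key.Namespace).Get(ctx, key.Name, metav1.GetOptions{})
        if err != nil {
            return nil, err
        }
        return []corev1.Secret{*s}, nil
    }

    secrets, err := fetcher(context.Background())
    if err != nil {
        panic(err)
    }
    fmt.Printf("fetched %d pull secret(s): %s/%s\n", len(secrets), secrets[0].Namespace, secrets[0].Name)
}

The cache change above complements this: scoping the Secret informer to the one named Secret via a metadata.name field selector keeps the manager from caching every Secret in that namespace.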

internal/rukpak/source/image_registry.go

Lines changed: 21 additions & 3 deletions
@@ -17,6 +17,7 @@ import (
 	gcrkube "github.com/google/go-containerregistry/pkg/authn/kubernetes"
 	"github.com/google/go-containerregistry/pkg/name"
 	"github.com/google/go-containerregistry/pkg/v1/remote"
+	corev1 "k8s.io/api/core/v1"
 	apimacherrors "k8s.io/apimachinery/pkg/util/errors"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
@@ -52,11 +53,14 @@ func NewUnrecoverable(err error) *Unrecoverable {
 // TODO: Make asynchronous
 
 type ImageRegistry struct {
-	BaseCachePath   string
-	AuthNamespace   string
-	CertPoolWatcher *httputil.CertPoolWatcher
+	BaseCachePath     string
+	AuthNamespace     string
+	CertPoolWatcher   *httputil.CertPoolWatcher
+	PullSecretFetcher PullSecretFetcher
 }
 
+type PullSecretFetcher func(ctx context.Context) ([]corev1.Secret, error)
+
 func (i *ImageRegistry) Unpack(ctx context.Context, bundle *BundleSource) (*Result, error) {
 	l := log.FromContext(ctx)
 	if bundle.Type != SourceTypeImage {
@@ -119,6 +123,20 @@ func (i *ImageRegistry) Unpack(ctx context.Context, bundle *BundleSource) (*Resu
 		}
 	}
 
+	if i.PullSecretFetcher != nil {
+		pullSecrets, err := i.PullSecretFetcher(ctx)
+		if err != nil {
+			l.V(1).Error(err, "failed to fetch global pullsecret, attempting unauthenticated image pull")
+		} else {
+			pullSecretAuth, err := gcrkube.NewFromPullSecrets(ctx, pullSecrets)
+			if err != nil {
+				l.V(1).Error(err, "failed to parse global pullsecret, attempting unauthenticated image pull")
+			} else {
+				remoteOpts = append(remoteOpts, remote.WithAuthFromKeychain(pullSecretAuth))
+			}
+		}
+	}
+
 	// always fetch the hash
 	imgDesc, err := remote.Head(imgRef, remoteOpts...)
 	if err != nil {
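
For reference, a self-contained sketch (with placeholder registry, repository, and credentials) of what this new branch does once PullSecretFetcher returns secrets: gcrkube.NewFromPullSecrets turns a kubernetes.io/dockerconfigjson Secret into a keychain, which is attached to the remote options so remote.Head and the subsequent pull authenticate; as in the diff, any failure along the way falls back to an unauthenticated pull.

// Sketch only: building a go-containerregistry keychain from a
// dockerconfigjson pull secret. All concrete values are placeholders.
package main

import (
    "context"
    "fmt"

    gcrkube "github.com/google/go-containerregistry/pkg/authn/kubernetes"
    "github.com/google/go-containerregistry/pkg/name"
    "github.com/google/go-containerregistry/pkg/v1/remote"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    ctx := context.Background()

    // The global pull secret is a kubernetes.io/dockerconfigjson Secret, the
    // same format used by imagePullSecrets.
    pullSecrets := []corev1.Secret{{
        ObjectMeta: metav1.ObjectMeta{Namespace: "openshift-config", Name: "pull-secret"},
        Type:       corev1.SecretTypeDockerConfigJson,
        Data: map[string][]byte{
            corev1.DockerConfigJsonKey: []byte(`{"auths":{"registry.example.com":{"auth":"dXNlcjpwYXNz"}}}`),
        },
    }}

    // Build a keychain that resolves per-registry credentials from the
    // secrets, then attach it to the remote options as the diff does.
    keychain, err := gcrkube.NewFromPullSecrets(ctx, pullSecrets)
    if err != nil {
        panic(err)
    }
    remoteOpts := []remote.Option{remote.WithContext(ctx), remote.WithAuthFromKeychain(keychain)}

    imgRef, err := name.ParseReference("registry.example.com/operators/example-bundle:v0.1.0")
    if err != nil {
        panic(err)
    }
    // remote.Head resolves the digest using the keychain's credentials; with
    // the placeholder registry above this will simply fail at runtime.
    if desc, err := remote.Head(imgRef, remoteOpts...); err == nil {
        fmt.Println("resolved digest:", desc.Digest)
    } else {
        fmt.Println("HEAD failed (expected with placeholder values):", err)
    }
}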

openshift/generate-manifests.sh

Lines changed: 2 additions & 1 deletion
@@ -68,7 +68,7 @@ for container_name in "${!IMAGE_MAPPINGS[@]}"; do
   placeholder="${IMAGE_MAPPINGS[$container_name]}"
   $YQ -i "(select(.kind == \"Deployment\")|.spec.template.spec.containers[]|select(.name==\"$container_name\")|.image) = \"$placeholder\"" "$TMP_KUSTOMIZE_OUTPUT"
   $YQ -i 'select(.kind == "Deployment").spec.template.metadata.annotations += {"target.workload.openshift.io/management": "{\"effect\": \"PreferredDuringScheduling\"}"}' "$TMP_KUSTOMIZE_OUTPUT"
-  $YQ -i 'select(.kind == "Deployment").spec.template.metadata.annotations += {"openshift.io/required-scc": "privileged"}' "$TMP_KUSTOMIZE_OUTPUT"
+  $YQ -i 'select(.kind == "Deployment").spec.template.metadata.annotations += {"openshift.io/required-scc": "restricted-v2"}' "$TMP_KUSTOMIZE_OUTPUT"
   $YQ -i 'select(.kind == "Deployment").spec.template.spec += {"priorityClassName": "system-cluster-critical"}' "$TMP_KUSTOMIZE_OUTPUT"
   $YQ -i 'select(.kind == "Namespace").metadata.annotations += {"workload.openshift.io/allowed": "management"}' "$TMP_KUSTOMIZE_OUTPUT"
 done
@@ -127,3 +127,4 @@ cp "$TMP_MANIFEST_DIR"/* "$MANIFEST_DIR"/
   fi
 done
 )
+

openshift/kustomize/overlays/openshift/olmv1-ns/kustomization.yaml

Lines changed: 0 additions & 5 deletions
@@ -8,12 +8,7 @@ resources:
 - ../../../../../config/base/manager
 
 patches:
-- target:
-    kind: ClusterRole
-    name: manager-role
-  path: patches/manager_role.yaml
 - target:
     kind: Deployment
     name: controller-manager
   path: patches/manager_deployment_ca.yaml
-- path: patches/manager_namespace_privileged.yaml

openshift/kustomize/overlays/openshift/olmv1-ns/patches/manager_namespace_privileged.yaml

Lines changed: 0 additions & 6 deletions
This file was deleted.

openshift/kustomize/overlays/openshift/olmv1-ns/patches/manager_role.yaml

Lines changed: 0 additions & 7 deletions
This file was deleted.

openshift/manifests/00-namespace-openshift-operator-controller.yml

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ kind: Namespace
 metadata:
   labels:
     control-plane: controller-manager
-    pod-security.kubernetes.io/enforce: privileged
+    pod-security.kubernetes.io/enforce: restricted
     pod-security.kubernetes.io/enforce-version: latest
   name: openshift-operator-controller
   annotations:

openshift/manifests/10-clusterrole-operator-controller-manager-role.yml

Lines changed: 0 additions & 8 deletions
@@ -53,11 +53,3 @@ rules:
   verbs:
   - patch
   - update
-- apiGroups:
-  - security.openshift.io
-  resourceNames:
-  - privileged
-  resources:
-  - securitycontextconstraints
-  verbs:
-  - use

openshift/manifests/20-deployment-openshift-operator-controller-operator-controller-controller-manager.yml

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ spec:
       annotations:
         kubectl.kubernetes.io/default-container: manager
        	target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
-        openshift.io/required-scc: privileged
+        openshift.io/required-scc: restricted-v2
       labels:
        	control-plane: controller-manager
     spec:
