
Commit af24321

Merge pull request #2369 from kevinrizza/bump-1.33.3
OCPBUGS-59533: Rebase v1.33.3 to master/4.20
2 parents 36f2a99 + dd5e299 commit af24321

File tree

13 files changed: +494 −57 lines changed


CHANGELOG/CHANGELOG-1.33.md

Lines changed: 186 additions & 48 deletions
Large diffs are not rendered by default.

cmd/kubeadm/app/util/etcd/etcd.go

Lines changed: 9 additions & 0 deletions

@@ -576,6 +576,15 @@ func (c *Client) MemberPromote(learnerID uint64) error {
     ctx, cancel := context.WithTimeout(context.Background(), etcdTimeout)
     defer cancel()

+    isLearner, err := c.isLearner(learnerID)
+    if err != nil {
+        return false, err
+    }
+    if !isLearner {
+        klog.V(1).Infof("[etcd] Member %s was already promoted.", strconv.FormatUint(learnerID, 16))
+        return true, nil
+    }
+
     _, err = cli.MemberPromote(ctx, learnerID)
     if err == nil {
         klog.V(1).Infof("[etcd] The learner was promoted as a voting member: %s", strconv.FormatUint(learnerID, 16))

openshift-hack/images/hyperkube/Dockerfile.rhel

Lines changed: 1 addition & 1 deletion

@@ -14,4 +14,4 @@ COPY --from=builder /tmp/build/* /usr/bin/
 LABEL io.k8s.display-name="OpenShift Kubernetes Server Commands" \
       io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." \
       io.openshift.tags="openshift,hyperkube" \
-      io.openshift.build.versions="kubernetes=1.33.2"
+      io.openshift.build.versions="kubernetes=1.33.3"

pkg/controller/job/job_controller.go

Lines changed: 6 additions & 0 deletions

@@ -537,6 +537,12 @@ func (jm *Controller) deleteJob(logger klog.Logger, obj interface{}) {
         }
     }
     jm.enqueueLabelSelector(jobObj)
+
+    key := cache.MetaObjectToName(jobObj).String()
+    err := jm.podBackoffStore.removeBackoffRecord(key)
+    if err != nil {
+        utilruntime.HandleError(fmt.Errorf("error removing backoff record %w", err))
+    }
 }

 func (jm *Controller) enqueueLabelSelector(jobObj *batch.Job) {
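The job controller change removes the per-Job pod backoff record when the Job is deleted, keyed by the Job's namespace/name, so the backoff store does not keep entries for Jobs that no longer exist. A minimal sketch of that delete-time cleanup follows; backoffStore here is a hypothetical stand-in for the controller's real store.

package main

import (
	"fmt"
	"sync"
)

// backoffStore keeps one record per object key (hypothetical, simplified type).
type backoffStore struct {
	mu      sync.Mutex
	records map[string]int // key -> failure count, standing in for a real record
}

func newBackoffStore() *backoffStore {
	return &backoffStore{records: map[string]int{}}
}

// removeBackoffRecord drops the record for a key; called from the delete handler.
func (s *backoffStore) removeBackoffRecord(key string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.records, key)
	return nil
}

func main() {
	store := newBackoffStore()
	store.records["ns/job-a"] = 3

	// On deletion of ns/job-a, remove its record, mirroring the deleteJob hunk above.
	if err := store.removeBackoffRecord("ns/job-a"); err != nil {
		fmt.Println("error removing backoff record:", err)
	}
	fmt.Println("remaining records:", len(store.records))
}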

pkg/kubelet/images/image_gc_manager.go

Lines changed: 3 additions & 0 deletions

@@ -521,7 +521,10 @@ func (im *realImageGCManager) freeImage(ctx context.Context, image evictionInfo,
     if isRuntimeClassInImageCriAPIEnabled {
         imageKey = getImageTuple(image.id, image.runtimeHandlerUsedToPullImage)
     }
+
+    im.imageRecordsLock.Lock()
     delete(im.imageRecords, imageKey)
+    im.imageRecordsLock.Unlock()

     metrics.ImageGarbageCollectedTotal.WithLabelValues(reason).Inc()
     return err
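The kubelet image GC fix takes imageRecordsLock around the map delete so freeImage no longer mutates imageRecords without the lock that other readers and writers hold. A minimal sketch of the same mutex-guarded map pattern, with illustrative names rather than the kubelet's actual types (run it with go run -race to confirm the delete is race-free):

package main

import (
	"fmt"
	"sync"
)

type imageManager struct {
	mu      sync.Mutex
	records map[string]struct{}
}

// freeImage removes a record while holding the lock; an unguarded delete here
// would race with any concurrent writer of records.
func (m *imageManager) freeImage(id string) {
	m.mu.Lock()
	delete(m.records, id)
	m.mu.Unlock()
}

func main() {
	m := &imageManager{records: map[string]struct{}{"sha256:abc": {}}}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			m.freeImage("sha256:abc")
		}()
	}
	wg.Wait()
	fmt.Println("records left:", len(m.records))
}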

pkg/kubelet/kuberuntime/kuberuntime_container_linux.go

Lines changed: 1 addition & 1 deletion

@@ -137,7 +137,7 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod,
     // If pod has exclusive cpu and the container in question has integer cpu requests
     // the cfs quota will not be enforced
     disableCPUQuota := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DisableCPUQuotaWithExclusiveCPUs) && m.containerManager.ContainerHasExclusiveCPUs(pod, container)
-    klog.V(2).InfoS("Enforcing CFS quota", "pod", klog.KObj(pod), "unlimited", disableCPUQuota)
+    klog.V(5).InfoS("Enforcing CFS quota", "pod", klog.KObj(pod), "unlimited", disableCPUQuota)
     lcr := m.calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit, disableCPUQuota)

     lcr.OomScoreAdj = int64(qos.GetContainerOOMScoreAdjust(pod, container,

pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go

Lines changed: 1 addition & 1 deletion

@@ -59,8 +59,8 @@ func (m *kubeGenericRuntimeManager) calculateSandboxResources(pod *v1.Pod) *runt

     // If pod has exclusive cpu the sandbox will not have cfs quote enforced
     disableCPUQuota := utilfeature.DefaultFeatureGate.Enabled(features.DisableCPUQuotaWithExclusiveCPUs) && m.containerManager.PodHasExclusiveCPUs(pod)
-    klog.V(2).InfoS("Enforcing CFS quota", "pod", klog.KObj(pod), "unlimited", disableCPUQuota)

+    klog.V(5).InfoS("Enforcing CFS quota", "pod", klog.KObj(pod), "unlimited", disableCPUQuota)
     return m.calculateLinuxResources(cpuRequest, lim.Cpu(), lim.Memory(), disableCPUQuota)
 }
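Both kubelet runtime changes only lower the verbosity of the same "Enforcing CFS quota" message from V(2) to V(5), so it drops out of logs at typical verbosity levels. For context, a minimal sketch of how klog verbosity gating behaves, assuming k8s.io/klog/v2; the flag value is illustrative.

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Set("v", "2") // emulate a kubelet running at -v=2
	flag.Parse()

	// Emitted at -v=2 and above.
	klog.V(2).InfoS("Enforcing CFS quota", "unlimited", false)
	// Suppressed unless the process runs with -v=5 or higher, which is the
	// effect of the change above: the message leaves default logs.
	klog.V(5).InfoS("Enforcing CFS quota", "unlimited", false)
	klog.Flush()
}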

pkg/registry/batch/job/strategy.go

Lines changed: 2 additions & 1 deletion

@@ -379,6 +379,7 @@ func getStatusValidationOptions(newJob, oldJob *batch.Job) batchvalidation.JobSt
     isUncountedTerminatedPodsChanged := !apiequality.Semantic.DeepEqual(oldJob.Status.UncountedTerminatedPods, newJob.Status.UncountedTerminatedPods)
     isReadyChanged := !ptr.Equal(oldJob.Status.Ready, newJob.Status.Ready)
     isTerminatingChanged := !ptr.Equal(oldJob.Status.Terminating, newJob.Status.Terminating)
+    isSuspendedWithZeroCompletions := ptr.Equal(newJob.Spec.Suspend, ptr.To(true)) && ptr.Equal(newJob.Spec.Completions, ptr.To[int32](0))

     return batchvalidation.JobStatusValidationOptions{
         // We allow to decrease the counter for succeeded pods for jobs which
@@ -394,7 +395,7 @@ func getStatusValidationOptions(newJob, oldJob *batch.Job) batchvalidation.JobSt
         RejectFailedJobWithoutFailureTarget:          isJobFailedChanged || isFailedIndexesChanged,
         RejectCompleteJobWithoutSuccessCriteriaMet:   isJobCompleteChanged || isJobSuccessCriteriaMetChanged,
         RejectFinishedJobWithActivePods:              isJobFinishedChanged || isActiveChanged,
-        RejectFinishedJobWithoutStartTime:            isJobFinishedChanged || isStartTimeChanged,
+        RejectFinishedJobWithoutStartTime:            (isJobFinishedChanged || isStartTimeChanged) && !isSuspendedWithZeroCompletions,
         RejectFinishedJobWithUncountedTerminatedPods: isJobFinishedChanged || isUncountedTerminatedPodsChanged,
         RejectStartTimeUpdateForUnsuspendedJob:       isStartTimeChanged,
         RejectCompletionTimeBeforeStartTime:          isStartTimeChanged || isCompletionTimeChanged,
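The strategy change relaxes status validation for one corner case: a suspended Job with spec.completions=0 can legitimately be marked finished without ever having a startTime, so RejectFinishedJobWithoutStartTime is switched off for exactly that combination. A minimal sketch of the boolean logic follows, using a trimmed stand-in for the real JobSpec and local pointer helpers in place of k8s.io/utils/ptr.

package main

import "fmt"

// JobSpec is a trimmed stand-in for the real batch.JobSpec type.
type JobSpec struct {
	Suspend     *bool
	Completions *int32
}

func ptrTo[T any](v T) *T { return &v }

func ptrEqual[T comparable](a, b *T) bool {
	if a == nil || b == nil {
		return a == b
	}
	return *a == *b
}

// rejectFinishedJobWithoutStartTime mirrors the condition in the hunk above:
// the check stays on unless the Job is suspended with zero completions.
func rejectFinishedJobWithoutStartTime(spec JobSpec, finishedOrStartTimeChanged bool) bool {
	suspendedWithZeroCompletions := ptrEqual(spec.Suspend, ptrTo(true)) &&
		ptrEqual(spec.Completions, ptrTo(int32(0)))
	return finishedOrStartTimeChanged && !suspendedWithZeroCompletions
}

func main() {
	suspendedZero := JobSpec{Suspend: ptrTo(true), Completions: ptrTo(int32(0))}
	regular := JobSpec{Suspend: ptrTo(false), Completions: ptrTo(int32(1))}

	fmt.Println(rejectFinishedJobWithoutStartTime(suspendedZero, true)) // false: allowed to finish without startTime
	fmt.Println(rejectFinishedJobWithoutStartTime(regular, true))       // true: still rejected
}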

pkg/registry/batch/job/strategy_test.go

Lines changed: 30 additions & 0 deletions

@@ -3535,6 +3535,36 @@ func TestStatusStrategy_ValidateUpdate(t *testing.T) {
                 {Type: field.ErrorTypeInvalid, Field: "status.ready"},
             },
         },
+        "valid transition to Complete for suspended Job with completions=0; without startTime": {
+            enableJobManagedBy: true,
+            job: &batch.Job{
+                ObjectMeta: validObjectMeta,
+                Spec: batch.JobSpec{
+                    Completions: ptr.To[int32](0),
+                    Suspend:     ptr.To(true),
+                },
+            },
+            newJob: &batch.Job{
+                ObjectMeta: validObjectMeta,
+                Spec: batch.JobSpec{
+                    Completions: ptr.To[int32](0),
+                    Suspend:     ptr.To(true),
+                },
+                Status: batch.JobStatus{
+                    CompletionTime: &now,
+                    Conditions: []batch.JobCondition{
+                        {
+                            Type:   batch.JobSuccessCriteriaMet,
+                            Status: api.ConditionTrue,
+                        },
+                        {
+                            Type:   batch.JobComplete,
+                            Status: api.ConditionTrue,
+                        },
+                    },
+                },
+            },
+        },
     }
     for name, tc := range cases {
         t.Run(name, func(t *testing.T) {

staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go

Lines changed: 120 additions & 0 deletions

@@ -662,6 +662,7 @@ func TestMatchExactResourceVersionFallback(t *testing.T) {
     }
     for _, tc := range tcs {
         t.Run(tc.name, func(t *testing.T) {
+            featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ListFromCacheSnapshot, true)
             backingStorage := &dummyStorage{}
             expectStoreRequests := 0
             backingStorage.getListFn = func(_ context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
@@ -759,6 +760,125 @@ func TestGetListNonRecursiveCacheBypass(t *testing.T) {
     }
 }

+func TestGetListNonRecursiveCacheWithConsistentListFromCache(t *testing.T) {
+    // Set feature gates once at the beginning since we only care about ConsistentListFromCache=true and ListFromCacheSnapshot=false
+    featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentListFromCache, true)
+    featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ListFromCacheSnapshot, false)
+    forceRequestWatchProgressSupport(t)
+
+    tests := []struct {
+        name                    string
+        consistentListFromCache bool
+        expectGetListCallCount  int
+        expectGetCurrentRV      bool
+        injectRVError           bool
+        expectedError           error
+    }{
+        {
+            name:                    "ConsistentListFromCache enabled - served from cache",
+            consistentListFromCache: true,
+            expectGetListCallCount:  1,
+            expectGetCurrentRV:      true,
+            injectRVError:           false,
+            expectedError:           nil,
+        },
+    }
+
+    for _, tc := range tests {
+        t.Run(tc.name, func(t *testing.T) {
+            var getListCount, getCurrentRVCount int
+            backingStorage := &dummyStorage{}
+
+            backingStorage.getListFn = func(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
+                getListCount++
+                if tc.injectRVError {
+                    return errDummy
+                }
+                podList := listObj.(*example.PodList)
+                podList.ListMeta = metav1.ListMeta{ResourceVersion: "100"}
+                return nil
+            }
+
+            backingStorage.getRVFn = func(ctx context.Context) (uint64, error) {
+                getCurrentRVCount++
+                rv := uint64(100)
+                err := error(nil)
+                if tc.injectRVError {
+                    err = errDummy
+                    return 0, err
+                }
+                return rv, nil
+            }
+
+            cacher, v, err := newTestCacher(backingStorage)
+            if err != nil {
+                t.Fatalf("Couldn't create cacher: %v", err)
+            }
+            defer cacher.Stop()
+
+            // Wait for cacher to be ready before injecting errors
+            if err := cacher.ready.wait(context.Background()); err != nil {
+                t.Fatalf("unexpected error waiting for the cache to be ready: %v", err)
+            }
+            delegator := NewCacheDelegator(cacher, backingStorage)
+            defer delegator.Stop()
+
+            // Setup test object
+            key := "pods/ns"
+            input := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "ns"}}
+            if err := v.UpdateObject(input, 100); err != nil {
+                t.Fatalf("Unexpected error: %v", err)
+            }
+
+            // Put object into the store
+            if err := cacher.watchCache.Add(input); err != nil {
+                t.Fatalf("Unexpected error: %v", err)
+            }
+
+            pred := storage.SelectionPredicate{
+                Label: labels.Everything(),
+                Field: fields.Everything(),
+                Limit: 500,
+            }
+            result := &example.PodList{}
+
+            // Make the list call with empty RV - delegator will get current RV and use it
+            err = delegator.GetList(context.TODO(), key, storage.ListOptions{
+                ResourceVersion: "",
+                Predicate:       pred,
+                Recursive:       true,
+            }, result)
+
+            // Verify error matches expectation
+            if !errors.Is(err, tc.expectedError) {
+                t.Errorf("Expected error %v, got: %v", tc.expectedError, err)
+            }
+
+            // Verify the correct storage method was called
+            if getListCount != tc.expectGetListCallCount {
+                t.Errorf("Expected GetList to be called %d times, but it was called %d times", tc.expectGetListCallCount, getListCount)
+            }
+            if tc.expectGetCurrentRV && getCurrentRVCount == 0 {
+                t.Error("Expected GetCurrentResourceVersion to be called, but it wasn't")
+            }
+            if !tc.expectGetCurrentRV && getCurrentRVCount > 0 {
+                t.Errorf("Expected GetCurrentResourceVersion not to be called, but it was called %d times", getCurrentRVCount)
+            }
+
+            // For successful cache reads, verify the resource version
+            if err == nil {
+                resultRV, err := cacher.versioner.ParseResourceVersion(result.ResourceVersion)
+                if err != nil {
+                    t.Fatalf("Failed to parse result resource version: %v", err)
+                }
+                expectedRV := uint64(100)
+                if resultRV != expectedRV {
+                    t.Errorf("Expected RV %d but got %d", expectedRV, resultRV)
+                }
+            }
+        })
+    }
+}
 func TestGetCacheBypass(t *testing.T) {
     backingStorage := &dummyStorage{}
     cacher, _, err := newTestCacher(backingStorage)
