diff --git a/api/bootstrap/kubeadm/v1beta1/conversion.go b/api/bootstrap/kubeadm/v1beta1/conversion.go index d74ed59a6f9f..d005df363c52 100644 --- a/api/bootstrap/kubeadm/v1beta1/conversion.go +++ b/api/bootstrap/kubeadm/v1beta1/conversion.go @@ -75,7 +75,7 @@ func (src *KubeadmConfigSpec) ConvertTo(dst *bootstrapv1.KubeadmConfigSpec) { if dst.InitConfiguration.Timeouts == nil { dst.InitConfiguration.Timeouts = &bootstrapv1.Timeouts{} } - dst.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = bootstrapv1.ConvertToSeconds(src.ClusterConfiguration.APIServer.TimeoutForControlPlane) + dst.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = clusterv1.ConvertToSeconds(src.ClusterConfiguration.APIServer.TimeoutForControlPlane) initControlPlaneComponentHealthCheckSeconds = dst.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds } if (src.JoinConfiguration != nil && src.JoinConfiguration.Discovery.Timeout != nil) || initControlPlaneComponentHealthCheckSeconds != nil { @@ -87,7 +87,7 @@ func (src *KubeadmConfigSpec) ConvertTo(dst *bootstrapv1.KubeadmConfigSpec) { } dst.JoinConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = initControlPlaneComponentHealthCheckSeconds if src.JoinConfiguration != nil && src.JoinConfiguration.Discovery.Timeout != nil { - dst.JoinConfiguration.Timeouts.TLSBootstrapSeconds = bootstrapv1.ConvertToSeconds(src.JoinConfiguration.Discovery.Timeout) + dst.JoinConfiguration.Timeouts.TLSBootstrapSeconds = clusterv1.ConvertToSeconds(src.JoinConfiguration.Discovery.Timeout) } } @@ -115,7 +115,7 @@ func (dst *KubeadmConfigSpec) ConvertFrom(src *bootstrapv1.KubeadmConfigSpec) { if dst.ClusterConfiguration == nil { dst.ClusterConfiguration = &ClusterConfiguration{} } - dst.ClusterConfiguration.APIServer.TimeoutForControlPlane = bootstrapv1.ConvertFromSeconds(src.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds) + dst.ClusterConfiguration.APIServer.TimeoutForControlPlane = clusterv1.ConvertFromSeconds(src.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds) } if reflect.DeepEqual(dst.InitConfiguration, &InitConfiguration{}) { dst.InitConfiguration = nil @@ -124,7 +124,7 @@ func (dst *KubeadmConfigSpec) ConvertFrom(src *bootstrapv1.KubeadmConfigSpec) { if dst.JoinConfiguration == nil { dst.JoinConfiguration = &JoinConfiguration{} } - dst.JoinConfiguration.Discovery.Timeout = bootstrapv1.ConvertFromSeconds(src.JoinConfiguration.Timeouts.TLSBootstrapSeconds) + dst.JoinConfiguration.Discovery.Timeout = clusterv1.ConvertFromSeconds(src.JoinConfiguration.Timeouts.TLSBootstrapSeconds) } if reflect.DeepEqual(dst.JoinConfiguration, &JoinConfiguration{}) { dst.JoinConfiguration = nil @@ -225,6 +225,14 @@ func Convert_v1beta2_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions( return autoConvert_v1beta2_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(in, out, s) } +func Convert_v1beta2_BootstrapToken_To_v1beta1_BootstrapToken(in *bootstrapv1.BootstrapToken, out *BootstrapToken, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta2_BootstrapToken_To_v1beta1_BootstrapToken(in, out, s); err != nil { + return err + } + out.TTL = clusterv1.ConvertFromSeconds(in.TTLSeconds) + return nil +} + func Convert_v1beta1_APIServer_To_v1beta2_APIServer(in *APIServer, out *bootstrapv1.APIServer, s apimachineryconversion.Scope) error { // TimeoutForControlPlane has been removed in v1beta2 return autoConvert_v1beta1_APIServer_To_v1beta2_APIServer(in, out, s) @@ -296,6 +304,14 @@ func 
Convert_v1beta1_KubeadmConfigStatus_To_v1beta2_KubeadmConfigStatus(in *Kube return nil } +func Convert_v1beta1_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *bootstrapv1.BootstrapToken, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta1_BootstrapToken_To_v1beta2_BootstrapToken(in, out, s); err != nil { + return err + } + out.TTLSeconds = clusterv1.ConvertToSeconds(in.TTL) + return nil +} + // Implement local conversion func because conversion-gen is not aware of conversion func in other packages (see https://github.com/kubernetes/code-generator/issues/94) func Convert_v1beta1_ObjectMeta_To_v1beta2_ObjectMeta(in *clusterv1beta1.ObjectMeta, out *clusterv1.ObjectMeta, s apimachineryconversion.Scope) error { diff --git a/api/bootstrap/kubeadm/v1beta1/conversion_test.go b/api/bootstrap/kubeadm/v1beta1/conversion_test.go index 59987ae9c1ce..6a4844c19c44 100644 --- a/api/bootstrap/kubeadm/v1beta1/conversion_test.go +++ b/api/bootstrap/kubeadm/v1beta1/conversion_test.go @@ -63,6 +63,7 @@ func KubeadmConfigFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { spokeClusterConfiguration, hubBootstrapTokenString, spokeBootstrapTokenString, + spokeBootstrapToken, hubKubeadmConfigSpec, } } @@ -75,6 +76,7 @@ func KubeadmConfigTemplateFuzzFuncs(_ runtimeserializer.CodecFactory) []interfac spokeClusterConfiguration, spokeBootstrapTokenString, hubBootstrapTokenString, + spokeBootstrapToken, hubKubeadmConfigSpec, } } @@ -153,6 +155,14 @@ func spokeAPIServer(in *APIServer, c randfill.Continue) { } } +func spokeBootstrapToken(in *BootstrapToken, c randfill.Continue) { + c.FillNoCustom(in) + + if in.TTL != nil { + in.TTL = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + func spokeDiscovery(in *Discovery, c randfill.Continue) { c.FillNoCustom(in) diff --git a/api/bootstrap/kubeadm/v1beta1/zz_generated.conversion.go b/api/bootstrap/kubeadm/v1beta1/zz_generated.conversion.go index fc28e230cfb7..e6eb43571d8b 100644 --- a/api/bootstrap/kubeadm/v1beta1/zz_generated.conversion.go +++ b/api/bootstrap/kubeadm/v1beta1/zz_generated.conversion.go @@ -55,16 +55,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*BootstrapToken)(nil), (*v1beta2.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_BootstrapToken_To_v1beta2_BootstrapToken(a.(*BootstrapToken), b.(*v1beta2.BootstrapToken), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_BootstrapToken_To_v1beta1_BootstrapToken(a.(*v1beta2.BootstrapToken), b.(*BootstrapToken), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*BootstrapTokenDiscovery)(nil), (*v1beta2.BootstrapTokenDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(a.(*BootstrapTokenDiscovery), b.(*v1beta2.BootstrapTokenDiscovery), scope) }); err != nil { @@ -450,6 +440,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*BootstrapToken)(nil), (*v1beta2.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BootstrapToken_To_v1beta2_BootstrapToken(a.(*BootstrapToken), 
b.(*v1beta2.BootstrapToken), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*ClusterConfiguration)(nil), (*v1beta2.ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ClusterConfiguration_To_v1beta2_ClusterConfiguration(a.(*ClusterConfiguration), b.(*v1beta2.ClusterConfiguration), scope) }); err != nil { @@ -495,6 +490,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_BootstrapToken_To_v1beta1_BootstrapToken(a.(*v1beta2.BootstrapToken), b.(*BootstrapToken), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.ControlPlaneComponent)(nil), (*ControlPlaneComponent)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_ControlPlaneComponent_To_v1beta1_ControlPlaneComponent(a.(*v1beta2.ControlPlaneComponent), b.(*ControlPlaneComponent), scope) }); err != nil { @@ -580,33 +580,23 @@ func Convert_v1beta2_APIServer_To_v1beta1_APIServer(in *v1beta2.APIServer, out * func autoConvert_v1beta1_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *v1beta2.BootstrapToken, s conversion.Scope) error { out.Token = (*v1beta2.BootstrapTokenString)(unsafe.Pointer(in.Token)) out.Description = in.Description - out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL)) + // WARNING: in.TTL requires manual conversion: does not exist in peer-type out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires)) out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages)) out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) return nil } -// Convert_v1beta1_BootstrapToken_To_v1beta2_BootstrapToken is an autogenerated conversion function. -func Convert_v1beta1_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *v1beta2.BootstrapToken, s conversion.Scope) error { - return autoConvert_v1beta1_BootstrapToken_To_v1beta2_BootstrapToken(in, out, s) -} - func autoConvert_v1beta2_BootstrapToken_To_v1beta1_BootstrapToken(in *v1beta2.BootstrapToken, out *BootstrapToken, s conversion.Scope) error { out.Token = (*BootstrapTokenString)(unsafe.Pointer(in.Token)) out.Description = in.Description - out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL)) + // WARNING: in.TTLSeconds requires manual conversion: does not exist in peer-type out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires)) out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages)) out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) return nil } -// Convert_v1beta2_BootstrapToken_To_v1beta1_BootstrapToken is an autogenerated conversion function. 
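Taken together, the two hand-written wrappers registered above make BootstrapToken.TTL round-trip through the shared seconds helpers. A minimal sketch of that round-trip, assuming the *metav1.Duration <-> *int32 signatures implied by the call sites in this diff (ttl, ttlSeconds, and back are illustrative variables, not code from the PR):

// TTL (spoke, *metav1.Duration) <-> TTLSeconds (hub, *int32).
ttl := ptr.To(metav1.Duration{Duration: 24 * time.Hour})
ttlSeconds := clusterv1.ConvertToSeconds(ttl)    // *int32 holding 86400
back := clusterv1.ConvertFromSeconds(ttlSeconds) // *metav1.Duration holding 24h
// Sub-second precision is truncated on the way in, which is why the spoke
// fuzz funcs added in this PR only generate whole-second Durations.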
-func Convert_v1beta2_BootstrapToken_To_v1beta1_BootstrapToken(in *v1beta2.BootstrapToken, out *BootstrapToken, s conversion.Scope) error { - return autoConvert_v1beta2_BootstrapToken_To_v1beta1_BootstrapToken(in, out, s) -} - func autoConvert_v1beta1_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(in *BootstrapTokenDiscovery, out *v1beta2.BootstrapTokenDiscovery, s conversion.Scope) error { out.Token = in.Token out.APIServerEndpoint = in.APIServerEndpoint @@ -1101,7 +1091,17 @@ func Convert_v1beta2_ImageMeta_To_v1beta1_ImageMeta(in *v1beta2.ImageMeta, out * } func autoConvert_v1beta1_InitConfiguration_To_v1beta2_InitConfiguration(in *InitConfiguration, out *v1beta2.InitConfiguration, s conversion.Scope) error { - out.BootstrapTokens = *(*[]v1beta2.BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens)) + if in.BootstrapTokens != nil { + in, out := &in.BootstrapTokens, &out.BootstrapTokens + *out = make([]v1beta2.BootstrapToken, len(*in)) + for i := range *in { + if err := Convert_v1beta1_BootstrapToken_To_v1beta2_BootstrapToken(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.BootstrapTokens = nil + } if err := Convert_v1beta1_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { return err } @@ -1119,7 +1119,17 @@ func Convert_v1beta1_InitConfiguration_To_v1beta2_InitConfiguration(in *InitConf } func autoConvert_v1beta2_InitConfiguration_To_v1beta1_InitConfiguration(in *v1beta2.InitConfiguration, out *InitConfiguration, s conversion.Scope) error { - out.BootstrapTokens = *(*[]BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens)) + if in.BootstrapTokens != nil { + in, out := &in.BootstrapTokens, &out.BootstrapTokens + *out = make([]BootstrapToken, len(*in)) + for i := range *in { + if err := Convert_v1beta2_BootstrapToken_To_v1beta1_BootstrapToken(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.BootstrapTokens = nil + } if err := Convert_v1beta2_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { return err } diff --git a/api/bootstrap/kubeadm/v1beta2/conversion.go b/api/bootstrap/kubeadm/v1beta2/conversion.go index 187990045729..7fc32a065449 100644 --- a/api/bootstrap/kubeadm/v1beta2/conversion.go +++ b/api/bootstrap/kubeadm/v1beta2/conversion.go @@ -17,12 +17,7 @@ limitations under the License. package v1beta2 import ( - "math" "sort" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" ) func (*KubeadmConfig) Hub() {} @@ -65,27 +60,3 @@ func ConvertFromArgs(in []Arg) map[string]string { } return args } - -// ConvertToSeconds takes *metav1.Duration and returns a *int32. -// Durations longer than MaxInt32 are capped. -// NOTE: this is a util function intended only for usage in API conversions. -func ConvertToSeconds(in *metav1.Duration) *int32 { - if in == nil { - return nil - } - seconds := math.Trunc(in.Seconds()) - if seconds > math.MaxInt32 { - return ptr.To[int32](math.MaxInt32) - } - return ptr.To(int32(seconds)) -} - -// ConvertFromSeconds takes *int32 and returns a *metav1.Duration. -// Durations longer than MaxInt32 are capped. -// NOTE: this is a util function intended only for usage in API conversions. 
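The helper bodies removed in this hunk (ConvertToSeconds above, ConvertFromSeconds just below) now live in the core clusterv1 package; every call site in this diff already uses clusterv1.ConvertToSeconds and clusterv1.ConvertFromSeconds. Per the bodies removed here: nil maps to nil, fractional seconds are truncated, and durations above math.MaxInt32 seconds are capped. A hedged round-trip summary, not code from the PR:

func roundTrip(d *metav1.Duration) *metav1.Duration {
    return clusterv1.ConvertFromSeconds(clusterv1.ConvertToSeconds(d))
}
// roundTrip(nil)                          == nil
// roundTrip(90 * time.Second)             == 90s (whole seconds are lossless)
// roundTrip(90500 * time.Millisecond)     == 90s (sub-second part truncated)
// roundTrip((math.MaxInt32 + 1) * 1s)     == math.MaxInt32 seconds (capped)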
-func ConvertFromSeconds(in *int32) *metav1.Duration { - if in == nil { - return nil - } - return ptr.To(metav1.Duration{Duration: time.Duration(*in) * time.Second}) -} diff --git a/api/bootstrap/kubeadm/v1beta2/conversion_test.go b/api/bootstrap/kubeadm/v1beta2/conversion_test.go index 48d04dbaef89..a6ff629d3c3d 100644 --- a/api/bootstrap/kubeadm/v1beta2/conversion_test.go +++ b/api/bootstrap/kubeadm/v1beta2/conversion_test.go @@ -17,13 +17,9 @@ limitations under the License. package v1beta2 import ( - "math" "testing" - "time" . "github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" ) func TestConvertArgs(t *testing.T) { @@ -57,19 +53,3 @@ func TestConvertArgs(t *testing.T) { }, )) } - -func TestConvertSeconds(t *testing.T) { - g := NewWithT(t) - - seconds := ptr.To[int32](100) - duration := ConvertFromSeconds(seconds) - g.Expect(ConvertToSeconds(duration)).To(Equal(seconds)) - - seconds = nil - duration = ConvertFromSeconds(seconds) - g.Expect(ConvertToSeconds(duration)).To(Equal(seconds)) - - // Durations longer than MaxInt32 are capped. - duration = ptr.To(metav1.Duration{Duration: (math.MaxInt32 + 1) * time.Second}) - g.Expect(ConvertToSeconds(duration)).To(Equal(ptr.To[int32](math.MaxInt32))) -} diff --git a/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go b/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go index d420c61fed67..70ff7c8ca3b9 100644 --- a/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go +++ b/api/bootstrap/kubeadm/v1beta2/kubeadm_types.go @@ -377,12 +377,13 @@ type BootstrapToken struct { // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=512 Description string `json:"description,omitempty"` - // ttl defines the time to live for this token. Defaults to 24h. - // Expires and TTL are mutually exclusive. + // ttlSeconds defines the time to live for this token. Defaults to 24h. + // Expires and ttlSeconds are mutually exclusive. // +optional - TTL *metav1.Duration `json:"ttl,omitempty"` + // +kubebuilder:validation:Minimum=0 + TTLSeconds *int32 `json:"ttlSeconds,omitempty"` // expires specifies the timestamp when this token expires. Defaults to being set - // dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. + // dynamically at runtime based on the ttlSeconds. Expires and ttlSeconds are mutually exclusive. // +optional Expires *metav1.Time `json:"expires,omitempty"` // usages describes the ways in which this token can be used. 
Can by default be used diff --git a/api/bootstrap/kubeadm/v1beta2/zz_generated.deepcopy.go b/api/bootstrap/kubeadm/v1beta2/zz_generated.deepcopy.go index 5fab4436e7ad..c7972dd86100 100644 --- a/api/bootstrap/kubeadm/v1beta2/zz_generated.deepcopy.go +++ b/api/bootstrap/kubeadm/v1beta2/zz_generated.deepcopy.go @@ -86,9 +86,9 @@ func (in *BootstrapToken) DeepCopyInto(out *BootstrapToken) { *out = new(BootstrapTokenString) **out = **in } - if in.TTL != nil { - in, out := &in.TTL, &out.TTL - *out = new(metav1.Duration) + if in.TTLSeconds != nil { + in, out := &in.TTLSeconds, &out.TTLSeconds + *out = new(int32) **out = **in } if in.Expires != nil { diff --git a/api/controlplane/kubeadm/v1beta1/conversion.go b/api/controlplane/kubeadm/v1beta1/conversion.go index 8f92b159622c..152ff467772c 100644 --- a/api/controlplane/kubeadm/v1beta1/conversion.go +++ b/api/controlplane/kubeadm/v1beta1/conversion.go @@ -19,6 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apimachineryconversion "k8s.io/apimachinery/pkg/conversion" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/conversion" bootstrapv1beta1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" @@ -187,6 +188,63 @@ func Convert_v1beta1_KubeadmControlPlaneStatus_To_v1beta2_KubeadmControlPlaneSta return nil } +func Convert_v1beta1_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate(in *KubeadmControlPlaneMachineTemplate, out *controlplanev1.KubeadmControlPlaneMachineTemplate, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta1_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate(in, out, s); err != nil { + return err + } + out.NodeDrainTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDrainTimeout) + out.NodeVolumeDetachTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeVolumeDetachTimeout) + out.NodeDeletionTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDeletionTimeout) + return nil +} +func Convert_v1beta2_KubeadmControlPlaneMachineTemplate_To_v1beta1_KubeadmControlPlaneMachineTemplate(in *controlplanev1.KubeadmControlPlaneMachineTemplate, out *KubeadmControlPlaneMachineTemplate, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta2_KubeadmControlPlaneMachineTemplate_To_v1beta1_KubeadmControlPlaneMachineTemplate(in, out, s); err != nil { + return err + } + out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.NodeDrainTimeoutSeconds) + out.NodeVolumeDetachTimeout = clusterv1.ConvertFromSeconds(in.NodeVolumeDetachTimeoutSeconds) + out.NodeDeletionTimeout = clusterv1.ConvertFromSeconds(in.NodeDeletionTimeoutSeconds) + return nil +} + +func Convert_v1beta1_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta2_KubeadmControlPlaneTemplateMachineTemplate(in *KubeadmControlPlaneTemplateMachineTemplate, out *controlplanev1.KubeadmControlPlaneTemplateMachineTemplate, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta1_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta2_KubeadmControlPlaneTemplateMachineTemplate(in, out, s); err != nil { + return err + } + out.NodeDrainTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDrainTimeout) + out.NodeVolumeDetachTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeVolumeDetachTimeout) + out.NodeDeletionTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDeletionTimeout) + return nil +} + +func Convert_v1beta2_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta1_KubeadmControlPlaneTemplateMachineTemplate(in 
*controlplanev1.KubeadmControlPlaneTemplateMachineTemplate, out *KubeadmControlPlaneTemplateMachineTemplate, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta2_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta1_KubeadmControlPlaneTemplateMachineTemplate(in, out, s); err != nil { + return err + } + out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.NodeDrainTimeoutSeconds) + out.NodeVolumeDetachTimeout = clusterv1.ConvertFromSeconds(in.NodeVolumeDetachTimeoutSeconds) + out.NodeDeletionTimeout = clusterv1.ConvertFromSeconds(in.NodeDeletionTimeoutSeconds) + return nil +} + +func Convert_v1beta1_RemediationStrategy_To_v1beta2_RemediationStrategy(in *RemediationStrategy, out *controlplanev1.RemediationStrategy, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta1_RemediationStrategy_To_v1beta2_RemediationStrategy(in, out, s); err != nil { + return err + } + out.MinHealthyPeriodSeconds = clusterv1.ConvertToSeconds(in.MinHealthyPeriod) + out.RetryPeriodSeconds = ptr.Deref(clusterv1.ConvertToSeconds(&in.RetryPeriod), 0) + return nil +} + +func Convert_v1beta2_RemediationStrategy_To_v1beta1_RemediationStrategy(in *controlplanev1.RemediationStrategy, out *RemediationStrategy, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta2_RemediationStrategy_To_v1beta1_RemediationStrategy(in, out, s); err != nil { + return err + } + out.MinHealthyPeriod = clusterv1.ConvertFromSeconds(in.MinHealthyPeriodSeconds) + out.RetryPeriod = ptr.Deref(clusterv1.ConvertFromSeconds(&in.RetryPeriodSeconds), metav1.Duration{}) + return nil +} + // Implement local conversion func because conversion-gen is not aware of conversion func in other packages (see https://github.com/kubernetes/code-generator/issues/94) func Convert_v1beta1_ObjectMeta_To_v1beta2_ObjectMeta(in *clusterv1beta1.ObjectMeta, out *clusterv1.ObjectMeta, s apimachineryconversion.Scope) error { diff --git a/api/controlplane/kubeadm/v1beta1/conversion_test.go b/api/controlplane/kubeadm/v1beta1/conversion_test.go index 61367457f1ea..d88d18fd15a7 100644 --- a/api/controlplane/kubeadm/v1beta1/conversion_test.go +++ b/api/controlplane/kubeadm/v1beta1/conversion_test.go @@ -66,6 +66,9 @@ func KubeadmControlPlaneFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{ spokeAPIServer, spokeDiscovery, hubKubeadmConfigSpec, + spokeRemediationStrategy, + spokeKubeadmControlPlaneMachineTemplate, + spokeBootstrapToken, } } @@ -78,6 +81,9 @@ func KubeadmControlPlaneTemplateFuzzFuncs(_ runtimeserializer.CodecFactory) []in spokeAPIServer, spokeDiscovery, hubKubeadmConfigSpec, + spokeRemediationStrategy, + spokeKubeadmControlPlaneTemplateMachineTemplate, + spokeBootstrapToken, } } @@ -181,3 +187,48 @@ func spokeClusterConfiguration(in *bootstrapv1beta1.ClusterConfiguration, c rand in.ControlPlaneEndpoint = "" in.ClusterName = "" } + +func spokeRemediationStrategy(in *RemediationStrategy, c randfill.Continue) { + c.FillNoCustom(in) + + if in.MinHealthyPeriod != nil { + in.MinHealthyPeriod = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + in.RetryPeriod = metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second} +} + +func spokeKubeadmControlPlaneMachineTemplate(in *KubeadmControlPlaneMachineTemplate, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeDrainTimeout != nil { + in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeVolumeDetachTimeout != nil { + 
in.NodeVolumeDetachTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeDeletionTimeout != nil { + in.NodeDeletionTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + +func spokeKubeadmControlPlaneTemplateMachineTemplate(in *KubeadmControlPlaneTemplateMachineTemplate, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeDrainTimeout != nil { + in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeVolumeDetachTimeout != nil { + in.NodeVolumeDetachTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeDeletionTimeout != nil { + in.NodeDeletionTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + +func spokeBootstrapToken(in *bootstrapv1beta1.BootstrapToken, c randfill.Continue) { + c.FillNoCustom(in) + + if in.TTL != nil { + in.TTL = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} diff --git a/api/controlplane/kubeadm/v1beta1/zz_generated.conversion.go b/api/controlplane/kubeadm/v1beta1/zz_generated.conversion.go index 272a6cbd53d4..989532c4929f 100644 --- a/api/controlplane/kubeadm/v1beta1/zz_generated.conversion.go +++ b/api/controlplane/kubeadm/v1beta1/zz_generated.conversion.go @@ -62,16 +62,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*KubeadmControlPlaneMachineTemplate)(nil), (*v1beta2.KubeadmControlPlaneMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate(a.(*KubeadmControlPlaneMachineTemplate), b.(*v1beta2.KubeadmControlPlaneMachineTemplate), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.KubeadmControlPlaneMachineTemplate)(nil), (*KubeadmControlPlaneMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_KubeadmControlPlaneMachineTemplate_To_v1beta1_KubeadmControlPlaneMachineTemplate(a.(*v1beta2.KubeadmControlPlaneMachineTemplate), b.(*KubeadmControlPlaneMachineTemplate), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*KubeadmControlPlaneSpec)(nil), (*v1beta2.KubeadmControlPlaneSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_KubeadmControlPlaneSpec_To_v1beta2_KubeadmControlPlaneSpec(a.(*KubeadmControlPlaneSpec), b.(*v1beta2.KubeadmControlPlaneSpec), scope) }); err != nil { @@ -102,16 +92,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*KubeadmControlPlaneTemplateMachineTemplate)(nil), (*v1beta2.KubeadmControlPlaneTemplateMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta2_KubeadmControlPlaneTemplateMachineTemplate(a.(*KubeadmControlPlaneTemplateMachineTemplate), b.(*v1beta2.KubeadmControlPlaneTemplateMachineTemplate), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.KubeadmControlPlaneTemplateMachineTemplate)(nil), (*KubeadmControlPlaneTemplateMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) 
error { - return Convert_v1beta2_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta1_KubeadmControlPlaneTemplateMachineTemplate(a.(*v1beta2.KubeadmControlPlaneTemplateMachineTemplate), b.(*KubeadmControlPlaneTemplateMachineTemplate), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*KubeadmControlPlaneTemplateResource)(nil), (*v1beta2.KubeadmControlPlaneTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_KubeadmControlPlaneTemplateResource_To_v1beta2_KubeadmControlPlaneTemplateResource(a.(*KubeadmControlPlaneTemplateResource), b.(*v1beta2.KubeadmControlPlaneTemplateResource), scope) }); err != nil { @@ -162,16 +142,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*RemediationStrategy)(nil), (*v1beta2.RemediationStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_RemediationStrategy_To_v1beta2_RemediationStrategy(a.(*RemediationStrategy), b.(*v1beta2.RemediationStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.RemediationStrategy)(nil), (*RemediationStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RemediationStrategy_To_v1beta1_RemediationStrategy(a.(*v1beta2.RemediationStrategy), b.(*RemediationStrategy), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*RollingUpdate)(nil), (*v1beta2.RollingUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_RollingUpdate_To_v1beta2_RollingUpdate(a.(*RollingUpdate), b.(*v1beta2.RollingUpdate), scope) }); err != nil { @@ -217,31 +187,61 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*KubeadmControlPlaneMachineTemplate)(nil), (*v1beta2.KubeadmControlPlaneMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate(a.(*KubeadmControlPlaneMachineTemplate), b.(*v1beta2.KubeadmControlPlaneMachineTemplate), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*KubeadmControlPlaneStatus)(nil), (*v1beta2.KubeadmControlPlaneStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_KubeadmControlPlaneStatus_To_v1beta2_KubeadmControlPlaneStatus(a.(*KubeadmControlPlaneStatus), b.(*v1beta2.KubeadmControlPlaneStatus), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*KubeadmControlPlaneTemplateMachineTemplate)(nil), (*v1beta2.KubeadmControlPlaneTemplateMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta2_KubeadmControlPlaneTemplateMachineTemplate(a.(*KubeadmControlPlaneTemplateMachineTemplate), b.(*v1beta2.KubeadmControlPlaneTemplateMachineTemplate), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*corev1beta1.ObjectMeta)(nil), (*corev1beta2.ObjectMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ObjectMeta_To_v1beta2_ObjectMeta(a.(*corev1beta1.ObjectMeta), b.(*corev1beta2.ObjectMeta), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*RemediationStrategy)(nil), (*v1beta2.RemediationStrategy)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1beta1_RemediationStrategy_To_v1beta2_RemediationStrategy(a.(*RemediationStrategy), b.(*v1beta2.RemediationStrategy), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*kubeadmv1beta2.KubeadmConfigSpec)(nil), (*kubeadmv1beta1.KubeadmConfigSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_KubeadmConfigSpec_To_v1beta1_KubeadmConfigSpec(a.(*kubeadmv1beta2.KubeadmConfigSpec), b.(*kubeadmv1beta1.KubeadmConfigSpec), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.KubeadmControlPlaneMachineTemplate)(nil), (*KubeadmControlPlaneMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_KubeadmControlPlaneMachineTemplate_To_v1beta1_KubeadmControlPlaneMachineTemplate(a.(*v1beta2.KubeadmControlPlaneMachineTemplate), b.(*KubeadmControlPlaneMachineTemplate), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.KubeadmControlPlaneStatus)(nil), (*KubeadmControlPlaneStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_KubeadmControlPlaneStatus_To_v1beta1_KubeadmControlPlaneStatus(a.(*v1beta2.KubeadmControlPlaneStatus), b.(*KubeadmControlPlaneStatus), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.KubeadmControlPlaneTemplateMachineTemplate)(nil), (*KubeadmControlPlaneTemplateMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta1_KubeadmControlPlaneTemplateMachineTemplate(a.(*v1beta2.KubeadmControlPlaneTemplateMachineTemplate), b.(*KubeadmControlPlaneTemplateMachineTemplate), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*corev1beta2.ObjectMeta)(nil), (*corev1beta1.ObjectMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_ObjectMeta_To_v1beta1_ObjectMeta(a.(*corev1beta2.ObjectMeta), b.(*corev1beta1.ObjectMeta), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.RemediationStrategy)(nil), (*RemediationStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_RemediationStrategy_To_v1beta1_RemediationStrategy(a.(*v1beta2.RemediationStrategy), b.(*RemediationStrategy), scope) + }); err != nil { + return err + } return nil } @@ -325,34 +325,24 @@ func autoConvert_v1beta1_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmCo } out.InfrastructureRef = in.InfrastructureRef out.ReadinessGates = *(*[]corev1beta2.MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta1_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate is an autogenerated conversion function. 
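This re-registration is the recurring pattern of the whole PR: once a field has no peer in the other apiVersion, conversion-gen stops emitting the public Convert_* wrapper (leaving only autoConvert_* plus a WARNING comment), and the hand-written wrapper from conversion.go is registered with AddConversionFunc instead of AddGeneratedConversionFunc. Schematically, with T and Foo as placeholder names rather than real types from the PR:

func Convert_v1beta1_T_To_v1beta2_T(in *T, out *v1beta2.T, s conversion.Scope) error {
    // Generated half: copies every field that still has a peer.
    if err := autoConvert_v1beta1_T_To_v1beta2_T(in, out, s); err != nil {
        return err
    }
    // Manual half: bridges the field flagged with
    // "WARNING: in.Foo requires manual conversion: does not exist in peer-type".
    out.FooSeconds = clusterv1.ConvertToSeconds(in.Foo)
    return nil
}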
-func Convert_v1beta1_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate(in *KubeadmControlPlaneMachineTemplate, out *v1beta2.KubeadmControlPlaneMachineTemplate, s conversion.Scope) error { - return autoConvert_v1beta1_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate(in, out, s) -} - func autoConvert_v1beta2_KubeadmControlPlaneMachineTemplate_To_v1beta1_KubeadmControlPlaneMachineTemplate(in *v1beta2.KubeadmControlPlaneMachineTemplate, out *KubeadmControlPlaneMachineTemplate, s conversion.Scope) error { if err := Convert_v1beta2_ObjectMeta_To_v1beta1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } out.InfrastructureRef = in.InfrastructureRef out.ReadinessGates = *(*[]corev1beta1.MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta2_KubeadmControlPlaneMachineTemplate_To_v1beta1_KubeadmControlPlaneMachineTemplate is an autogenerated conversion function. -func Convert_v1beta2_KubeadmControlPlaneMachineTemplate_To_v1beta1_KubeadmControlPlaneMachineTemplate(in *v1beta2.KubeadmControlPlaneMachineTemplate, out *KubeadmControlPlaneMachineTemplate, s conversion.Scope) error { - return autoConvert_v1beta2_KubeadmControlPlaneMachineTemplate_To_v1beta1_KubeadmControlPlaneMachineTemplate(in, out, s) -} - func autoConvert_v1beta1_KubeadmControlPlaneSpec_To_v1beta2_KubeadmControlPlaneSpec(in *KubeadmControlPlaneSpec, out *v1beta2.KubeadmControlPlaneSpec, s conversion.Scope) error { out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) out.Version = in.Version @@ -365,7 +355,15 @@ func autoConvert_v1beta1_KubeadmControlPlaneSpec_To_v1beta2_KubeadmControlPlaneS out.RolloutBefore = (*v1beta2.RolloutBefore)(unsafe.Pointer(in.RolloutBefore)) out.RolloutAfter = (*v1.Time)(unsafe.Pointer(in.RolloutAfter)) out.RolloutStrategy = (*v1beta2.RolloutStrategy)(unsafe.Pointer(in.RolloutStrategy)) - out.RemediationStrategy = (*v1beta2.RemediationStrategy)(unsafe.Pointer(in.RemediationStrategy)) + if in.RemediationStrategy != nil { + in, out := &in.RemediationStrategy, &out.RemediationStrategy + *out = new(v1beta2.RemediationStrategy) + if err := Convert_v1beta1_RemediationStrategy_To_v1beta2_RemediationStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.RemediationStrategy = nil + } out.MachineNamingStrategy = (*v1beta2.MachineNamingStrategy)(unsafe.Pointer(in.MachineNamingStrategy)) return nil } @@ -387,7 +385,15 @@ func autoConvert_v1beta2_KubeadmControlPlaneSpec_To_v1beta1_KubeadmControlPlaneS out.RolloutBefore = (*RolloutBefore)(unsafe.Pointer(in.RolloutBefore)) out.RolloutAfter = (*v1.Time)(unsafe.Pointer(in.RolloutAfter)) out.RolloutStrategy = (*RolloutStrategy)(unsafe.Pointer(in.RolloutStrategy)) - out.RemediationStrategy = (*RemediationStrategy)(unsafe.Pointer(in.RemediationStrategy)) + if in.RemediationStrategy != nil { + in, out := &in.RemediationStrategy, &out.RemediationStrategy + *out = new(RemediationStrategy) + if err := 
Convert_v1beta2_RemediationStrategy_To_v1beta1_RemediationStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.RemediationStrategy = nil + } out.MachineNamingStrategy = (*MachineNamingStrategy)(unsafe.Pointer(in.MachineNamingStrategy)) return nil } @@ -530,32 +536,22 @@ func autoConvert_v1beta1_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta2_K if err := Convert_v1beta1_ObjectMeta_To_v1beta2_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta1_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta2_KubeadmControlPlaneTemplateMachineTemplate is an autogenerated conversion function. -func Convert_v1beta1_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta2_KubeadmControlPlaneTemplateMachineTemplate(in *KubeadmControlPlaneTemplateMachineTemplate, out *v1beta2.KubeadmControlPlaneTemplateMachineTemplate, s conversion.Scope) error { - return autoConvert_v1beta1_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta2_KubeadmControlPlaneTemplateMachineTemplate(in, out, s) -} - func autoConvert_v1beta2_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta1_KubeadmControlPlaneTemplateMachineTemplate(in *v1beta2.KubeadmControlPlaneTemplateMachineTemplate, out *KubeadmControlPlaneTemplateMachineTemplate, s conversion.Scope) error { if err := Convert_v1beta2_ObjectMeta_To_v1beta1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta2_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta1_KubeadmControlPlaneTemplateMachineTemplate is an autogenerated conversion function. 
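Note also the shift away from unsafe.Pointer in these hunks: a cast like (*v1beta2.RemediationStrategy)(unsafe.Pointer(in.RemediationStrategy)) is only valid while hub and spoke structs share a memory layout, and the Duration-to-int32 change breaks that. Conversion-gen therefore emits an explicit allocate-and-convert, roughly (simplified from the generated code above):

if in.RemediationStrategy != nil {
    out.RemediationStrategy = new(v1beta2.RemediationStrategy)
    if err := Convert_v1beta1_RemediationStrategy_To_v1beta2_RemediationStrategy(in.RemediationStrategy, out.RemediationStrategy, s); err != nil {
        return err
    }
} else {
    out.RemediationStrategy = nil
}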
-func Convert_v1beta2_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta1_KubeadmControlPlaneTemplateMachineTemplate(in *v1beta2.KubeadmControlPlaneTemplateMachineTemplate, out *KubeadmControlPlaneTemplateMachineTemplate, s conversion.Scope) error { - return autoConvert_v1beta2_KubeadmControlPlaneTemplateMachineTemplate_To_v1beta1_KubeadmControlPlaneTemplateMachineTemplate(in, out, s) -} - func autoConvert_v1beta1_KubeadmControlPlaneTemplateResource_To_v1beta2_KubeadmControlPlaneTemplateResource(in *KubeadmControlPlaneTemplateResource, out *v1beta2.KubeadmControlPlaneTemplateResource, s conversion.Scope) error { if err := Convert_v1beta1_ObjectMeta_To_v1beta2_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err @@ -602,7 +598,15 @@ func autoConvert_v1beta1_KubeadmControlPlaneTemplateResourceSpec_To_v1beta2_Kube out.RolloutBefore = (*v1beta2.RolloutBefore)(unsafe.Pointer(in.RolloutBefore)) out.RolloutAfter = (*v1.Time)(unsafe.Pointer(in.RolloutAfter)) out.RolloutStrategy = (*v1beta2.RolloutStrategy)(unsafe.Pointer(in.RolloutStrategy)) - out.RemediationStrategy = (*v1beta2.RemediationStrategy)(unsafe.Pointer(in.RemediationStrategy)) + if in.RemediationStrategy != nil { + in, out := &in.RemediationStrategy, &out.RemediationStrategy + *out = new(v1beta2.RemediationStrategy) + if err := Convert_v1beta1_RemediationStrategy_To_v1beta2_RemediationStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.RemediationStrategy = nil + } out.MachineNamingStrategy = (*v1beta2.MachineNamingStrategy)(unsafe.Pointer(in.MachineNamingStrategy)) return nil } @@ -628,7 +632,15 @@ func autoConvert_v1beta2_KubeadmControlPlaneTemplateResourceSpec_To_v1beta1_Kube out.RolloutBefore = (*RolloutBefore)(unsafe.Pointer(in.RolloutBefore)) out.RolloutAfter = (*v1.Time)(unsafe.Pointer(in.RolloutAfter)) out.RolloutStrategy = (*RolloutStrategy)(unsafe.Pointer(in.RolloutStrategy)) - out.RemediationStrategy = (*RemediationStrategy)(unsafe.Pointer(in.RemediationStrategy)) + if in.RemediationStrategy != nil { + in, out := &in.RemediationStrategy, &out.RemediationStrategy + *out = new(RemediationStrategy) + if err := Convert_v1beta2_RemediationStrategy_To_v1beta1_RemediationStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.RemediationStrategy = nil + } out.MachineNamingStrategy = (*MachineNamingStrategy)(unsafe.Pointer(in.MachineNamingStrategy)) return nil } @@ -708,28 +720,18 @@ func Convert_v1beta2_MachineNamingStrategy_To_v1beta1_MachineNamingStrategy(in * func autoConvert_v1beta1_RemediationStrategy_To_v1beta2_RemediationStrategy(in *RemediationStrategy, out *v1beta2.RemediationStrategy, s conversion.Scope) error { out.MaxRetry = (*int32)(unsafe.Pointer(in.MaxRetry)) - out.RetryPeriod = in.RetryPeriod - out.MinHealthyPeriod = (*v1.Duration)(unsafe.Pointer(in.MinHealthyPeriod)) + // WARNING: in.RetryPeriod requires manual conversion: does not exist in peer-type + // WARNING: in.MinHealthyPeriod requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta1_RemediationStrategy_To_v1beta2_RemediationStrategy is an autogenerated conversion function. 
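RemediationStrategy is also the one spot where the converted field is a value rather than a pointer: spoke RetryPeriod is a plain metav1.Duration and hub RetryPeriodSeconds a plain int32. The hand-written wrappers in conversion.go (shown earlier in this diff) therefore bridge through pointers and unwrap with a zero default:

// Hub-ward:
out.RetryPeriodSeconds = ptr.Deref(clusterv1.ConvertToSeconds(&in.RetryPeriod), 0)
// Spoke-ward:
out.RetryPeriod = ptr.Deref(clusterv1.ConvertFromSeconds(&in.RetryPeriodSeconds), metav1.Duration{})

The UnhealthyCondition.Timeout conversion in api/core/v1beta1/conversion.go later in this diff uses the same ptr.Deref trick for its non-pointer TimeoutSeconds field.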
-func Convert_v1beta1_RemediationStrategy_To_v1beta2_RemediationStrategy(in *RemediationStrategy, out *v1beta2.RemediationStrategy, s conversion.Scope) error { - return autoConvert_v1beta1_RemediationStrategy_To_v1beta2_RemediationStrategy(in, out, s) -} - func autoConvert_v1beta2_RemediationStrategy_To_v1beta1_RemediationStrategy(in *v1beta2.RemediationStrategy, out *RemediationStrategy, s conversion.Scope) error { out.MaxRetry = (*int32)(unsafe.Pointer(in.MaxRetry)) - out.RetryPeriod = in.RetryPeriod - out.MinHealthyPeriod = (*v1.Duration)(unsafe.Pointer(in.MinHealthyPeriod)) + // WARNING: in.RetryPeriodSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.MinHealthyPeriodSeconds requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta2_RemediationStrategy_To_v1beta1_RemediationStrategy is an autogenerated conversion function. -func Convert_v1beta2_RemediationStrategy_To_v1beta1_RemediationStrategy(in *v1beta2.RemediationStrategy, out *RemediationStrategy, s conversion.Scope) error { - return autoConvert_v1beta2_RemediationStrategy_To_v1beta1_RemediationStrategy(in, out, s) -} - func autoConvert_v1beta1_RollingUpdate_To_v1beta2_RollingUpdate(in *RollingUpdate, out *v1beta2.RollingUpdate, s conversion.Scope) error { out.MaxSurge = (*intstr.IntOrString)(unsafe.Pointer(in.MaxSurge)) return nil diff --git a/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go b/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go index ff826c8bda02..ad2534fa08c4 100644 --- a/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go +++ b/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go @@ -17,8 +17,6 @@ limitations under the License. package v1beta2 import ( - "time" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -72,9 +70,9 @@ const ( // ensure it runs last (thus ensuring that kubelet is still working while other pre-terminate hooks run). PreTerminateHookCleanupAnnotation = clusterv1.PreTerminateDeleteHookAnnotationPrefix + "/kcp-cleanup" - // DefaultMinHealthyPeriod defines the default minimum period before we consider a remediation on a + // DefaultMinHealthyPeriodSeconds defines the default minimum period before we consider a remediation on a // machine unrelated from the previous remediation. - DefaultMinHealthyPeriod = 1 * time.Hour + DefaultMinHealthyPeriodSeconds = int32(60 * 60) ) // KubeadmControlPlane's Available condition and corresponding reasons. @@ -505,22 +503,25 @@ type KubeadmControlPlaneMachineTemplate struct { // +kubebuilder:validation:MaxItems=32 ReadinessGates []clusterv1.MachineReadinessGate `json:"readinessGates,omitempty"` - // nodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node + // nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a controlplane node // The default value is 0, meaning that the node can be drained without any time limitations. 
- // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + // NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` // +optional - NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDrainTimeoutSeconds *int32 `json:"nodeDrainTimeoutSeconds,omitempty"` - // nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + // nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes // to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. // +optional - NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeVolumeDetachTimeoutSeconds *int32 `json:"nodeVolumeDetachTimeoutSeconds,omitempty"` - // nodeDeletionTimeout defines how long the machine controller will attempt to delete the Node that the Machine + // nodeDeletionTimeoutSeconds defines how long the machine controller will attempt to delete the Node that the Machine // hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. // If no value is provided, the default value for this property of the Machine resource will be used. // +optional - NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDeletionTimeoutSeconds *int32 `json:"nodeDeletionTimeoutSeconds,omitempty"` } // RolloutBefore describes when a rollout should be performed on the KCP machines. @@ -569,37 +570,39 @@ type RemediationStrategy struct { // remediated; such operation is considered a retry, remediation-retry #1. // If M1-2 (replacement of M1-1) becomes unhealthy, remediation-retry #2 will happen, etc. // - // A retry could happen only after RetryPeriod from the previous retry. - // If a machine is marked as unhealthy after MinHealthyPeriod from the previous remediation expired, + // A retry could happen only after retryPeriodSeconds from the previous retry. + // If a machine is marked as unhealthy after minHealthyPeriodSeconds from the previous remediation expired, // this is not considered a retry anymore because the new issue is assumed unrelated from the previous one. // // If not set, the remediation will be retried infinitely. // +optional MaxRetry *int32 `json:"maxRetry,omitempty"` - // retryPeriod is the duration that KCP should wait before remediating a machine being created as a replacement + // retryPeriodSeconds is the duration that KCP should wait before remediating a machine being created as a replacement // for an unhealthy machine (a retry). // // If not set, a retry will happen immediately. // +optional - RetryPeriod metav1.Duration `json:"retryPeriod,omitempty"` + // +kubebuilder:validation:Minimum=0 + RetryPeriodSeconds int32 `json:"retryPeriodSeconds,omitempty"` - // minHealthyPeriod defines the duration after which KCP will consider any failure to a machine unrelated + // minHealthyPeriodSeconds defines the duration after which KCP will consider any failure to a machine unrelated // from the previous one. In this case the remediation is not considered a retry anymore, and thus the retry - // counter restarts from 0.
For example, assuming minHealthyPeriodSeconds is set to 1h (default) // // M1 become unhealthy; remediation happens, and M1-1 is created as a replacement. // If M1-1 (replacement of M1) has problems within the 1hr after the creation, also // this machine will be remediated and this operation is considered a retry - a problem related // to the original issue happened to M1 -. // - // If instead the problem on M1-1 is happening after MinHealthyPeriod expired, e.g. four days after + // If instead the problem on M1-1 is happening after minHealthyPeriodSeconds expired, e.g. four days after // m1-1 has been created as a remediation of M1, the problem on M1-1 is considered unrelated to // the original issue happened to M1. // // If not set, this value is defaulted to 1h. // +optional - MinHealthyPeriod *metav1.Duration `json:"minHealthyPeriod,omitempty"` + // +kubebuilder:validation:Minimum=0 + MinHealthyPeriodSeconds *int32 `json:"minHealthyPeriodSeconds,omitempty"` } // MachineNamingStrategy allows changing the naming pattern used when creating Machines. diff --git a/api/controlplane/kubeadm/v1beta2/kubeadmcontrolplanetemplate_types.go b/api/controlplane/kubeadm/v1beta2/kubeadmcontrolplanetemplate_types.go index e549f1615e00..11fa73850f88 100644 --- a/api/controlplane/kubeadm/v1beta2/kubeadmcontrolplanetemplate_types.go +++ b/api/controlplane/kubeadm/v1beta2/kubeadmcontrolplanetemplate_types.go @@ -135,20 +135,23 @@ type KubeadmControlPlaneTemplateMachineTemplate struct { // +optional ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` - // nodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node + // nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a controlplane node // The default value is 0, meaning that the node can be drained without any time limitations. - // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + // NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` // +optional - NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDrainTimeoutSeconds *int32 `json:"nodeDrainTimeoutSeconds,omitempty"` - // nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + // nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes // to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. // +optional - NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeVolumeDetachTimeoutSeconds *int32 `json:"nodeVolumeDetachTimeoutSeconds,omitempty"` - // nodeDeletionTimeout defines how long the machine controller will attempt to delete the Node that the Machine + // nodeDeletionTimeoutSeconds defines how long the machine controller will attempt to delete the Node that the Machine // hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. // If no value is provided, the default value for this property of the Machine resource will be used. 
// +optional - NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDeletionTimeoutSeconds *int32 `json:"nodeDeletionTimeoutSeconds,omitempty"` } diff --git a/api/controlplane/kubeadm/v1beta2/zz_generated.deepcopy.go b/api/controlplane/kubeadm/v1beta2/zz_generated.deepcopy.go index 04565651443e..b4ecd7cc8659 100644 --- a/api/controlplane/kubeadm/v1beta2/zz_generated.deepcopy.go +++ b/api/controlplane/kubeadm/v1beta2/zz_generated.deepcopy.go @@ -131,19 +131,19 @@ func (in *KubeadmControlPlaneMachineTemplate) DeepCopyInto(out *KubeadmControlPl *out = make([]corev1beta2.MachineReadinessGate, len(*in)) copy(*out, *in) } - if in.NodeDrainTimeout != nil { - in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout - *out = new(v1.Duration) + if in.NodeDrainTimeoutSeconds != nil { + in, out := &in.NodeDrainTimeoutSeconds, &out.NodeDrainTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeVolumeDetachTimeout != nil { - in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout - *out = new(v1.Duration) + if in.NodeVolumeDetachTimeoutSeconds != nil { + in, out := &in.NodeVolumeDetachTimeoutSeconds, &out.NodeVolumeDetachTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeDeletionTimeout != nil { - in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout - *out = new(v1.Duration) + if in.NodeDeletionTimeoutSeconds != nil { + in, out := &in.NodeDeletionTimeoutSeconds, &out.NodeDeletionTimeoutSeconds + *out = new(int32) **out = **in } } @@ -328,19 +328,19 @@ func (in *KubeadmControlPlaneTemplateList) DeepCopyObject() runtime.Object { func (in *KubeadmControlPlaneTemplateMachineTemplate) DeepCopyInto(out *KubeadmControlPlaneTemplateMachineTemplate) { *out = *in in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.NodeDrainTimeout != nil { - in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout - *out = new(v1.Duration) + if in.NodeDrainTimeoutSeconds != nil { + in, out := &in.NodeDrainTimeoutSeconds, &out.NodeDrainTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeVolumeDetachTimeout != nil { - in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout - *out = new(v1.Duration) + if in.NodeVolumeDetachTimeoutSeconds != nil { + in, out := &in.NodeVolumeDetachTimeoutSeconds, &out.NodeVolumeDetachTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeDeletionTimeout != nil { - in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout - *out = new(v1.Duration) + if in.NodeDeletionTimeoutSeconds != nil { + in, out := &in.NodeDeletionTimeoutSeconds, &out.NodeDeletionTimeoutSeconds + *out = new(int32) **out = **in } } @@ -499,10 +499,9 @@ func (in *RemediationStrategy) DeepCopyInto(out *RemediationStrategy) { *out = new(int32) **out = **in } - out.RetryPeriod = in.RetryPeriod - if in.MinHealthyPeriod != nil { - in, out := &in.MinHealthyPeriod, &out.MinHealthyPeriod - *out = new(v1.Duration) + if in.MinHealthyPeriodSeconds != nil { + in, out := &in.MinHealthyPeriodSeconds, &out.MinHealthyPeriodSeconds + *out = new(int32) **out = **in } } diff --git a/api/core/v1beta1/conversion.go b/api/core/v1beta1/conversion.go index f298142c871f..3cd717bad207 100644 --- a/api/core/v1beta1/conversion.go +++ b/api/core/v1beta1/conversion.go @@ -208,11 +208,12 @@ func Convert_v1beta1_MachineHealthCheckClass_To_v1beta2_MachineHealthCheckClass( for _, c := range in.UnhealthyConditions { out.UnhealthyNodeConditions = append(out.UnhealthyNodeConditions, clusterv1.UnhealthyNodeCondition{ - Type: c.Type, - 
Status: c.Status, - Timeout: c.Timeout, + Type: c.Type, + Status: c.Status, + TimeoutSeconds: ptr.Deref(clusterv1.ConvertToSeconds(&c.Timeout), 0), }) } + out.NodeStartupTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeStartupTimeout) return nil } @@ -226,9 +227,10 @@ func Convert_v1beta2_MachineHealthCheckClass_To_v1beta1_MachineHealthCheckClass( out.UnhealthyConditions = append(out.UnhealthyConditions, UnhealthyCondition{ Type: c.Type, Status: c.Status, - Timeout: c.Timeout, + Timeout: ptr.Deref(clusterv1.ConvertFromSeconds(&c.TimeoutSeconds), metav1.Duration{}), }) } + out.NodeStartupTimeout = clusterv1.ConvertFromSeconds(in.NodeStartupTimeoutSeconds) return nil } @@ -241,6 +243,136 @@ func Convert_v1beta1_LocalObjectTemplate_To_v1beta2_InfrastructureClass(in *Loca return autoConvert_v1beta1_LocalObjectTemplate_To_v1beta2_LocalObjectTemplate(in, &out.LocalObjectTemplate, s) } +func Convert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(in *ControlPlaneClass, out *clusterv1.ControlPlaneClass, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(in, out, s); err != nil { + return err + } + out.NodeDrainTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDrainTimeout) + out.NodeVolumeDetachTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeVolumeDetachTimeout) + out.NodeDeletionTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDeletionTimeout) + return nil +} + +func Convert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(in *clusterv1.ControlPlaneClass, out *ControlPlaneClass, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(in, out, s); err != nil { + return err + } + out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.NodeDrainTimeoutSeconds) + out.NodeVolumeDetachTimeout = clusterv1.ConvertFromSeconds(in.NodeVolumeDetachTimeoutSeconds) + out.NodeDeletionTimeout = clusterv1.ConvertFromSeconds(in.NodeDeletionTimeoutSeconds) + return nil +} + +func Convert_v1beta1_ControlPlaneTopology_To_v1beta2_ControlPlaneTopology(in *ControlPlaneTopology, out *clusterv1.ControlPlaneTopology, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta1_ControlPlaneTopology_To_v1beta2_ControlPlaneTopology(in, out, s); err != nil { + return err + } + out.NodeDrainTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDrainTimeout) + out.NodeVolumeDetachTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeVolumeDetachTimeout) + out.NodeDeletionTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDeletionTimeout) + return nil +} + +func Convert_v1beta2_ControlPlaneTopology_To_v1beta1_ControlPlaneTopology(in *clusterv1.ControlPlaneTopology, out *ControlPlaneTopology, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta2_ControlPlaneTopology_To_v1beta1_ControlPlaneTopology(in, out, s); err != nil { + return err + } + out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.NodeDrainTimeoutSeconds) + out.NodeVolumeDetachTimeout = clusterv1.ConvertFromSeconds(in.NodeVolumeDetachTimeoutSeconds) + out.NodeDeletionTimeout = clusterv1.ConvertFromSeconds(in.NodeDeletionTimeoutSeconds) + return nil +} + +func Convert_v1beta1_MachineDeploymentClass_To_v1beta2_MachineDeploymentClass(in *MachineDeploymentClass, out *clusterv1.MachineDeploymentClass, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta1_MachineDeploymentClass_To_v1beta2_MachineDeploymentClass(in, out, s); err != nil { + return err + } + out.NodeDrainTimeoutSeconds 
= clusterv1.ConvertToSeconds(in.NodeDrainTimeout) + out.NodeVolumeDetachTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeVolumeDetachTimeout) + out.NodeDeletionTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDeletionTimeout) + return nil +} + +func Convert_v1beta2_MachineDeploymentClass_To_v1beta1_MachineDeploymentClass(in *clusterv1.MachineDeploymentClass, out *MachineDeploymentClass, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta2_MachineDeploymentClass_To_v1beta1_MachineDeploymentClass(in, out, s); err != nil { + return err + } + out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.NodeDrainTimeoutSeconds) + out.NodeVolumeDetachTimeout = clusterv1.ConvertFromSeconds(in.NodeVolumeDetachTimeoutSeconds) + out.NodeDeletionTimeout = clusterv1.ConvertFromSeconds(in.NodeDeletionTimeoutSeconds) + return nil +} + +func Convert_v1beta1_MachineDeploymentTopology_To_v1beta2_MachineDeploymentTopology(in *MachineDeploymentTopology, out *clusterv1.MachineDeploymentTopology, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta1_MachineDeploymentTopology_To_v1beta2_MachineDeploymentTopology(in, out, s); err != nil { + return err + } + out.NodeDrainTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDrainTimeout) + out.NodeVolumeDetachTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeVolumeDetachTimeout) + out.NodeDeletionTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDeletionTimeout) + return nil +} + +func Convert_v1beta2_MachineDeploymentTopology_To_v1beta1_MachineDeploymentTopology(in *clusterv1.MachineDeploymentTopology, out *MachineDeploymentTopology, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta2_MachineDeploymentTopology_To_v1beta1_MachineDeploymentTopology(in, out, s); err != nil { + return err + } + out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.NodeDrainTimeoutSeconds) + out.NodeVolumeDetachTimeout = clusterv1.ConvertFromSeconds(in.NodeVolumeDetachTimeoutSeconds) + out.NodeDeletionTimeout = clusterv1.ConvertFromSeconds(in.NodeDeletionTimeoutSeconds) + return nil +} + +func Convert_v1beta1_MachinePoolClass_To_v1beta2_MachinePoolClass(in *MachinePoolClass, out *clusterv1.MachinePoolClass, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta1_MachinePoolClass_To_v1beta2_MachinePoolClass(in, out, s); err != nil { + return err + } + out.NodeDrainTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDrainTimeout) + out.NodeVolumeDetachTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeVolumeDetachTimeout) + out.NodeDeletionTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDeletionTimeout) + return nil +} + +func Convert_v1beta2_MachinePoolClass_To_v1beta1_MachinePoolClass(in *clusterv1.MachinePoolClass, out *MachinePoolClass, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta2_MachinePoolClass_To_v1beta1_MachinePoolClass(in, out, s); err != nil { + return err + } + out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.NodeDrainTimeoutSeconds) + out.NodeVolumeDetachTimeout = clusterv1.ConvertFromSeconds(in.NodeVolumeDetachTimeoutSeconds) + out.NodeDeletionTimeout = clusterv1.ConvertFromSeconds(in.NodeDeletionTimeoutSeconds) + return nil +} + +func Convert_v1beta1_MachinePoolTopology_To_v1beta2_MachinePoolTopology(in *MachinePoolTopology, out *clusterv1.MachinePoolTopology, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta1_MachinePoolTopology_To_v1beta2_MachinePoolTopology(in, out, s); err != nil { + return err + } + out.NodeDrainTimeoutSeconds = 
clusterv1.ConvertToSeconds(in.NodeDrainTimeout) + out.NodeVolumeDetachTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeVolumeDetachTimeout) + out.NodeDeletionTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDeletionTimeout) + return nil +} + +func Convert_v1beta2_MachinePoolTopology_To_v1beta1_MachinePoolTopology(in *clusterv1.MachinePoolTopology, out *MachinePoolTopology, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta2_MachinePoolTopology_To_v1beta1_MachinePoolTopology(in, out, s); err != nil { + return err + } + out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.NodeDrainTimeoutSeconds) + out.NodeVolumeDetachTimeout = clusterv1.ConvertFromSeconds(in.NodeVolumeDetachTimeoutSeconds) + out.NodeDeletionTimeout = clusterv1.ConvertFromSeconds(in.NodeDeletionTimeoutSeconds) + return nil +} + +func Convert_v1beta1_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, out *clusterv1.MachineSpec, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta1_MachineSpec_To_v1beta2_MachineSpec(in, out, s); err != nil { + return err + } + out.NodeDrainTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDrainTimeout) + out.NodeVolumeDetachTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeVolumeDetachTimeout) + out.NodeDeletionTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDeletionTimeout) + return nil +} + func Convert_v1beta2_ClusterClassStatus_To_v1beta1_ClusterClassStatus(in *clusterv1.ClusterClassStatus, out *ClusterClassStatus, s apimachineryconversion.Scope) error { if err := autoConvert_v1beta2_ClusterClassStatus_To_v1beta1_ClusterClassStatus(in, out, s); err != nil { return err @@ -539,11 +671,12 @@ func Convert_v1beta1_MachineHealthCheckSpec_To_v1beta2_MachineHealthCheckSpec(in for _, c := range in.UnhealthyConditions { out.UnhealthyNodeConditions = append(out.UnhealthyNodeConditions, clusterv1.UnhealthyNodeCondition{ - Type: c.Type, - Status: c.Status, - Timeout: c.Timeout, + Type: c.Type, + Status: c.Status, + TimeoutSeconds: ptr.Deref(clusterv1.ConvertToSeconds(&c.Timeout), 0), }) } + out.NodeStartupTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeStartupTimeout) return nil } @@ -557,9 +690,10 @@ func Convert_v1beta2_MachineHealthCheckSpec_To_v1beta1_MachineHealthCheckSpec(in out.UnhealthyConditions = append(out.UnhealthyConditions, UnhealthyCondition{ Type: c.Type, Status: c.Status, - Timeout: c.Timeout, + Timeout: ptr.Deref(clusterv1.ConvertFromSeconds(&c.TimeoutSeconds), metav1.Duration{}), }) } + out.NodeStartupTimeout = clusterv1.ConvertFromSeconds(in.NodeStartupTimeoutSeconds) return nil } @@ -808,7 +942,13 @@ func Convert_v1beta1_MachineSetSpec_To_v1beta2_MachineSetSpec(in *MachineSetSpec } func Convert_v1beta2_MachineSpec_To_v1beta1_MachineSpec(in *clusterv1.MachineSpec, out *MachineSpec, s apimachineryconversion.Scope) error { - return autoConvert_v1beta2_MachineSpec_To_v1beta1_MachineSpec(in, out, s) + if err := autoConvert_v1beta2_MachineSpec_To_v1beta1_MachineSpec(in, out, s); err != nil { + return err + } + out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.NodeDrainTimeoutSeconds) + out.NodeVolumeDetachTimeout = clusterv1.ConvertFromSeconds(in.NodeVolumeDetachTimeoutSeconds) + out.NodeDeletionTimeout = clusterv1.ConvertFromSeconds(in.NodeDeletionTimeoutSeconds) + return nil } func Convert_v1beta2_MachinePoolStatus_To_v1beta1_MachinePoolStatus(in *clusterv1.MachinePoolStatus, out *MachinePoolStatus, s apimachineryconversion.Scope) error { diff --git a/api/core/v1beta1/conversion_test.go b/api/core/v1beta1/conversion_test.go 
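All of the manual conversion functions above funnel through the shared clusterv1.ConvertToSeconds / clusterv1.ConvertFromSeconds helpers, so the nil-means-unset semantics of the old *metav1.Duration fields carry over to the new *int32 seconds fields. The real implementations live in the v1beta2 core API package; what follows is only a minimal sketch of their assumed shape, not the upstream code:

```go
// Minimal sketch, assuming the helpers behave as the conversions above imply.
package v1beta2

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// ConvertToSeconds maps an optional duration to an optional whole-second
// count, preserving nil so that unset fields stay unset after conversion.
func ConvertToSeconds(in *metav1.Duration) *int32 {
	if in == nil {
		return nil
	}
	// Sub-second precision is dropped here (an assumption of this sketch),
	// which is why the fuzz functions in conversion_test.go pin random
	// durations to whole seconds before round-tripping.
	return ptr.To(int32(in.Duration / time.Second))
}

// ConvertFromSeconds is the inverse mapping, again preserving nil.
func ConvertFromSeconds(in *int32) *metav1.Duration {
	if in == nil {
		return nil
	}
	return &metav1.Duration{Duration: time.Duration(*in) * time.Second}
}
```

The ptr.Deref wrapping in the UnhealthyCondition conversions exists because Timeout and TimeoutSeconds are value fields rather than pointers: the helpers take and return pointers, so the caller passes &c.Timeout on the way in and supplies a zero default (0 or metav1.Duration{}) on the way out.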
index 1ebbac08d519..bd5d665700ea 100644 --- a/api/core/v1beta1/conversion_test.go +++ b/api/core/v1beta1/conversion_test.go @@ -22,9 +22,11 @@ import ( "reflect" "strconv" "testing" + "time" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/utils/ptr" "sigs.k8s.io/randfill" @@ -79,6 +81,11 @@ func ClusterFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { spokeClusterTopology, spokeClusterStatus, spokeClusterVariable, + spokeControlPlaneTopology, + spokeMachineDeploymentTopology, + spokeMachinePoolTopology, + spokeMachineHealthCheckClass, + spokeUnhealthyCondition, } } @@ -129,6 +136,11 @@ func ClusterClassFuncs(_ runtimeserializer.CodecFactory) []interface{} { hubJSONSchemaProps, spokeClusterClassStatus, spokeJSONSchemaProps, + spokeControlPlaneClass, + spokeMachineDeploymentClass, + spokeMachinePoolClass, + spokeMachineHealthCheckClass, + spokeUnhealthyCondition, } } @@ -241,6 +253,7 @@ func spokeJSONSchemaProps(in *JSONSchemaProps, c randfill.Continue) { func MachineFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { return []interface{}{ hubMachineStatus, + spokeMachineSpec, spokeMachineStatus, } } @@ -262,6 +275,20 @@ func hubMachineStatus(in *clusterv1.MachineStatus, c randfill.Continue) { } } +func spokeMachineSpec(in *MachineSpec, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeDrainTimeout != nil { + in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeVolumeDetachTimeout != nil { + in.NodeVolumeDetachTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeDeletionTimeout != nil { + in.NodeDeletionTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + func spokeMachineStatus(in *MachineStatus, c randfill.Continue) { c.FillNoCustom(in) // Drop empty structs with only omit empty fields. 
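The spoke fuzz functions added in this file all repeat one pattern: whenever randfill produced a non-nil duration, replace it with a non-negative whole number of seconds, so that the Duration -> int32 seconds -> Duration round trip exercised by the conversion tests is lossless. A condensed illustration of that pattern follows; roundDurationToSeconds is a hypothetical helper, the actual test file deliberately spells out each field assignment:

```go
// Hypothetical condensation of the pattern repeated in the spoke fuzz funcs.
package v1beta1

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/randfill"
)

// roundDurationToSeconds pins a randomly filled optional duration to a
// non-negative whole number of seconds. c.Int31() is never negative, which
// also matches the +kubebuilder:validation:Minimum=0 markers on the new
// *Seconds fields.
func roundDurationToSeconds(in **metav1.Duration, c randfill.Continue) {
	if *in == nil {
		return // nil stays nil: unset fields must survive the round trip too
	}
	*in = ptr.To(metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second})
}
```

With such a helper, spokeMachineSpec would shrink to three calls, one per timeout field; keeping the assignments explicit, as the test does, makes each fuzzed field easy to grep for.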
@@ -276,6 +303,7 @@ func MachineSetFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { return []interface{}{ hubMachineSetStatus, spokeMachineSetStatus, + spokeMachineSpec, } } @@ -310,6 +338,7 @@ func MachineDeploymentFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} hubMachineDeploymentStatus, spokeMachineDeploymentSpec, spokeMachineDeploymentStatus, + spokeMachineSpec, } } @@ -353,6 +382,8 @@ func MachineHealthCheckFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} return []interface{}{ hubMachineHealthCheckStatus, spokeMachineHealthCheckStatus, + spokeMachineHealthCheckSpec, + spokeUnhealthyCondition, } } @@ -380,6 +411,7 @@ func MachinePoolFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { return []interface{}{ hubMachinePoolStatus, spokeMachinePoolStatus, + spokeMachineSpec, } } @@ -416,3 +448,109 @@ func spokeMachinePoolStatus(in *MachinePoolStatus, c randfill.Continue) { } } } + +func spokeControlPlaneTopology(in *ControlPlaneTopology, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeDrainTimeout != nil { + in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeVolumeDetachTimeout != nil { + in.NodeVolumeDetachTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeDeletionTimeout != nil { + in.NodeDeletionTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + +func spokeMachineDeploymentTopology(in *MachineDeploymentTopology, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeDrainTimeout != nil { + in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeVolumeDetachTimeout != nil { + in.NodeVolumeDetachTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeDeletionTimeout != nil { + in.NodeDeletionTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + +func spokeMachinePoolTopology(in *MachinePoolTopology, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeDrainTimeout != nil { + in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeVolumeDetachTimeout != nil { + in.NodeVolumeDetachTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeDeletionTimeout != nil { + in.NodeDeletionTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + +func spokeMachineHealthCheckClass(in *MachineHealthCheckClass, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeStartupTimeout != nil { + in.NodeStartupTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + +func spokeControlPlaneClass(in *ControlPlaneClass, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeDrainTimeout != nil { + in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeVolumeDetachTimeout != nil { + in.NodeVolumeDetachTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeDeletionTimeout != nil { + in.NodeDeletionTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } 
+} + +func spokeMachineDeploymentClass(in *MachineDeploymentClass, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeDrainTimeout != nil { + in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeVolumeDetachTimeout != nil { + in.NodeVolumeDetachTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeDeletionTimeout != nil { + in.NodeDeletionTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + +func spokeMachinePoolClass(in *MachinePoolClass, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeDrainTimeout != nil { + in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeVolumeDetachTimeout != nil { + in.NodeVolumeDetachTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } + if in.NodeDeletionTimeout != nil { + in.NodeDeletionTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + +func spokeMachineHealthCheckSpec(in *MachineHealthCheckSpec, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeStartupTimeout != nil { + in.NodeStartupTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + +func spokeUnhealthyCondition(in *UnhealthyCondition, c randfill.Continue) { + c.FillNoCustom(in) + + in.Timeout = metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second} +} diff --git a/api/core/v1beta1/zz_generated.conversion.go b/api/core/v1beta1/zz_generated.conversion.go index 6980d730fd9c..274a3a522815 100644 --- a/api/core/v1beta1/zz_generated.conversion.go +++ b/api/core/v1beta1/zz_generated.conversion.go @@ -185,16 +185,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ControlPlaneClass)(nil), (*v1beta2.ControlPlaneClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(a.(*ControlPlaneClass), b.(*v1beta2.ControlPlaneClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ControlPlaneClass)(nil), (*ControlPlaneClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(a.(*v1beta2.ControlPlaneClass), b.(*ControlPlaneClass), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*ControlPlaneClassNamingStrategy)(nil), (*v1beta2.ControlPlaneClassNamingStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ControlPlaneClassNamingStrategy_To_v1beta2_ControlPlaneClassNamingStrategy(a.(*ControlPlaneClassNamingStrategy), b.(*v1beta2.ControlPlaneClassNamingStrategy), scope) }); err != nil { @@ -205,16 +195,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ControlPlaneTopology)(nil), (*v1beta2.ControlPlaneTopology)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ControlPlaneTopology_To_v1beta2_ControlPlaneTopology(a.(*ControlPlaneTopology), b.(*v1beta2.ControlPlaneTopology), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ControlPlaneTopology)(nil), 
(*ControlPlaneTopology)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ControlPlaneTopology_To_v1beta1_ControlPlaneTopology(a.(*v1beta2.ControlPlaneTopology), b.(*ControlPlaneTopology), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*ControlPlaneVariables)(nil), (*v1beta2.ControlPlaneVariables)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ControlPlaneVariables_To_v1beta2_ControlPlaneVariables(a.(*ControlPlaneVariables), b.(*v1beta2.ControlPlaneVariables), scope) }); err != nil { @@ -305,16 +285,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*MachineDeploymentClass)(nil), (*v1beta2.MachineDeploymentClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_MachineDeploymentClass_To_v1beta2_MachineDeploymentClass(a.(*MachineDeploymentClass), b.(*v1beta2.MachineDeploymentClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.MachineDeploymentClass)(nil), (*MachineDeploymentClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_MachineDeploymentClass_To_v1beta1_MachineDeploymentClass(a.(*v1beta2.MachineDeploymentClass), b.(*MachineDeploymentClass), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*MachineDeploymentClassNamingStrategy)(nil), (*v1beta2.MachineDeploymentClassNamingStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_MachineDeploymentClassNamingStrategy_To_v1beta2_MachineDeploymentClassNamingStrategy(a.(*MachineDeploymentClassNamingStrategy), b.(*v1beta2.MachineDeploymentClassNamingStrategy), scope) }); err != nil { @@ -360,16 +330,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*MachineDeploymentTopology)(nil), (*v1beta2.MachineDeploymentTopology)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_MachineDeploymentTopology_To_v1beta2_MachineDeploymentTopology(a.(*MachineDeploymentTopology), b.(*v1beta2.MachineDeploymentTopology), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.MachineDeploymentTopology)(nil), (*MachineDeploymentTopology)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_MachineDeploymentTopology_To_v1beta1_MachineDeploymentTopology(a.(*v1beta2.MachineDeploymentTopology), b.(*MachineDeploymentTopology), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*MachineDeploymentVariables)(nil), (*v1beta2.MachineDeploymentVariables)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_MachineDeploymentVariables_To_v1beta2_MachineDeploymentVariables(a.(*MachineDeploymentVariables), b.(*v1beta2.MachineDeploymentVariables), scope) }); err != nil { @@ -500,16 +460,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*MachinePoolClass)(nil), (*v1beta2.MachinePoolClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_MachinePoolClass_To_v1beta2_MachinePoolClass(a.(*MachinePoolClass), b.(*v1beta2.MachinePoolClass), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*v1beta2.MachinePoolClass)(nil), (*MachinePoolClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_MachinePoolClass_To_v1beta1_MachinePoolClass(a.(*v1beta2.MachinePoolClass), b.(*MachinePoolClass), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*MachinePoolClassNamingStrategy)(nil), (*v1beta2.MachinePoolClassNamingStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_MachinePoolClassNamingStrategy_To_v1beta2_MachinePoolClassNamingStrategy(a.(*MachinePoolClassNamingStrategy), b.(*v1beta2.MachinePoolClassNamingStrategy), scope) }); err != nil { @@ -545,16 +495,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*MachinePoolTopology)(nil), (*v1beta2.MachinePoolTopology)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_MachinePoolTopology_To_v1beta2_MachinePoolTopology(a.(*MachinePoolTopology), b.(*v1beta2.MachinePoolTopology), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.MachinePoolTopology)(nil), (*MachinePoolTopology)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_MachinePoolTopology_To_v1beta1_MachinePoolTopology(a.(*v1beta2.MachinePoolTopology), b.(*MachinePoolTopology), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*MachinePoolVariables)(nil), (*v1beta2.MachinePoolVariables)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_MachinePoolVariables_To_v1beta2_MachinePoolVariables(a.(*MachinePoolVariables), b.(*v1beta2.MachinePoolVariables), scope) }); err != nil { @@ -610,11 +550,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*MachineSpec)(nil), (*v1beta2.MachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_MachineSpec_To_v1beta2_MachineSpec(a.(*MachineSpec), b.(*v1beta2.MachineSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*MachineTemplateSpec)(nil), (*v1beta2.MachineTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_MachineTemplateSpec_To_v1beta2_MachineTemplateSpec(a.(*MachineTemplateSpec), b.(*v1beta2.MachineTemplateSpec), scope) }); err != nil { @@ -805,6 +740,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*ControlPlaneClass)(nil), (*v1beta2.ControlPlaneClass)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(a.(*ControlPlaneClass), b.(*v1beta2.ControlPlaneClass), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ControlPlaneTopology)(nil), (*v1beta2.ControlPlaneTopology)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ControlPlaneTopology_To_v1beta2_ControlPlaneTopology(a.(*ControlPlaneTopology), b.(*v1beta2.ControlPlaneTopology), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*ExternalPatchDefinition)(nil), (*v1beta2.ExternalPatchDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ExternalPatchDefinition_To_v1beta2_ExternalPatchDefinition(a.(*ExternalPatchDefinition), 
b.(*v1beta2.ExternalPatchDefinition), scope) }); err != nil { @@ -815,6 +760,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*MachineDeploymentClass)(nil), (*v1beta2.MachineDeploymentClass)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MachineDeploymentClass_To_v1beta2_MachineDeploymentClass(a.(*MachineDeploymentClass), b.(*v1beta2.MachineDeploymentClass), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*MachineDeploymentSpec)(nil), (*v1beta2.MachineDeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_MachineDeploymentSpec_To_v1beta2_MachineDeploymentSpec(a.(*MachineDeploymentSpec), b.(*v1beta2.MachineDeploymentSpec), scope) }); err != nil { @@ -825,6 +775,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*MachineDeploymentTopology)(nil), (*v1beta2.MachineDeploymentTopology)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MachineDeploymentTopology_To_v1beta2_MachineDeploymentTopology(a.(*MachineDeploymentTopology), b.(*v1beta2.MachineDeploymentTopology), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*MachineHealthCheckClass)(nil), (*v1beta2.MachineHealthCheckClass)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_MachineHealthCheckClass_To_v1beta2_MachineHealthCheckClass(a.(*MachineHealthCheckClass), b.(*v1beta2.MachineHealthCheckClass), scope) }); err != nil { @@ -840,6 +795,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*MachinePoolClass)(nil), (*v1beta2.MachinePoolClass)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MachinePoolClass_To_v1beta2_MachinePoolClass(a.(*MachinePoolClass), b.(*v1beta2.MachinePoolClass), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*MachinePoolSpec)(nil), (*v1beta2.MachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_MachinePoolSpec_To_v1beta2_MachinePoolSpec(a.(*MachinePoolSpec), b.(*v1beta2.MachinePoolSpec), scope) }); err != nil { @@ -850,6 +810,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*MachinePoolTopology)(nil), (*v1beta2.MachinePoolTopology)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MachinePoolTopology_To_v1beta2_MachinePoolTopology(a.(*MachinePoolTopology), b.(*v1beta2.MachinePoolTopology), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*MachineSetSpec)(nil), (*v1beta2.MachineSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_MachineSetSpec_To_v1beta2_MachineSetSpec(a.(*MachineSetSpec), b.(*v1beta2.MachineSetSpec), scope) }); err != nil { @@ -860,6 +825,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*MachineSpec)(nil), (*v1beta2.MachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MachineSpec_To_v1beta2_MachineSpec(a.(*MachineSpec), b.(*v1beta2.MachineSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*MachineStatus)(nil), (*v1beta2.MachineStatus)(nil), func(a, b interface{}, 
scope conversion.Scope) error { return Convert_v1beta1_MachineStatus_To_v1beta2_MachineStatus(a.(*MachineStatus), b.(*v1beta2.MachineStatus), scope) }); err != nil { @@ -895,6 +865,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.ControlPlaneClass)(nil), (*ControlPlaneClass)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(a.(*v1beta2.ControlPlaneClass), b.(*ControlPlaneClass), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.ControlPlaneTopology)(nil), (*ControlPlaneTopology)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ControlPlaneTopology_To_v1beta1_ControlPlaneTopology(a.(*v1beta2.ControlPlaneTopology), b.(*ControlPlaneTopology), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.ExternalPatchDefinition)(nil), (*ExternalPatchDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_ExternalPatchDefinition_To_v1beta1_ExternalPatchDefinition(a.(*v1beta2.ExternalPatchDefinition), b.(*ExternalPatchDefinition), scope) }); err != nil { @@ -905,11 +885,21 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.MachineDeploymentClass)(nil), (*MachineDeploymentClass)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_MachineDeploymentClass_To_v1beta1_MachineDeploymentClass(a.(*v1beta2.MachineDeploymentClass), b.(*MachineDeploymentClass), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.MachineDeploymentStatus)(nil), (*MachineDeploymentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_MachineDeploymentStatus_To_v1beta1_MachineDeploymentStatus(a.(*v1beta2.MachineDeploymentStatus), b.(*MachineDeploymentStatus), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.MachineDeploymentTopology)(nil), (*MachineDeploymentTopology)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_MachineDeploymentTopology_To_v1beta1_MachineDeploymentTopology(a.(*v1beta2.MachineDeploymentTopology), b.(*MachineDeploymentTopology), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.MachineHealthCheckClass)(nil), (*MachineHealthCheckClass)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_MachineHealthCheckClass_To_v1beta1_MachineHealthCheckClass(a.(*v1beta2.MachineHealthCheckClass), b.(*MachineHealthCheckClass), scope) }); err != nil { @@ -925,11 +915,21 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.MachinePoolClass)(nil), (*MachinePoolClass)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_MachinePoolClass_To_v1beta1_MachinePoolClass(a.(*v1beta2.MachinePoolClass), b.(*MachinePoolClass), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.MachinePoolStatus)(nil), (*MachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_MachinePoolStatus_To_v1beta1_MachinePoolStatus(a.(*v1beta2.MachinePoolStatus), b.(*MachinePoolStatus), scope) }); err != nil { return err } + if err := 
s.AddConversionFunc((*v1beta2.MachinePoolTopology)(nil), (*MachinePoolTopology)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_MachinePoolTopology_To_v1beta1_MachinePoolTopology(a.(*v1beta2.MachinePoolTopology), b.(*MachinePoolTopology), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.MachineSetStatus)(nil), (*MachineSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_MachineSetStatus_To_v1beta1_MachineSetStatus(a.(*v1beta2.MachineSetStatus), b.(*MachineSetStatus), scope) }); err != nil { @@ -1663,18 +1663,13 @@ func autoConvert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(in *Cont out.MachineHealthCheck = nil } out.NamingStrategy = (*v1beta2.ControlPlaneClassNamingStrategy)(unsafe.Pointer(in.NamingStrategy)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type out.ReadinessGates = *(*[]v1beta2.MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) return nil } -// Convert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass is an autogenerated conversion function. -func Convert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(in *ControlPlaneClass, out *v1beta2.ControlPlaneClass, s conversion.Scope) error { - return autoConvert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(in, out, s) -} - func autoConvert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(in *v1beta2.ControlPlaneClass, out *ControlPlaneClass, s conversion.Scope) error { if err := Convert_v1beta2_ObjectMeta_To_v1beta1_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err @@ -1693,18 +1688,13 @@ func autoConvert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(in *v1be out.MachineHealthCheck = nil } out.NamingStrategy = (*ControlPlaneClassNamingStrategy)(unsafe.Pointer(in.NamingStrategy)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type out.ReadinessGates = *(*[]MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) return nil } -// Convert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass is an autogenerated conversion function. 
-func Convert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(in *v1beta2.ControlPlaneClass, out *ControlPlaneClass, s conversion.Scope) error { - return autoConvert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(in, out, s) -} - func autoConvert_v1beta1_ControlPlaneClassNamingStrategy_To_v1beta2_ControlPlaneClassNamingStrategy(in *ControlPlaneClassNamingStrategy, out *v1beta2.ControlPlaneClassNamingStrategy, s conversion.Scope) error { out.Template = (*string)(unsafe.Pointer(in.Template)) return nil @@ -1739,9 +1729,9 @@ func autoConvert_v1beta1_ControlPlaneTopology_To_v1beta2_ControlPlaneTopology(in } else { out.MachineHealthCheck = nil } - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type out.ReadinessGates = *(*[]v1beta2.MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) if in.Variables != nil { in, out := &in.Variables, &out.Variables @@ -1755,11 +1745,6 @@ func autoConvert_v1beta1_ControlPlaneTopology_To_v1beta2_ControlPlaneTopology(in return nil } -// Convert_v1beta1_ControlPlaneTopology_To_v1beta2_ControlPlaneTopology is an autogenerated conversion function. -func Convert_v1beta1_ControlPlaneTopology_To_v1beta2_ControlPlaneTopology(in *ControlPlaneTopology, out *v1beta2.ControlPlaneTopology, s conversion.Scope) error { - return autoConvert_v1beta1_ControlPlaneTopology_To_v1beta2_ControlPlaneTopology(in, out, s) -} - func autoConvert_v1beta2_ControlPlaneTopology_To_v1beta1_ControlPlaneTopology(in *v1beta2.ControlPlaneTopology, out *ControlPlaneTopology, s conversion.Scope) error { if err := Convert_v1beta2_ObjectMeta_To_v1beta1_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err @@ -1774,9 +1759,9 @@ func autoConvert_v1beta2_ControlPlaneTopology_To_v1beta1_ControlPlaneTopology(in } else { out.MachineHealthCheck = nil } - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type out.ReadinessGates = *(*[]MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) if in.Variables != nil { in, out := &in.Variables, &out.Variables @@ -1790,11 +1775,6 @@ func autoConvert_v1beta2_ControlPlaneTopology_To_v1beta1_ControlPlaneTopology(in return nil } -// Convert_v1beta2_ControlPlaneTopology_To_v1beta1_ControlPlaneTopology is an autogenerated conversion function. 
-func Convert_v1beta2_ControlPlaneTopology_To_v1beta1_ControlPlaneTopology(in *v1beta2.ControlPlaneTopology, out *ControlPlaneTopology, s conversion.Scope) error { - return autoConvert_v1beta2_ControlPlaneTopology_To_v1beta1_ControlPlaneTopology(in, out, s) -} - func autoConvert_v1beta1_ControlPlaneVariables_To_v1beta2_ControlPlaneVariables(in *ControlPlaneVariables, out *v1beta2.ControlPlaneVariables, s conversion.Scope) error { if in.Overrides != nil { in, out := &in.Overrides, &out.Overrides @@ -2121,20 +2101,15 @@ func autoConvert_v1beta1_MachineDeploymentClass_To_v1beta2_MachineDeploymentClas } out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) out.NamingStrategy = (*v1beta2.MachineDeploymentClassNamingStrategy)(unsafe.Pointer(in.NamingStrategy)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type out.MinReadySeconds = (*int32)(unsafe.Pointer(in.MinReadySeconds)) out.ReadinessGates = *(*[]v1beta2.MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) out.Strategy = (*v1beta2.MachineDeploymentStrategy)(unsafe.Pointer(in.Strategy)) return nil } -// Convert_v1beta1_MachineDeploymentClass_To_v1beta2_MachineDeploymentClass is an autogenerated conversion function. -func Convert_v1beta1_MachineDeploymentClass_To_v1beta2_MachineDeploymentClass(in *MachineDeploymentClass, out *v1beta2.MachineDeploymentClass, s conversion.Scope) error { - return autoConvert_v1beta1_MachineDeploymentClass_To_v1beta2_MachineDeploymentClass(in, out, s) -} - func autoConvert_v1beta2_MachineDeploymentClass_To_v1beta1_MachineDeploymentClass(in *v1beta2.MachineDeploymentClass, out *MachineDeploymentClass, s conversion.Scope) error { out.Class = in.Class if err := Convert_v1beta2_MachineDeploymentClassTemplate_To_v1beta1_MachineDeploymentClassTemplate(&in.Template, &out.Template, s); err != nil { @@ -2151,20 +2126,15 @@ func autoConvert_v1beta2_MachineDeploymentClass_To_v1beta1_MachineDeploymentClas } out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) out.NamingStrategy = (*MachineDeploymentClassNamingStrategy)(unsafe.Pointer(in.NamingStrategy)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type out.MinReadySeconds = (*int32)(unsafe.Pointer(in.MinReadySeconds)) out.ReadinessGates = *(*[]MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) out.Strategy = (*MachineDeploymentStrategy)(unsafe.Pointer(in.Strategy)) return nil } -// Convert_v1beta2_MachineDeploymentClass_To_v1beta1_MachineDeploymentClass is an autogenerated conversion function. 
-func Convert_v1beta2_MachineDeploymentClass_To_v1beta1_MachineDeploymentClass(in *v1beta2.MachineDeploymentClass, out *MachineDeploymentClass, s conversion.Scope) error { - return autoConvert_v1beta2_MachineDeploymentClass_To_v1beta1_MachineDeploymentClass(in, out, s) -} - func autoConvert_v1beta1_MachineDeploymentClassNamingStrategy_To_v1beta2_MachineDeploymentClassNamingStrategy(in *MachineDeploymentClassNamingStrategy, out *v1beta2.MachineDeploymentClassNamingStrategy, s conversion.Scope) error { out.Template = (*string)(unsafe.Pointer(in.Template)) return nil @@ -2399,9 +2369,9 @@ func autoConvert_v1beta1_MachineDeploymentTopology_To_v1beta2_MachineDeploymentT } else { out.MachineHealthCheck = nil } - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type out.MinReadySeconds = (*int32)(unsafe.Pointer(in.MinReadySeconds)) out.ReadinessGates = *(*[]v1beta2.MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) out.Strategy = (*v1beta2.MachineDeploymentStrategy)(unsafe.Pointer(in.Strategy)) @@ -2417,11 +2387,6 @@ func autoConvert_v1beta1_MachineDeploymentTopology_To_v1beta2_MachineDeploymentT return nil } -// Convert_v1beta1_MachineDeploymentTopology_To_v1beta2_MachineDeploymentTopology is an autogenerated conversion function. -func Convert_v1beta1_MachineDeploymentTopology_To_v1beta2_MachineDeploymentTopology(in *MachineDeploymentTopology, out *v1beta2.MachineDeploymentTopology, s conversion.Scope) error { - return autoConvert_v1beta1_MachineDeploymentTopology_To_v1beta2_MachineDeploymentTopology(in, out, s) -} - func autoConvert_v1beta2_MachineDeploymentTopology_To_v1beta1_MachineDeploymentTopology(in *v1beta2.MachineDeploymentTopology, out *MachineDeploymentTopology, s conversion.Scope) error { if err := Convert_v1beta2_ObjectMeta_To_v1beta1_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err @@ -2439,9 +2404,9 @@ func autoConvert_v1beta2_MachineDeploymentTopology_To_v1beta1_MachineDeploymentT } else { out.MachineHealthCheck = nil } - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type out.MinReadySeconds = (*int32)(unsafe.Pointer(in.MinReadySeconds)) out.ReadinessGates = *(*[]MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) out.Strategy = (*MachineDeploymentStrategy)(unsafe.Pointer(in.Strategy)) @@ -2457,11 +2422,6 @@ func autoConvert_v1beta2_MachineDeploymentTopology_To_v1beta1_MachineDeploymentT return nil } -// Convert_v1beta2_MachineDeploymentTopology_To_v1beta1_MachineDeploymentTopology is an autogenerated conversion function. 
-func Convert_v1beta2_MachineDeploymentTopology_To_v1beta1_MachineDeploymentTopology(in *v1beta2.MachineDeploymentTopology, out *MachineDeploymentTopology, s conversion.Scope) error { - return autoConvert_v1beta2_MachineDeploymentTopology_To_v1beta1_MachineDeploymentTopology(in, out, s) -} - func autoConvert_v1beta1_MachineDeploymentVariables_To_v1beta2_MachineDeploymentVariables(in *MachineDeploymentVariables, out *v1beta2.MachineDeploymentVariables, s conversion.Scope) error { if in.Overrides != nil { in, out := &in.Overrides, &out.Overrides @@ -2680,7 +2640,7 @@ func autoConvert_v1beta1_MachineHealthCheckClass_To_v1beta2_MachineHealthCheckCl // WARNING: in.UnhealthyConditions requires manual conversion: does not exist in peer-type out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy)) out.UnhealthyRange = (*string)(unsafe.Pointer(in.UnhealthyRange)) - out.NodeStartupTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeStartupTimeout)) + // WARNING: in.NodeStartupTimeout requires manual conversion: does not exist in peer-type out.RemediationTemplate = (*corev1.ObjectReference)(unsafe.Pointer(in.RemediationTemplate)) return nil } @@ -2689,7 +2649,7 @@ func autoConvert_v1beta2_MachineHealthCheckClass_To_v1beta1_MachineHealthCheckCl // WARNING: in.UnhealthyNodeConditions requires manual conversion: does not exist in peer-type out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy)) out.UnhealthyRange = (*string)(unsafe.Pointer(in.UnhealthyRange)) - out.NodeStartupTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeStartupTimeout)) + // WARNING: in.NodeStartupTimeoutSeconds requires manual conversion: does not exist in peer-type out.RemediationTemplate = (*corev1.ObjectReference)(unsafe.Pointer(in.RemediationTemplate)) return nil } @@ -2742,7 +2702,7 @@ func autoConvert_v1beta1_MachineHealthCheckSpec_To_v1beta2_MachineHealthCheckSpe // WARNING: in.UnhealthyConditions requires manual conversion: does not exist in peer-type out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy)) out.UnhealthyRange = (*string)(unsafe.Pointer(in.UnhealthyRange)) - out.NodeStartupTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeStartupTimeout)) + // WARNING: in.NodeStartupTimeout requires manual conversion: does not exist in peer-type out.RemediationTemplate = (*corev1.ObjectReference)(unsafe.Pointer(in.RemediationTemplate)) return nil } @@ -2753,7 +2713,7 @@ func autoConvert_v1beta2_MachineHealthCheckSpec_To_v1beta1_MachineHealthCheckSpe // WARNING: in.UnhealthyNodeConditions requires manual conversion: does not exist in peer-type out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy)) out.UnhealthyRange = (*string)(unsafe.Pointer(in.UnhealthyRange)) - out.NodeStartupTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeStartupTimeout)) + // WARNING: in.NodeStartupTimeoutSeconds requires manual conversion: does not exist in peer-type out.RemediationTemplate = (*corev1.ObjectReference)(unsafe.Pointer(in.RemediationTemplate)) return nil } @@ -2927,18 +2887,13 @@ func autoConvert_v1beta1_MachinePoolClass_To_v1beta2_MachinePoolClass(in *Machin } out.FailureDomains = *(*[]string)(unsafe.Pointer(&in.FailureDomains)) out.NamingStrategy = (*v1beta2.MachinePoolClassNamingStrategy)(unsafe.Pointer(in.NamingStrategy)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // 
WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type out.MinReadySeconds = (*int32)(unsafe.Pointer(in.MinReadySeconds)) return nil } -// Convert_v1beta1_MachinePoolClass_To_v1beta2_MachinePoolClass is an autogenerated conversion function. -func Convert_v1beta1_MachinePoolClass_To_v1beta2_MachinePoolClass(in *MachinePoolClass, out *v1beta2.MachinePoolClass, s conversion.Scope) error { - return autoConvert_v1beta1_MachinePoolClass_To_v1beta2_MachinePoolClass(in, out, s) -} - func autoConvert_v1beta2_MachinePoolClass_To_v1beta1_MachinePoolClass(in *v1beta2.MachinePoolClass, out *MachinePoolClass, s conversion.Scope) error { out.Class = in.Class if err := Convert_v1beta2_MachinePoolClassTemplate_To_v1beta1_MachinePoolClassTemplate(&in.Template, &out.Template, s); err != nil { @@ -2946,18 +2901,13 @@ func autoConvert_v1beta2_MachinePoolClass_To_v1beta1_MachinePoolClass(in *v1beta } out.FailureDomains = *(*[]string)(unsafe.Pointer(&in.FailureDomains)) out.NamingStrategy = (*MachinePoolClassNamingStrategy)(unsafe.Pointer(in.NamingStrategy)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type out.MinReadySeconds = (*int32)(unsafe.Pointer(in.MinReadySeconds)) return nil } -// Convert_v1beta2_MachinePoolClass_To_v1beta1_MachinePoolClass is an autogenerated conversion function. 
-func Convert_v1beta2_MachinePoolClass_To_v1beta1_MachinePoolClass(in *v1beta2.MachinePoolClass, out *MachinePoolClass, s conversion.Scope) error { - return autoConvert_v1beta2_MachinePoolClass_To_v1beta1_MachinePoolClass(in, out, s) -} - func autoConvert_v1beta1_MachinePoolClassNamingStrategy_To_v1beta2_MachinePoolClassNamingStrategy(in *MachinePoolClassNamingStrategy, out *v1beta2.MachinePoolClassNamingStrategy, s conversion.Scope) error { out.Template = (*string)(unsafe.Pointer(in.Template)) return nil @@ -3154,9 +3104,9 @@ func autoConvert_v1beta1_MachinePoolTopology_To_v1beta2_MachinePoolTopology(in * out.Class = in.Class out.Name = in.Name out.FailureDomains = *(*[]string)(unsafe.Pointer(&in.FailureDomains)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type out.MinReadySeconds = (*int32)(unsafe.Pointer(in.MinReadySeconds)) out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) if in.Variables != nil { @@ -3171,11 +3121,6 @@ func autoConvert_v1beta1_MachinePoolTopology_To_v1beta2_MachinePoolTopology(in * return nil } -// Convert_v1beta1_MachinePoolTopology_To_v1beta2_MachinePoolTopology is an autogenerated conversion function. -func Convert_v1beta1_MachinePoolTopology_To_v1beta2_MachinePoolTopology(in *MachinePoolTopology, out *v1beta2.MachinePoolTopology, s conversion.Scope) error { - return autoConvert_v1beta1_MachinePoolTopology_To_v1beta2_MachinePoolTopology(in, out, s) -} - func autoConvert_v1beta2_MachinePoolTopology_To_v1beta1_MachinePoolTopology(in *v1beta2.MachinePoolTopology, out *MachinePoolTopology, s conversion.Scope) error { if err := Convert_v1beta2_ObjectMeta_To_v1beta1_ObjectMeta(&in.Metadata, &out.Metadata, s); err != nil { return err @@ -3183,9 +3128,9 @@ func autoConvert_v1beta2_MachinePoolTopology_To_v1beta1_MachinePoolTopology(in * out.Class = in.Class out.Name = in.Name out.FailureDomains = *(*[]string)(unsafe.Pointer(&in.FailureDomains)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type out.MinReadySeconds = (*int32)(unsafe.Pointer(in.MinReadySeconds)) out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) if in.Variables != nil { @@ -3200,11 +3145,6 @@ func autoConvert_v1beta2_MachinePoolTopology_To_v1beta1_MachinePoolTopology(in * return nil } -// Convert_v1beta2_MachinePoolTopology_To_v1beta1_MachinePoolTopology is an autogenerated conversion function. 
-func Convert_v1beta2_MachinePoolTopology_To_v1beta1_MachinePoolTopology(in *v1beta2.MachinePoolTopology, out *MachinePoolTopology, s conversion.Scope) error { - return autoConvert_v1beta2_MachinePoolTopology_To_v1beta1_MachinePoolTopology(in, out, s) -} - func autoConvert_v1beta1_MachinePoolVariables_To_v1beta2_MachinePoolVariables(in *MachinePoolVariables, out *v1beta2.MachinePoolVariables, s conversion.Scope) error { if in.Overrides != nil { in, out := &in.Overrides, &out.Overrides @@ -3463,17 +3403,12 @@ func autoConvert_v1beta1_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, out out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) out.ReadinessGates = *(*[]v1beta2.MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta1_MachineSpec_To_v1beta2_MachineSpec is an autogenerated conversion function. -func Convert_v1beta1_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, out *v1beta2.MachineSpec, s conversion.Scope) error { - return autoConvert_v1beta1_MachineSpec_To_v1beta2_MachineSpec(in, out, s) -} - func autoConvert_v1beta2_MachineSpec_To_v1beta1_MachineSpec(in *v1beta2.MachineSpec, out *MachineSpec, s conversion.Scope) error { out.ClusterName = in.ClusterName if err := Convert_v1beta2_Bootstrap_To_v1beta1_Bootstrap(&in.Bootstrap, &out.Bootstrap, s); err != nil { @@ -3485,9 +3420,9 @@ func autoConvert_v1beta2_MachineSpec_To_v1beta1_MachineSpec(in *v1beta2.MachineS out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) // WARNING: in.MinReadySeconds requires manual conversion: does not exist in peer-type out.ReadinessGates = *(*[]MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) - out.NodeVolumeDetachTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeVolumeDetachTimeout)) - out.NodeDeletionTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDeletionTimeout)) + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type return nil } @@ -3902,7 +3837,17 @@ func autoConvert_v1beta1_WorkersClass_To_v1beta2_WorkersClass(in *WorkersClass, } else { out.MachineDeployments = nil } - out.MachinePools = *(*[]v1beta2.MachinePoolClass)(unsafe.Pointer(&in.MachinePools)) + if in.MachinePools != nil { + in, out := &in.MachinePools, &out.MachinePools + *out = make([]v1beta2.MachinePoolClass, len(*in)) + for i := range *in { + if err := Convert_v1beta1_MachinePoolClass_To_v1beta2_MachinePoolClass(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.MachinePools = nil + } return nil } @@ -3923,7 +3868,17 @@ func autoConvert_v1beta2_WorkersClass_To_v1beta1_WorkersClass(in *v1beta2.Worker } else { out.MachineDeployments = nil } - 
out.MachinePools = *(*[]MachinePoolClass)(unsafe.Pointer(&in.MachinePools)) + if in.MachinePools != nil { + in, out := &in.MachinePools, &out.MachinePools + *out = make([]MachinePoolClass, len(*in)) + for i := range *in { + if err := Convert_v1beta2_MachinePoolClass_To_v1beta1_MachinePoolClass(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.MachinePools = nil + } return nil } diff --git a/api/core/v1beta2/cluster_types.go b/api/core/v1beta2/cluster_types.go index b7c585e86ef0..7b82efb27499 100644 --- a/api/core/v1beta2/cluster_types.go +++ b/api/core/v1beta2/cluster_types.go @@ -611,22 +611,25 @@ type ControlPlaneTopology struct { // +optional MachineHealthCheck *MachineHealthCheckTopology `json:"machineHealthCheck,omitempty"` - // nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + // nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. // The default value is 0, meaning that the node can be drained without any time limitations. - // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + // NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` // +optional - NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDrainTimeoutSeconds *int32 `json:"nodeDrainTimeoutSeconds,omitempty"` - // nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + // nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes // to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. // +optional - NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeVolumeDetachTimeoutSeconds *int32 `json:"nodeVolumeDetachTimeoutSeconds,omitempty"` - // nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + // nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine // hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. // Defaults to 10 seconds. // +optional - NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDeletionTimeoutSeconds *int32 `json:"nodeDeletionTimeoutSeconds,omitempty"` // readinessGates specifies additional conditions to include when evaluating Machine Ready condition. // @@ -710,22 +713,25 @@ type MachineDeploymentTopology struct { // +optional MachineHealthCheck *MachineHealthCheckTopology `json:"machineHealthCheck,omitempty"` - // nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + // nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. // The default value is 0, meaning that the node can be drained without any time limitations. 
- // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + // NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` // +optional - NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDrainTimeoutSeconds *int32 `json:"nodeDrainTimeoutSeconds,omitempty"` - // nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + // nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes // to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. // +optional - NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeVolumeDetachTimeoutSeconds *int32 `json:"nodeVolumeDetachTimeoutSeconds,omitempty"` - // nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + // nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine // hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. // Defaults to 10 seconds. // +optional - NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDeletionTimeoutSeconds *int32 `json:"nodeDeletionTimeoutSeconds,omitempty"` // minReadySeconds is the minimum number of seconds for which a newly created machine should // be ready. @@ -810,22 +816,25 @@ type MachinePoolTopology struct { // +kubebuilder:validation:items:MaxLength=256 FailureDomains []string `json:"failureDomains,omitempty"` - // nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + // nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. // The default value is 0, meaning that the node can be drained without any time limitations. - // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + // NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` // +optional - NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDrainTimeoutSeconds *int32 `json:"nodeDrainTimeoutSeconds,omitempty"` - // nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + // nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes // to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. // +optional - NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeVolumeDetachTimeoutSeconds *int32 `json:"nodeVolumeDetachTimeoutSeconds,omitempty"` - // nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the MachinePool + // nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the MachinePool // hosts after the MachinePool is marked for deletion. A duration of 0 will retry deletion indefinitely. // Defaults to 10 seconds. 
// +optional - NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDeletionTimeoutSeconds *int32 `json:"nodeDeletionTimeoutSeconds,omitempty"` // minReadySeconds is the minimum number of seconds for which a newly created machine pool should // be ready. diff --git a/api/core/v1beta2/clusterclass_types.go b/api/core/v1beta2/clusterclass_types.go index 1b62415cf4c2..49b7e5398c50 100644 --- a/api/core/v1beta2/clusterclass_types.go +++ b/api/core/v1beta2/clusterclass_types.go @@ -173,25 +173,28 @@ type ControlPlaneClass struct { // +optional NamingStrategy *ControlPlaneClassNamingStrategy `json:"namingStrategy,omitempty"` - // nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + // nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. // The default value is 0, meaning that the node can be drained without any time limitations. - // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + // NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` // NOTE: This value can be overridden while defining a Cluster.Topology. // +optional - NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDrainTimeoutSeconds *int32 `json:"nodeDrainTimeoutSeconds,omitempty"` - // nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + // nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes // to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. // NOTE: This value can be overridden while defining a Cluster.Topology. // +optional - NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeVolumeDetachTimeoutSeconds *int32 `json:"nodeVolumeDetachTimeoutSeconds,omitempty"` - // nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + // nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine // hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. // Defaults to 10 seconds. // NOTE: This value can be overridden while defining a Cluster.Topology. // +optional - NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDeletionTimeoutSeconds *int32 `json:"nodeDeletionTimeoutSeconds,omitempty"` // readinessGates specifies additional conditions to include when evaluating Machine Ready condition. // @@ -291,25 +294,28 @@ type MachineDeploymentClass struct { // +optional NamingStrategy *MachineDeploymentClassNamingStrategy `json:"namingStrategy,omitempty"` - // nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + // nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. // The default value is 0, meaning that the node can be drained without any time limitations. - // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + // NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` // NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. 
// +optional - NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDrainTimeoutSeconds *int32 `json:"nodeDrainTimeoutSeconds,omitempty"` - // nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + // nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes // to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. // NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. // +optional - NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeVolumeDetachTimeoutSeconds *int32 `json:"nodeVolumeDetachTimeoutSeconds,omitempty"` - // nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + // nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine // hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. // Defaults to 10 seconds. // NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. // +optional - NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDeletionTimeoutSeconds *int32 `json:"nodeDeletionTimeoutSeconds,omitempty"` // minReadySeconds is the minimum number of seconds for which a newly created machine should // be ready. @@ -403,7 +409,7 @@ type MachineHealthCheckClass struct { // +kubebuilder:validation:MaxLength=32 UnhealthyRange *string `json:"unhealthyRange,omitempty"` - // nodeStartupTimeout allows to set the maximum time for MachineHealthCheck + // nodeStartupTimeoutSeconds allows to set the maximum time for MachineHealthCheck // to consider a Machine unhealthy if a corresponding Node isn't associated // through a `Spec.ProviderID` field. // @@ -416,7 +422,8 @@ type MachineHealthCheckClass struct { // Defaults to 10 minutes. // If you wish to disable this feature, set the value explicitly to 0. // +optional - NodeStartupTimeout *metav1.Duration `json:"nodeStartupTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeStartupTimeoutSeconds *int32 `json:"nodeStartupTimeoutSeconds,omitempty"` // remediationTemplate is a reference to a remediation template // provided by an infrastructure provider. @@ -457,25 +464,28 @@ type MachinePoolClass struct { // +optional NamingStrategy *MachinePoolClassNamingStrategy `json:"namingStrategy,omitempty"` - // nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + // nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. // The default value is 0, meaning that the node can be drained without any time limitations. - // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + // NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` // NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass. 
// +optional - NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDrainTimeoutSeconds *int32 `json:"nodeDrainTimeoutSeconds,omitempty"` - // nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + // nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes // to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. // NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass. // +optional - NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeVolumeDetachTimeoutSeconds *int32 `json:"nodeVolumeDetachTimeoutSeconds,omitempty"` - // nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + // nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine // hosts after the Machine Pool is marked for deletion. A duration of 0 will retry deletion indefinitely. // Defaults to 10 seconds. // NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass. // +optional - NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDeletionTimeoutSeconds *int32 `json:"nodeDeletionTimeoutSeconds,omitempty"` // minReadySeconds is the minimum number of seconds for which a newly created machine pool should // be ready. diff --git a/api/core/v1beta2/common_types.go b/api/core/v1beta2/common_types.go index 11affde8a668..c45faba5c3f5 100644 --- a/api/core/v1beta2/common_types.go +++ b/api/core/v1beta2/common_types.go @@ -19,7 +19,6 @@ package v1beta2 import ( corev1 "k8s.io/api/core/v1" apivalidation "k8s.io/apimachinery/pkg/api/validation" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -275,11 +274,6 @@ const ( TemplateSuffix = "Template" ) -var ( - // ZeroDuration is a zero value of the metav1.Duration type. - ZeroDuration = metav1.Duration{} -) - // MachineAddressType describes a valid MachineAddress type. // +kubebuilder:validation:Enum=Hostname;ExternalIP;InternalIP;ExternalDNS;InternalDNS type MachineAddressType string diff --git a/api/core/v1beta2/conversion.go b/api/core/v1beta2/conversion.go index eeee691f499d..a30a21c55d99 100644 --- a/api/core/v1beta2/conversion.go +++ b/api/core/v1beta2/conversion.go @@ -16,6 +16,14 @@ limitations under the License. package v1beta2 +import ( + "math" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + func (*Cluster) Hub() {} func (*ClusterClass) Hub() {} func (*Machine) Hub() {} @@ -23,3 +31,27 @@ func (*MachineSet) Hub() {} func (*MachineDeployment) Hub() {} func (*MachineHealthCheck) Hub() {} func (*MachinePool) Hub() {} + +// ConvertToSeconds takes *metav1.Duration and returns a *int32. +// Durations longer than MaxInt32 are capped. +// NOTE: this is a util function intended only for usage in API conversions. 
+func ConvertToSeconds(in *metav1.Duration) *int32 {
+	if in == nil {
+		return nil
+	}
+	seconds := math.Trunc(in.Seconds())
+	if seconds > math.MaxInt32 {
+		return ptr.To[int32](math.MaxInt32)
+	}
+	return ptr.To(int32(seconds))
+}
+
+// ConvertFromSeconds takes *int32 and returns a *metav1.Duration.
+// The returned duration is always a whole number of seconds.
+// NOTE: this is a util function intended only for usage in API conversions.
+func ConvertFromSeconds(in *int32) *metav1.Duration {
+	if in == nil {
+		return nil
+	}
+	return ptr.To(metav1.Duration{Duration: time.Duration(*in) * time.Second})
+}
diff --git a/api/core/v1beta2/conversion_test.go b/api/core/v1beta2/conversion_test.go
new file mode 100644
index 000000000000..2fe0ac3be123
--- /dev/null
+++ b/api/core/v1beta2/conversion_test.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+	"math"
+	"testing"
+	"time"
+
+	. "github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/ptr"
+)
+
+func TestConvertSeconds(t *testing.T) {
+	g := NewWithT(t)
+
+	seconds := ptr.To[int32](100)
+	duration := ConvertFromSeconds(seconds)
+	g.Expect(ConvertToSeconds(duration)).To(Equal(seconds))
+
+	seconds = nil
+	duration = ConvertFromSeconds(seconds)
+	g.Expect(ConvertToSeconds(duration)).To(Equal(seconds))
+
+	// Durations longer than MaxInt32 are capped.
+	duration = ptr.To(metav1.Duration{Duration: (math.MaxInt32 + 1) * time.Second})
+	g.Expect(ConvertToSeconds(duration)).To(Equal(ptr.To[int32](math.MaxInt32)))
+}
diff --git a/api/core/v1beta2/machine_types.go b/api/core/v1beta2/machine_types.go
index 6755d84dec96..8102383b63cd 100644
--- a/api/core/v1beta2/machine_types.go
+++ b/api/core/v1beta2/machine_types.go
@@ -449,22 +449,25 @@ type MachineSpec struct {
 	// +kubebuilder:validation:MaxItems=32
 	ReadinessGates []MachineReadinessGate `json:"readinessGates,omitempty"`
 
-	// nodeDrainTimeout is the total amount of time that the controller will spend on draining a node.
+	// nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node.
 	// The default value is 0, meaning that the node can be drained without any time limitations.
-	// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
+	// NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout`
 	// +optional
-	NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"`
+	// +kubebuilder:validation:Minimum=0
+	NodeDrainTimeoutSeconds *int32 `json:"nodeDrainTimeoutSeconds,omitempty"`
 
-	// nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes
+	// nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes
 	// to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations.
// +optional - NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeVolumeDetachTimeoutSeconds *int32 `json:"nodeVolumeDetachTimeoutSeconds,omitempty"` - // nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + // nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine // hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. // Defaults to 10 seconds. // +optional - NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDeletionTimeoutSeconds *int32 `json:"nodeDeletionTimeoutSeconds,omitempty"` } // MachineReadinessGate contains the type of a Machine condition to be used as a readiness gate. @@ -642,13 +645,13 @@ type MachineV1Beta1DeprecatedStatus struct { // MachineDeletionStatus is the deletion state of the Machine. type MachineDeletionStatus struct { // nodeDrainStartTime is the time when the drain of the node started and is used to determine - // if the NodeDrainTimeout is exceeded. + // if the nodeDrainTimeoutSeconds is exceeded. // Only present when the Machine has a deletionTimestamp and draining the node had been started. // +optional NodeDrainStartTime *metav1.Time `json:"nodeDrainStartTime,omitempty"` // waitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started - // and is used to determine if the NodeVolumeDetachTimeout is exceeded. + // and is used to determine if the nodeVolumeDetachTimeoutSeconds is exceeded. // Detaching volumes from nodes is usually done by CSI implementations and the current state // is observed from the node's `.Status.VolumesAttached` field. // Only present when the Machine has a deletionTimestamp and waiting for volume detachments had been started. diff --git a/api/core/v1beta2/machinehealthcheck_types.go b/api/core/v1beta2/machinehealthcheck_types.go index 606075cc0749..e1fa1b5ea2c3 100644 --- a/api/core/v1beta2/machinehealthcheck_types.go +++ b/api/core/v1beta2/machinehealthcheck_types.go @@ -17,8 +17,6 @@ limitations under the License. package v1beta2 import ( - "time" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -40,11 +38,11 @@ const ( ) var ( - // DefaultNodeStartupTimeout is the time allowed for a node to start up. + // DefaultNodeStartupTimeoutSeconds is the time allowed for a node to start up. // Can be made longer as part of spec if required for particular provider. // 10 minutes should allow the instance to start and the node to join the // cluster on most providers. - DefaultNodeStartupTimeout = metav1.Duration{Duration: 10 * time.Minute} + DefaultNodeStartupTimeoutSeconds = int32(600) ) // ANCHOR: MachineHealthCheckSpec @@ -93,7 +91,7 @@ type MachineHealthCheckSpec struct { // +kubebuilder:validation:MaxLength=32 UnhealthyRange *string `json:"unhealthyRange,omitempty"` - // nodeStartupTimeout allows to set the maximum time for MachineHealthCheck + // nodeStartupTimeoutSeconds allows to set the maximum time for MachineHealthCheck // to consider a Machine unhealthy if a corresponding Node isn't associated // through a `Spec.ProviderID` field. // @@ -106,7 +104,8 @@ type MachineHealthCheckSpec struct { // Defaults to 10 minutes. // If you wish to disable this feature, set the value explicitly to 0. 
 	// +optional
-	NodeStartupTimeout *metav1.Duration `json:"nodeStartupTimeout,omitempty"`
+	// +kubebuilder:validation:Minimum=0
+	NodeStartupTimeoutSeconds *int32 `json:"nodeStartupTimeoutSeconds,omitempty"`
 
 	// remediationTemplate is a reference to a remediation template
 	// provided by an infrastructure provider.
@@ -138,12 +137,13 @@ type UnhealthyNodeCondition struct {
 	// +required
 	Status corev1.ConditionStatus `json:"status"`
 
-	// timeout is the duration that a node must be in a given status for,
+	// timeoutSeconds is the duration that a node must be in a given status for,
 	// after which the node is considered unhealthy.
-	// For example, with a value of "1h", the node must match the status
+	// For example, with a value of 3600, the node must match the status
 	// for at least 1 hour before being considered unhealthy.
 	// +required
-	Timeout metav1.Duration `json:"timeout"`
+	// +kubebuilder:validation:Minimum=0
+	TimeoutSeconds int32 `json:"timeoutSeconds"`
 }
 
 // ANCHOR_END: UnhealthyNodeCondition
diff --git a/api/core/v1beta2/zz_generated.deepcopy.go b/api/core/v1beta2/zz_generated.deepcopy.go
index c740f3cb3600..7056de151815 100644
--- a/api/core/v1beta2/zz_generated.deepcopy.go
+++ b/api/core/v1beta2/zz_generated.deepcopy.go
@@ -747,19 +747,19 @@ func (in *ControlPlaneClass) DeepCopyInto(out *ControlPlaneClass) {
 		*out = new(ControlPlaneClassNamingStrategy)
 		(*in).DeepCopyInto(*out)
 	}
-	if in.NodeDrainTimeout != nil {
-		in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout
-		*out = new(metav1.Duration)
+	if in.NodeDrainTimeoutSeconds != nil {
+		in, out := &in.NodeDrainTimeoutSeconds, &out.NodeDrainTimeoutSeconds
+		*out = new(int32)
 		**out = **in
 	}
-	if in.NodeVolumeDetachTimeout != nil {
-		in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout
-		*out = new(metav1.Duration)
+	if in.NodeVolumeDetachTimeoutSeconds != nil {
+		in, out := &in.NodeVolumeDetachTimeoutSeconds, &out.NodeVolumeDetachTimeoutSeconds
+		*out = new(int32)
 		**out = **in
 	}
-	if in.NodeDeletionTimeout != nil {
-		in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout
-		*out = new(metav1.Duration)
+	if in.NodeDeletionTimeoutSeconds != nil {
+		in, out := &in.NodeDeletionTimeoutSeconds, &out.NodeDeletionTimeoutSeconds
+		*out = new(int32)
 		**out = **in
 	}
 	if in.ReadinessGates != nil {
@@ -813,19 +813,19 @@ func (in *ControlPlaneTopology) DeepCopyInto(out *ControlPlaneTopology) {
 		*out = new(MachineHealthCheckTopology)
 		(*in).DeepCopyInto(*out)
 	}
-	if in.NodeDrainTimeout != nil {
-		in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout
-		*out = new(metav1.Duration)
+	if in.NodeDrainTimeoutSeconds != nil {
+		in, out := &in.NodeDrainTimeoutSeconds, &out.NodeDrainTimeoutSeconds
+		*out = new(int32)
 		**out = **in
 	}
-	if in.NodeVolumeDetachTimeout != nil {
-		in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout
-		*out = new(metav1.Duration)
+	if in.NodeVolumeDetachTimeoutSeconds != nil {
+		in, out := &in.NodeVolumeDetachTimeoutSeconds, &out.NodeVolumeDetachTimeoutSeconds
+		*out = new(int32)
 		**out = **in
 	}
-	if in.NodeDeletionTimeout != nil {
-		in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout
-		*out = new(metav1.Duration)
+	if in.NodeDeletionTimeoutSeconds != nil {
+		in, out := &in.NodeDeletionTimeoutSeconds, &out.NodeDeletionTimeoutSeconds
+		*out = new(int32)
 		**out = **in
 	}
 	if in.ReadinessGates != nil {
@@ -1302,19 +1302,19 @@ func (in *MachineDeploymentClass) DeepCopyInto(out *MachineDeploymentClass) {
 		*out = new(MachineDeploymentClassNamingStrategy)
 		(*in).DeepCopyInto(*out)
 	}
-	if in.NodeDrainTimeout != nil {
-		in, out := &in.NodeDrainTimeout,
&out.NodeDrainTimeout - *out = new(metav1.Duration) + if in.NodeDrainTimeoutSeconds != nil { + in, out := &in.NodeDrainTimeoutSeconds, &out.NodeDrainTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeVolumeDetachTimeout != nil { - in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout - *out = new(metav1.Duration) + if in.NodeVolumeDetachTimeoutSeconds != nil { + in, out := &in.NodeVolumeDetachTimeoutSeconds, &out.NodeVolumeDetachTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeDeletionTimeout != nil { - in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout - *out = new(metav1.Duration) + if in.NodeDeletionTimeoutSeconds != nil { + in, out := &in.NodeDeletionTimeoutSeconds, &out.NodeDeletionTimeoutSeconds + *out = new(int32) **out = **in } if in.MinReadySeconds != nil { @@ -1561,19 +1561,19 @@ func (in *MachineDeploymentTopology) DeepCopyInto(out *MachineDeploymentTopology *out = new(MachineHealthCheckTopology) (*in).DeepCopyInto(*out) } - if in.NodeDrainTimeout != nil { - in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout - *out = new(metav1.Duration) + if in.NodeDrainTimeoutSeconds != nil { + in, out := &in.NodeDrainTimeoutSeconds, &out.NodeDrainTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeVolumeDetachTimeout != nil { - in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout - *out = new(metav1.Duration) + if in.NodeVolumeDetachTimeoutSeconds != nil { + in, out := &in.NodeVolumeDetachTimeoutSeconds, &out.NodeVolumeDetachTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeDeletionTimeout != nil { - in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout - *out = new(metav1.Duration) + if in.NodeDeletionTimeoutSeconds != nil { + in, out := &in.NodeDeletionTimeoutSeconds, &out.NodeDeletionTimeoutSeconds + *out = new(int32) **out = **in } if in.MinReadySeconds != nil { @@ -1875,9 +1875,9 @@ func (in *MachineHealthCheckClass) DeepCopyInto(out *MachineHealthCheckClass) { *out = new(string) **out = **in } - if in.NodeStartupTimeout != nil { - in, out := &in.NodeStartupTimeout, &out.NodeStartupTimeout - *out = new(metav1.Duration) + if in.NodeStartupTimeoutSeconds != nil { + in, out := &in.NodeStartupTimeoutSeconds, &out.NodeStartupTimeoutSeconds + *out = new(int32) **out = **in } if in.RemediationTemplate != nil { @@ -1968,9 +1968,9 @@ func (in *MachineHealthCheckSpec) DeepCopyInto(out *MachineHealthCheckSpec) { *out = new(string) **out = **in } - if in.NodeStartupTimeout != nil { - in, out := &in.NodeStartupTimeout, &out.NodeStartupTimeout - *out = new(metav1.Duration) + if in.NodeStartupTimeoutSeconds != nil { + in, out := &in.NodeStartupTimeoutSeconds, &out.NodeStartupTimeoutSeconds + *out = new(int32) **out = **in } if in.RemediationTemplate != nil { @@ -2168,19 +2168,19 @@ func (in *MachinePoolClass) DeepCopyInto(out *MachinePoolClass) { *out = new(MachinePoolClassNamingStrategy) (*in).DeepCopyInto(*out) } - if in.NodeDrainTimeout != nil { - in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout - *out = new(metav1.Duration) + if in.NodeDrainTimeoutSeconds != nil { + in, out := &in.NodeDrainTimeoutSeconds, &out.NodeDrainTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeVolumeDetachTimeout != nil { - in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout - *out = new(metav1.Duration) + if in.NodeVolumeDetachTimeoutSeconds != nil { + in, out := &in.NodeVolumeDetachTimeoutSeconds, &out.NodeVolumeDetachTimeoutSeconds + *out = new(int32) **out = **in } - if 
in.NodeDeletionTimeout != nil { - in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout - *out = new(metav1.Duration) + if in.NodeDeletionTimeoutSeconds != nil { + in, out := &in.NodeDeletionTimeoutSeconds, &out.NodeDeletionTimeoutSeconds + *out = new(int32) **out = **in } if in.MinReadySeconds != nil { @@ -2402,19 +2402,19 @@ func (in *MachinePoolTopology) DeepCopyInto(out *MachinePoolTopology) { *out = make([]string, len(*in)) copy(*out, *in) } - if in.NodeDrainTimeout != nil { - in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout - *out = new(metav1.Duration) + if in.NodeDrainTimeoutSeconds != nil { + in, out := &in.NodeDrainTimeoutSeconds, &out.NodeDrainTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeVolumeDetachTimeout != nil { - in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout - *out = new(metav1.Duration) + if in.NodeVolumeDetachTimeoutSeconds != nil { + in, out := &in.NodeVolumeDetachTimeoutSeconds, &out.NodeVolumeDetachTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeDeletionTimeout != nil { - in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout - *out = new(metav1.Duration) + if in.NodeDeletionTimeoutSeconds != nil { + in, out := &in.NodeDeletionTimeoutSeconds, &out.NodeDeletionTimeoutSeconds + *out = new(int32) **out = **in } if in.MinReadySeconds != nil { @@ -2758,19 +2758,19 @@ func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { *out = make([]MachineReadinessGate, len(*in)) copy(*out, *in) } - if in.NodeDrainTimeout != nil { - in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout - *out = new(metav1.Duration) + if in.NodeDrainTimeoutSeconds != nil { + in, out := &in.NodeDrainTimeoutSeconds, &out.NodeDrainTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeVolumeDetachTimeout != nil { - in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout - *out = new(metav1.Duration) + if in.NodeVolumeDetachTimeoutSeconds != nil { + in, out := &in.NodeVolumeDetachTimeoutSeconds, &out.NodeVolumeDetachTimeoutSeconds + *out = new(int32) **out = **in } - if in.NodeDeletionTimeout != nil { - in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout - *out = new(metav1.Duration) + if in.NodeDeletionTimeoutSeconds != nil { + in, out := &in.NodeDeletionTimeoutSeconds, &out.NodeDeletionTimeoutSeconds + *out = new(int32) **out = **in } } @@ -3099,7 +3099,6 @@ func (in *Topology) DeepCopy() *Topology { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UnhealthyNodeCondition) DeepCopyInto(out *UnhealthyNodeCondition) { *out = *in - out.Timeout = in.Timeout } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnhealthyNodeCondition. diff --git a/api/core/v1beta2/zz_generated.openapi.go b/api/core/v1beta2/zz_generated.openapi.go index 558fb30ff13e..e62b009ffba4 100644 --- a/api/core/v1beta2/zz_generated.openapi.go +++ b/api/core/v1beta2/zz_generated.openapi.go @@ -1382,22 +1382,25 @@ func schema_cluster_api_api_core_v1beta2_ControlPlaneClass(ref common.ReferenceC Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClassNamingStrategy"), }, }, - "nodeDrainTimeout": { + "nodeDrainTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. 
NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology.", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeVolumeDetachTimeout": { + "nodeVolumeDetachTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. NOTE: This value can be overridden while defining a Cluster.Topology.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. NOTE: This value can be overridden while defining a Cluster.Topology.", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeDeletionTimeout": { + "nodeDeletionTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. NOTE: This value can be overridden while defining a Cluster.Topology.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. 
NOTE: This value can be overridden while defining a Cluster.Topology.", + Type: []string{"integer"}, + Format: "int32", }, }, "readinessGates": { @@ -1427,7 +1430,7 @@ func schema_cluster_api_api_core_v1beta2_ControlPlaneClass(ref common.ReferenceC }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClassNamingStrategy", "sigs.k8s.io/cluster-api/api/core/v1beta2.LocalObjectTemplate", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineHealthCheckClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineReadinessGate", "sigs.k8s.io/cluster-api/api/core/v1beta2.ObjectMeta"}, + "k8s.io/api/core/v1.ObjectReference", "sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneClassNamingStrategy", "sigs.k8s.io/cluster-api/api/core/v1beta2.LocalObjectTemplate", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineHealthCheckClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineReadinessGate", "sigs.k8s.io/cluster-api/api/core/v1beta2.ObjectMeta"}, } } @@ -1478,22 +1481,25 @@ func schema_cluster_api_api_core_v1beta2_ControlPlaneTopology(ref common.Referen Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.MachineHealthCheckTopology"), }, }, - "nodeDrainTimeout": { + "nodeDrainTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout`", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeVolumeDetachTimeout": { + "nodeVolumeDetachTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations.", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeDeletionTimeout": { + "nodeDeletionTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. 
Defaults to 10 seconds.", + Type: []string{"integer"}, + Format: "int32", }, }, "readinessGates": { @@ -1528,7 +1534,7 @@ func schema_cluster_api_api_core_v1beta2_ControlPlaneTopology(ref common.Referen }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneVariables", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineHealthCheckTopology", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineReadinessGate", "sigs.k8s.io/cluster-api/api/core/v1beta2.ObjectMeta"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.ControlPlaneVariables", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineHealthCheckTopology", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineReadinessGate", "sigs.k8s.io/cluster-api/api/core/v1beta2.ObjectMeta"}, } } @@ -2171,13 +2177,13 @@ func schema_cluster_api_api_core_v1beta2_MachineDeletionStatus(ref common.Refere Properties: map[string]spec.Schema{ "nodeDrainStartTime": { SchemaProps: spec.SchemaProps{ - Description: "nodeDrainStartTime is the time when the drain of the node started and is used to determine if the NodeDrainTimeout is exceeded. Only present when the Machine has a deletionTimestamp and draining the node had been started.", + Description: "nodeDrainStartTime is the time when the drain of the node started and is used to determine if the nodeDrainTimeoutSeconds is exceeded. Only present when the Machine has a deletionTimestamp and draining the node had been started.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "waitForNodeVolumeDetachStartTime": { SchemaProps: spec.SchemaProps{ - Description: "waitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started and is used to determine if the NodeVolumeDetachTimeout is exceeded. Detaching volumes from nodes is usually done by CSI implementations and the current state is observed from the node's `.Status.VolumesAttached` field. Only present when the Machine has a deletionTimestamp and waiting for volume detachments had been started.", + Description: "waitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started and is used to determine if the nodeVolumeDetachTimeoutSeconds is exceeded. Detaching volumes from nodes is usually done by CSI implementations and the current state is observed from the node's `.Status.VolumesAttached` field. Only present when the Machine has a deletionTimestamp and waiting for volume detachments had been started.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -2280,22 +2286,25 @@ func schema_cluster_api_api_core_v1beta2_MachineDeploymentClass(ref common.Refer Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeploymentClassNamingStrategy"), }, }, - "nodeDrainTimeout": { + "nodeDrainTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. 
NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass.", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeVolumeDetachTimeout": { + "nodeVolumeDetachTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass.", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeDeletionTimeout": { + "nodeDeletionTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass.", + Type: []string{"integer"}, + Format: "int32", }, }, "minReadySeconds": { @@ -2338,7 +2347,7 @@ func schema_cluster_api_api_core_v1beta2_MachineDeploymentClass(ref common.Refer }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeploymentClassNamingStrategy", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeploymentClassTemplate", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineHealthCheckClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineReadinessGate"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeploymentClassNamingStrategy", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeploymentClassTemplate", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineHealthCheckClass", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineReadinessGate"}, } } @@ -2717,22 +2726,25 @@ func schema_cluster_api_api_core_v1beta2_MachineDeploymentTopology(ref common.Re Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.MachineHealthCheckTopology"), }, }, - "nodeDrainTimeout": { + "nodeDrainTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. 
NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout`", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeVolumeDetachTimeout": { + "nodeVolumeDetachTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations.", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeDeletionTimeout": { + "nodeDeletionTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds.", + Type: []string{"integer"}, + Format: "int32", }, }, "minReadySeconds": { @@ -2781,7 +2793,7 @@ func schema_cluster_api_api_core_v1beta2_MachineDeploymentTopology(ref common.Re }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeploymentVariables", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineHealthCheckTopology", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineReadinessGate", "sigs.k8s.io/cluster-api/api/core/v1beta2.ObjectMeta"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineDeploymentVariables", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineHealthCheckTopology", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineReadinessGate", "sigs.k8s.io/cluster-api/api/core/v1beta2.ObjectMeta"}, } } @@ -3227,10 +3239,11 @@ func schema_cluster_api_api_core_v1beta2_MachineHealthCheckClass(ref common.Refe Format: "", }, }, - "nodeStartupTimeout": { + "nodeStartupTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeStartupTimeout allows to set the maximum time for MachineHealthCheck to consider a Machine unhealthy if a corresponding Node isn't associated through a `Spec.ProviderID` field.\n\nThe duration set in this field is compared to the greatest of: - Cluster's infrastructure ready condition timestamp (if and when available) - Control Plane's initialized condition timestamp (if and when available) - Machine's infrastructure ready condition timestamp (if and when available) - Machine's metadata creation timestamp\n\nDefaults to 10 minutes. 
If you wish to disable this feature, set the value explicitly to 0.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeStartupTimeoutSeconds allows to set the maximum time for MachineHealthCheck to consider a Machine unhealthy if a corresponding Node isn't associated through a `Spec.ProviderID` field.\n\nThe duration set in this field is compared to the greatest of: - Cluster's infrastructure ready condition timestamp (if and when available) - Control Plane's initialized condition timestamp (if and when available) - Machine's infrastructure ready condition timestamp (if and when available) - Machine's metadata creation timestamp\n\nDefaults to 10 minutes. If you wish to disable this feature, set the value explicitly to 0.", + Type: []string{"integer"}, + Format: "int32", }, }, "remediationTemplate": { @@ -3243,7 +3256,7 @@ func schema_cluster_api_api_core_v1beta2_MachineHealthCheckClass(ref common.Refe }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "k8s.io/apimachinery/pkg/util/intstr.IntOrString", "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, + "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/util/intstr.IntOrString", "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, } } @@ -3368,10 +3381,11 @@ func schema_cluster_api_api_core_v1beta2_MachineHealthCheckSpec(ref common.Refer Format: "", }, }, - "nodeStartupTimeout": { + "nodeStartupTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeStartupTimeout allows to set the maximum time for MachineHealthCheck to consider a Machine unhealthy if a corresponding Node isn't associated through a `Spec.ProviderID` field.\n\nThe duration set in this field is compared to the greatest of: - Cluster's infrastructure ready condition timestamp (if and when available) - Control Plane's initialized condition timestamp (if and when available) - Machine's infrastructure ready condition timestamp (if and when available) - Machine's metadata creation timestamp\n\nDefaults to 10 minutes. If you wish to disable this feature, set the value explicitly to 0.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeStartupTimeoutSeconds allows to set the maximum time for MachineHealthCheck to consider a Machine unhealthy if a corresponding Node isn't associated through a `Spec.ProviderID` field.\n\nThe duration set in this field is compared to the greatest of: - Cluster's infrastructure ready condition timestamp (if and when available) - Control Plane's initialized condition timestamp (if and when available) - Machine's infrastructure ready condition timestamp (if and when available) - Machine's metadata creation timestamp\n\nDefaults to 10 minutes. 
If you wish to disable this feature, set the value explicitly to 0.", + Type: []string{"integer"}, + Format: "int32", }, }, "remediationTemplate": { @@ -3385,7 +3399,7 @@ func schema_cluster_api_api_core_v1beta2_MachineHealthCheckSpec(ref common.Refer }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector", "k8s.io/apimachinery/pkg/util/intstr.IntOrString", "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, + "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector", "k8s.io/apimachinery/pkg/util/intstr.IntOrString", "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, } } @@ -3519,10 +3533,11 @@ func schema_cluster_api_api_core_v1beta2_MachineHealthCheckTopology(ref common.R Format: "", }, }, - "nodeStartupTimeout": { + "nodeStartupTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeStartupTimeout allows to set the maximum time for MachineHealthCheck to consider a Machine unhealthy if a corresponding Node isn't associated through a `Spec.ProviderID` field.\n\nThe duration set in this field is compared to the greatest of: - Cluster's infrastructure ready condition timestamp (if and when available) - Control Plane's initialized condition timestamp (if and when available) - Machine's infrastructure ready condition timestamp (if and when available) - Machine's metadata creation timestamp\n\nDefaults to 10 minutes. If you wish to disable this feature, set the value explicitly to 0.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeStartupTimeoutSeconds allows to set the maximum time for MachineHealthCheck to consider a Machine unhealthy if a corresponding Node isn't associated through a `Spec.ProviderID` field.\n\nThe duration set in this field is compared to the greatest of: - Cluster's infrastructure ready condition timestamp (if and when available) - Control Plane's initialized condition timestamp (if and when available) - Machine's infrastructure ready condition timestamp (if and when available) - Machine's metadata creation timestamp\n\nDefaults to 10 minutes. If you wish to disable this feature, set the value explicitly to 0.", + Type: []string{"integer"}, + Format: "int32", }, }, "remediationTemplate": { @@ -3535,7 +3550,7 @@ func schema_cluster_api_api_core_v1beta2_MachineHealthCheckTopology(ref common.R }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "k8s.io/apimachinery/pkg/util/intstr.IntOrString", "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, + "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/util/intstr.IntOrString", "sigs.k8s.io/cluster-api/api/core/v1beta2.UnhealthyNodeCondition"}, } } @@ -3761,22 +3776,25 @@ func schema_cluster_api_api_core_v1beta2_MachinePoolClass(ref common.ReferenceCa Ref: ref("sigs.k8s.io/cluster-api/api/core/v1beta2.MachinePoolClassNamingStrategy"), }, }, - "nodeDrainTimeout": { + "nodeDrainTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. 
NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeVolumeDetachTimeout": { + "nodeVolumeDetachTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeDeletionTimeout": { + "nodeDeletionTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine Pool is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine Pool is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass.", + Type: []string{"integer"}, + Format: "int32", }, }, "minReadySeconds": { @@ -3791,7 +3809,7 @@ func schema_cluster_api_api_core_v1beta2_MachinePoolClass(ref common.ReferenceCa }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachinePoolClassNamingStrategy", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachinePoolClassTemplate"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.MachinePoolClassNamingStrategy", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachinePoolClassTemplate"}, } } @@ -4171,22 +4189,25 @@ func schema_cluster_api_api_core_v1beta2_MachinePoolTopology(ref common.Referenc }, }, }, - "nodeDrainTimeout": { + "nodeDrainTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. 
NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout`", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeVolumeDetachTimeout": { + "nodeVolumeDetachTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations.", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeDeletionTimeout": { + "nodeDeletionTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the MachinePool hosts after the MachinePool is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the MachinePool hosts after the MachinePool is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds.", + Type: []string{"integer"}, + Format: "int32", }, }, "minReadySeconds": { @@ -4214,7 +4235,7 @@ func schema_cluster_api_api_core_v1beta2_MachinePoolTopology(ref common.Referenc }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachinePoolVariables", "sigs.k8s.io/cluster-api/api/core/v1beta2.ObjectMeta"}, + "sigs.k8s.io/cluster-api/api/core/v1beta2.MachinePoolVariables", "sigs.k8s.io/cluster-api/api/core/v1beta2.ObjectMeta"}, } } @@ -4793,22 +4814,25 @@ func schema_cluster_api_api_core_v1beta2_MachineSpec(ref common.ReferenceCallbac }, }, }, - "nodeDrainTimeout": { + "nodeDrainTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout`", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeVolumeDetachTimeout": { + "nodeVolumeDetachTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes to be detached. 
The default value is 0, meaning that the volumes can be detached without any time limitations.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations.", + Type: []string{"integer"}, + Format: "int32", }, }, - "nodeDeletionTimeout": { + "nodeDeletionTimeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds.", + Type: []string{"integer"}, + Format: "int32", }, }, }, @@ -4816,7 +4840,7 @@ func schema_cluster_api_api_core_v1beta2_MachineSpec(ref common.ReferenceCallbac }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/core/v1beta2.Bootstrap", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineReadinessGate"}, + "k8s.io/api/core/v1.ObjectReference", "sigs.k8s.io/cluster-api/api/core/v1beta2.Bootstrap", "sigs.k8s.io/cluster-api/api/core/v1beta2.MachineReadinessGate"}, } } @@ -5356,18 +5380,18 @@ func schema_cluster_api_api_core_v1beta2_UnhealthyNodeCondition(ref common.Refer Format: "", }, }, - "timeout": { + "timeoutSeconds": { SchemaProps: spec.SchemaProps{ - Description: "timeout is the duration that a node must be in a given status for, after which the node is considered unhealthy. For example, with a value of \"1h\", the node must match the status for at least 1 hour before being considered unhealthy.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "timeoutSeconds is the duration that a node must be in a given status for, after which the node is considered unhealthy. For example, with a value of 3600, the node must match the status for at least 1 hour before being considered unhealthy.", + Default: 0, + Type: []string{"integer"}, + Format: "int32", }, }, }, - Required: []string{"type", "status", "timeout"}, + Required: []string{"type", "status", "timeoutSeconds"}, }, }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, } } diff --git a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml index b8c3ffd26bd0..aaf062f11537 100644 --- a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml +++ b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigs.yaml @@ -5284,7 +5284,7 @@ spec: expires: description: |- expires specifies the timestamp when this token expires. Defaults to being set - dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. + dynamically at runtime based on the ttlSeconds. Expires and ttlSeconds are mutually exclusive.
format: date-time type: string groups: @@ -5302,11 +5302,13 @@ spec: token is used for establishing bidirectional trust between nodes and control-planes. Used for joining nodes in the cluster. type: string - ttl: + ttlSeconds: description: |- - ttl defines the time to live for this token. Defaults to 24h. - Expires and TTL are mutually exclusive. - type: string + ttlSeconds defines the time to live for this token. Defaults to 24h. + Expires and ttlSeconds are mutually exclusive. + format: int32 + minimum: 0 + type: integer usages: description: |- usages describes the ways in which this token can be used. Can by default be used diff --git a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml index 225179b4b056..fcc6dbc90e6c 100644 --- a/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml +++ b/bootstrap/kubeadm/config/crd/bases/bootstrap.cluster.x-k8s.io_kubeadmconfigtemplates.yaml @@ -5186,7 +5186,7 @@ spec: expires: description: |- expires specifies the timestamp when this token expires. Defaults to being set - dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. + dynamically at runtime based on the ttlSeconds. Expires and ttlSeconds are mutually exclusive. format: date-time type: string groups: @@ -5204,11 +5204,13 @@ spec: token is used for establishing bidirectional trust between nodes and control-planes. Used for joining nodes in the cluster. type: string - ttl: + ttlSeconds: description: |- - ttl defines the time to live for this token. Defaults to 24h. - Expires and TTL are mutually exclusive. - type: string + ttlSeconds defines the time to live for this token. Defaults to 24h. + Expires and ttlSeconds are mutually exclusive. + format: int32 + minimum: 0 + type: integer usages: description: |- usages describes the ways in which this token can be used. Can by default be used diff --git a/bootstrap/kubeadm/config/crd/kustomization.yaml b/bootstrap/kubeadm/config/crd/kustomization.yaml index bbd49f8dd950..3169771830c3 100644 --- a/bootstrap/kubeadm/config/crd/kustomization.yaml +++ b/bootstrap/kubeadm/config/crd/kustomization.yaml @@ -1,5 +1,8 @@ labels: - pairs: + # Note: This is needed so the topology reconciler can figure out + # the contract of v1beta1 when v1beta1 objects are used in ClusterClasses. 
+ cluster.x-k8s.io/v1beta1: v1beta1 cluster.x-k8s.io/v1beta2: v1beta2 # This kustomization.yaml is not intended to be run by itself, diff --git a/bootstrap/kubeadm/types/upstreamv1beta3/conversion.go b/bootstrap/kubeadm/types/upstreamv1beta3/conversion.go index a73b723793ab..82372f794be4 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta3/conversion.go +++ b/bootstrap/kubeadm/types/upstreamv1beta3/conversion.go @@ -22,6 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/conversion" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstream" ) @@ -74,7 +75,7 @@ func Convert_upstreamv1beta3_JoinConfiguration_To_v1beta2_JoinConfiguration(in * if out.Timeouts == nil { out.Timeouts = &bootstrapv1.Timeouts{} } - out.Timeouts.TLSBootstrapSeconds = bootstrapv1.ConvertToSeconds(in.Discovery.Timeout) + out.Timeouts.TLSBootstrapSeconds = clusterv1.ConvertToSeconds(in.Discovery.Timeout) } return nil } @@ -109,6 +110,14 @@ func Convert_upstreamv1beta3_NodeRegistrationOptions_To_v1beta2_NodeRegistration return autoConvert_upstreamv1beta3_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions(in, out, s) } +func Convert_upstreamv1beta3_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *bootstrapv1.BootstrapToken, s apimachineryconversion.Scope) error { + if err := autoConvert_upstreamv1beta3_BootstrapToken_To_v1beta2_BootstrapToken(in, out, s); err != nil { + return err + } + out.TTLSeconds = clusterv1.ConvertToSeconds(in.TTL) + return nil +} + // Custom conversion from the hub version, CABPK v1beta1, to this API, kubeadm v1beta3. func Convert_v1beta2_InitConfiguration_To_upstreamv1beta3_InitConfiguration(in *bootstrapv1.InitConfiguration, out *InitConfiguration, s apimachineryconversion.Scope) error { @@ -123,7 +132,7 @@ func Convert_v1beta2_JoinConfiguration_To_upstreamv1beta3_JoinConfiguration(in * } if in.Timeouts != nil { - out.Discovery.Timeout = bootstrapv1.ConvertFromSeconds(in.Timeouts.TLSBootstrapSeconds) + out.Discovery.Timeout = clusterv1.ConvertFromSeconds(in.Timeouts.TLSBootstrapSeconds) } return nil } @@ -160,6 +169,14 @@ func Convert_v1beta2_NodeRegistrationOptions_To_upstreamv1beta3_NodeRegistration return autoConvert_v1beta2_NodeRegistrationOptions_To_upstreamv1beta3_NodeRegistrationOptions(in, out, s) } +func Convert_v1beta2_BootstrapToken_To_upstreamv1beta3_BootstrapToken(in *bootstrapv1.BootstrapToken, out *BootstrapToken, s apimachineryconversion.Scope) error { + if err := autoConvert_v1beta2_BootstrapToken_To_upstreamv1beta3_BootstrapToken(in, out, s); err != nil { + return err + } + out.TTL = clusterv1.ConvertFromSeconds(in.TTLSeconds) + return nil +} + // Func to allow handling fields that only exist in upstream types. 
var _ upstream.AdditionalDataSetter = &ClusterConfiguration{} @@ -188,7 +205,7 @@ func (src *ClusterConfiguration) SetAdditionalData(data upstream.AdditionalData) src.Networking.PodSubnet = *data.PodSubnet } if data.ControlPlaneComponentHealthCheckSeconds != nil { - src.APIServer.TimeoutForControlPlane = bootstrapv1.ConvertFromSeconds(data.ControlPlaneComponentHealthCheckSeconds) + src.APIServer.TimeoutForControlPlane = clusterv1.ConvertFromSeconds(data.ControlPlaneComponentHealthCheckSeconds) } } @@ -221,6 +238,6 @@ func (src *ClusterConfiguration) GetAdditionalData(data *upstream.AdditionalData data.PodSubnet = ptr.To(src.Networking.PodSubnet) } if src.APIServer.TimeoutForControlPlane != nil { - data.ControlPlaneComponentHealthCheckSeconds = bootstrapv1.ConvertToSeconds(src.APIServer.TimeoutForControlPlane) + data.ControlPlaneComponentHealthCheckSeconds = clusterv1.ConvertToSeconds(src.APIServer.TimeoutForControlPlane) } } diff --git a/bootstrap/kubeadm/types/upstreamv1beta3/conversion_test.go b/bootstrap/kubeadm/types/upstreamv1beta3/conversion_test.go index 5eb6601f7a83..c550d8932aa0 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta3/conversion_test.go +++ b/bootstrap/kubeadm/types/upstreamv1beta3/conversion_test.go @@ -56,7 +56,7 @@ func TestFuzzyConversion(t *testing.T) { Spoke: &InitConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, - FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, + FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs, initConfigurationFuzzFuncs}, })) t.Run("for JoinConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ Scheme: scheme, @@ -89,6 +89,12 @@ func clusterConfigurationFuzzFuncs(_ runtimeserializer.CodecFactory) []interface } } +func initConfigurationFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { + return []interface{}{ + spokeBootstrapToken, + } +} + // Custom fuzzers for kubeadm v1beta3 types. // NOTES: // - When fields do not exist in cabpk v1beta1 types, pinning it to avoid kubeadm v1beta3 --> cabpk v1beta1 --> kubeadm v1beta3 round trip errors. 
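The custom spoke fuzzers in these test files exist because the hub now stores these values as *int32 whole seconds: a fuzzed metav1.Duration carrying sub-second precision could never survive the spoke-hub-spoke round trip, so the spokeBootstrapToken fuzzer added in the next hunk pins TTL to whole seconds. A minimal sketch of the precision loss, assuming the conversion helpers truncate toward zero (toSeconds and fromSeconds below are illustrative stand-ins for clusterv1.ConvertToSeconds and clusterv1.ConvertFromSeconds, whose implementations are not part of this diff):

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// toSeconds converts a *metav1.Duration to *int32 whole seconds, preserving nil.
// Sub-second precision is dropped by the integer division.
func toSeconds(in *metav1.Duration) *int32 {
	if in == nil {
		return nil
	}
	return ptr.To(int32(in.Duration / time.Second))
}

// fromSeconds converts *int32 whole seconds back to a *metav1.Duration, preserving nil.
func fromSeconds(in *int32) *metav1.Duration {
	if in == nil {
		return nil
	}
	return &metav1.Duration{Duration: time.Duration(*in) * time.Second}
}

func main() {
	ttl := &metav1.Duration{Duration: 1500 * time.Millisecond}
	// Prints 1s: the 500ms never comes back, which is exactly the round trip
	// error the spokeBootstrapToken fuzzer avoids by pinning to whole seconds.
	fmt.Println(fromSeconds(toSeconds(ttl)).Duration)
}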
@@ -181,3 +187,11 @@ func hubInitConfigurationFuzzer(obj *bootstrapv1.InitConfiguration, c randfill.C obj.Timeouts = nil } + +func spokeBootstrapToken(in *BootstrapToken, c randfill.Continue) { + c.FillNoCustom(in) + + if in.TTL != nil { + in.TTL = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} diff --git a/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.conversion.go b/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.conversion.go index 39b1ef5d90bf..8261a612eb0c 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.conversion.go +++ b/bootstrap/kubeadm/types/upstreamv1beta3/zz_generated.conversion.go @@ -53,16 +53,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*BootstrapToken)(nil), (*v1beta2.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_upstreamv1beta3_BootstrapToken_To_v1beta2_BootstrapToken(a.(*BootstrapToken), b.(*v1beta2.BootstrapToken), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_BootstrapToken_To_upstreamv1beta3_BootstrapToken(a.(*v1beta2.BootstrapToken), b.(*BootstrapToken), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*BootstrapTokenDiscovery)(nil), (*v1beta2.BootstrapTokenDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_upstreamv1beta3_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(a.(*BootstrapTokenDiscovery), b.(*v1beta2.BootstrapTokenDiscovery), scope) }); err != nil { @@ -168,6 +158,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*BootstrapToken)(nil), (*v1beta2.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_upstreamv1beta3_BootstrapToken_To_v1beta2_BootstrapToken(a.(*BootstrapToken), b.(*v1beta2.BootstrapToken), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*ClusterConfiguration)(nil), (*v1beta2.ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_upstreamv1beta3_ClusterConfiguration_To_v1beta2_ClusterConfiguration(a.(*ClusterConfiguration), b.(*v1beta2.ClusterConfiguration), scope) }); err != nil { @@ -208,6 +203,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_BootstrapToken_To_upstreamv1beta3_BootstrapToken(a.(*v1beta2.BootstrapToken), b.(*BootstrapToken), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.ControlPlaneComponent)(nil), (*ControlPlaneComponent)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_ControlPlaneComponent_To_upstreamv1beta3_ControlPlaneComponent(a.(*v1beta2.ControlPlaneComponent), b.(*ControlPlaneComponent), scope) }); err != nil { @@ -288,33 +288,23 @@ func Convert_v1beta2_APIServer_To_upstreamv1beta3_APIServer(in *v1beta2.APIServe func autoConvert_upstreamv1beta3_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *v1beta2.BootstrapToken, s conversion.Scope) error { out.Token = 
(*v1beta2.BootstrapTokenString)(unsafe.Pointer(in.Token)) out.Description = in.Description - out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL)) + // WARNING: in.TTL requires manual conversion: does not exist in peer-type out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires)) out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages)) out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) return nil } -// Convert_upstreamv1beta3_BootstrapToken_To_v1beta2_BootstrapToken is an autogenerated conversion function. -func Convert_upstreamv1beta3_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *v1beta2.BootstrapToken, s conversion.Scope) error { - return autoConvert_upstreamv1beta3_BootstrapToken_To_v1beta2_BootstrapToken(in, out, s) -} - func autoConvert_v1beta2_BootstrapToken_To_upstreamv1beta3_BootstrapToken(in *v1beta2.BootstrapToken, out *BootstrapToken, s conversion.Scope) error { out.Token = (*BootstrapTokenString)(unsafe.Pointer(in.Token)) out.Description = in.Description - out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL)) + // WARNING: in.TTLSeconds requires manual conversion: does not exist in peer-type out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires)) out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages)) out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) return nil } -// Convert_v1beta2_BootstrapToken_To_upstreamv1beta3_BootstrapToken is an autogenerated conversion function. -func Convert_v1beta2_BootstrapToken_To_upstreamv1beta3_BootstrapToken(in *v1beta2.BootstrapToken, out *BootstrapToken, s conversion.Scope) error { - return autoConvert_v1beta2_BootstrapToken_To_upstreamv1beta3_BootstrapToken(in, out, s) -} - func autoConvert_upstreamv1beta3_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(in *BootstrapTokenDiscovery, out *v1beta2.BootstrapTokenDiscovery, s conversion.Scope) error { out.Token = in.Token out.APIServerEndpoint = in.APIServerEndpoint @@ -620,7 +610,17 @@ func Convert_v1beta2_ImageMeta_To_upstreamv1beta3_ImageMeta(in *v1beta2.ImageMet } func autoConvert_upstreamv1beta3_InitConfiguration_To_v1beta2_InitConfiguration(in *InitConfiguration, out *v1beta2.InitConfiguration, s conversion.Scope) error { - out.BootstrapTokens = *(*[]v1beta2.BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens)) + if in.BootstrapTokens != nil { + in, out := &in.BootstrapTokens, &out.BootstrapTokens + *out = make([]v1beta2.BootstrapToken, len(*in)) + for i := range *in { + if err := Convert_upstreamv1beta3_BootstrapToken_To_v1beta2_BootstrapToken(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.BootstrapTokens = nil + } if err := Convert_upstreamv1beta3_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { return err } @@ -634,7 +634,17 @@ func autoConvert_upstreamv1beta3_InitConfiguration_To_v1beta2_InitConfiguration( } func autoConvert_v1beta2_InitConfiguration_To_upstreamv1beta3_InitConfiguration(in *v1beta2.InitConfiguration, out *InitConfiguration, s conversion.Scope) error { - out.BootstrapTokens = *(*[]BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens)) + if in.BootstrapTokens != nil { + in, out := &in.BootstrapTokens, &out.BootstrapTokens + *out = make([]BootstrapToken, len(*in)) + for i := range *in { + if err := Convert_v1beta2_BootstrapToken_To_upstreamv1beta3_BootstrapToken(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.BootstrapTokens = nil + } if err := 
Convert_v1beta2_NodeRegistrationOptions_To_upstreamv1beta3_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { return err } diff --git a/bootstrap/kubeadm/types/upstreamv1beta4/conversion.go b/bootstrap/kubeadm/types/upstreamv1beta4/conversion.go index 92752581a5a5..cf80524a259c 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta4/conversion.go +++ b/bootstrap/kubeadm/types/upstreamv1beta4/conversion.go @@ -22,6 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/conversion" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/upstream" ) @@ -97,12 +98,20 @@ func Convert_upstreamv1beta4_Timeouts_To_v1beta2_Timeouts(in *Timeouts, out *boo return err } - out.ControlPlaneComponentHealthCheckSeconds = bootstrapv1.ConvertToSeconds(in.ControlPlaneComponentHealthCheck) - out.KubeletHealthCheckSeconds = bootstrapv1.ConvertToSeconds(in.KubeletHealthCheck) - out.KubernetesAPICallSeconds = bootstrapv1.ConvertToSeconds(in.KubernetesAPICall) - out.DiscoverySeconds = bootstrapv1.ConvertToSeconds(in.Discovery) - out.EtcdAPICallSeconds = bootstrapv1.ConvertToSeconds(in.EtcdAPICall) - out.TLSBootstrapSeconds = bootstrapv1.ConvertToSeconds(in.TLSBootstrap) + out.ControlPlaneComponentHealthCheckSeconds = clusterv1.ConvertToSeconds(in.ControlPlaneComponentHealthCheck) + out.KubeletHealthCheckSeconds = clusterv1.ConvertToSeconds(in.KubeletHealthCheck) + out.KubernetesAPICallSeconds = clusterv1.ConvertToSeconds(in.KubernetesAPICall) + out.DiscoverySeconds = clusterv1.ConvertToSeconds(in.Discovery) + out.EtcdAPICallSeconds = clusterv1.ConvertToSeconds(in.EtcdAPICall) + out.TLSBootstrapSeconds = clusterv1.ConvertToSeconds(in.TLSBootstrap) + return nil +} + +func Convert_upstreamv1beta4_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *bootstrapv1.BootstrapToken, s apimachineryconversion.Scope) error { + if err := autoConvert_upstreamv1beta4_BootstrapToken_To_v1beta2_BootstrapToken(in, out, s); err != nil { + return err + } + out.TTLSeconds = clusterv1.ConvertToSeconds(in.TTL) return nil } @@ -130,12 +139,20 @@ func Convert_v1beta2_Timeouts_To_upstreamv1beta4_Timeouts(in *bootstrapv1.Timeou return err } - out.ControlPlaneComponentHealthCheck = bootstrapv1.ConvertFromSeconds(in.ControlPlaneComponentHealthCheckSeconds) - out.KubeletHealthCheck = bootstrapv1.ConvertFromSeconds(in.KubeletHealthCheckSeconds) - out.KubernetesAPICall = bootstrapv1.ConvertFromSeconds(in.KubernetesAPICallSeconds) - out.EtcdAPICall = bootstrapv1.ConvertFromSeconds(in.EtcdAPICallSeconds) - out.TLSBootstrap = bootstrapv1.ConvertFromSeconds(in.TLSBootstrapSeconds) - out.Discovery = bootstrapv1.ConvertFromSeconds(in.DiscoverySeconds) + out.ControlPlaneComponentHealthCheck = clusterv1.ConvertFromSeconds(in.ControlPlaneComponentHealthCheckSeconds) + out.KubeletHealthCheck = clusterv1.ConvertFromSeconds(in.KubeletHealthCheckSeconds) + out.KubernetesAPICall = clusterv1.ConvertFromSeconds(in.KubernetesAPICallSeconds) + out.EtcdAPICall = clusterv1.ConvertFromSeconds(in.EtcdAPICallSeconds) + out.TLSBootstrap = clusterv1.ConvertFromSeconds(in.TLSBootstrapSeconds) + out.Discovery = clusterv1.ConvertFromSeconds(in.DiscoverySeconds) + return nil +} + +func Convert_v1beta2_BootstrapToken_To_upstreamv1beta4_BootstrapToken(in *bootstrapv1.BootstrapToken, out *BootstrapToken, s apimachineryconversion.Scope) error { + if err := 
autoConvert_v1beta2_BootstrapToken_To_upstreamv1beta4_BootstrapToken(in, out, s); err != nil { + return err + } + out.TTL = clusterv1.ConvertFromSeconds(in.TTLSeconds) return nil } diff --git a/bootstrap/kubeadm/types/upstreamv1beta4/conversion_test.go b/bootstrap/kubeadm/types/upstreamv1beta4/conversion_test.go index dab2c0f94e8a..5840ae9eda40 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta4/conversion_test.go +++ b/bootstrap/kubeadm/types/upstreamv1beta4/conversion_test.go @@ -56,7 +56,7 @@ func TestFuzzyConversion(t *testing.T) { Spoke: &InitConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, - FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, + FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs, initConfigurationFuzzFuncs}, })) t.Run("for JoinConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ Scheme: scheme, @@ -80,6 +80,12 @@ func fuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { } } +func initConfigurationFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { + return []interface{}{ + spokeBootstrapToken, + } +} + // Custom fuzzers for kubeadm v1beta4 types. // NOTES: // - When fields do not exist in cabpk v1beta1 types, pinning them to avoid kubeadm v1beta4 --> cabpk v1beta1 --> kubeadm v1beta4 round trip errors. @@ -172,3 +178,11 @@ func hubJoinConfigurationFuzzer(obj *bootstrapv1.JoinConfiguration, c randfill.C obj.Discovery.File.KubeConfig = nil } } + +func spokeBootstrapToken(in *BootstrapToken, c randfill.Continue) { + c.FillNoCustom(in) + + if in.TTL != nil { + in.TTL = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} diff --git a/bootstrap/kubeadm/types/upstreamv1beta4/zz_generated.conversion.go b/bootstrap/kubeadm/types/upstreamv1beta4/zz_generated.conversion.go index b9585f71003b..e923871a1e79 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta4/zz_generated.conversion.go +++ b/bootstrap/kubeadm/types/upstreamv1beta4/zz_generated.conversion.go @@ -63,16 +63,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*BootstrapToken)(nil), (*v1beta2.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_upstreamv1beta4_BootstrapToken_To_v1beta2_BootstrapToken(a.(*BootstrapToken), b.(*v1beta2.BootstrapToken), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_BootstrapToken_To_upstreamv1beta4_BootstrapToken(a.(*v1beta2.BootstrapToken), b.(*BootstrapToken), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*BootstrapTokenDiscovery)(nil), (*v1beta2.BootstrapTokenDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_upstreamv1beta4_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(a.(*BootstrapTokenDiscovery), b.(*v1beta2.BootstrapTokenDiscovery), scope) }); err != nil { @@ -218,6 +208,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*BootstrapToken)(nil), (*v1beta2.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_upstreamv1beta4_BootstrapToken_To_v1beta2_BootstrapToken(a.(*BootstrapToken), 
b.(*v1beta2.BootstrapToken), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*ClusterConfiguration)(nil), (*v1beta2.ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_upstreamv1beta4_ClusterConfiguration_To_v1beta2_ClusterConfiguration(a.(*ClusterConfiguration), b.(*v1beta2.ClusterConfiguration), scope) }); err != nil { @@ -253,6 +248,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_BootstrapToken_To_upstreamv1beta4_BootstrapToken(a.(*v1beta2.BootstrapToken), b.(*BootstrapToken), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.Discovery)(nil), (*Discovery)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_Discovery_To_upstreamv1beta4_Discovery(a.(*v1beta2.Discovery), b.(*Discovery), scope) }); err != nil { @@ -339,33 +339,23 @@ func Convert_v1beta2_Arg_To_upstreamv1beta4_Arg(in *v1beta2.Arg, out *Arg, s con func autoConvert_upstreamv1beta4_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *v1beta2.BootstrapToken, s conversion.Scope) error { out.Token = (*v1beta2.BootstrapTokenString)(unsafe.Pointer(in.Token)) out.Description = in.Description - out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL)) + // WARNING: in.TTL requires manual conversion: does not exist in peer-type out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires)) out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages)) out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) return nil } -// Convert_upstreamv1beta4_BootstrapToken_To_v1beta2_BootstrapToken is an autogenerated conversion function. -func Convert_upstreamv1beta4_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *v1beta2.BootstrapToken, s conversion.Scope) error { - return autoConvert_upstreamv1beta4_BootstrapToken_To_v1beta2_BootstrapToken(in, out, s) -} - func autoConvert_v1beta2_BootstrapToken_To_upstreamv1beta4_BootstrapToken(in *v1beta2.BootstrapToken, out *BootstrapToken, s conversion.Scope) error { out.Token = (*BootstrapTokenString)(unsafe.Pointer(in.Token)) out.Description = in.Description - out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL)) + // WARNING: in.TTLSeconds requires manual conversion: does not exist in peer-type out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires)) out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages)) out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) return nil } -// Convert_v1beta2_BootstrapToken_To_upstreamv1beta4_BootstrapToken is an autogenerated conversion function. 
-func Convert_v1beta2_BootstrapToken_To_upstreamv1beta4_BootstrapToken(in *v1beta2.BootstrapToken, out *BootstrapToken, s conversion.Scope) error { - return autoConvert_v1beta2_BootstrapToken_To_upstreamv1beta4_BootstrapToken(in, out, s) -} - func autoConvert_upstreamv1beta4_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(in *BootstrapTokenDiscovery, out *v1beta2.BootstrapTokenDiscovery, s conversion.Scope) error { out.Token = in.Token out.APIServerEndpoint = in.APIServerEndpoint @@ -685,7 +675,17 @@ func Convert_v1beta2_ImageMeta_To_upstreamv1beta4_ImageMet } func autoConvert_upstreamv1beta4_InitConfiguration_To_v1beta2_InitConfiguration(in *InitConfiguration, out *v1beta2.InitConfiguration, s conversion.Scope) error { - out.BootstrapTokens = *(*[]v1beta2.BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens)) + if in.BootstrapTokens != nil { + in, out := &in.BootstrapTokens, &out.BootstrapTokens + *out = make([]v1beta2.BootstrapToken, len(*in)) + for i := range *in { + if err := Convert_upstreamv1beta4_BootstrapToken_To_v1beta2_BootstrapToken(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.BootstrapTokens = nil + } // WARNING: in.DryRun requires manual conversion: does not exist in peer-type if err := Convert_upstreamv1beta4_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { return err @@ -709,7 +709,17 @@ func autoConvert_upstreamv1beta4_InitConfiguration_To_v1beta2_InitConfiguration( } func autoConvert_v1beta2_InitConfiguration_To_upstreamv1beta4_InitConfiguration(in *v1beta2.InitConfiguration, out *InitConfiguration, s conversion.Scope) error { - out.BootstrapTokens = *(*[]BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens)) + if in.BootstrapTokens != nil { + in, out := &in.BootstrapTokens, &out.BootstrapTokens + *out = make([]BootstrapToken, len(*in)) + for i := range *in { + if err := Convert_v1beta2_BootstrapToken_To_upstreamv1beta4_BootstrapToken(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.BootstrapTokens = nil + } if err := Convert_v1beta2_NodeRegistrationOptions_To_upstreamv1beta4_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { return err } diff --git a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml index a5f5be0c668f..ff1fa39c8a39 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml @@ -2906,9 +2906,9 @@ spec: Any further remediation is only allowed if at most "maxUnhealthy" machines selected by "selector" are not healthy. x-kubernetes-int-or-string: true - nodeStartupTimeout: + nodeStartupTimeoutSeconds: description: |- - nodeStartupTimeout allows to set the maximum time for MachineHealthCheck + nodeStartupTimeoutSeconds allows setting the maximum time for MachineHealthCheck to consider a Machine unhealthy if a corresponding Node isn't associated through a `Spec.ProviderID` field. @@ -2920,7 +2920,9 @@ spec: Defaults to 10 minutes. If you wish to disable this feature, set the value explicitly to 0. - type: string + format: int32 + minimum: 0 + type: integer remediationTemplate: description: |- remediationTemplate is a reference to a remediation template @@ -2986,20 +2988,22 @@ spec: Unknown.
minLength: 1 type: string - timeout: + timeoutSeconds: description: |- - timeout is the duration that a node must be in a given status for, + timeoutSeconds is the duration that a node must be in a given status for, - after which the node is considered unhealthy. For example, with a value of "1h", the node must match the status for at least 1 hour before being considered unhealthy. + after which the node is considered unhealthy. For example, with a value of 3600, the node must match the status for at least 1 hour before being considered unhealthy. - type: string + format: int32 + minimum: 0 + type: integer type: description: type of Node condition minLength: 1 type: string required: - status - - timeout + - timeoutSeconds - type type: object maxItems: 100 @@ -3119,26 +3123,32 @@ spec: minLength: 1 type: string type: object - nodeDeletionTimeout: + nodeDeletionTimeoutSeconds: description: |- - nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. NOTE: This value can be overridden while defining a Cluster.Topology. - type: string - nodeDrainTimeout: + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: description: |- - nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology. - type: string - nodeVolumeDetachTimeout: + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: description: |- - nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. NOTE: This value can be overridden while defining a Cluster.Topology. - type: string + format: int32 + minimum: 0 + type: integer readinessGates: description: |- readinessGates specifies additional conditions to include when evaluating Machine Ready condition. @@ -3968,9 +3978,9 @@ spec: Any further remediation is only allowed if at most "maxUnhealthy" machines selected by "selector" are not healthy. x-kubernetes-int-or-string: true - nodeStartupTimeout: + nodeStartupTimeoutSeconds: description: |- - nodeStartupTimeout allows to set the maximum time for MachineHealthCheck + nodeStartupTimeoutSeconds allows setting the maximum time for MachineHealthCheck to consider a Machine unhealthy if a corresponding Node isn't associated through a `Spec.ProviderID` field. @@ -3982,7 +3992,9 @@ spec: Defaults to 10 minutes. If you wish to disable this feature, set the value explicitly to 0. - type: string + format: int32 + minimum: 0 + type: integer remediationTemplate: description: |- remediationTemplate is a reference to a remediation template @@ -4048,20 +4060,22 @@ spec: False, Unknown.
minLength: 1 type: string - timeout: + timeoutSeconds: description: |- - timeout is the duration that a node must be in a given status for, + timeoutSeconds is the duration that a node must be in a given status for, - after which the node is considered unhealthy. For example, with a value of "1h", the node must match the status for at least 1 hour before being considered unhealthy. + after which the node is considered unhealthy. For example, with a value of 3600, the node must match the status for at least 1 hour before being considered unhealthy. - type: string + format: int32 + minimum: 0 + type: integer type: description: type of Node condition minLength: 1 type: string required: - status - - timeout + - timeoutSeconds - type type: object maxItems: 100 @@ -4106,26 +4120,32 @@ spec: minLength: 1 type: string type: object - nodeDeletionTimeout: + nodeDeletionTimeoutSeconds: description: |- - nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. - type: string - nodeDrainTimeout: + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: description: |- - nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. - type: string - nodeVolumeDetachTimeout: + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: description: |- - nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. - type: string + format: int32 + minimum: 0 + type: integer readinessGates: description: |- readinessGates specifies additional conditions to include when evaluating Machine Ready condition. @@ -4465,26 +4485,32 @@ spec: minLength: 1 type: string type: object - nodeDeletionTimeout: + nodeDeletionTimeoutSeconds: description: |- - nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine Pool is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass. - type: string - nodeDrainTimeout: + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: description: |- - nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node.
The default value is 0, meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass. - type: string - nodeVolumeDetachTimeout: + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: description: |- - nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. NOTE: This value can be overridden while defining a Cluster.Topology using this MachinePoolClass. - type: string + format: int32 + minimum: 0 + type: integer template: description: |- template is a local struct containing a collection of templates for creation of diff --git a/config/crd/bases/cluster.x-k8s.io_clusters.yaml b/config/crd/bases/cluster.x-k8s.io_clusters.yaml index ef5d7364aa44..fd8338fac554 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusters.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusters.yaml @@ -2446,9 +2446,9 @@ spec: Any further remediation is only allowed if at most "maxUnhealthy" machines selected by "selector" are not healthy. x-kubernetes-int-or-string: true - nodeStartupTimeout: + nodeStartupTimeoutSeconds: description: |- - nodeStartupTimeout allows to set the maximum time for MachineHealthCheck + nodeStartupTimeoutSeconds allows setting the maximum time for MachineHealthCheck to consider a Machine unhealthy if a corresponding Node isn't associated through a `Spec.ProviderID` field. @@ -2460,7 +2460,9 @@ spec: Defaults to 10 minutes. If you wish to disable this feature, set the value explicitly to 0. - type: string + format: int32 + minimum: 0 + type: integer remediationTemplate: description: |- remediationTemplate is a reference to a remediation template @@ -2526,20 +2528,22 @@ spec: False, Unknown. minLength: 1 type: string - timeout: + timeoutSeconds: description: |- - timeout is the duration that a node must be in a given status for, + timeoutSeconds is the duration that a node must be in a given status for, - after which the node is considered unhealthy. For example, with a value of "1h", the node must match the status for at least 1 hour before being considered unhealthy. + after which the node is considered unhealthy. For example, with a value of 3600, the node must match the status for at least 1 hour before being considered unhealthy. - type: string + format: int32 + minimum: 0 + type: integer type: description: type of Node condition minLength: 1 type: string required: - status - - timeout + - timeoutSeconds - type type: object maxItems: 100 @@ -2583,23 +2587,29 @@ spec: More info: http://kubernetes.io/docs/user-guide/labels type: object type: object - nodeDeletionTimeout: + nodeDeletionTimeoutSeconds: description: |- - nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. - type: string - nodeDrainTimeout: + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: description: |- - nodeDrainTimeout is the total amount of time that the controller will spend on draining a node.
+ nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` - type: string - nodeVolumeDetachTimeout: + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: description: |- - nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. - type: string + format: int32 + minimum: 0 + type: integer readinessGates: description: |- readinessGates specifies additional conditions to include when evaluating Machine Ready condition. @@ -2783,9 +2793,9 @@ spec: Any further remediation is only allowed if at most "maxUnhealthy" machines selected by "selector" are not healthy. x-kubernetes-int-or-string: true - nodeStartupTimeout: + nodeStartupTimeoutSeconds: description: |- - nodeStartupTimeout allows to set the maximum time for MachineHealthCheck + nodeStartupTimeoutSeconds allows setting the maximum time for MachineHealthCheck to consider a Machine unhealthy if a corresponding Node isn't associated through a `Spec.ProviderID` field. @@ -2797,7 +2807,9 @@ spec: Defaults to 10 minutes. If you wish to disable this feature, set the value explicitly to 0. - type: string + format: int32 + minimum: 0 + type: integer remediationTemplate: description: |- remediationTemplate is a reference to a remediation template @@ -2863,20 +2875,22 @@ spec: of True, False, Unknown. minLength: 1 type: string - timeout: + timeoutSeconds: description: |- - timeout is the duration that a node must be in a given status for, + timeoutSeconds is the duration that a node must be in a given status for, - after which the node is considered unhealthy. For example, with a value of "1h", the node must match the status for at least 1 hour before being considered unhealthy. + after which the node is considered unhealthy. For example, with a value of 3600, the node must match the status for at least 1 hour before being considered unhealthy. - type: string + format: int32 + minimum: 0 + type: integer type: description: type of Node condition minLength: 1 type: string required: - status - - timeout + - timeoutSeconds - type type: object maxItems: 100 @@ -2935,23 +2949,29 @@ spec: maxLength: 63 minLength: 1 type: string - nodeDeletionTimeout: + nodeDeletionTimeoutSeconds: description: |- - nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. - type: string - nodeDrainTimeout: + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: description: |- - nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations.
- NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` - type: string - nodeVolumeDetachTimeout: + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: description: |- - nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. - type: string + format: int32 + minimum: 0 + type: integer readinessGates: description: |- readinessGates specifies additional conditions to include when evaluating Machine Ready condition. @@ -3209,23 +3229,29 @@ spec: maxLength: 63 minLength: 1 type: string - nodeDeletionTimeout: + nodeDeletionTimeoutSeconds: description: |- - nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the MachinePool + nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the MachinePool hosts after the MachinePool is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. - type: string - nodeDrainTimeout: + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: description: |- - nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` - type: string - nodeVolumeDetachTimeout: + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: description: |- - nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. - type: string + format: int32 + minimum: 0 + type: integer replicas: description: |- replicas is the number of nodes belonging to this pool. diff --git a/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml b/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml index 9553f785392b..5b2e44ead16f 100644 --- a/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml @@ -2241,23 +2241,29 @@ spec: Defaults to 0 (Machine will be considered available as soon as the Machine is ready) format: int32 type: integer - nodeDeletionTimeout: + nodeDeletionTimeoutSeconds: description: |- - nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. - type: string - nodeDrainTimeout: + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: description: |- - nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. 
+ nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` - type: string - nodeVolumeDetachTimeout: + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: description: |- - nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. - type: string + format: int32 + minimum: 0 + type: integer providerID: description: |- providerID is the identification ID of the machine provided by the provider. diff --git a/config/crd/bases/cluster.x-k8s.io_machinehealthchecks.yaml b/config/crd/bases/cluster.x-k8s.io_machinehealthchecks.yaml index 64318f554237..f0c39ab8c6da 100644 --- a/config/crd/bases/cluster.x-k8s.io_machinehealthchecks.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machinehealthchecks.yaml @@ -1052,9 +1052,9 @@ spec: Deprecated: This field is deprecated and is going to be removed in the next apiVersion. Please see https://github.com/kubernetes-sigs/cluster-api/issues/10722 for more details. x-kubernetes-int-or-string: true - nodeStartupTimeout: + nodeStartupTimeoutSeconds: description: |- - nodeStartupTimeout allows to set the maximum time for MachineHealthCheck + nodeStartupTimeoutSeconds allows setting the maximum time for MachineHealthCheck to consider a Machine unhealthy if a corresponding Node isn't associated through a `Spec.ProviderID` field. @@ -1066,7 +1066,9 @@ spec: Defaults to 10 minutes. If you wish to disable this feature, set the value explicitly to 0. - type: string + format: int32 + minimum: 0 + type: integer remediationTemplate: description: |- remediationTemplate is a reference to a remediation template @@ -1178,20 +1180,22 @@ spec: description: status of the condition, one of True, False, Unknown. minLength: 1 type: string - timeout: + timeoutSeconds: description: |- - timeout is the duration that a node must be in a given status for, + timeoutSeconds is the duration that a node must be in a given status for, - after which the node is considered unhealthy. For example, with a value of "1h", the node must match the status for at least 1 hour before being considered unhealthy. + after which the node is considered unhealthy. For example, with a value of 3600, the node must match the status for at least 1 hour before being considered unhealthy.
- type: string + format: int32 + minimum: 0 + type: integer type: description: type of Node condition minLength: 1 type: string required: - status - - timeout + - timeoutSeconds - type type: object maxItems: 100 diff --git a/config/crd/bases/cluster.x-k8s.io_machinepools.yaml b/config/crd/bases/cluster.x-k8s.io_machinepools.yaml index 6407389dae4d..90767fe91767 100644 --- a/config/crd/bases/cluster.x-k8s.io_machinepools.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machinepools.yaml @@ -1850,23 +1850,29 @@ spec: Defaults to 0 (Machine will be considered available as soon as the Machine is ready) format: int32 type: integer - nodeDeletionTimeout: + nodeDeletionTimeoutSeconds: description: |- - nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. - type: string - nodeDrainTimeout: + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: description: |- - nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` - type: string - nodeVolumeDetachTimeout: + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: description: |- - nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. - type: string + format: int32 + minimum: 0 + type: integer providerID: description: |- providerID is the identification ID of the machine provided by the provider. diff --git a/config/crd/bases/cluster.x-k8s.io_machines.yaml b/config/crd/bases/cluster.x-k8s.io_machines.yaml index c6c1c0ad37c8..86771c26fcec 100644 --- a/config/crd/bases/cluster.x-k8s.io_machines.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machines.yaml @@ -1646,23 +1646,29 @@ spec: Defaults to 0 (Machine will be considered available as soon as the Machine is ready) format: int32 type: integer - nodeDeletionTimeout: + nodeDeletionTimeoutSeconds: description: |- - nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. - type: string - nodeDrainTimeout: + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: description: |- - nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. 
- NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` - type: string - nodeVolumeDetachTimeout: + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: description: |- - nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. - type: string + format: int32 + minimum: 0 + type: integer providerID: description: |- providerID is the identification ID of the machine provided by the provider. @@ -1851,14 +1857,14 @@ spec: nodeDrainStartTime: description: |- nodeDrainStartTime is the time when the drain of the node started and is used to determine - if the NodeDrainTimeout is exceeded. + if the nodeDrainTimeoutSeconds is exceeded. Only present when the Machine has a deletionTimestamp and draining the node had been started. format: date-time type: string waitForNodeVolumeDetachStartTime: description: |- waitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started - and is used to determine if the NodeVolumeDetachTimeout is exceeded. + and is used to determine if the nodeVolumeDetachTimeoutSeconds is exceeded. Detaching volumes from nodes is usually done by CSI implementations and the current state is observed from the node's `.Status.VolumesAttached` field. Only present when the Machine has a deletionTimestamp and waiting for volume detachments had been started. diff --git a/config/crd/bases/cluster.x-k8s.io_machinesets.yaml b/config/crd/bases/cluster.x-k8s.io_machinesets.yaml index 968ff363eefd..a43ff777fcf6 100644 --- a/config/crd/bases/cluster.x-k8s.io_machinesets.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machinesets.yaml @@ -1905,23 +1905,29 @@ spec: Defaults to 0 (Machine will be considered available as soon as the Machine is ready) format: int32 type: integer - nodeDeletionTimeout: + nodeDeletionTimeoutSeconds: description: |- - nodeDeletionTimeout defines how long the controller will attempt to delete the Node that the Machine + nodeDeletionTimeoutSeconds defines how long the controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. Defaults to 10 seconds. - type: string - nodeDrainTimeout: + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: description: |- - nodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a node. The default value is 0, meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` - type: string - nodeVolumeDetachTimeout: + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: description: |- - nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. 
The default value is 0, meaning that the volumes can be detached without any time limitations. - type: string + format: int32 + minimum: 0 + type: integer providerID: description: |- providerID is the identification ID of the machine provided by the provider. diff --git a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml index 361d3bfa6a7c..4d4f8d546b0f 100644 --- a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml +++ b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml @@ -6220,7 +6220,7 @@ spec: expires: description: |- expires specifies the timestamp when this token expires. Defaults to being set - dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. + dynamically at runtime based on the ttlSeconds. Expires and ttlSeconds are mutually exclusive. format: date-time type: string groups: @@ -6238,11 +6238,13 @@ spec: token is used for establishing bidirectional trust between nodes and control-planes. Used for joining nodes in the cluster. type: string - ttl: + ttlSeconds: description: |- - ttl defines the time to live for this token. Defaults to 24h. - Expires and TTL are mutually exclusive. - type: string + ttlSeconds defines the time to live for this token. Defaults to 24h. + Expires and ttlSeconds are mutually exclusive. + format: int32 + minimum: 0 + type: integer usages: description: |- usages describes the ways in which this token can be used. Can by default be used @@ -7223,23 +7225,29 @@ spec: More info: http://kubernetes.io/docs/user-guide/labels type: object type: object - nodeDeletionTimeout: + nodeDeletionTimeoutSeconds: description: |- - nodeDeletionTimeout defines how long the machine controller will attempt to delete the Node that the Machine + nodeDeletionTimeoutSeconds defines how long the machine controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. If no value is provided, the default value for this property of the Machine resource will be used. - type: string - nodeDrainTimeout: + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: description: |- - nodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node + nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a controlplane node The default value is 0, meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` - type: string - nodeVolumeDetachTimeout: + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: description: |- - nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. 
- type: string + format: int32 + minimum: 0 + type: integer readinessGates: description: |- readinessGates specifies additional conditions to include when evaluating Machine Ready condition; @@ -7301,38 +7309,42 @@ and then be\n\tremediated; such operation is considered a retry, remediation-retry #1.\n\tIf M1-2 (replacement of M1-1) becomes unhealthy, remediation-retry #2 will happen, etc.\n\nA retry - could happen only after RetryPeriod from the previous retry.\nIf - a machine is marked as unhealthy after MinHealthyPeriod from - the previous remediation expired,\nthis is not considered a - retry anymore because the new issue is assumed unrelated from + could happen only after retryPeriodSeconds from the previous + retry.\nIf a machine is marked as unhealthy after minHealthyPeriodSeconds + from the previous remediation expired,\nthis is not considered + a retry anymore because the new issue is assumed unrelated from the previous one.\n\nIf not set, the remediation will be retried infinitely." format: int32 type: integer - minHealthyPeriod: - description: "minHealthyPeriod defines the duration after which - KCP will consider any failure to a machine unrelated\nfrom the - previous one. In this case the remediation is not considered + minHealthyPeriodSeconds: + description: "minHealthyPeriodSeconds defines the duration after + which KCP will consider any failure to a machine unrelated\nfrom + the previous one. In this case the remediation is not considered a retry anymore, and thus the retry\ncounter restarts from 0. - For example, assuming MinHealthyPeriod is set to 1h (default)\n\n\tM1 + For example, assuming minHealthyPeriodSeconds is set to 1h (default)\n\n\tM1 becomes unhealthy; remediation happens, and M1-1 is created as a replacement.\n\tIf M1-1 (replacement of M1) has problems within the 1hr after the creation, also\n\tthis machine will be remediated and this operation is considered a retry - a problem related\n\tto the original issue happened to M1 -.\n\n\tIf instead the problem - on M1-1 is happening after MinHealthyPeriod expired, e.g. four - days after\n\tm1-1 has been created as a remediation of M1, - the problem on M1-1 is considered unrelated to\n\tthe original - issue happened to M1.\n\nIf not set, this value is defaulted - to 1h." - type: string - retryPeriod: + on M1-1 is happening after minHealthyPeriodSeconds expired, + e.g. four days after\n\tm1-1 has been created as a remediation + of M1, the problem on M1-1 is considered unrelated to\n\tthe + original issue happened to M1.\n\nIf not set, this value is + defaulted to 1h." + format: int32 + minimum: 0 + type: integer + retryPeriodSeconds: + description: |- - retryPeriod is the duration that KCP should wait before remediating a machine being created as a replacement + retryPeriodSeconds is the duration that KCP should wait before remediating a machine being created as a replacement for an unhealthy machine (a retry). If not set, a retry will happen immediately.
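To make the schema change above concrete: a minimal Go sketch (not from this PR) of populating the renamed remediation fields. The field shapes are taken from the webhook test updates later in this diff (`MaxRetry` and `MinHealthyPeriodSeconds` are `*int32`, `RetryPeriodSeconds` is a plain `int32`); the values are illustrative, not defaults.

```go
package main

import (
	"k8s.io/utils/ptr"

	controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2"
)

func main() {
	// Previously: minHealthyPeriod: &metav1.Duration{Duration: time.Hour}
	//             retryPeriod:      metav1.Duration{Duration: 5 * time.Minute}
	_ = &controlplanev1.RemediationStrategy{
		MaxRetry:                ptr.To[int32](3),
		MinHealthyPeriodSeconds: ptr.To[int32](60 * 60), // 1h expressed as seconds
		RetryPeriodSeconds:      5 * 60,                 // 5m expressed as seconds
	}
}
```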
- type: string + format: int32 + minimum: 0 + type: integer type: object replicas: description: |- diff --git a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml index 70071538deb1..14535e8a5aa8 100644 --- a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml +++ b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanetemplates.yaml @@ -4622,7 +4622,7 @@ spec: expires: description: |- expires specifies the timestamp when this token expires. Defaults to being set - dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. + dynamically at runtime based on the ttlSeconds. Expires and ttlSeconds are mutually exclusive. format: date-time type: string groups: @@ -4640,11 +4640,13 @@ spec: token is used for establishing bidirectional trust between nodes and control-planes. Used for joining nodes in the cluster. type: string - ttl: + ttlSeconds: description: |- - ttl defines the time to live for this token. Defaults to 24h. - Expires and TTL are mutually exclusive. - type: string + ttlSeconds defines the time to live for this token. Defaults to 24h. + Expires and ttlSeconds are mutually exclusive. + format: int32 + minimum: 0 + type: integer usages: description: |- usages describes the ways in which this token can be used. Can by default be used @@ -5592,23 +5594,29 @@ spec: More info: http://kubernetes.io/docs/user-guide/labels type: object type: object - nodeDeletionTimeout: + nodeDeletionTimeoutSeconds: description: |- - nodeDeletionTimeout defines how long the machine controller will attempt to delete the Node that the Machine + nodeDeletionTimeoutSeconds defines how long the machine controller will attempt to delete the Node that the Machine hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. If no value is provided, the default value for this property of the Machine resource will be used. - type: string - nodeDrainTimeout: + format: int32 + minimum: 0 + type: integer + nodeDrainTimeoutSeconds: description: |- - nodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node + nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a controlplane node The default value is 0, meaning that the node can be drained without any time limitations. - NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` - type: string - nodeVolumeDetachTimeout: + NOTE: nodeDrainTimeoutSeconds is different from `kubectl drain --timeout` + format: int32 + minimum: 0 + type: integer + nodeVolumeDetachTimeoutSeconds: description: |- - nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. 
- type: string + format: int32 + minimum: 0 + type: integer type: object remediationStrategy: description: remediationStrategy is the RemediationStrategy @@ -5626,39 +5634,44 @@ and then be\n\tremediated; such operation is considered a retry, remediation-retry #1.\n\tIf M1-2 (replacement of M1-1) becomes unhealthy, remediation-retry #2 will - happen, etc.\n\nA retry could happen only after RetryPeriod + happen, etc.\n\nA retry could happen only after retryPeriodSeconds from the previous retry.\nIf a machine is marked as - unhealthy after MinHealthyPeriod from the previous remediation - expired,\nthis is not considered a retry anymore because - the new issue is assumed unrelated from the previous - one.\n\nIf not set, the remedation will be retried infinitely." + unhealthy after minHealthyPeriodSeconds from the previous + remediation expired,\nthis is not considered a retry + anymore because the new issue is assumed unrelated from + the previous one.\n\nIf not set, the remediation will + be retried infinitely." format: int32 type: integer - minHealthyPeriod: - description: "minHealthyPeriod defines the duration after - which KCP will consider any failure to a machine unrelated\nfrom - the previous one. In this case the remediation is not - considered a retry anymore, and thus the retry\ncounter - restarts from 0. For example, assuming MinHealthyPeriod + minHealthyPeriodSeconds: + description: "minHealthyPeriodSeconds defines the duration + after which KCP will consider any failure to a machine + unrelated\nfrom the previous one. In this case the remediation + is not considered a retry anymore, and thus the retry\ncounter + restarts from 0. For example, assuming minHealthyPeriodSeconds is set to 1h (default)\n\n\tM1 becomes unhealthy; remediation happens, and M1-1 is created as a replacement.\n\tIf M1-1 (replacement of M1) has problems within the 1hr after the creation, also\n\tthis machine will be remediated and this operation is considered a retry - a problem related\n\tto the original issue happened to M1 -.\n\n\tIf - instead the problem on M1-1 is happening after MinHealthyPeriod + instead the problem on M1-1 is happening after minHealthyPeriodSeconds expired, e.g. four days after\n\tm1-1 has been created as a remediation of M1, the problem on M1-1 is considered unrelated to\n\tthe original issue happened to M1.\n\nIf not set, this value is defaulted to 1h." - type: string - retryPeriod: + format: int32 + minimum: 0 + type: integer + retryPeriodSeconds: description: |- - retryPeriod is the duration that KCP should wait before remediating a machine being created as a replacement + retryPeriodSeconds is the duration that KCP should wait before remediating a machine being created as a replacement for an unhealthy machine (a retry). If not set, a retry will happen immediately. - type: string + format: int32 + minimum: 0 + type: integer type: object rolloutAfter: description: |- diff --git a/controlplane/kubeadm/config/crd/kustomization.yaml b/controlplane/kubeadm/config/crd/kustomization.yaml index 6f5ad889d9ce..4f77db6035a6 100644 --- a/controlplane/kubeadm/config/crd/kustomization.yaml +++ b/controlplane/kubeadm/config/crd/kustomization.yaml @@ -1,5 +1,8 @@ labels: - pairs: + # Note: This is needed so the topology reconciler can figure out + the contract of v1beta1 when v1beta1 objects are used in ClusterClasses.
+ cluster.x-k8s.io/v1beta1: v1beta1 cluster.x-k8s.io/v1beta2: v1beta2 # This kustomization.yaml is not intended to be run by itself, diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index b5bb6bfa0172..b8f70b188d22 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -775,9 +775,9 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro } // Set all other in-place mutable fields that impact the ability to tear down existing machines. - m.Spec.NodeDrainTimeout = controlPlane.KCP.Spec.MachineTemplate.NodeDrainTimeout - m.Spec.NodeDeletionTimeout = controlPlane.KCP.Spec.MachineTemplate.NodeDeletionTimeout - m.Spec.NodeVolumeDetachTimeout = controlPlane.KCP.Spec.MachineTemplate.NodeVolumeDetachTimeout + m.Spec.NodeDrainTimeoutSeconds = controlPlane.KCP.Spec.MachineTemplate.NodeDrainTimeoutSeconds + m.Spec.NodeDeletionTimeoutSeconds = controlPlane.KCP.Spec.MachineTemplate.NodeDeletionTimeoutSeconds + m.Spec.NodeVolumeDetachTimeoutSeconds = controlPlane.KCP.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds // Note: We intentionally don't set "minReadySeconds" on Machines because we consider it enough to have machine availability driven by readiness of control plane components. if err := patchHelper.Patch(ctx, m); err != nil { diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 39a8d1335718..7a91e4aa0342 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -1683,8 +1683,8 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { defer teardown(t, g, namespace, testCluster) classicManager := "manager" - duration5s := &metav1.Duration{Duration: 5 * time.Second} - duration10s := &metav1.Duration{Duration: 10 * time.Second} + duration5s := ptr.To(int32(5)) + duration10s := ptr.To(int32(10)) // Existing InfraMachine infraMachineSpec := map[string]interface{}{ @@ -1781,13 +1781,13 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ ConfigRef: bootstrapRef, }, - InfrastructureRef: *infraMachineRef, - Version: ptr.To("v1.25.3"), - FailureDomain: fd, - ProviderID: ptr.To("provider-id"), - NodeDrainTimeout: duration5s, - NodeVolumeDetachTimeout: duration5s, - NodeDeletionTimeout: duration5s, + InfrastructureRef: *infraMachineRef, + Version: ptr.To("v1.25.3"), + FailureDomain: fd, + ProviderID: ptr.To("provider-id"), + NodeDrainTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, }, } // Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0. 
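The fixture change above (`duration5s := ptr.To(int32(5))` replacing `&metav1.Duration{Duration: 5 * time.Second}`) is the whole migration pattern in miniature: wherever a pointer to a wrapped `time.Duration` was built before, a pointer to an `int32` second count now suffices. A minimal sketch of the two shapes, using only the standard Kubernetes helper packages:

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

func main() {
	// Old field shape: a pointer to a wrapped time.Duration.
	oldTimeout := &metav1.Duration{Duration: 5 * time.Second}

	// New field shape: a pointer to the number of seconds as int32.
	newTimeout := ptr.To(int32(5))

	// Both encode the same five seconds.
	fmt.Println(oldTimeout.Duration.Seconds() == float64(*newTimeout)) // true
}
```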
@@ -1814,10 +1814,10 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To("machine-bootstrap-secret"), }, - NodeDrainTimeout: duration5s, - NodeVolumeDetachTimeout: duration5s, - NodeDeletionTimeout: duration5s, - ReadinessGates: mandatoryMachineReadinessGates, + NodeDrainTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + ReadinessGates: mandatoryMachineReadinessGates, }, } g.Expect(env.Create(ctx, deletingMachine, client.FieldOwner(classicManager))).To(Succeed()) @@ -1889,9 +1889,9 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Name: "infra-foo", APIVersion: clusterv1.GroupVersionInfrastructure.String(), }, - NodeDrainTimeout: duration5s, - NodeVolumeDetachTimeout: duration5s, - NodeDeletionTimeout: duration5s, + NodeDrainTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, }, }, } @@ -1998,9 +1998,9 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { "modified-annotation": "modified-value-2", // Modify the value of the annotation // Drop "dropped-annotation" } - kcp.Spec.MachineTemplate.NodeDrainTimeout = duration10s - kcp.Spec.MachineTemplate.NodeDeletionTimeout = duration10s - kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout = duration10s + kcp.Spec.MachineTemplate.NodeDrainTimeoutSeconds = duration10s + kcp.Spec.MachineTemplate.NodeDeletionTimeoutSeconds = duration10s + kcp.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds = duration10s // Use the updated KCP. controlPlane.KCP = kcp @@ -2020,17 +2020,17 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { expectedAnnotations[controlplanev1.PreTerminateHookCleanupAnnotation] = "" g.Expect(updatedInplaceMutatingMachine.Annotations).Should(Equal(expectedAnnotations)) // Verify Node timeout values - g.Expect(updatedInplaceMutatingMachine.Spec.NodeDrainTimeout).Should(And( + g.Expect(updatedInplaceMutatingMachine.Spec.NodeDrainTimeoutSeconds).Should(And( Not(BeNil()), - HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeDrainTimeout)), + HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeDrainTimeoutSeconds)), )) - g.Expect(updatedInplaceMutatingMachine.Spec.NodeDeletionTimeout).Should(And( + g.Expect(updatedInplaceMutatingMachine.Spec.NodeDeletionTimeoutSeconds).Should(And( Not(BeNil()), - HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeDeletionTimeout)), + HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeDeletionTimeoutSeconds)), )) - g.Expect(updatedInplaceMutatingMachine.Spec.NodeVolumeDetachTimeout).Should(And( + g.Expect(updatedInplaceMutatingMachine.Spec.NodeVolumeDetachTimeoutSeconds).Should(And( Not(BeNil()), - HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout)), + HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds)), )) // Verify that the non in-place mutating fields remain the same. 
g.Expect(updatedInplaceMutatingMachine.Spec.FailureDomain).Should(Equal(inPlaceMutatingMachine.Spec.FailureDomain)) @@ -2077,13 +2077,13 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { g.Expect(updatedDeletingMachine.Labels).Should(Equal(deletingMachine.Labels)) g.Expect(updatedDeletingMachine.Annotations).Should(Equal(deletingMachine.Annotations)) // Verify Node timeout values - g.Expect(updatedDeletingMachine.Spec.NodeDrainTimeout).Should(Equal(kcp.Spec.MachineTemplate.NodeDrainTimeout)) - g.Expect(updatedDeletingMachine.Spec.NodeDeletionTimeout).Should(Equal(kcp.Spec.MachineTemplate.NodeDeletionTimeout)) - g.Expect(updatedDeletingMachine.Spec.NodeVolumeDetachTimeout).Should(Equal(kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout)) + g.Expect(updatedDeletingMachine.Spec.NodeDrainTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.NodeDrainTimeoutSeconds)) + g.Expect(updatedDeletingMachine.Spec.NodeDeletionTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.NodeDeletionTimeoutSeconds)) + g.Expect(updatedDeletingMachine.Spec.NodeVolumeDetachTimeoutSeconds).Should(Equal(kcp.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds)) // Verify the machine spec is otherwise unchanged. - deletingMachine.Spec.NodeDrainTimeout = kcp.Spec.MachineTemplate.NodeDrainTimeout - deletingMachine.Spec.NodeDeletionTimeout = kcp.Spec.MachineTemplate.NodeDeletionTimeout - deletingMachine.Spec.NodeVolumeDetachTimeout = kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout + deletingMachine.Spec.NodeDrainTimeoutSeconds = kcp.Spec.MachineTemplate.NodeDrainTimeoutSeconds + deletingMachine.Spec.NodeDeletionTimeoutSeconds = kcp.Spec.MachineTemplate.NodeDeletionTimeoutSeconds + deletingMachine.Spec.NodeVolumeDetachTimeoutSeconds = kcp.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds g.Expect(updatedDeletingMachine.Spec).Should(BeComparableTo(deletingMachine.Spec)) } diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go index d23974bf251a..4d40ecfad9f7 100644 --- a/controlplane/kubeadm/internal/controllers/helpers.go +++ b/controlplane/kubeadm/internal/controllers/helpers.go @@ -473,9 +473,9 @@ func (r *KubeadmControlPlaneReconciler) computeDesiredMachine(kcp *controlplanev } // Set other in-place mutable fields - desiredMachine.Spec.NodeDrainTimeout = kcp.Spec.MachineTemplate.NodeDrainTimeout - desiredMachine.Spec.NodeDeletionTimeout = kcp.Spec.MachineTemplate.NodeDeletionTimeout - desiredMachine.Spec.NodeVolumeDetachTimeout = kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout + desiredMachine.Spec.NodeDrainTimeoutSeconds = kcp.Spec.MachineTemplate.NodeDrainTimeoutSeconds + desiredMachine.Spec.NodeDeletionTimeoutSeconds = kcp.Spec.MachineTemplate.NodeDeletionTimeoutSeconds + desiredMachine.Spec.NodeVolumeDetachTimeoutSeconds = kcp.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds // Note: We intentionally don't set "minReadySeconds" on Machines because we consider it enough to have machine availability driven by readiness of control plane components. if existingMachine != nil { diff --git a/controlplane/kubeadm/internal/controllers/helpers_test.go b/controlplane/kubeadm/internal/controllers/helpers_test.go index 8002c75cbc80..d4fbdb5da229 100644 --- a/controlplane/kubeadm/internal/controllers/helpers_test.go +++ b/controlplane/kubeadm/internal/controllers/helpers_test.go @@ -19,7 +19,6 @@ package controllers import ( "fmt" "testing" - "time" . 
"github.com/onsi/gomega" gomegatypes "github.com/onsi/gomega/types" @@ -486,8 +485,8 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { Namespace: metav1.NamespaceDefault, }, } - duration5s := &metav1.Duration{Duration: 5 * time.Second} - duration10s := &metav1.Duration{Duration: 10 * time.Second} + duration5s := ptr.To(int32(5)) + duration10s := ptr.To(int32(10)) kcpMachineTemplateObjectMeta := clusterv1.ObjectMeta{ Labels: map[string]string{ "machineTemplateLabel": "machineTemplateLabelValue", @@ -534,9 +533,9 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { ReadinessGates: []clusterv1.MachineReadinessGate{ {ConditionType: "Foo"}, }, - NodeDrainTimeout: duration5s, - NodeDeletionTimeout: duration5s, - NodeVolumeDetachTimeout: duration5s, + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, }, KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ @@ -566,10 +565,10 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.6", MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - NodeDrainTimeout: duration5s, - NodeDeletionTimeout: duration5s, - NodeVolumeDetachTimeout: duration5s, + ObjectMeta: kcpMachineTemplateObjectMeta, + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, }, KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ @@ -594,10 +593,10 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.6", MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - NodeDrainTimeout: duration5s, - NodeDeletionTimeout: duration5s, - NodeVolumeDetachTimeout: duration5s, + ObjectMeta: kcpMachineTemplateObjectMeta, + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, }, KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ @@ -627,10 +626,10 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.6", MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - NodeDrainTimeout: duration5s, - NodeDeletionTimeout: duration5s, - NodeVolumeDetachTimeout: duration5s, + ObjectMeta: kcpMachineTemplateObjectMeta, + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, }, KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ @@ -655,10 +654,10 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.6", MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - NodeDrainTimeout: duration5s, - NodeDeletionTimeout: duration5s, - NodeVolumeDetachTimeout: duration5s, + ObjectMeta: kcpMachineTemplateObjectMeta, + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + 
NodeVolumeDetachTimeoutSeconds: duration5s, }, KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ @@ -685,11 +684,11 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.6", MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - ReadinessGates: []clusterv1.MachineReadinessGate{{ConditionType: "Bar"}}, - NodeDrainTimeout: duration5s, - NodeDeletionTimeout: duration5s, - NodeVolumeDetachTimeout: duration5s, + ObjectMeta: kcpMachineTemplateObjectMeta, + ReadinessGates: []clusterv1.MachineReadinessGate{{ConditionType: "Bar"}}, + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, }, KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ @@ -712,10 +711,10 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.6", MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - NodeDrainTimeout: duration5s, - NodeDeletionTimeout: duration5s, - NodeVolumeDetachTimeout: duration5s, + ObjectMeta: kcpMachineTemplateObjectMeta, + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, ReadinessGates: []clusterv1.MachineReadinessGate{ {ConditionType: "Foo"}, }, @@ -745,10 +744,10 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.6", MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - NodeDrainTimeout: duration5s, - NodeDeletionTimeout: duration5s, - NodeVolumeDetachTimeout: duration5s, + ObjectMeta: kcpMachineTemplateObjectMeta, + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, ReadinessGates: []clusterv1.MachineReadinessGate{ {ConditionType: "Foo"}, }, @@ -779,10 +778,10 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.6", MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, - NodeDrainTimeout: duration5s, - NodeDeletionTimeout: duration5s, - NodeVolumeDetachTimeout: duration5s, + ObjectMeta: kcpMachineTemplateObjectMeta, + NodeDrainTimeoutSeconds: duration5s, + NodeDeletionTimeoutSeconds: duration5s, + NodeVolumeDetachTimeoutSeconds: duration5s, ReadinessGates: []clusterv1.MachineReadinessGate{ {ConditionType: "Foo"}, }, @@ -829,11 +828,11 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { }, }, Spec: clusterv1.MachineSpec{ - Version: machineVersion, - FailureDomain: failureDomain, - NodeDrainTimeout: duration10s, - NodeDeletionTimeout: duration10s, - NodeVolumeDetachTimeout: duration10s, + Version: machineVersion, + FailureDomain: failureDomain, + NodeDrainTimeoutSeconds: duration10s, + NodeDeletionTimeoutSeconds: duration10s, + NodeVolumeDetachTimeoutSeconds: duration10s, Bootstrap: clusterv1.Bootstrap{ ConfigRef: bootstrapRef, }, @@ -860,12 +859,12 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ 
ConfigRef: bootstrapRef, }, - InfrastructureRef: *infraRef, - FailureDomain: failureDomain, - NodeDrainTimeout: tt.kcp.Spec.MachineTemplate.NodeDrainTimeout, - NodeDeletionTimeout: tt.kcp.Spec.MachineTemplate.NodeDeletionTimeout, - NodeVolumeDetachTimeout: tt.kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout, - ReadinessGates: append(append(mandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.ReadinessGates...), + InfrastructureRef: *infraRef, + FailureDomain: failureDomain, + NodeDrainTimeoutSeconds: tt.kcp.Spec.MachineTemplate.NodeDrainTimeoutSeconds, + NodeDeletionTimeoutSeconds: tt.kcp.Spec.MachineTemplate.NodeDeletionTimeoutSeconds, + NodeVolumeDetachTimeoutSeconds: tt.kcp.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds, + ReadinessGates: append(append(mandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.ReadinessGates...), } // Verify the Name and UID of the Machine remain unchanged @@ -895,13 +894,13 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) expectedMachineSpec = clusterv1.MachineSpec{ - ClusterName: cluster.Name, - Version: ptr.To(tt.kcp.Spec.Version), - FailureDomain: failureDomain, - NodeDrainTimeout: tt.kcp.Spec.MachineTemplate.NodeDrainTimeout, - NodeDeletionTimeout: tt.kcp.Spec.MachineTemplate.NodeDeletionTimeout, - NodeVolumeDetachTimeout: tt.kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout, - ReadinessGates: append(append(mandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.ReadinessGates...), + ClusterName: cluster.Name, + Version: ptr.To(tt.kcp.Spec.Version), + FailureDomain: failureDomain, + NodeDrainTimeoutSeconds: tt.kcp.Spec.MachineTemplate.NodeDrainTimeoutSeconds, + NodeDeletionTimeoutSeconds: tt.kcp.Spec.MachineTemplate.NodeDeletionTimeoutSeconds, + NodeVolumeDetachTimeoutSeconds: tt.kcp.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds, + ReadinessGates: append(append(mandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.ReadinessGates...), } // Verify Name. for _, matcher := range tt.want { diff --git a/controlplane/kubeadm/internal/controllers/remediation.go b/controlplane/kubeadm/internal/controllers/remediation.go index fab44da4a871..03f90278b2fd 100644 --- a/controlplane/kubeadm/internal/controllers/remediation.go +++ b/controlplane/kubeadm/internal/controllers/remediation.go @@ -491,18 +491,18 @@ func (r *KubeadmControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin return remediationInProgressData, true, nil } - // Gets MinHealthyPeriod and RetryPeriod from the remediation strategy, or use defaults. - minHealthyPeriod := controlplanev1.DefaultMinHealthyPeriod - if controlPlane.KCP.Spec.RemediationStrategy != nil && controlPlane.KCP.Spec.RemediationStrategy.MinHealthyPeriod != nil { - minHealthyPeriod = controlPlane.KCP.Spec.RemediationStrategy.MinHealthyPeriod.Duration + // Gets MinHealthyPeriodSeconds and RetryPeriodSeconds from the remediation strategy, or use defaults. 
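The comment above introduces the conversion the plus lines below perform: stored second counts are widened back into `time.Duration` before any timestamp arithmetic. A standalone sketch of that pattern; the helper name is ours, not from the PR:

```go
package main

import (
	"fmt"
	"time"
)

// secondsToDuration mirrors the inline conversions used below:
// time.Duration(seconds) * time.Second turns an int32 count into a Duration.
func secondsToDuration(seconds int32) time.Duration {
	return time.Duration(seconds) * time.Second
}

func main() {
	lastRemediation := time.Now().Add(-30 * time.Second)
	retryPeriod := secondsToDuration(60)

	// Same shape of check as checkRetryLimits: remediation is deferred while
	// the retry period since the previous remediation has not yet passed.
	deferred := lastRemediation.Add(retryPeriod).After(time.Now())
	fmt.Println("remediation deferred:", deferred) // true: only 30s have passed
}
```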
+ minHealthyPeriod := time.Duration(controlplanev1.DefaultMinHealthyPeriodSeconds) * time.Second + if controlPlane.KCP.Spec.RemediationStrategy != nil && controlPlane.KCP.Spec.RemediationStrategy.MinHealthyPeriodSeconds != nil { + minHealthyPeriod = time.Duration(*controlPlane.KCP.Spec.RemediationStrategy.MinHealthyPeriodSeconds) * time.Second } retryPeriod := time.Duration(0) if controlPlane.KCP.Spec.RemediationStrategy != nil { - retryPeriod = controlPlane.KCP.Spec.RemediationStrategy.RetryPeriod.Duration + retryPeriod = time.Duration(controlPlane.KCP.Spec.RemediationStrategy.RetryPeriodSeconds) * time.Second } // Gets the timestamp of the last remediation; if missing, default to a value - // that ensures both MinHealthyPeriod and RetryPeriod are expired. + // that ensures both MinHealthyPeriodSeconds and RetryPeriodSeconds are expired. // NOTE: this could potentially lead to executing more retries than expected or to executing retries earlier than // expected, but this is considered acceptable when the system recovers after someone/something changes or deletes // the RemediationForAnnotation on Machines. @@ -533,13 +533,13 @@ func (r *KubeadmControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin // Check if remediation can happen because retryPeriod is passed. if lastRemediationTime.Add(retryPeriod).After(reconciliationTime) { log.Info(fmt.Sprintf("A control plane machine needs remediation, but the operation already failed in the latest %s. Skipping remediation", retryPeriod)) - v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedV1Beta1Condition, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest %s (RetryPeriod)", retryPeriod) + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedV1Beta1Condition, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest %s (RetryPeriodSeconds)", retryPeriod) conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, - Message: fmt.Sprintf("KubeadmControlPlane can't remediate this machine because the operation already failed in the latest %s (RetryPeriod)", retryPeriod), + Message: fmt.Sprintf("KubeadmControlPlane can't remediate this machine because the operation already failed in the latest %s (RetryPeriodSeconds)", retryPeriod), }) return remediationInProgressData, false, nil } diff --git a/controlplane/kubeadm/internal/controllers/remediation_test.go b/controlplane/kubeadm/internal/controllers/remediation_test.go index b70ad9692001..87c2d211c689 100644 --- a/controlplane/kubeadm/internal/controllers/remediation_test.go +++ b/controlplane/kubeadm/internal/controllers/remediation_test.go @@ -336,7 +336,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed(), withWaitBeforeDeleteFinalizer(), withRemediateForAnnotation(MustMarshalRemediationData(&RemediationData{ Machine: "m0", - Timestamp: metav1.Time{Time: time.Now().Add(-controlplanev1.DefaultMinHealthyPeriod / 2).UTC()}, // minHealthy not expired yet.
+ Timestamp: metav1.Time{Time: time.Now().Add(-time.Duration(controlplanev1.DefaultMinHealthyPeriodSeconds) * time.Second / 2).UTC()}, // minHealthy not expired yet. RetryCount: 3, }))) m2 := createMachine(ctx, g, ns.Name, "m2-healthy-", withHealthyEtcdMember()) @@ -388,7 +388,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed(), withWaitBeforeDeleteFinalizer(), withRemediateForAnnotation(MustMarshalRemediationData(&RemediationData{ Machine: "m0", - Timestamp: metav1.Time{Time: time.Now().Add(-2 * controlplanev1.DefaultMinHealthyPeriod).UTC()}, // minHealthyPeriod already expired. + Timestamp: metav1.Time{Time: time.Now().Add(-2 * time.Duration(controlplanev1.DefaultMinHealthyPeriodSeconds) * time.Second).UTC()}, // minHealthyPeriod already expired. RetryCount: 3, }))) m2 := createMachine(ctx, g, ns.Name, "m2-healthy-", withHealthyEtcdMember()) @@ -442,11 +442,11 @@ func TestReconcileUnhealthyMachines(t *testing.T) { t.Run("Retry history is ignored if min healthy period is expired", func(t *testing.T) { g := NewWithT(t) - minHealthyPeriod := 4 * controlplanev1.DefaultMinHealthyPeriod // big min healthy period, so we are user that we are not using DefaultMinHealthyPeriod. + minHealthyPeriod := 4 * controlplanev1.DefaultMinHealthyPeriodSeconds // big min healthy period, so we are sure that we are not using DefaultMinHealthyPeriodSeconds. m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed(), withWaitBeforeDeleteFinalizer(), withRemediateForAnnotation(MustMarshalRemediationData(&RemediationData{ Machine: "m0", - Timestamp: metav1.Time{Time: time.Now().Add(-2 * minHealthyPeriod).UTC()}, // minHealthyPeriod already expired. + Timestamp: metav1.Time{Time: time.Now().Add(-2 * time.Duration(minHealthyPeriod) * time.Second).UTC()}, // minHealthyPeriod already expired. RetryCount: 3, }))) m2 := createMachine(ctx, g, ns.Name, "m2-healthy-", withHealthyEtcdMember()) @@ -458,8 +458,8 @@ func TestReconcileUnhealthyMachines(t *testing.T) { Replicas: utilptr.To[int32](3), Version: "v1.19.1", RemediationStrategy: &controlplanev1.RemediationStrategy{ - MaxRetry: utilptr.To[int32](3), - MinHealthyPeriod: &metav1.Duration{Duration: minHealthyPeriod}, + MaxRetry: utilptr.To[int32](3), + MinHealthyPeriodSeconds: utilptr.To(minHealthyPeriod), }, }, }, @@ -498,12 +498,12 @@ func TestReconcileUnhealthyMachines(t *testing.T) { removeFinalizer(g, m1) g.Expect(env.Cleanup(ctx, m1, m2, m3)).To(Succeed()) }) - t.Run("Remediation does not happen if RetryPeriod is not yet passed", func(t *testing.T) { + t.Run("Remediation does not happen if RetryPeriodSeconds is not yet passed", func(t *testing.T) { g := NewWithT(t) m1 := createMachine(ctx, g, ns.Name, "m1-unhealthy-", withMachineHealthCheckFailed(), withWaitBeforeDeleteFinalizer(), withRemediateForAnnotation(MustMarshalRemediationData(&RemediationData{ Machine: "m0", - Timestamp: metav1.Time{Time: time.Now().Add(-controlplanev1.DefaultMinHealthyPeriod / 2).UTC()}, // minHealthyPeriod not yet expired.
RetryCount: 2, }))) m2 := createMachine(ctx, g, ns.Name, "m2-healthy-", withHealthyEtcdMember()) @@ -515,8 +515,8 @@ func TestReconcileUnhealthyMachines(t *testing.T) { Replicas: utilptr.To[int32](3), Version: "v1.19.1", RemediationStrategy: &controlplanev1.RemediationStrategy{ - MaxRetry: utilptr.To[int32](3), - RetryPeriod: metav1.Duration{Duration: controlplanev1.DefaultMinHealthyPeriod}, // RetryPeriod not yet expired. + MaxRetry: utilptr.To[int32](3), + RetryPeriodSeconds: controlplanev1.DefaultMinHealthyPeriodSeconds, // RetryPeriodSeconds not yet expired. }, }, }, @@ -541,8 +541,8 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriod)") - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriod)") + assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriodSeconds)") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriodSeconds)") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) diff --git a/controlplane/kubeadm/internal/filters.go b/controlplane/kubeadm/internal/filters.go index 2b0544cfcb06..f7c63d0b5bde 100644 --- a/controlplane/kubeadm/internal/filters.go +++ b/controlplane/kubeadm/internal/filters.go @@ -40,7 +40,7 @@ import ( // Kubernetes version, infrastructure template, and KubeadmConfig field need to be equivalent. // Note: We don't need to compare the entire MachineSpec to determine if a Machine needs to be rolled out, // because all the fields in the MachineSpec, except for version, the infrastructureRef and bootstrap.ConfigRef, are either: -// - mutated in-place (ex: NodeDrainTimeout) +// - mutated in-place (ex: NodeDrainTimeoutSeconds) // - are not dictated by KCP (ex: ProviderID) // - are not relevant for the rollout decision (ex: failureDomain). 
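Because the comparison in `matchesMachineSpec` (which follows) deliberately ignores these fields, a change to them on KCP has to be propagated onto existing Machines directly; that is what the `syncMachines` and `computeDesiredMachine` hunks earlier in this diff do. A schematic sketch of that in-place copy; the core-types import path is assumed from the sibling packages visible in this diff, and the helper is ours, not the PR's code:

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// copyInPlaceTimeouts mirrors the propagation done in syncMachines: the
// timeout fields are copied onto the existing Machine spec, so updating
// them on KCP never triggers a machine rollout.
func copyInPlaceTimeouts(dst, src *clusterv1.MachineSpec) {
	dst.NodeDrainTimeoutSeconds = src.NodeDrainTimeoutSeconds
	dst.NodeDeletionTimeoutSeconds = src.NodeDeletionTimeoutSeconds
	dst.NodeVolumeDetachTimeoutSeconds = src.NodeVolumeDetachTimeoutSeconds
}

func main() {
	desired := &clusterv1.MachineSpec{NodeDrainTimeoutSeconds: ptr.To(int32(600))}
	existing := &clusterv1.MachineSpec{}
	copyInPlaceTimeouts(existing, desired)
	fmt.Println(*existing.NodeDrainTimeoutSeconds) // 600
}
```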
func matchesMachineSpec(infraConfigs map[string]*unstructured.Unstructured, machineConfigs map[string]*bootstrapv1.KubeadmConfig, kcp *controlplanev1.KubeadmControlPlane, machine *clusterv1.Machine) (bool, []string, []string, error) { diff --git a/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane.go b/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane.go index 9aedd5ace325..d4821f2a5e9f 100644 --- a/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane.go +++ b/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane.go @@ -226,9 +226,9 @@ func (webhook *KubeadmControlPlane) ValidateUpdate(_ context.Context, oldObj, ne {spec, "machineTemplate", "infrastructureRef", "apiVersion"}, {spec, "machineTemplate", "infrastructureRef", "name"}, {spec, "machineTemplate", "infrastructureRef", "kind"}, - {spec, "machineTemplate", "nodeDrainTimeout"}, - {spec, "machineTemplate", "nodeVolumeDetachTimeout"}, - {spec, "machineTemplate", "nodeDeletionTimeout"}, + {spec, "machineTemplate", "nodeDrainTimeoutSeconds"}, + {spec, "machineTemplate", "nodeVolumeDetachTimeoutSeconds"}, + {spec, "machineTemplate", "nodeDeletionTimeoutSeconds"}, // spec {spec, "replicas"}, {spec, "version"}, diff --git a/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go b/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go index 58d48d56cdb6..57e53cc0cbce 100644 --- a/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go +++ b/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go @@ -316,9 +316,9 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { Namespace: "foo", Name: "infraTemplate", }, - NodeDrainTimeout: &metav1.Duration{Duration: time.Second}, - NodeVolumeDetachTimeout: &metav1.Duration{Duration: time.Second}, - NodeDeletionTimeout: &metav1.Duration{Duration: time.Second}, + NodeDrainTimeoutSeconds: ptr.To(int32(1)), + NodeVolumeDetachTimeoutSeconds: ptr.To(int32(1)), + NodeDeletionTimeoutSeconds: ptr.To(int32(1)), }, Replicas: ptr.To[int32](1), RolloutStrategy: &controlplanev1.RolloutStrategy{ @@ -445,9 +445,9 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { } validUpdate.Spec.MachineTemplate.InfrastructureRef.APIVersion = "test/v1alpha2" validUpdate.Spec.MachineTemplate.InfrastructureRef.Name = "orange" - validUpdate.Spec.MachineTemplate.NodeDrainTimeout = &metav1.Duration{Duration: 10 * time.Second} - validUpdate.Spec.MachineTemplate.NodeVolumeDetachTimeout = &metav1.Duration{Duration: 10 * time.Second} - validUpdate.Spec.MachineTemplate.NodeDeletionTimeout = &metav1.Duration{Duration: 10 * time.Second} + validUpdate.Spec.MachineTemplate.NodeDrainTimeoutSeconds = ptr.To(int32(10)) + validUpdate.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds = ptr.To(int32(10)) + validUpdate.Spec.MachineTemplate.NodeDeletionTimeoutSeconds = ptr.To(int32(10)) validUpdate.Spec.Replicas = ptr.To[int32](5) now := metav1.NewTime(time.Now()) validUpdate.Spec.RolloutAfter = &now @@ -455,9 +455,9 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { CertificatesExpiryDays: ptr.To[int32](14), } validUpdate.Spec.RemediationStrategy = &controlplanev1.RemediationStrategy{ - MaxRetry: ptr.To[int32](50), - MinHealthyPeriod: &metav1.Duration{Duration: 10 * time.Hour}, - RetryPeriod: metav1.Duration{Duration: 10 * time.Minute}, + MaxRetry: ptr.To[int32](50), + MinHealthyPeriodSeconds: ptr.To(int32(10 * 60 * 60)), + RetryPeriodSeconds: 10 * 60, } validUpdate.Spec.KubeadmConfigSpec.Format = bootstrapv1.CloudConfig diff --git 
a/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplanetemplate_test.go b/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplanetemplate_test.go index ecabd6a313bb..9455a0994353 100644 --- a/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplanetemplate_test.go +++ b/controlplane/kubeadm/internal/webhooks/kubeadmcontrolplanetemplate_test.go @@ -19,11 +19,11 @@ package webhooks import ( "strings" "testing" - "time" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/component-base/featuregate/testing" + "k8s.io/utils/ptr" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" @@ -45,14 +45,14 @@ func TestKubeadmControlPlaneTemplateDefault(t *testing.T) { Template: controlplanev1.KubeadmControlPlaneTemplateResource{ Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ - NodeDrainTimeout: &metav1.Duration{Duration: 10 * time.Second}, + NodeDrainTimeoutSeconds: ptr.To(int32(10)), }, }, }, }, } updateDefaultingValidationKCPTemplate := kcpTemplate.DeepCopy() - updateDefaultingValidationKCPTemplate.Spec.Template.Spec.MachineTemplate.NodeDrainTimeout = &metav1.Duration{Duration: 20 * time.Second} + updateDefaultingValidationKCPTemplate.Spec.Template.Spec.MachineTemplate.NodeDrainTimeoutSeconds = ptr.To(int32(20)) webhook := &KubeadmControlPlaneTemplate{} t.Run("for KubeadmControlPlaneTemplate", util.CustomDefaultValidateTest(ctx, updateDefaultingValidationKCPTemplate, webhook)) g.Expect(webhook.Default(ctx, kcpTemplate)).To(Succeed()) @@ -77,7 +77,7 @@ func TestKubeadmControlPlaneTemplateValidationFeatureGateEnabled(t *testing.T) { Template: controlplanev1.KubeadmControlPlaneTemplateResource{ Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ - NodeDrainTimeout: &metav1.Duration{Duration: time.Second}, + NodeDrainTimeoutSeconds: ptr.To(int32(1)), }, }, }, @@ -104,7 +104,7 @@ func TestKubeadmControlPlaneTemplateValidationFeatureGateDisabled(t *testing.T) Template: controlplanev1.KubeadmControlPlaneTemplateResource{ Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ - NodeDrainTimeout: &metav1.Duration{Duration: time.Second}, + NodeDrainTimeoutSeconds: ptr.To(int32(1)), }, }, }, @@ -165,7 +165,7 @@ func TestKubeadmControlPlaneTemplateUpdateValidation(t *testing.T) { Template: controlplanev1.KubeadmControlPlaneTemplateResource{ Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ - NodeDrainTimeout: &metav1.Duration{Duration: time.Duration(10) * time.Minute}, + NodeDrainTimeoutSeconds: ptr.To(int32(10 * 60)), }, }, }, @@ -180,7 +180,7 @@ func TestKubeadmControlPlaneTemplateUpdateValidation(t *testing.T) { Format: bootstrapv1.CloudConfig, }, MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ - NodeDrainTimeout: &metav1.Duration{Duration: time.Duration(10) * time.Minute}, + NodeDrainTimeoutSeconds: ptr.To(int32(10 * 60)), }, }, }, @@ -198,7 +198,7 @@ func TestKubeadmControlPlaneTemplateUpdateValidation(t *testing.T) { Template: controlplanev1.KubeadmControlPlaneTemplateResource{ Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ MachineTemplate: 
&controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ - NodeDrainTimeout: &metav1.Duration{Duration: time.Duration(10) * time.Minute}, + NodeDrainTimeoutSeconds: ptr.To(int32(10 * 60)), }, }, }, @@ -217,7 +217,7 @@ func TestKubeadmControlPlaneTemplateUpdateValidation(t *testing.T) { }, }, MachineTemplate: &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{ - NodeDrainTimeout: &metav1.Duration{Duration: time.Duration(10) * time.Minute}, + NodeDrainTimeoutSeconds: ptr.To(int32(10 * 60)), }, }, }, diff --git a/controlplane/kubeadm/internal/webhooks/scale_test.go b/controlplane/kubeadm/internal/webhooks/scale_test.go index 275b033da9be..ebae0fdcca43 100644 --- a/controlplane/kubeadm/internal/webhooks/scale_test.go +++ b/controlplane/kubeadm/internal/webhooks/scale_test.go @@ -19,7 +19,6 @@ package webhooks import ( "context" "testing" - "time" . "github.com/onsi/gomega" admissionv1 "k8s.io/api/admission/v1" @@ -60,7 +59,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { Namespace: "foo", Name: "infraTemplate", }, - NodeDrainTimeout: &metav1.Duration{Duration: time.Second}, + NodeDrainTimeoutSeconds: ptr.To(int32(1)), }, Replicas: ptr.To[int32](1), RolloutStrategy: &controlplanev1.RolloutStrategy{ diff --git a/docs/book/src/developer/providers/contracts/control-plane.md b/docs/book/src/developer/providers/contracts/control-plane.md index 921280486c94..fbe9ee3d2ea4 100644 --- a/docs/book/src/developer/providers/contracts/control-plane.md +++ b/docs/book/src/developer/providers/contracts/control-plane.md @@ -401,30 +401,45 @@ type FooControlPlaneMachineTemplate struct { // offered by an infrastructure provider. InfrastructureRef corev1.ObjectReference `json:"infrastructureRef"` - // nodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node + // nodeDrainTimeoutSeconds is the total amount of time that the controller will spend on draining a controlplane node // The default value is 0, meaning that the node can be drained without any time limitations. // +optional - NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDrainTimeoutSeconds *int32 `json:"nodeDrainTimeoutSeconds,omitempty"` - // nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes + // nodeVolumeDetachTimeoutSeconds is the total amount of time that the controller will spend on waiting for all volumes // to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. // +optional - NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeVolumeDetachTimeoutSeconds *int32 `json:"nodeVolumeDetachTimeoutSeconds,omitempty"` - // nodeDeletionTimeout defines how long the machine controller will attempt to delete the Node that the Machine + // nodeDeletionTimeoutSeconds defines how long the machine controller will attempt to delete the Node that the Machine // hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely. // If no value is provided, the default value for this property of the Machine resource will be used. // +optional - NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + // +kubebuilder:validation:Minimum=0 + NodeDeletionTimeoutSeconds *int32 `json:"nodeDeletionTimeoutSeconds,omitempty"` // Other fields SHOULD be added based on the needs of your provider. 
} ``` -Please note that some of the above fields (`metadata`, `nodeDrainTimeout`, `nodeVolumeDetachTimeout`, `nodeDeletionTimeout`) +Please note that some of the above fields (`metadata`, `nodeDrainTimeoutSeconds`, `nodeVolumeDetachTimeoutSeconds`, `nodeDeletionTimeoutSeconds`) must be propagated to machines without triggering rollouts. See [In place propagation of changes affecting Kubernetes objects only] as well as [Metadata propagation] for more details. + + In case you are developing a control plane provider that allows definition of machine readiness gates, you SHOULD also implement the following `machineTemplate` field. diff --git a/docs/book/src/developer/providers/migrations/v1.10-to-v1.11.md b/docs/book/src/developer/providers/migrations/v1.10-to-v1.11.md index 1f870629e16c..a2b614beae70 100644 --- a/docs/book/src/developer/providers/migrations/v1.10-to-v1.11.md +++ b/docs/book/src/developer/providers/migrations/v1.10-to-v1.11.md @@ -110,6 +110,15 @@ proposal because most of the changes described below are a consequence of the wo - The `unhealthyConditions` field has been renamed to `unhealthyNodeConditions` in the following structs: - `spec.topology.controlPlane.machineHealthCheck` - `spec.topology.workers.machineDeployments[].machineHealthCheck` +- All fields of type Duration in `spec.topology.{controlPlane,workers.machineDeployments[],workers.machinePools[]}` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines. + - `nodeDrainTimeout` => `nodeDrainTimeoutSeconds` + - `nodeVolumeDetachTimeout` => `nodeVolumeDetachTimeoutSeconds` + - `nodeDeletionTimeout` => `nodeDeletionTimeoutSeconds` +- All fields of type Duration in `spec.topology.{controlPlane.machineHealthCheck,workers.machineDeployments[].machineHealthCheck}` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines. + - `nodeStartupTimeout` => `nodeStartupTimeoutSeconds` + - `unhealthyNodeConditions[].timeout` => `unhealthyNodeConditions[].timeoutSeconds` - Information about the initial provisioning process is now surfacing under the new `status.initialization` field. - `status.infrastructureReady` has been replaced by `status.initialization.infrastructureProvisioned` - `status.controlPlaneReady` has been replaced by `status.initialization.controlPlaneInitialized` @@ -128,6 +137,11 @@ proposal because most of the changes described below are a consequence of the wo - See changes that apply to [all CRDs](#all-crds) - The `spec.progressDeadlineSeconds` field (deprecated since CAPI v1.9) has been removed +- All fields of type Duration in `spec.template.spec` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines.
+ - `nodeDrainTimeout` => `nodeDrainTimeoutSeconds` + - `nodeVolumeDetachTimeout` => `nodeVolumeDetachTimeoutSeconds` + - `nodeDeletionTimeout` => `nodeDeletionTimeoutSeconds` - Replica counters are now consistent with replica counters from other resources - `status.replicas` was made a pointer and omitempty was added - `status.readyReplicas` now has a new semantic based on the machine's `Ready` condition @@ -144,6 +158,11 @@ proposal because most of the changes described below are a consequence of the wo ### MachineSet - See changes that apply to [all CRDs](#all-crds) +- All fields of type Duration in `spec.template.spec` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines. + - `nodeDrainTimeout` => `nodeDrainTimeoutSeconds` + - `nodeVolumeDetachTimeout` => `nodeVolumeDetachTimeoutSeconds` + - `nodeDeletionTimeout` => `nodeDeletionTimeoutSeconds` - Replica counter fields are now consistent with replica counters from other resources - `status.replicas` was made a pointer and omitempty was added - `status.readyReplicas` now has a new semantic based on the machine's `Ready` condition @@ -156,6 +175,11 @@ proposal because most of the changes described below are a consequence of the wo ### MachinePool - See changes that apply to [all CRDs](#all-crds) +- All fields of type Duration in `spec.template.spec` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines. + - `nodeDrainTimeout` => `nodeDrainTimeoutSeconds` + - `nodeVolumeDetachTimeout` => `nodeVolumeDetachTimeoutSeconds` + - `nodeDeletionTimeout` => `nodeDeletionTimeoutSeconds` - `status.replicas` was made a pointer and omitempty was added - Support for terminal errors has been dropped. - `status.failureReason` and `status.failureMessage` will continue to exist temporarily under `status.deprecated.v1beta1`. @@ -164,6 +188,11 @@ proposal because most of the changes described below are a consequence of the wo ### Machine - See changes that apply to [all CRDs](#all-crds) +- All fields of type Duration in `spec` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines. + - `nodeDrainTimeout` => `nodeDrainTimeoutSeconds` + - `nodeVolumeDetachTimeout` => `nodeVolumeDetachTimeoutSeconds` + - `nodeDeletionTimeout` => `nodeDeletionTimeoutSeconds` - Information about the initial provisioning process is now surfacing under the new `status.initialization` field. - `status.infrastructureReady` has been replaced by `status.initialization.infrastructureProvisioned` - `status.bootstrapReady` has been replaced by `status.initialization.bootstrapDataSecretCreated` @@ -174,6 +203,12 @@ proposal because most of the changes described below are a consequence of the wo ### MachineHealthCheck - See changes that apply to [all CRDs](#all-crds) +- All fields of type Duration in `spec` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines. + - `nodeStartupTimeout` => `nodeStartupTimeoutSeconds` +- All fields of type Duration in `spec.unhealthyNodeConditions[]` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines.
+ - `timeout` => `timeoutSeconds` - The `spec.unhealthyConditions` field has been renamed to `spec.unhealthyNodeConditions` ### ClusterClass @@ -188,6 +223,15 @@ proposal because most of the changes described below are a consequence of the wo to `spec.variables[].deprecatedV1Beta1Metadata` and `.status.variables[].definitions[].deprecatedV1Beta1Metadata` - These fields are deprecated and will be removed when support for v1beta1 is dropped. - Please use `XMetadata` in `JSONSchemaProps` instead. +- All fields of type Duration in `spec.{controlPlane,workers.machineDeployments[],workers.machinePools[]}` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines. + - `nodeDrainTimeout` => `nodeDrainTimeoutSeconds` + - `nodeVolumeDetachTimeout` => `nodeVolumeDetachTimeoutSeconds` + - `nodeDeletionTimeout` => `nodeDeletionTimeoutSeconds` +- All fields of type Duration in `spec.{controlPlane.machineHealthCheck,workers.machineDeployments[].machineHealthCheck}` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines. + - `nodeStartupTimeout` => `nodeStartupTimeoutSeconds` + - `unhealthyNodeConditions[].timeout` => `unhealthyNodeConditions[].timeoutSeconds` - The `builtin.cluster.classRef.Name` and `builtin.cluster.classRef.Namespace` variables have been added - The `builtin.cluster.class` and `builtin.cluster.classNamespace` variables are deprecated and will be removed with the next apiVersion. - The `builtin.cluster.network.ipFamily` variable has been removed and can no longer be used in patches @@ -230,6 +274,9 @@ proposal because most of the changes described below are a consequence of the wo - `controlPlaneEndpoint` (can still be set via `Cluster.spec.controlPlaneEndpoint`) - `clusterName` (can still be set via `Cluster.metadata.name`) Note: The ClusterConfiguration fields could previously be used to overwrite the fields from Cluster; now only the fields from Cluster are used. +- All fields of type Duration in `spec.initConfiguration.bootstrapTokens[]` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines. + - `.ttl` => `.ttlSeconds` - Information about the initial provisioning process is now surfacing under the new `status.initialization` field. - `status.ready` has been replaced by `status.initialization.dataSecretCreated` - Support for terminal errors has been dropped (see [dataSecretCreated](#cluster-api-contract-changes)). @@ -277,6 +324,18 @@ KubeadmConfigTemplate `spec.template.spec` has been aligned to changes in the [K - `controlPlaneEndpoint` (can still be set via `Cluster.spec.controlPlaneEndpoint`) - `clusterName` (can still be set via `Cluster.metadata.name`) Note: The ClusterConfiguration fields could previously be used to overwrite the fields from Cluster; now only the fields from Cluster are used. +- All fields of type Duration in `spec.kubeadmConfigSpec.initConfiguration.bootstrapTokens[]` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines. + - `.ttl` => `.ttlSeconds` +- All fields of type Duration in `spec.machineTemplate` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines.
+ - `nodeDrainTimeout` => `nodeDrainTimeoutSeconds` + - `nodeVolumeDetachTimeout` => `nodeVolumeDetachTimeoutSeconds` + - `nodeDeletionTimeout` => `nodeDeletionTimeoutSeconds` +- All fields of type Duration in `spec.remediationStrategy` have + been renamed by adding the `Seconds` suffix and their type was changed to int32, thus aligning with K8s guidelines. + - `retryPeriod` => `retryPeriodSeconds` + - `minHealthyPeriod` => `minHealthyPeriodSeconds` - Replica counter fields are now consistent with replica counters from other resources. - `status.replicas` was made a pointer and omitempty was added - `status.readyReplicas` now has a new semantic based on the machine's `Ready` condition @@ -362,6 +421,7 @@ for providers still implementing the v1beta1 contract. The following rules have been changed or are no longer supported; please read the corresponding notes about compatibility for providers still implementing the v1beta1 contract. +- [ControlPlane: machines](../contracts/control-plane.md#controlplane-machines) - [ControlPlane: initialization completed](../contracts/control-plane.md#controlplane-initialization-completed) - [ControlPlane: replicas](../contracts/control-plane.md#controlplane-replicas) - [ControlPlane: conditions](../contracts/control-plane.md#controlplane-conditions) diff --git a/exp/internal/controllers/machinepool_controller.go b/exp/internal/controllers/machinepool_controller.go index 1b3686d1aace..0193a2aa9be7 100644 --- a/exp/internal/controllers/machinepool_controller.go +++ b/exp/internal/controllers/machinepool_controller.go @@ -314,9 +314,9 @@ func (r *MachinePoolReconciler) reconcileDeleteNodes(ctx context.Context, cluste // isMachinePoolNodeDeleteTimeoutPassed checks whether the machinePool node delete timeout has passed. func (r *MachinePoolReconciler) isMachinePoolNodeDeleteTimeoutPassed(machinePool *clusterv1.MachinePool) bool { - if !machinePool.DeletionTimestamp.IsZero() && machinePool.Spec.Template.Spec.NodeDeletionTimeout != nil { - if machinePool.Spec.Template.Spec.NodeDeletionTimeout.Nanoseconds() != 0 { - deleteTimePlusDuration := machinePool.DeletionTimestamp.Add(machinePool.Spec.Template.Spec.NodeDeletionTimeout.Duration) + if !machinePool.DeletionTimestamp.IsZero() && machinePool.Spec.Template.Spec.NodeDeletionTimeoutSeconds != nil { + if *machinePool.Spec.Template.Spec.NodeDeletionTimeoutSeconds != 0 { + deleteTimePlusDuration := machinePool.DeletionTimestamp.Add(time.Duration(*machinePool.Spec.Template.Spec.NodeDeletionTimeoutSeconds) * time.Second) return deleteTimePlusDuration.Before(time.Now()) } } diff --git a/exp/internal/controllers/machinepool_controller_test.go b/exp/internal/controllers/machinepool_controller_test.go index 7ea68fc3ed4c..81851fc980a1 100644 --- a/exp/internal/controllers/machinepool_controller_test.go +++ b/exp/internal/controllers/machinepool_controller_test.go @@ -389,7 +389,7 @@ func TestReconcileMachinePoolRequest(t *testing.T) { }, }, { - name: "Successfully reconcile MachinePool with deletionTimestamp & NodeDeletionTimeout not passed when Nodes can be deleted (MP should go away)", + name: "Successfully reconcile MachinePool with deletionTimestamp & NodeDeletionTimeoutSeconds not passed when Nodes can be deleted (MP should go away)", machinePool: clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "deleted", @@ -412,8 +412,8 @@ func TestReconcileMachinePoolRequest(t *testing.T) { Name: "infra-config1-already-deleted", // Use an InfrastructureMachinePool that doesn't exist, so reconcileDelete doesn't get stuck on deletion Namespace:
metav1.NamespaceDefault, }, - Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, - NodeDeletionTimeout: &metav1.Duration{Duration: 10 * time.Minute}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, + NodeDeletionTimeoutSeconds: ptr.To(int32(10 * 60)), }, }, ProviderIDList: []string{"aws:///us-test-2a/i-013ab00756982217f"}, @@ -452,7 +452,7 @@ func TestReconcileMachinePoolRequest(t *testing.T) { }, }, { - name: "Fail reconcile MachinePool with deletionTimestamp & NodeDeletionTimeout not passed when Nodes cannot be deleted (MP should stay around)", + name: "Fail reconcile MachinePool with deletionTimestamp & NodeDeletionTimeoutSeconds not passed when Nodes cannot be deleted (MP should stay around)", machinePool: clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "deleted", @@ -475,8 +475,8 @@ func TestReconcileMachinePoolRequest(t *testing.T) { Name: "infra-config1-already-deleted", // Use an InfrastructureMachinePool that doesn't exist, so reconcileDelete doesn't get stuck on deletion Namespace: metav1.NamespaceDefault, }, - Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, - NodeDeletionTimeout: &metav1.Duration{Duration: 10 * time.Minute}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, + NodeDeletionTimeoutSeconds: ptr.To(int32(10 * 60)), }, }, ProviderIDList: []string{"aws:///us-test-2a/i-013ab00756982217f"}, @@ -515,7 +515,7 @@ func TestReconcileMachinePoolRequest(t *testing.T) { }, }, { - name: "Successfully reconcile MachinePool with deletionTimestamp & NodeDeletionTimeout passed when Nodes cannot be deleted (MP should go away)", + name: "Successfully reconcile MachinePool with deletionTimestamp & NodeDeletionTimeoutSeconds passed when Nodes cannot be deleted (MP should go away)", machinePool: clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "deleted", @@ -538,8 +538,8 @@ func TestReconcileMachinePoolRequest(t *testing.T) { Name: "infra-config1-already-deleted", // Use an InfrastructureMachinePool that doesn't exist, so reconcileDelete doesn't get stuck on deletion Namespace: metav1.NamespaceDefault, }, - Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, - NodeDeletionTimeout: &metav1.Duration{Duration: 10 * time.Second}, // timeout passed + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, + NodeDeletionTimeoutSeconds: ptr.To(int32(10)), // timeout passed }, }, ProviderIDList: []string{"aws:///us-test-2a/i-013ab00756982217f"}, @@ -654,7 +654,7 @@ func TestMachinePoolNodeDeleteTimeoutPassed(t *testing.T) { want: false, }, { - name: "false if deletionTimestamp set to now and NodeDeletionTimeout not set", + name: "false if deletionTimestamp set to now and NodeDeletionTimeoutSeconds not set", machinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machinepool", @@ -665,7 +665,7 @@ func TestMachinePoolNodeDeleteTimeoutPassed(t *testing.T) { want: false, }, { - name: "false if deletionTimestamp set to now and NodeDeletionTimeout set to 0", + name: "false if deletionTimestamp set to now and NodeDeletionTimeoutSeconds set to 0", machinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machinepool", @@ -675,7 +675,7 @@ func TestMachinePoolNodeDeleteTimeoutPassed(t *testing.T) { Spec: clusterv1.MachinePoolSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - NodeDeletionTimeout: &metav1.Duration{Duration: 0 * time.Second}, + NodeDeletionTimeoutSeconds: ptr.To(int32(0)), }, }, }, @@ -683,7 +683,7 @@ func 
TestMachinePoolNodeDeleteTimeoutPassed(t *testing.T) { want: false, }, { - name: "false if deletionTimestamp set to now and NodeDeletionTimeout set to 1m", + name: "false if deletionTimestamp set to now and NodeDeletionTimeoutSeconds set to 1m", machinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machinepool", @@ -693,7 +693,7 @@ func TestMachinePoolNodeDeleteTimeoutPassed(t *testing.T) { Spec: clusterv1.MachinePoolSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - NodeDeletionTimeout: &metav1.Duration{Duration: 1 * time.Minute}, + NodeDeletionTimeoutSeconds: ptr.To(int32(60)), }, }, }, @@ -701,7 +701,7 @@ func TestMachinePoolNodeDeleteTimeoutPassed(t *testing.T) { want: false, }, { - name: "true if deletionTimestamp set to now-1m and NodeDeletionTimeout set to 10s", + name: "true if deletionTimestamp set to now-1m and NodeDeletionTimeoutSeconds set to 10s", machinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "machinepool", @@ -711,7 +711,7 @@ func TestMachinePoolNodeDeleteTimeoutPassed(t *testing.T) { Spec: clusterv1.MachinePoolSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - NodeDeletionTimeout: &metav1.Duration{Duration: 10 * time.Second}, + NodeDeletionTimeoutSeconds: ptr.To(int32(10)), }, }, }, diff --git a/exp/internal/webhooks/machinepool.go b/exp/internal/webhooks/machinepool.go index 57c4b4a4d14c..1d684744927e 100644 --- a/exp/internal/webhooks/machinepool.go +++ b/exp/internal/webhooks/machinepool.go @@ -21,12 +21,10 @@ import ( "fmt" "strconv" "strings" - "time" "github.com/pkg/errors" v1 "k8s.io/api/admission/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/utils/ptr" @@ -39,7 +37,7 @@ import ( "sigs.k8s.io/cluster-api/util/version" ) -const defaultNodeDeletionTimeout = 10 * time.Second +const defaultNodeDeletionTimeoutSeconds = int32(10) func (webhook *MachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error { if webhook.decoder == nil { @@ -108,8 +106,8 @@ func (webhook *MachinePool) Default(ctx context.Context, obj runtime.Object) err } // Set the default value for the node deletion timeout. - if m.Spec.Template.Spec.NodeDeletionTimeout == nil { - m.Spec.Template.Spec.NodeDeletionTimeout = &metav1.Duration{Duration: defaultNodeDeletionTimeout} + if m.Spec.Template.Spec.NodeDeletionTimeoutSeconds == nil { + m.Spec.Template.Spec.NodeDeletionTimeoutSeconds = ptr.To(defaultNodeDeletionTimeoutSeconds) } // tolerate version strings without a "v" prefix: prepend it if it's not there. 
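The webhook hunk above replaces the `metav1.Duration`-based default (`10 * time.Second`) with a plain `int32` seconds default. Below is a minimal, self-contained sketch of this pattern (the struct and helper names are illustrative, not the actual CAPI types): optional timeouts stay `*int32` on the API surface and are expanded to a `time.Duration` only at the point of use.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/utils/ptr"
)

const defaultNodeDeletionTimeoutSeconds = int32(10)

// machineSpec is a hypothetical, trimmed-down stand-in for the real spec type.
type machineSpec struct {
	// NodeDeletionTimeoutSeconds is nil when the user did not set it;
	// a defaulting webhook fills it in.
	NodeDeletionTimeoutSeconds *int32
}

// defaultSpec mirrors the defaulting logic: only fill in the value when unset.
func defaultSpec(s *machineSpec) {
	if s.NodeDeletionTimeoutSeconds == nil {
		s.NodeDeletionTimeoutSeconds = ptr.To(defaultNodeDeletionTimeoutSeconds)
	}
}

// timeoutAsDuration expands the stored seconds into a time.Duration at the
// point of use, as the reconciler above does.
func timeoutAsDuration(s *machineSpec) time.Duration {
	if s.NodeDeletionTimeoutSeconds == nil {
		return 0
	}
	return time.Duration(*s.NodeDeletionTimeoutSeconds) * time.Second
}

func main() {
	s := &machineSpec{}
	defaultSpec(s)
	fmt.Println(timeoutAsDuration(s)) // 10s
}
```

Keeping the field as `*int32` preserves the unset/zero distinction: `nil` means "apply the default", while an explicit `0` disables the timeout, matching the `!= 0` check in the controller logic above.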
diff --git a/exp/internal/webhooks/machinepool_test.go b/exp/internal/webhooks/machinepool_test.go index d9096c737a5e..fd02f9b8df54 100644 --- a/exp/internal/webhooks/machinepool_test.go +++ b/exp/internal/webhooks/machinepool_test.go @@ -60,7 +60,7 @@ func TestMachinePoolDefault(t *testing.T) { g.Expect(mp.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace).To(Equal(mp.Namespace)) g.Expect(mp.Spec.Template.Spec.InfrastructureRef.Namespace).To(Equal(mp.Namespace)) g.Expect(mp.Spec.Template.Spec.Version).To(Equal(ptr.To("v1.20.0"))) - g.Expect(mp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(&metav1.Duration{Duration: defaultNodeDeletionTimeout})) + g.Expect(*mp.Spec.Template.Spec.NodeDeletionTimeoutSeconds).To(Equal(defaultNodeDeletionTimeoutSeconds)) } func TestCalculateMachinePoolReplicas(t *testing.T) { diff --git a/exp/topology/desiredstate/desired_state.go b/exp/topology/desiredstate/desired_state.go index 97b3cca45a04..01528a15179c 100644 --- a/exp/topology/desiredstate/desired_state.go +++ b/exp/topology/desiredstate/desired_state.go @@ -22,6 +22,7 @@ import ( "fmt" "slices" "strings" + "time" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -371,36 +372,60 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf } } - // If it is required to manage the NodeDrainTimeout for the control plane, set the corresponding field. - nodeDrainTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeDrainTimeout - if s.Blueprint.Topology.ControlPlane.NodeDrainTimeout != nil { - nodeDrainTimeout = s.Blueprint.Topology.ControlPlane.NodeDrainTimeout + // Determine contract version used by the ControlPlane. + contractVersion, err := contract.GetContractVersionForVersion(ctx, g.Client, controlPlane.GroupVersionKind(), controlPlane.GroupVersionKind().Version) + if err != nil { + return nil, errors.Wrapf(err, "failed to get contract version for the ControlPlane object") + } + + // If it is required to manage the NodeDrainTimeoutSeconds for the control plane, set the corresponding field. + nodeDrainTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeDrainTimeoutSeconds + if s.Blueprint.Topology.ControlPlane.NodeDrainTimeoutSeconds != nil { + nodeDrainTimeout = s.Blueprint.Topology.ControlPlane.NodeDrainTimeoutSeconds } if nodeDrainTimeout != nil { - if err := contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Set(controlPlane, *nodeDrainTimeout); err != nil { - return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()) + if contractVersion == "v1beta1" { + if err := contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Set(controlPlane, metav1.Duration{Duration: time.Duration(*nodeDrainTimeout) * time.Second}); err != nil { + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()) + } + } else { + if err := contract.ControlPlane().MachineTemplate().NodeDrainTimeoutSeconds().Set(controlPlane, *nodeDrainTimeout); err != nil { + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().NodeDrainTimeoutSeconds().Path()) + } } } - // If it is required to manage the NodeVolumeDetachTimeout for the control plane, set the corresponding field. 
- nodeVolumeDetachTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeVolumeDetachTimeout - if s.Blueprint.Topology.ControlPlane.NodeVolumeDetachTimeout != nil { - nodeVolumeDetachTimeout = s.Blueprint.Topology.ControlPlane.NodeVolumeDetachTimeout + // If it is required to manage the NodeVolumeDetachTimeoutSeconds for the control plane, set the corresponding field. + nodeVolumeDetachTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeVolumeDetachTimeoutSeconds + if s.Blueprint.Topology.ControlPlane.NodeVolumeDetachTimeoutSeconds != nil { + nodeVolumeDetachTimeout = s.Blueprint.Topology.ControlPlane.NodeVolumeDetachTimeoutSeconds } if nodeVolumeDetachTimeout != nil { - if err := contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Set(controlPlane, *nodeVolumeDetachTimeout); err != nil { - return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()) + if contractVersion == "v1beta1" { + if err := contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Set(controlPlane, metav1.Duration{Duration: time.Duration(*nodeVolumeDetachTimeout) * time.Second}); err != nil { + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()) + } + } else { + if err := contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeoutSeconds().Set(controlPlane, *nodeVolumeDetachTimeout); err != nil { + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeoutSeconds().Path()) + } } } - // If it is required to manage the NodeDeletionTimeout for the control plane, set the corresponding field. - nodeDeletionTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeDeletionTimeout - if s.Blueprint.Topology.ControlPlane.NodeDeletionTimeout != nil { - nodeDeletionTimeout = s.Blueprint.Topology.ControlPlane.NodeDeletionTimeout + // If it is required to manage the NodeDeletionTimeoutSeconds for the control plane, set the corresponding field. 
+ nodeDeletionTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeDeletionTimeoutSeconds + if s.Blueprint.Topology.ControlPlane.NodeDeletionTimeoutSeconds != nil { + nodeDeletionTimeout = s.Blueprint.Topology.ControlPlane.NodeDeletionTimeoutSeconds } if nodeDeletionTimeout != nil { - if err := contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Set(controlPlane, *nodeDeletionTimeout); err != nil { - return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()) + if contractVersion == "v1beta1" { + if err := contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Set(controlPlane, metav1.Duration{Duration: time.Duration(*nodeDeletionTimeout) * time.Second}); err != nil { + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()) + } + } else { + if err := contract.ControlPlane().MachineTemplate().NodeDeletionTimeoutSeconds().Set(controlPlane, *nodeDeletionTimeout); err != nil { + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().NodeDeletionTimeoutSeconds().Path()) + } } } @@ -763,19 +788,19 @@ func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope failureDomain = machineDeploymentTopology.FailureDomain } - nodeDrainTimeout := machineDeploymentClass.NodeDrainTimeout - if machineDeploymentTopology.NodeDrainTimeout != nil { - nodeDrainTimeout = machineDeploymentTopology.NodeDrainTimeout + nodeDrainTimeout := machineDeploymentClass.NodeDrainTimeoutSeconds + if machineDeploymentTopology.NodeDrainTimeoutSeconds != nil { + nodeDrainTimeout = machineDeploymentTopology.NodeDrainTimeoutSeconds } - nodeVolumeDetachTimeout := machineDeploymentClass.NodeVolumeDetachTimeout - if machineDeploymentTopology.NodeVolumeDetachTimeout != nil { - nodeVolumeDetachTimeout = machineDeploymentTopology.NodeVolumeDetachTimeout + nodeVolumeDetachTimeout := machineDeploymentClass.NodeVolumeDetachTimeoutSeconds + if machineDeploymentTopology.NodeVolumeDetachTimeoutSeconds != nil { + nodeVolumeDetachTimeout = machineDeploymentTopology.NodeVolumeDetachTimeoutSeconds } - nodeDeletionTimeout := machineDeploymentClass.NodeDeletionTimeout - if machineDeploymentTopology.NodeDeletionTimeout != nil { - nodeDeletionTimeout = machineDeploymentTopology.NodeDeletionTimeout + nodeDeletionTimeout := machineDeploymentClass.NodeDeletionTimeoutSeconds + if machineDeploymentTopology.NodeDeletionTimeoutSeconds != nil { + nodeDeletionTimeout = machineDeploymentTopology.NodeDeletionTimeoutSeconds } readinessGates := machineDeploymentClass.ReadinessGates @@ -817,16 +842,16 @@ func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope Strategy: strategy, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - ClusterName: s.Current.Cluster.Name, - Version: ptr.To(version), - Bootstrap: clusterv1.Bootstrap{ConfigRef: desiredBootstrapTemplateRef}, - InfrastructureRef: *desiredInfraMachineTemplateRef, - FailureDomain: failureDomain, - NodeDrainTimeout: nodeDrainTimeout, - NodeVolumeDetachTimeout: nodeVolumeDetachTimeout, - NodeDeletionTimeout: nodeDeletionTimeout, - ReadinessGates: readinessGates, - MinReadySeconds: minReadySeconds, + ClusterName: s.Current.Cluster.Name, + Version: ptr.To(version), + Bootstrap: clusterv1.Bootstrap{ConfigRef: desiredBootstrapTemplateRef}, + InfrastructureRef: 
*desiredInfraMachineTemplateRef, + FailureDomain: failureDomain, + NodeDrainTimeoutSeconds: nodeDrainTimeout, + NodeVolumeDetachTimeoutSeconds: nodeVolumeDetachTimeout, + NodeDeletionTimeoutSeconds: nodeDeletionTimeout, + ReadinessGates: readinessGates, + MinReadySeconds: minReadySeconds, }, }, }, @@ -1089,19 +1114,19 @@ func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin failureDomains = machinePoolTopology.FailureDomains } - nodeDrainTimeout := machinePoolClass.NodeDrainTimeout - if machinePoolTopology.NodeDrainTimeout != nil { - nodeDrainTimeout = machinePoolTopology.NodeDrainTimeout + nodeDrainTimeout := machinePoolClass.NodeDrainTimeoutSeconds + if machinePoolTopology.NodeDrainTimeoutSeconds != nil { + nodeDrainTimeout = machinePoolTopology.NodeDrainTimeoutSeconds } - nodeVolumeDetachTimeout := machinePoolClass.NodeVolumeDetachTimeout - if machinePoolTopology.NodeVolumeDetachTimeout != nil { - nodeVolumeDetachTimeout = machinePoolTopology.NodeVolumeDetachTimeout + nodeVolumeDetachTimeout := machinePoolClass.NodeVolumeDetachTimeoutSeconds + if machinePoolTopology.NodeVolumeDetachTimeoutSeconds != nil { + nodeVolumeDetachTimeout = machinePoolTopology.NodeVolumeDetachTimeoutSeconds } - nodeDeletionTimeout := machinePoolClass.NodeDeletionTimeout - if machinePoolTopology.NodeDeletionTimeout != nil { - nodeDeletionTimeout = machinePoolTopology.NodeDeletionTimeout + nodeDeletionTimeout := machinePoolClass.NodeDeletionTimeoutSeconds + if machinePoolTopology.NodeDeletionTimeoutSeconds != nil { + nodeDeletionTimeout = machinePoolTopology.NodeDeletionTimeoutSeconds } // Compute the MachinePool object. @@ -1138,14 +1163,14 @@ func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin FailureDomains: failureDomains, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - ClusterName: s.Current.Cluster.Name, - Version: ptr.To(version), - Bootstrap: clusterv1.Bootstrap{ConfigRef: desiredBootstrapConfigRef}, - InfrastructureRef: *desiredInfraMachinePoolRef, - NodeDrainTimeout: nodeDrainTimeout, - NodeVolumeDetachTimeout: nodeVolumeDetachTimeout, - NodeDeletionTimeout: nodeDeletionTimeout, - MinReadySeconds: minReadySeconds, + ClusterName: s.Current.Cluster.Name, + Version: ptr.To(version), + Bootstrap: clusterv1.Bootstrap{ConfigRef: desiredBootstrapConfigRef}, + InfrastructureRef: *desiredInfraMachinePoolRef, + NodeDrainTimeoutSeconds: nodeDrainTimeout, + NodeVolumeDetachTimeoutSeconds: nodeVolumeDetachTimeout, + NodeDeletionTimeoutSeconds: nodeDeletionTimeout, + MinReadySeconds: minReadySeconds, }, }, }, @@ -1424,13 +1449,13 @@ func computeMachineHealthCheck(ctx context.Context, healthCheckTarget client.Obj }, }, Spec: clusterv1.MachineHealthCheckSpec{ - ClusterName: cluster.Name, - Selector: *selector, - UnhealthyNodeConditions: check.UnhealthyNodeConditions, - MaxUnhealthy: check.MaxUnhealthy, - UnhealthyRange: check.UnhealthyRange, - NodeStartupTimeout: check.NodeStartupTimeout, - RemediationTemplate: check.RemediationTemplate, + ClusterName: cluster.Name, + Selector: *selector, + UnhealthyNodeConditions: check.UnhealthyNodeConditions, + MaxUnhealthy: check.MaxUnhealthy, + UnhealthyRange: check.UnhealthyRange, + NodeStartupTimeoutSeconds: check.NodeStartupTimeoutSeconds, + RemediationTemplate: check.RemediationTemplate, }, } diff --git a/exp/topology/desiredstate/desired_state_test.go b/exp/topology/desiredstate/desired_state_test.go index 7a229320da41..58a70afb069a 100644 --- a/exp/topology/desiredstate/desired_state_test.go +++ 
b/exp/topology/desiredstate/desired_state_test.go @@ -18,6 +18,7 @@ package desiredstate import ( "encoding/json" + "fmt" "strings" "testing" "time" @@ -313,7 +314,7 @@ func TestComputeControlPlane(t *testing.T) { Labels: controlPlaneMachineTemplateLabels, Annotations: controlPlaneMachineTemplateAnnotations, }) - clusterClassDuration := 20 * time.Second + clusterClassDuration := int32(20) clusterClassReadinessGates := []clusterv1.MachineReadinessGate{ {ConditionType: "foo"}, } @@ -321,18 +322,18 @@ func TestComputeControlPlane(t *testing.T) { WithControlPlaneMetadata(labels, annotations). WithControlPlaneReadinessGates(clusterClassReadinessGates). WithControlPlaneTemplate(controlPlaneTemplate). - WithControlPlaneNodeDrainTimeout(&metav1.Duration{Duration: clusterClassDuration}). - WithControlPlaneNodeVolumeDetachTimeout(&metav1.Duration{Duration: clusterClassDuration}). - WithControlPlaneNodeDeletionTimeout(&metav1.Duration{Duration: clusterClassDuration}). + WithControlPlaneNodeDrainTimeout(&clusterClassDuration). + WithControlPlaneNodeVolumeDetachTimeout(&clusterClassDuration). + WithControlPlaneNodeDeletionTimeout(&clusterClassDuration). Build() // TODO: Replace with object builder. // current cluster objects version := "v1.21.2" replicas := int32(3) - topologyDuration := 10 * time.Second - nodeDrainTimeout := metav1.Duration{Duration: topologyDuration} - nodeVolumeDetachTimeout := metav1.Duration{Duration: topologyDuration} - nodeDeletionTimeout := metav1.Duration{Duration: topologyDuration} + topologyDuration := int32(10) + nodeDrainTimeout := topologyDuration + nodeVolumeDetachTimeout := topologyDuration + nodeDeletionTimeout := topologyDuration readinessGates := []clusterv1.MachineReadinessGate{ {ConditionType: "foo"}, {ConditionType: "bar"}, @@ -350,11 +351,11 @@ func TestComputeControlPlane(t *testing.T) { Labels: map[string]string{"l2": ""}, Annotations: map[string]string{"a2": ""}, }, - ReadinessGates: readinessGates, - Replicas: &replicas, - NodeDrainTimeout: &nodeDrainTimeout, - NodeVolumeDetachTimeout: &nodeVolumeDetachTimeout, - NodeDeletionTimeout: &nodeDeletionTimeout, + ReadinessGates: readinessGates, + Replicas: &replicas, + NodeDrainTimeoutSeconds: &nodeDrainTimeout, + NodeVolumeDetachTimeoutSeconds: &nodeVolumeDetachTimeout, + NodeDeletionTimeoutSeconds: &nodeDeletionTimeout, }, }, }, @@ -369,7 +370,22 @@ func TestComputeControlPlane(t *testing.T) { var expectedReadinessGates []interface{} g.Expect(json.Unmarshal(jsonValue, &expectedReadinessGates)).ToNot(HaveOccurred()) - t.Run("Generates the ControlPlane from the template", func(t *testing.T) { + scheme := runtime.NewScheme() + _ = apiextensionsv1.AddToScheme(scheme) + crd := builder.GenericControlPlaneCRD.DeepCopy() + crd.Labels = map[string]string{ + // Set the contract label for v1beta1. + fmt.Sprintf("%s/%s", clusterv1.GroupVersion.Group, "v1beta1"): clusterv1.GroupVersionInfrastructure.Version, + } + clientWithV1Beta1ContractCRD := fake.NewClientBuilder().WithScheme(scheme).WithObjects(crd).Build() + crd = builder.GenericControlPlaneCRD.DeepCopy() + crd.Labels = map[string]string{ + // Set the contract label for v1beta2.
+ fmt.Sprintf("%s/%s", clusterv1.GroupVersion.Group, "v1beta2"): clusterv1.GroupVersionInfrastructure.Version, + } + clientWithV1Beta2ContractCRD := fake.NewClientBuilder().WithScheme(scheme).WithObjects(crd).Build() + + t.Run("Generates the ControlPlane from the template (v1beta1 contract)", func(t *testing.T) { g := NewWithT(t) blueprint := &scope.ClusterBlueprint{ @@ -384,7 +400,7 @@ func TestComputeControlPlane(t *testing.T) { scope := scope.New(cluster) scope.Blueprint = blueprint - obj, err := (&generator{}).computeControlPlane(ctx, scope, nil) + obj, err := (&generator{Client: clientWithV1Beta1ContractCRD}).computeControlPlane(ctx, scope, nil) g.Expect(err).ToNot(HaveOccurred()) g.Expect(obj).ToNot(BeNil()) @@ -401,15 +417,55 @@ func TestComputeControlPlane(t *testing.T) { assertNestedField(g, obj, version, contract.ControlPlane().Version().Path()...) assertNestedField(g, obj, int64(replicas), contract.ControlPlane().Replicas().Path()...) assertNestedField(g, obj, expectedReadinessGates, contract.ControlPlane().MachineTemplate().ReadinessGates().Path()...) - assertNestedField(g, obj, topologyDuration.String(), contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()...) - assertNestedField(g, obj, topologyDuration.String(), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()...) - assertNestedField(g, obj, topologyDuration.String(), contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()...) + assertNestedField(g, obj, (time.Duration(topologyDuration) * time.Second).String(), contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()...) + assertNestedField(g, obj, (time.Duration(topologyDuration) * time.Second).String(), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()...) + assertNestedField(g, obj, (time.Duration(topologyDuration) * time.Second).String(), contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()...) assertNestedFieldUnset(g, obj, contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()...) // Ensure no ownership is added to generated ControlPlane. g.Expect(obj.GetOwnerReferences()).To(BeEmpty()) }) - t.Run("Generates the ControlPlane from the template using ClusterClass defaults", func(t *testing.T) { + t.Run("Generates the ControlPlane from the template (v1beta2 contract)", func(t *testing.T) { + g := NewWithT(t) + + blueprint := &scope.ClusterBlueprint{ + Topology: cluster.Spec.Topology, + ClusterClass: clusterClass, + ControlPlane: &scope.ControlPlaneBlueprint{ + Template: controlPlaneTemplate, + }, + } + + // aggregating current cluster objects into ClusterState (simulating getCurrentState) + scope := scope.New(cluster) + scope.Blueprint = blueprint + + obj, err := (&generator{Client: clientWithV1Beta2ContractCRD}).computeControlPlane(ctx, scope, nil) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(obj).ToNot(BeNil()) + + assertTemplateToObject(g, assertTemplateInput{ + cluster: scope.Current.Cluster, + templateRef: blueprint.ClusterClass.Spec.ControlPlane.Ref, + template: blueprint.ControlPlane.Template, + currentRef: nil, + obj: obj, + labels: util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels), + annotations: util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations), + }) + + assertNestedField(g, obj, version, contract.ControlPlane().Version().Path()...) 
+ assertNestedField(g, obj, int64(replicas), contract.ControlPlane().Replicas().Path()...) + assertNestedField(g, obj, expectedReadinessGates, contract.ControlPlane().MachineTemplate().ReadinessGates().Path()...) + assertNestedField(g, obj, int64(topologyDuration), contract.ControlPlane().MachineTemplate().NodeDrainTimeoutSeconds().Path()...) + assertNestedField(g, obj, int64(topologyDuration), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeoutSeconds().Path()...) + assertNestedField(g, obj, int64(topologyDuration), contract.ControlPlane().MachineTemplate().NodeDeletionTimeoutSeconds().Path()...) + assertNestedFieldUnset(g, obj, contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()...) + + // Ensure no ownership is added to generated ControlPlane. + g.Expect(obj.GetOwnerReferences()).To(BeEmpty()) + }) + t.Run("Generates the ControlPlane from the template using ClusterClass defaults (v1beta1 contract)", func(t *testing.T) { g := NewWithT(t) cluster := &clusterv1.Cluster{ @@ -426,7 +482,7 @@ func TestComputeControlPlane(t *testing.T) { Annotations: map[string]string{"a2": ""}, }, Replicas: &replicas, - // no values for ReadinessGates, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout + // no values for ReadinessGates, NodeDrainTimeoutSeconds, NodeVolumeDetachTimeoutSeconds, NodeDeletionTimeoutSeconds }, }, }, @@ -444,15 +500,60 @@ func TestComputeControlPlane(t *testing.T) { scope := scope.New(cluster) scope.Blueprint = blueprint - obj, err := (&generator{}).computeControlPlane(ctx, scope, nil) + obj, err := (&generator{Client: clientWithV1Beta1ContractCRD}).computeControlPlane(ctx, scope, nil) g.Expect(err).ToNot(HaveOccurred()) g.Expect(obj).ToNot(BeNil()) // checking only values from CC defaults assertNestedField(g, obj, expectedClusterClassReadinessGates, contract.ControlPlane().MachineTemplate().ReadinessGates().Path()...) - assertNestedField(g, obj, clusterClassDuration.String(), contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()...) - assertNestedField(g, obj, clusterClassDuration.String(), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()...) - assertNestedField(g, obj, clusterClassDuration.String(), contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()...) + assertNestedField(g, obj, (time.Duration(clusterClassDuration) * time.Second).String(), contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()...) + assertNestedField(g, obj, (time.Duration(clusterClassDuration) * time.Second).String(), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()...) + assertNestedField(g, obj, (time.Duration(clusterClassDuration) * time.Second).String(), contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()...) 
+ }) + t.Run("Generates the ControlPlane from the template using ClusterClass defaults (v1beta2 contract)", func(t *testing.T) { + g := NewWithT(t) + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: metav1.NamespaceDefault, + }, + Spec: clusterv1.ClusterSpec{ + Topology: &clusterv1.Topology{ + Version: version, + ControlPlane: clusterv1.ControlPlaneTopology{ + Metadata: clusterv1.ObjectMeta{ + Labels: map[string]string{"l2": ""}, + Annotations: map[string]string{"a2": ""}, + }, + Replicas: &replicas, + // no values for ReadinessGates, NodeDrainTimeoutSeconds, NodeVolumeDetachTimeoutSeconds, NodeDeletionTimeoutSeconds + }, + }, + }, + } + + blueprint := &scope.ClusterBlueprint{ + Topology: cluster.Spec.Topology, + ClusterClass: clusterClass, + ControlPlane: &scope.ControlPlaneBlueprint{ + Template: controlPlaneTemplate, + }, + } + + // aggregating current cluster objects into ClusterState (simulating getCurrentState) + scope := scope.New(cluster) + scope.Blueprint = blueprint + + obj, err := (&generator{Client: clientWithV1Beta2ContractCRD}).computeControlPlane(ctx, scope, nil) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(obj).ToNot(BeNil()) + + // checking only values from CC defaults + assertNestedField(g, obj, expectedClusterClassReadinessGates, contract.ControlPlane().MachineTemplate().ReadinessGates().Path()...) + assertNestedField(g, obj, int64(clusterClassDuration), contract.ControlPlane().MachineTemplate().NodeDrainTimeoutSeconds().Path()...) + assertNestedField(g, obj, int64(clusterClassDuration), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeoutSeconds().Path()...) + assertNestedField(g, obj, int64(clusterClassDuration), contract.ControlPlane().MachineTemplate().NodeDeletionTimeoutSeconds().Path()...) 
}) t.Run("Skips setting replicas if required", func(t *testing.T) { g := NewWithT(t) @@ -473,7 +574,7 @@ func TestComputeControlPlane(t *testing.T) { scope := scope.New(clusterWithoutReplicas) scope.Blueprint = blueprint - obj, err := (&generator{}).computeControlPlane(ctx, scope, nil) + obj, err := (&generator{Client: clientWithV1Beta2ContractCRD}).computeControlPlane(ctx, scope, nil) g.Expect(err).ToNot(HaveOccurred()) g.Expect(obj).ToNot(BeNil()) @@ -512,7 +613,7 @@ func TestComputeControlPlane(t *testing.T) { scope := scope.New(clusterWithoutReadinessGates) scope.Blueprint = blueprint - obj, err := (&generator{}).computeControlPlane(ctx, scope, nil) + obj, err := (&generator{Client: clientWithV1Beta2ContractCRD}).computeControlPlane(ctx, scope, nil) g.Expect(err).ToNot(HaveOccurred()) g.Expect(obj).ToNot(BeNil()) @@ -543,7 +644,7 @@ func TestComputeControlPlane(t *testing.T) { s.Blueprint = blueprint s.Current.ControlPlane = &scope.ControlPlaneState{} - obj, err := (&generator{}).computeControlPlane(ctx, s, infrastructureMachineTemplate) + obj, err := (&generator{Client: clientWithV1Beta2ContractCRD}).computeControlPlane(ctx, s, infrastructureMachineTemplate) g.Expect(err).ToNot(HaveOccurred()) g.Expect(obj).ToNot(BeNil()) @@ -602,7 +703,7 @@ func TestComputeControlPlane(t *testing.T) { scope := scope.New(clusterWithControlPlaneRef) scope.Blueprint = blueprint - obj, err := (&generator{}).computeControlPlane(ctx, scope, nil) + obj, err := (&generator{Client: clientWithV1Beta2ContractCRD}).computeControlPlane(ctx, scope, nil) g.Expect(err).ToNot(HaveOccurred()) g.Expect(obj).ToNot(BeNil()) @@ -670,7 +771,7 @@ func TestComputeControlPlane(t *testing.T) { Object: tt.currentControlPlane, } - obj, err := (&generator{}).computeControlPlane(ctx, s, nil) + obj, err := (&generator{Client: clientWithV1Beta2ContractCRD}).computeControlPlane(ctx, s, nil) g.Expect(err).ToNot(HaveOccurred()) g.Expect(obj).NotTo(BeNil()) assertNestedField(g, obj, tt.expectedVersion, contract.ControlPlane().Version().Path()...) 
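A note on the two sets of assertions above: under the v1beta1 contract the timeout fields are `metav1.Duration`, which serializes to a duration string (hence the `.String()` comparisons), while under v1beta2 they are `*int32` seconds, which serialize as JSON numbers and are read back from the unstructured object as `int64` (hence the `int64(...)` comparisons). A small sketch of the difference, where the two struct types are hypothetical stand-ins and only the JSON tags matter:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Hypothetical miniature stand-ins for the v1beta1 and v1beta2
// machineTemplate shapes; only the timeout field is modeled.
type v1beta1Template struct {
	NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"`
}

type v1beta2Template struct {
	NodeDrainTimeoutSeconds *int32 `json:"nodeDrainTimeoutSeconds,omitempty"`
}

func main() {
	seconds := int32(10)

	oldJSON, _ := json.Marshal(v1beta1Template{
		NodeDrainTimeout: &metav1.Duration{Duration: time.Duration(seconds) * time.Second},
	})
	newJSON, _ := json.Marshal(v1beta2Template{NodeDrainTimeoutSeconds: &seconds})

	// metav1.Duration marshals as a duration string, the int32 as a number.
	fmt.Println(string(oldJSON)) // {"nodeDrainTimeout":"10s"}
	fmt.Println(string(newJSON)) // {"nodeDrainTimeoutSeconds":10}
}
```

This is why the generator has to branch on the contract version before calling `Set`: the same logical value must land in the ControlPlane object in two different wire representations.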
@@ -708,7 +809,7 @@ func TestComputeControlPlane(t *testing.T) { s.Current.ControlPlane.Object.SetOwnerReferences([]metav1.OwnerReference{*ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))}) s.Blueprint = blueprint - obj, err := (&generator{}).computeControlPlane(ctx, s, nil) + obj, err := (&generator{Client: clientWithV1Beta2ContractCRD}).computeControlPlane(ctx, s, nil) g.Expect(err).ToNot(HaveOccurred()) g.Expect(obj).ToNot(BeNil()) g.Expect(ownerrefs.HasOwnerReferenceFrom(obj, shim)).To(BeTrue()) @@ -1420,20 +1521,20 @@ func TestComputeMachineDeployment(t *testing.T) { unhealthyNodeConditions := []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionUnknown, - Timeout: metav1.Duration{Duration: 5 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionUnknown, + TimeoutSeconds: 5 * 60, }, { - Type: corev1.NodeReady, - Status: corev1.ConditionFalse, - Timeout: metav1.Duration{Duration: 5 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionFalse, + TimeoutSeconds: 5 * 60, }, } - nodeTimeoutDuration := &metav1.Duration{Duration: time.Duration(1)} + nodeTimeoutDuration := ptr.To(int32(1)) clusterClassFailureDomain := "A" - clusterClassDuration := metav1.Duration{Duration: 20 * time.Second} + clusterClassDuration := int32(20) var clusterClassMinReadySeconds int32 = 20 clusterClassStrategy := clusterv1.MachineDeploymentStrategy{ Type: clusterv1.OnDeleteMachineDeploymentStrategyType, @@ -1450,8 +1551,8 @@ func TestComputeMachineDeployment(t *testing.T) { WithInfrastructureTemplate(workerInfrastructureMachineTemplate). WithBootstrapTemplate(workerBootstrapTemplate). WithMachineHealthCheckClass(&clusterv1.MachineHealthCheckClass{ - UnhealthyNodeConditions: unhealthyNodeConditions, - NodeStartupTimeout: nodeTimeoutDuration, + UnhealthyNodeConditions: unhealthyNodeConditions, + NodeStartupTimeoutSeconds: nodeTimeoutDuration, }). WithReadinessGates(clusterClassReadinessGates). WithFailureDomain(&clusterClassFailureDomain). 
@@ -1491,10 +1592,8 @@ func TestComputeMachineDeployment(t *testing.T) { BootstrapTemplate: workerBootstrapTemplate, InfrastructureMachineTemplate: workerInfrastructureMachineTemplate, MachineHealthCheck: &clusterv1.MachineHealthCheckClass{ - UnhealthyNodeConditions: unhealthyNodeConditions, - NodeStartupTimeout: &metav1.Duration{ - Duration: time.Duration(1), - }, + UnhealthyNodeConditions: unhealthyNodeConditions, + NodeStartupTimeoutSeconds: ptr.To(int32(1)), }, }, }, @@ -1502,7 +1601,7 @@ func TestComputeMachineDeployment(t *testing.T) { replicas := int32(5) topologyFailureDomain := "B" - topologyDuration := metav1.Duration{Duration: 10 * time.Second} + topologyDuration := int32(10) var topologyMinReadySeconds int32 = 10 topologyStrategy := clusterv1.MachineDeploymentStrategy{ Type: clusterv1.RollingUpdateMachineDeploymentStrategyType, @@ -1528,16 +1627,16 @@ func TestComputeMachineDeployment(t *testing.T) { clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "", }, }, - Class: "linux-worker", - Name: "big-pool-of-machines", - Replicas: &replicas, - FailureDomain: &topologyFailureDomain, - ReadinessGates: readinessGates, - NodeDrainTimeout: &topologyDuration, - NodeVolumeDetachTimeout: &topologyDuration, - NodeDeletionTimeout: &topologyDuration, - MinReadySeconds: &topologyMinReadySeconds, - Strategy: &topologyStrategy, + Class: "linux-worker", + Name: "big-pool-of-machines", + Replicas: &replicas, + FailureDomain: &topologyFailureDomain, + ReadinessGates: readinessGates, + NodeDrainTimeoutSeconds: &topologyDuration, + NodeVolumeDetachTimeoutSeconds: &topologyDuration, + NodeDeletionTimeoutSeconds: &topologyDuration, + MinReadySeconds: &topologyMinReadySeconds, + Strategy: &topologyStrategy, } t.Run("Generates the machine deployment and the referenced templates", func(t *testing.T) { @@ -1569,9 +1668,9 @@ func TestComputeMachineDeployment(t *testing.T) { g.Expect(*actualMd.Spec.Strategy).To(BeComparableTo(topologyStrategy)) g.Expect(actualMd.Spec.Template.Spec.MinReadySeconds).To(HaveValue(Equal(topologyMinReadySeconds))) g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(topologyFailureDomain)) - g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeout).To(Equal(topologyDuration)) - g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(topologyDuration)) - g.Expect(*actualMd.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(topologyDuration)) + g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeoutSeconds).To(Equal(topologyDuration)) + g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds).To(Equal(topologyDuration)) + g.Expect(*actualMd.Spec.Template.Spec.NodeDeletionTimeoutSeconds).To(Equal(topologyDuration)) g.Expect(actualMd.Spec.Template.Spec.ReadinessGates).To(Equal(readinessGates)) g.Expect(actualMd.Spec.ClusterName).To(Equal("cluster1")) g.Expect(actualMd.Name).To(ContainSubstring("cluster1")) @@ -1614,7 +1713,7 @@ func TestComputeMachineDeployment(t *testing.T) { Class: "linux-worker", Name: "big-pool-of-machines", Replicas: &replicas, - // missing ReadinessGates, FailureDomain, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds, Strategy + // missing ReadinessGates, FailureDomain, NodeDrainTimeoutSeconds, NodeVolumeDetachTimeoutSeconds, NodeDeletionTimeoutSeconds, MinReadySeconds, Strategy } e := generator{} @@ -1628,9 +1727,9 @@ func TestComputeMachineDeployment(t *testing.T) { g.Expect(actualMd.Spec.Template.Spec.MinReadySeconds).To(HaveValue(Equal(clusterClassMinReadySeconds))) 
g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(clusterClassFailureDomain)) g.Expect(actualMd.Spec.Template.Spec.ReadinessGates).To(Equal(clusterClassReadinessGates)) - g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeout).To(Equal(clusterClassDuration)) - g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(clusterClassDuration)) - g.Expect(*actualMd.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(clusterClassDuration)) + g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeoutSeconds).To(Equal(clusterClassDuration)) + g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds).To(Equal(clusterClassDuration)) + g.Expect(*actualMd.Spec.Template.Spec.NodeDeletionTimeoutSeconds).To(Equal(clusterClassDuration)) }) t.Run("Skips setting readinessGates if not set in Cluster and ClusterClass", func(t *testing.T) { @@ -1651,10 +1750,8 @@ func TestComputeMachineDeployment(t *testing.T) { BootstrapTemplate: workerBootstrapTemplate, InfrastructureMachineTemplate: workerInfrastructureMachineTemplate, MachineHealthCheck: &clusterv1.MachineHealthCheckClass{ - UnhealthyNodeConditions: unhealthyNodeConditions, - NodeStartupTimeout: &metav1.Duration{ - Duration: time.Duration(1), - }, + UnhealthyNodeConditions: unhealthyNodeConditions, + NodeStartupTimeoutSeconds: ptr.To(int32(1)), }, }, }, @@ -1909,7 +2006,7 @@ func TestComputeMachineDeployment(t *testing.T) { }})) // Check that the NodeStartupTime is set as expected. - g.Expect(actual.MachineHealthCheck.Spec.NodeStartupTimeout).To(Equal(nodeTimeoutDuration)) + g.Expect(actual.MachineHealthCheck.Spec.NodeStartupTimeoutSeconds).To(Equal(nodeTimeoutDuration)) // Check that UnhealthyNodeConditions are set as expected. g.Expect(actual.MachineHealthCheck.Spec.UnhealthyNodeConditions).To(BeComparableTo(unhealthyNodeConditions)) @@ -1928,7 +2025,7 @@ func TestComputeMachinePool(t *testing.T) { labels := map[string]string{"fizzLabel": "buzz", "fooLabel": "bar"} annotations := map[string]string{"fizzAnnotation": "buzz", "fooAnnotation": "bar"} - clusterClassDuration := metav1.Duration{Duration: 20 * time.Second} + clusterClassDuration := int32(20) clusterClassFailureDomains := []string{"A", "B"} var clusterClassMinReadySeconds int32 = 20 mp1 := builder.MachinePoolClass("linux-worker"). 
@@ -1977,7 +2074,7 @@ func TestComputeMachinePool(t *testing.T) { replicas := int32(5) topologyFailureDomains := []string{"A", "B"} - topologyDuration := metav1.Duration{Duration: 10 * time.Second} + topologyDuration := int32(10) var topologyMinReadySeconds int32 = 10 mpTopology := clusterv1.MachinePoolTopology{ Metadata: clusterv1.ObjectMeta{ @@ -1993,14 +2090,14 @@ func TestComputeMachinePool(t *testing.T) { clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "", }, }, - Class: "linux-worker", - Name: "big-pool-of-machines", - Replicas: &replicas, - FailureDomains: topologyFailureDomains, - NodeDrainTimeout: &topologyDuration, - NodeVolumeDetachTimeout: &topologyDuration, - NodeDeletionTimeout: &topologyDuration, - MinReadySeconds: &topologyMinReadySeconds, + Class: "linux-worker", + Name: "big-pool-of-machines", + Replicas: &replicas, + FailureDomains: topologyFailureDomains, + NodeDrainTimeoutSeconds: &topologyDuration, + NodeVolumeDetachTimeoutSeconds: &topologyDuration, + NodeDeletionTimeoutSeconds: &topologyDuration, + MinReadySeconds: &topologyMinReadySeconds, } t.Run("Generates the machine pool and the referenced templates", func(t *testing.T) { @@ -2031,9 +2128,9 @@ func TestComputeMachinePool(t *testing.T) { g.Expect(*actualMp.Spec.Replicas).To(Equal(replicas)) g.Expect(actualMp.Spec.FailureDomains).To(Equal(topologyFailureDomains)) g.Expect(actualMp.Spec.Template.Spec.MinReadySeconds).To(HaveValue(Equal(topologyMinReadySeconds))) - g.Expect(*actualMp.Spec.Template.Spec.NodeDrainTimeout).To(Equal(topologyDuration)) - g.Expect(*actualMp.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(topologyDuration)) - g.Expect(*actualMp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(topologyDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeDrainTimeoutSeconds).To(Equal(topologyDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds).To(Equal(topologyDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeDeletionTimeoutSeconds).To(Equal(topologyDuration)) g.Expect(actualMp.Spec.ClusterName).To(Equal("cluster1")) g.Expect(actualMp.Name).To(ContainSubstring("cluster1")) g.Expect(actualMp.Name).To(ContainSubstring("big-pool-of-machines")) @@ -2070,7 +2167,7 @@ func TestComputeMachinePool(t *testing.T) { Class: "linux-worker", Name: "big-pool-of-machines", Replicas: &replicas, - // missing FailureDomain, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds, Strategy + // missing FailureDomain, NodeDrainTimeoutSeconds, NodeVolumeDetachTimeoutSeconds, NodeDeletionTimeoutSeconds, MinReadySeconds, Strategy } e := generator{} @@ -2082,9 +2179,9 @@ func TestComputeMachinePool(t *testing.T) { actualMp := actual.Object g.Expect(actualMp.Spec.FailureDomains).To(Equal(clusterClassFailureDomains)) g.Expect(actualMp.Spec.Template.Spec.MinReadySeconds).To(HaveValue(Equal(clusterClassMinReadySeconds))) - g.Expect(*actualMp.Spec.Template.Spec.NodeDrainTimeout).To(Equal(clusterClassDuration)) - g.Expect(*actualMp.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(clusterClassDuration)) - g.Expect(*actualMp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(clusterClassDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeDrainTimeoutSeconds).To(Equal(clusterClassDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds).To(Equal(clusterClassDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeDeletionTimeoutSeconds).To(Equal(clusterClassDuration)) }) t.Run("If there is already a machine pool, it preserves the object 
name and the reference names", func(t *testing.T) { @@ -3045,19 +3142,17 @@ func Test_computeMachineHealthCheck(t *testing.T) { mhcSpec := &clusterv1.MachineHealthCheckClass{ UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionUnknown, - Timeout: metav1.Duration{Duration: 5 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionUnknown, + TimeoutSeconds: 5 * 60, }, { - Type: corev1.NodeReady, - Status: corev1.ConditionFalse, - Timeout: metav1.Duration{Duration: 5 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionFalse, + TimeoutSeconds: 5 * 60, }, }, - NodeStartupTimeout: &metav1.Duration{ - Duration: time.Duration(1), - }, + NodeStartupTimeoutSeconds: ptr.To(int32(1)), } selector := &metav1.LabelSelector{MatchLabels: map[string]string{ "foo": "bar", @@ -3090,19 +3185,17 @@ func Test_computeMachineHealthCheck(t *testing.T) { MaxUnhealthy: &maxUnhealthyValue, UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionUnknown, - Timeout: metav1.Duration{Duration: 5 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionUnknown, + TimeoutSeconds: 5 * 60, }, { - Type: corev1.NodeReady, - Status: corev1.ConditionFalse, - Timeout: metav1.Duration{Duration: 5 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionFalse, + TimeoutSeconds: 5 * 60, }, }, - NodeStartupTimeout: &metav1.Duration{ - Duration: time.Duration(1), - }, + NodeStartupTimeoutSeconds: ptr.To(int32(1)), }, } diff --git a/exp/topology/scope/blueprint_test.go b/exp/topology/scope/blueprint_test.go index a2a531a3351e..f2a30d921c48 100644 --- a/exp/topology/scope/blueprint_test.go +++ b/exp/topology/scope/blueprint_test.go @@ -18,7 +18,6 @@ package scope import ( "testing" - "time" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -114,9 +113,9 @@ func TestIsControlPlaneMachineHealthCheckEnabled(t *testing.T) { MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionUnknown, - Timeout: metav1.Duration{Duration: 5 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionUnknown, + TimeoutSeconds: 5 * 60, }, }, }, @@ -138,9 +137,9 @@ func TestIsControlPlaneMachineHealthCheckEnabled(t *testing.T) { MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionUnknown, - Timeout: metav1.Duration{Duration: 5 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionUnknown, + TimeoutSeconds: 5 * 60, }, }, }, @@ -162,9 +161,9 @@ func TestIsControlPlaneMachineHealthCheckEnabled(t *testing.T) { MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionUnknown, - Timeout: metav1.Duration{Duration: 5 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionUnknown, + TimeoutSeconds: 5 * 60, }, }, }, @@ -187,9 +186,9 @@ func TestControlPlaneMachineHealthCheckClass(t *testing.T) { mhcInClusterClass := &clusterv1.MachineHealthCheckClass{ UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionFalse, - Timeout: metav1.Duration{Duration: 10 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionFalse, + TimeoutSeconds: 10 * 60, }, }, } @@ -198,9 +197,9 @@ func TestControlPlaneMachineHealthCheckClass(t *testing.T) { mhcInClusterTopology := &clusterv1.MachineHealthCheckClass{ UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionFalse, - Timeout: metav1.Duration{Duration: 20 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionFalse, + TimeoutSeconds: 20 * 60, }, }, MaxUnhealthy: &percent50, @@ -327,9 +326,9 @@ func TestIsMachineDeploymentMachineHealthCheckEnabled(t *testing.T) { MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionUnknown, - Timeout: metav1.Duration{Duration: 5 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionUnknown, + TimeoutSeconds: 5 * 60, }, }, }, @@ -351,9 +350,9 @@ func TestIsMachineDeploymentMachineHealthCheckEnabled(t *testing.T) { MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionUnknown, - Timeout: metav1.Duration{Duration: 5 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionUnknown, + TimeoutSeconds: 5 * 60, }, }, }, @@ -375,9 +374,9 @@ func TestIsMachineDeploymentMachineHealthCheckEnabled(t *testing.T) { MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionUnknown, - Timeout: metav1.Duration{Duration: 5 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionUnknown, + TimeoutSeconds: 5 * 60, }, }, }, @@ -399,9 +398,9 @@ func TestMachineDeploymentMachineHealthCheckClass(t *testing.T) { mhcInClusterClass := 
&clusterv1.MachineHealthCheckClass{ UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionFalse, - Timeout: metav1.Duration{Duration: 10 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionFalse, + TimeoutSeconds: 10 * 60, }, }, } @@ -410,9 +409,9 @@ func TestMachineDeploymentMachineHealthCheckClass(t *testing.T) { mhcInClusterTopology := &clusterv1.MachineHealthCheckClass{ UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionFalse, - Timeout: metav1.Duration{Duration: 20 * time.Minute}, + Type: corev1.NodeReady, + Status: corev1.ConditionFalse, + TimeoutSeconds: 20 * 60, }, }, MaxUnhealthy: &percent50, diff --git a/internal/api/bootstrap/kubeadm/v1alpha3/conversion.go b/internal/api/bootstrap/kubeadm/v1alpha3/conversion.go index 5b8e98f304da..f86567a2ed36 100644 --- a/internal/api/bootstrap/kubeadm/v1alpha3/conversion.go +++ b/internal/api/bootstrap/kubeadm/v1alpha3/conversion.go @@ -24,6 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/conversion" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" clusterv1alpha3 "sigs.k8s.io/cluster-api/internal/api/core/v1alpha3" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) @@ -153,7 +154,7 @@ func (src *KubeadmConfigSpec) ConvertTo(dst *bootstrapv1.KubeadmConfigSpec) { if dst.InitConfiguration.Timeouts == nil { dst.InitConfiguration.Timeouts = &bootstrapv1.Timeouts{} } - dst.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = bootstrapv1.ConvertToSeconds(src.ClusterConfiguration.APIServer.TimeoutForControlPlane) + dst.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = clusterv1.ConvertToSeconds(src.ClusterConfiguration.APIServer.TimeoutForControlPlane) initControlPlaneComponentHealthCheckSeconds = dst.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds } if (src.JoinConfiguration != nil && src.JoinConfiguration.Discovery.Timeout != nil) || initControlPlaneComponentHealthCheckSeconds != nil { @@ -165,7 +166,7 @@ func (src *KubeadmConfigSpec) ConvertTo(dst *bootstrapv1.KubeadmConfigSpec) { } dst.JoinConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = initControlPlaneComponentHealthCheckSeconds if src.JoinConfiguration != nil && src.JoinConfiguration.Discovery.Timeout != nil { - dst.JoinConfiguration.Timeouts.TLSBootstrapSeconds = bootstrapv1.ConvertToSeconds(src.JoinConfiguration.Discovery.Timeout) + dst.JoinConfiguration.Timeouts.TLSBootstrapSeconds = clusterv1.ConvertToSeconds(src.JoinConfiguration.Discovery.Timeout) } } @@ -214,7 +215,7 @@ func (dst *KubeadmConfigSpec) ConvertFrom(src *bootstrapv1.KubeadmConfigSpec) { if dst.ClusterConfiguration == nil { dst.ClusterConfiguration = &ClusterConfiguration{} } - dst.ClusterConfiguration.APIServer.TimeoutForControlPlane = bootstrapv1.ConvertFromSeconds(src.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds) + dst.ClusterConfiguration.APIServer.TimeoutForControlPlane = clusterv1.ConvertFromSeconds(src.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds) } if reflect.DeepEqual(dst.InitConfiguration, &InitConfiguration{}) { dst.InitConfiguration = nil @@ -223,7 +224,7 @@ func (dst *KubeadmConfigSpec) ConvertFrom(src *bootstrapv1.KubeadmConfigSpec) { if dst.JoinConfiguration == nil { dst.JoinConfiguration = &JoinConfiguration{} } - dst.JoinConfiguration.Discovery.Timeout = 
diff --git a/internal/api/bootstrap/kubeadm/v1alpha3/conversion_test.go b/internal/api/bootstrap/kubeadm/v1alpha3/conversion_test.go
index 32cd53893d65..4d8a8557aae4 100644
--- a/internal/api/bootstrap/kubeadm/v1alpha3/conversion_test.go
+++ b/internal/api/bootstrap/kubeadm/v1alpha3/conversion_test.go
@@ -65,6 +65,7 @@ func KubeadmConfigFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} {
 		spokeAPIServer,
 		spokeDiscovery,
 		hubKubeadmConfigSpec,
+		spokeBootstrapToken,
 	}
 }
@@ -79,6 +80,7 @@ func KubeadmConfigTemplateFuzzFuncs(_ runtimeserializer.CodecFactory) []interfac
 		spokeAPIServer,
 		spokeDiscovery,
 		hubKubeadmConfigSpec,
+		spokeBootstrapToken,
 	}
 }
@@ -173,6 +175,14 @@ func spokeAPIServer(in *APIServer, c randfill.Continue) {
 	}
 }

+func spokeBootstrapToken(in *BootstrapToken, c randfill.Continue) {
+	c.FillNoCustom(in)
+
+	if in.TTL != nil {
+		in.TTL = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second})
+	}
+}
+
 func spokeDiscovery(in *Discovery, c randfill.Continue) {
 	c.FillNoCustom(in)
diff --git a/internal/api/bootstrap/kubeadm/v1alpha3/upstream_conversion.go b/internal/api/bootstrap/kubeadm/v1alpha3/upstream_conversion.go
index 868bc6ec9615..744397e883dd 100644
--- a/internal/api/bootstrap/kubeadm/v1alpha3/upstream_conversion.go
+++ b/internal/api/bootstrap/kubeadm/v1alpha3/upstream_conversion.go
@@ -20,6 +20,7 @@ import (
 	apimachineryconversion "k8s.io/apimachinery/pkg/conversion"

 	bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )

 // Custom conversion from this API, kubeadm v1beta1, to the hub version, CABPK v1beta2.
@@ -37,7 +38,7 @@ func Convert_v1alpha3_JoinConfiguration_To_v1beta2_JoinConfiguration(in *JoinCon
 		if out.Timeouts == nil {
 			out.Timeouts = &bootstrapv1.Timeouts{}
 		}
-		out.Timeouts.TLSBootstrapSeconds = bootstrapv1.ConvertToSeconds(in.Discovery.Timeout)
+		out.Timeouts.TLSBootstrapSeconds = clusterv1.ConvertToSeconds(in.Discovery.Timeout)
 	}
 	return nil
 }
@@ -104,7 +105,7 @@ func Convert_v1beta2_JoinConfiguration_To_v1alpha3_JoinConfiguration(in *bootstr
 	}

 	if in.Timeouts != nil {
-		out.Discovery.Timeout = bootstrapv1.ConvertFromSeconds(in.Timeouts.TLSBootstrapSeconds)
+		out.Discovery.Timeout = clusterv1.ConvertFromSeconds(in.Timeouts.TLSBootstrapSeconds)
 	}
 	return nil
 }
diff --git a/internal/api/bootstrap/kubeadm/v1alpha3/zz_generated.conversion.go b/internal/api/bootstrap/kubeadm/v1alpha3/zz_generated.conversion.go
index 20d0bf38e6ba..ed8b08d6dd54 100644
--- a/internal/api/bootstrap/kubeadm/v1alpha3/zz_generated.conversion.go
+++ b/internal/api/bootstrap/kubeadm/v1alpha3/zz_generated.conversion.go
@@ -54,16 +54,6 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*BootstrapToken)(nil), (*v1beta2.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha3_BootstrapToken_To_v1beta2_BootstrapToken(a.(*BootstrapToken), b.(*v1beta2.BootstrapToken), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*v1beta2.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1beta2_BootstrapToken_To_v1alpha3_BootstrapToken(a.(*v1beta2.BootstrapToken), b.(*BootstrapToken), scope)
-	}); err != nil {
-		return err
-	}
 	if err := s.AddGeneratedConversionFunc((*BootstrapTokenDiscovery)(nil), (*v1beta2.BootstrapTokenDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1alpha3_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(a.(*BootstrapTokenDiscovery), b.(*v1beta2.BootstrapTokenDiscovery), scope)
 	}); err != nil {
@@ -294,6 +284,11 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddConversionFunc((*BootstrapToken)(nil), (*v1beta2.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha3_BootstrapToken_To_v1beta2_BootstrapToken(a.(*BootstrapToken), b.(*v1beta2.BootstrapToken), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddConversionFunc((*ClusterConfiguration)(nil), (*v1beta2.ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1alpha3_ClusterConfiguration_To_v1beta2_ClusterConfiguration(a.(*ClusterConfiguration), b.(*v1beta2.ClusterConfiguration), scope)
 	}); err != nil {
@@ -344,6 +339,11 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddConversionFunc((*v1beta2.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta2_BootstrapToken_To_v1alpha3_BootstrapToken(a.(*v1beta2.BootstrapToken), b.(*BootstrapToken), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddConversionFunc((*v1beta2.ControlPlaneComponent)(nil), (*ControlPlaneComponent)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1beta2_ControlPlaneComponent_To_v1alpha3_ControlPlaneComponent(a.(*v1beta2.ControlPlaneComponent), b.(*ControlPlaneComponent), scope)
 	}); err != nil {
@@ -449,33 +449,23 @@ func Convert_v1beta2_APIServer_To_v1alpha3_APIServer(in *v1beta2.APIServer, out
 func autoConvert_v1alpha3_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *v1beta2.BootstrapToken, s conversion.Scope) error {
 	out.Token = (*v1beta2.BootstrapTokenString)(unsafe.Pointer(in.Token))
 	out.Description = in.Description
-	out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL))
+	// WARNING: in.TTL requires manual conversion: does not exist in peer-type
 	out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires))
 	out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages))
 	out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
 	return nil
 }

-// Convert_v1alpha3_BootstrapToken_To_v1beta2_BootstrapToken is an autogenerated conversion function.
-func Convert_v1alpha3_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *v1beta2.BootstrapToken, s conversion.Scope) error {
-	return autoConvert_v1alpha3_BootstrapToken_To_v1beta2_BootstrapToken(in, out, s)
-}
-
 func autoConvert_v1beta2_BootstrapToken_To_v1alpha3_BootstrapToken(in *v1beta2.BootstrapToken, out *BootstrapToken, s conversion.Scope) error {
 	out.Token = (*BootstrapTokenString)(unsafe.Pointer(in.Token))
 	out.Description = in.Description
-	out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL))
+	// WARNING: in.TTLSeconds requires manual conversion: does not exist in peer-type
 	out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires))
 	out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages))
 	out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
 	return nil
 }

-// Convert_v1beta2_BootstrapToken_To_v1alpha3_BootstrapToken is an autogenerated conversion function.
-func Convert_v1beta2_BootstrapToken_To_v1alpha3_BootstrapToken(in *v1beta2.BootstrapToken, out *BootstrapToken, s conversion.Scope) error {
-	return autoConvert_v1beta2_BootstrapToken_To_v1alpha3_BootstrapToken(in, out, s)
-}
-
 func autoConvert_v1alpha3_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(in *BootstrapTokenDiscovery, out *v1beta2.BootstrapTokenDiscovery, s conversion.Scope) error {
 	out.Token = in.Token
 	out.APIServerEndpoint = in.APIServerEndpoint
@@ -882,7 +872,17 @@ func Convert_v1beta2_ImageMeta_To_v1alpha3_ImageMeta(in *v1beta2.ImageMeta, out
 }

 func autoConvert_v1alpha3_InitConfiguration_To_v1beta2_InitConfiguration(in *InitConfiguration, out *v1beta2.InitConfiguration, s conversion.Scope) error {
-	out.BootstrapTokens = *(*[]v1beta2.BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens))
+	if in.BootstrapTokens != nil {
+		in, out := &in.BootstrapTokens, &out.BootstrapTokens
+		*out = make([]v1beta2.BootstrapToken, len(*in))
+		for i := range *in {
+			if err := Convert_v1alpha3_BootstrapToken_To_v1beta2_BootstrapToken(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.BootstrapTokens = nil
+	}
 	if err := Convert_v1alpha3_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil {
 		return err
 	}
@@ -898,7 +898,17 @@ func Convert_v1alpha3_InitConfiguration_To_v1beta2_InitConfiguration(in *InitCon
 }

 func autoConvert_v1beta2_InitConfiguration_To_v1alpha3_InitConfiguration(in *v1beta2.InitConfiguration, out *InitConfiguration, s conversion.Scope) error {
-	out.BootstrapTokens = *(*[]BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens))
+	if in.BootstrapTokens != nil {
+		in, out := &in.BootstrapTokens, &out.BootstrapTokens
+		*out = make([]BootstrapToken, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta2_BootstrapToken_To_v1alpha3_BootstrapToken(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.BootstrapTokens = nil
+	}
 	if err := Convert_v1beta2_NodeRegistrationOptions_To_v1alpha3_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil {
 		return err
 	}
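Worth calling out in the generated code above: once the spoke keeps TTL (*metav1.Duration) while the hub keeps TTLSeconds (*int32), the two BootstrapToken structs are no longer layout-identical, so conversion-gen can no longer emit the one-shot slice cast through unsafe.Pointer and falls back to element-wise conversion. A small self-contained sketch of that fallback shape; convertSlice is hypothetical and only illustrates the rule, it is not part of the diff:

// An unsafe slice cast is sound only when source and destination element
// types share an identical memory layout; once field types diverge, each
// element must be converted explicitly.
func convertSlice[S, H any](in []S, convert func(*S, *H) error) ([]H, error) {
	if in == nil {
		return nil, nil // preserve nil-ness, as the generated code does
	}
	out := make([]H, len(in))
	for i := range in {
		if err := convert(&in[i], &out[i]); err != nil {
			return nil, err
		}
	}
	return out, nil
}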
diff --git a/internal/api/bootstrap/kubeadm/v1alpha4/conversion.go b/internal/api/bootstrap/kubeadm/v1alpha4/conversion.go
index 9692b5b3d754..b4d1304376a3 100644
--- a/internal/api/bootstrap/kubeadm/v1alpha4/conversion.go
+++ b/internal/api/bootstrap/kubeadm/v1alpha4/conversion.go
@@ -24,6 +24,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/conversion"

 	bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	clusterv1alpha4 "sigs.k8s.io/cluster-api/internal/api/core/v1alpha4"
 	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
 )
@@ -151,7 +152,7 @@ func (src *KubeadmConfigSpec) ConvertTo(dst *bootstrapv1.KubeadmConfigSpec) {
 		if dst.InitConfiguration.Timeouts == nil {
 			dst.InitConfiguration.Timeouts = &bootstrapv1.Timeouts{}
 		}
-		dst.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = bootstrapv1.ConvertToSeconds(src.ClusterConfiguration.APIServer.TimeoutForControlPlane)
+		dst.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = clusterv1.ConvertToSeconds(src.ClusterConfiguration.APIServer.TimeoutForControlPlane)
 		initControlPlaneComponentHealthCheckSeconds = dst.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds
 	}
 	if (src.JoinConfiguration != nil && src.JoinConfiguration.Discovery.Timeout != nil) || initControlPlaneComponentHealthCheckSeconds != nil {
@@ -163,7 +164,7 @@ func (src *KubeadmConfigSpec) ConvertTo(dst *bootstrapv1.KubeadmConfigSpec) {
 		}
 		dst.JoinConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds = initControlPlaneComponentHealthCheckSeconds
 		if src.JoinConfiguration != nil && src.JoinConfiguration.Discovery.Timeout != nil {
-			dst.JoinConfiguration.Timeouts.TLSBootstrapSeconds = bootstrapv1.ConvertToSeconds(src.JoinConfiguration.Discovery.Timeout)
+			dst.JoinConfiguration.Timeouts.TLSBootstrapSeconds = clusterv1.ConvertToSeconds(src.JoinConfiguration.Discovery.Timeout)
 		}
 	}
@@ -212,7 +213,7 @@ func (dst *KubeadmConfigSpec) ConvertFrom(src *bootstrapv1.KubeadmConfigSpec) {
 		if dst.ClusterConfiguration == nil {
 			dst.ClusterConfiguration = &ClusterConfiguration{}
 		}
-		dst.ClusterConfiguration.APIServer.TimeoutForControlPlane = bootstrapv1.ConvertFromSeconds(src.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds)
+		dst.ClusterConfiguration.APIServer.TimeoutForControlPlane = clusterv1.ConvertFromSeconds(src.InitConfiguration.Timeouts.ControlPlaneComponentHealthCheckSeconds)
 	}
 	if reflect.DeepEqual(dst.InitConfiguration, &InitConfiguration{}) {
 		dst.InitConfiguration = nil
@@ -221,7 +222,7 @@ func (dst *KubeadmConfigSpec) ConvertFrom(src *bootstrapv1.KubeadmConfigSpec) {
 		if dst.JoinConfiguration == nil {
 			dst.JoinConfiguration = &JoinConfiguration{}
 		}
-		dst.JoinConfiguration.Discovery.Timeout = bootstrapv1.ConvertFromSeconds(src.JoinConfiguration.Timeouts.TLSBootstrapSeconds)
+		dst.JoinConfiguration.Discovery.Timeout = clusterv1.ConvertFromSeconds(src.JoinConfiguration.Timeouts.TLSBootstrapSeconds)
 	}
 	if reflect.DeepEqual(dst.JoinConfiguration, &JoinConfiguration{}) {
 		dst.JoinConfiguration = nil
@@ -336,6 +337,14 @@ func Convert_v1beta2_KubeadmConfigStatus_To_v1alpha4_KubeadmConfigStatus(in *boo
 	return autoConvert_v1beta2_KubeadmConfigStatus_To_v1alpha4_KubeadmConfigStatus(in, out, s)
 }

+func Convert_v1beta2_BootstrapToken_To_v1alpha4_BootstrapToken(in *bootstrapv1.BootstrapToken, out *BootstrapToken, s apimachineryconversion.Scope) error {
+	if err := autoConvert_v1beta2_BootstrapToken_To_v1alpha4_BootstrapToken(in, out, s); err != nil {
+		return err
+	}
+	out.TTL = clusterv1.ConvertFromSeconds(in.TTLSeconds)
+	return nil
+}
+
 func Convert_v1alpha4_KubeadmConfigStatus_To_v1beta2_KubeadmConfigStatus(in *KubeadmConfigStatus, out *bootstrapv1.KubeadmConfigStatus, s apimachineryconversion.Scope) error {
 	return autoConvert_v1alpha4_KubeadmConfigStatus_To_v1beta2_KubeadmConfigStatus(in, out, s)
 }
@@ -365,6 +374,14 @@ func Convert_v1alpha4_Discovery_To_v1beta2_Discovery(in *Discovery, out *bootstr
 	return autoConvert_v1alpha4_Discovery_To_v1beta2_Discovery(in, out, s)
 }

+func Convert_v1alpha4_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *bootstrapv1.BootstrapToken, s apimachineryconversion.Scope) error {
+	if err := autoConvert_v1alpha4_BootstrapToken_To_v1beta2_BootstrapToken(in, out, s); err != nil {
+		return err
+	}
+	out.TTLSeconds = clusterv1.ConvertToSeconds(in.TTL)
+	return nil
+}
+
 // Implement local conversion func because conversion-gen is not aware of conversion func in other packages (see https://github.com/kubernetes/code-generator/issues/94)
 func Convert_v1_Condition_To_v1alpha4_Condition(in *metav1.Condition, out *clusterv1alpha4.Condition, s apimachineryconversion.Scope) error {
diff --git a/internal/api/bootstrap/kubeadm/v1alpha4/conversion_test.go b/internal/api/bootstrap/kubeadm/v1alpha4/conversion_test.go
index f7a65fc7631a..14ea5bd8bfa3 100644
--- a/internal/api/bootstrap/kubeadm/v1alpha4/conversion_test.go
+++ b/internal/api/bootstrap/kubeadm/v1alpha4/conversion_test.go
@@ -63,6 +63,7 @@ func KubeadmConfigFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} {
 		spokeAPIServer,
 		spokeDiscovery,
 		hubKubeadmConfigSpec,
+		spokeBootstrapToken,
 	}
 }
@@ -75,6 +76,7 @@ func KubeadmConfigTemplateFuzzFuncs(_ runtimeserializer.CodecFactory) []interfac
 		spokeAPIServer,
 		spokeDiscovery,
 		hubKubeadmConfigSpec,
+		spokeBootstrapToken,
 	}
 }
@@ -152,6 +154,14 @@ func spokeAPIServer(in *APIServer, c randfill.Continue) {
 	}
 }

+func spokeBootstrapToken(in *BootstrapToken, c randfill.Continue) {
+	c.FillNoCustom(in)
+
+	if in.TTL != nil {
+		in.TTL = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second})
+	}
+}
+
 func spokeDiscovery(in *Discovery, c randfill.Continue) {
 	c.FillNoCustom(in)
diff --git a/internal/api/bootstrap/kubeadm/v1alpha4/zz_generated.conversion.go b/internal/api/bootstrap/kubeadm/v1alpha4/zz_generated.conversion.go
index d1ad1a0b5c74..a327efe0f338 100644
--- a/internal/api/bootstrap/kubeadm/v1alpha4/zz_generated.conversion.go
+++ b/internal/api/bootstrap/kubeadm/v1alpha4/zz_generated.conversion.go
@@ -54,16 +54,6 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*BootstrapToken)(nil), (*v1beta2.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha4_BootstrapToken_To_v1beta2_BootstrapToken(a.(*BootstrapToken), b.(*v1beta2.BootstrapToken), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*v1beta2.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1beta2_BootstrapToken_To_v1alpha4_BootstrapToken(a.(*v1beta2.BootstrapToken), b.(*BootstrapToken), scope)
-	}); err != nil {
-		return err
-	}
 	if err := s.AddGeneratedConversionFunc((*BootstrapTokenDiscovery)(nil), (*v1beta2.BootstrapTokenDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1alpha4_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(a.(*BootstrapTokenDiscovery), b.(*v1beta2.BootstrapTokenDiscovery), scope)
 	}); err != nil {
@@ -304,6 +294,11 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddConversionFunc((*BootstrapToken)(nil), (*v1beta2.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha4_BootstrapToken_To_v1beta2_BootstrapToken(a.(*BootstrapToken), b.(*v1beta2.BootstrapToken), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddConversionFunc((*ClusterConfiguration)(nil), (*v1beta2.ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1alpha4_ClusterConfiguration_To_v1beta2_ClusterConfiguration(a.(*ClusterConfiguration), b.(*v1beta2.ClusterConfiguration), scope)
 	}); err != nil {
@@ -344,6 +339,11 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddConversionFunc((*v1beta2.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta2_BootstrapToken_To_v1alpha4_BootstrapToken(a.(*v1beta2.BootstrapToken), b.(*BootstrapToken), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddConversionFunc((*v1beta2.ControlPlaneComponent)(nil), (*ControlPlaneComponent)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1beta2_ControlPlaneComponent_To_v1alpha4_ControlPlaneComponent(a.(*v1beta2.ControlPlaneComponent), b.(*ControlPlaneComponent), scope)
 	}); err != nil {
@@ -449,33 +449,23 @@ func Convert_v1beta2_APIServer_To_v1alpha4_APIServer(in *v1beta2.APIServer, out
 func autoConvert_v1alpha4_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *v1beta2.BootstrapToken, s conversion.Scope) error {
 	out.Token = (*v1beta2.BootstrapTokenString)(unsafe.Pointer(in.Token))
 	out.Description = in.Description
-	out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL))
+	// WARNING: in.TTL requires manual conversion: does not exist in peer-type
 	out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires))
 	out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages))
 	out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
 	return nil
 }

-// Convert_v1alpha4_BootstrapToken_To_v1beta2_BootstrapToken is an autogenerated conversion function.
-func Convert_v1alpha4_BootstrapToken_To_v1beta2_BootstrapToken(in *BootstrapToken, out *v1beta2.BootstrapToken, s conversion.Scope) error {
-	return autoConvert_v1alpha4_BootstrapToken_To_v1beta2_BootstrapToken(in, out, s)
-}
-
 func autoConvert_v1beta2_BootstrapToken_To_v1alpha4_BootstrapToken(in *v1beta2.BootstrapToken, out *BootstrapToken, s conversion.Scope) error {
 	out.Token = (*BootstrapTokenString)(unsafe.Pointer(in.Token))
 	out.Description = in.Description
-	out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL))
+	// WARNING: in.TTLSeconds requires manual conversion: does not exist in peer-type
 	out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires))
 	out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages))
 	out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
 	return nil
 }

-// Convert_v1beta2_BootstrapToken_To_v1alpha4_BootstrapToken is an autogenerated conversion function.
-func Convert_v1beta2_BootstrapToken_To_v1alpha4_BootstrapToken(in *v1beta2.BootstrapToken, out *BootstrapToken, s conversion.Scope) error {
-	return autoConvert_v1beta2_BootstrapToken_To_v1alpha4_BootstrapToken(in, out, s)
-}
-
 func autoConvert_v1alpha4_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(in *BootstrapTokenDiscovery, out *v1beta2.BootstrapTokenDiscovery, s conversion.Scope) error {
 	out.Token = in.Token
 	out.APIServerEndpoint = in.APIServerEndpoint
@@ -885,7 +875,17 @@ func Convert_v1beta2_ImageMeta_To_v1alpha4_ImageMeta(in *v1beta2.ImageMeta, out
 }

 func autoConvert_v1alpha4_InitConfiguration_To_v1beta2_InitConfiguration(in *InitConfiguration, out *v1beta2.InitConfiguration, s conversion.Scope) error {
-	out.BootstrapTokens = *(*[]v1beta2.BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens))
+	if in.BootstrapTokens != nil {
+		in, out := &in.BootstrapTokens, &out.BootstrapTokens
+		*out = make([]v1beta2.BootstrapToken, len(*in))
+		for i := range *in {
+			if err := Convert_v1alpha4_BootstrapToken_To_v1beta2_BootstrapToken(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.BootstrapTokens = nil
+	}
 	if err := Convert_v1alpha4_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil {
 		return err
 	}
@@ -901,7 +901,17 @@ func Convert_v1alpha4_InitConfiguration_To_v1beta2_InitConfiguration(in *InitCon
 }

 func autoConvert_v1beta2_InitConfiguration_To_v1alpha4_InitConfiguration(in *v1beta2.InitConfiguration, out *InitConfiguration, s conversion.Scope) error {
-	out.BootstrapTokens = *(*[]BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens))
+	if in.BootstrapTokens != nil {
+		in, out := &in.BootstrapTokens, &out.BootstrapTokens
+		*out = make([]BootstrapToken, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta2_BootstrapToken_To_v1alpha4_BootstrapToken(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.BootstrapTokens = nil
+	}
 	if err := Convert_v1beta2_NodeRegistrationOptions_To_v1alpha4_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil {
 		return err
 	}
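The same re-registration pattern repeats in both generated files above: the BootstrapToken pair drops out of the AddGeneratedConversionFunc block and reappears under AddConversionFunc once a hand-written conversion exists. As far as I understand the conversion-gen convention, generated functions are registered as replaceable defaults, while AddConversionFunc registers the manual override the scheme actually uses. A condensed sketch of the override registration, reusing names from this diff (scheme setup elided, so treat it as illustrative):

// s is the *runtime.Scheme passed to RegisterConversions.
if err := s.AddConversionFunc((*BootstrapToken)(nil), (*v1beta2.BootstrapToken)(nil),
	func(a, b interface{}, scope conversion.Scope) error {
		// Delegates to the hand-written func, which wraps the autoConvert
		// and then maps TTL <-> TTLSeconds explicitly.
		return Convert_v1alpha4_BootstrapToken_To_v1beta2_BootstrapToken(
			a.(*BootstrapToken), b.(*v1beta2.BootstrapToken), scope)
	}); err != nil {
	return err
}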
diff --git a/internal/api/controlplane/kubeadm/v1alpha3/conversion.go b/internal/api/controlplane/kubeadm/v1alpha3/conversion.go
index 0a5866e3d7a5..e892e5304292 100644
--- a/internal/api/controlplane/kubeadm/v1alpha3/conversion.go
+++ b/internal/api/controlplane/kubeadm/v1alpha3/conversion.go
@@ -23,6 +23,7 @@ import (
 	bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2"
 	controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	bootstrapv1alpha3 "sigs.k8s.io/cluster-api/internal/api/bootstrap/kubeadm/v1alpha3"
 	clusterv1alpha3 "sigs.k8s.io/cluster-api/internal/api/core/v1alpha3"
 	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
@@ -68,8 +69,8 @@ func (src *KubeadmControlPlane) ConvertTo(dstRaw conversion.Hub) error {
 	if ok {
 		dst.Spec.MachineTemplate.ObjectMeta = restored.Spec.MachineTemplate.ObjectMeta
 		dst.Spec.MachineTemplate.ReadinessGates = restored.Spec.MachineTemplate.ReadinessGates
-		dst.Spec.MachineTemplate.NodeDeletionTimeout = restored.Spec.MachineTemplate.NodeDeletionTimeout
-		dst.Spec.MachineTemplate.NodeVolumeDetachTimeout = restored.Spec.MachineTemplate.NodeVolumeDetachTimeout
+		dst.Spec.MachineTemplate.NodeDeletionTimeoutSeconds = restored.Spec.MachineTemplate.NodeDeletionTimeoutSeconds
+		dst.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds = restored.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds
 		dst.Spec.RolloutBefore = restored.Spec.RolloutBefore

 		if restored.Spec.RemediationStrategy != nil {
@@ -141,7 +142,7 @@ func (dst *KubeadmControlPlane) ConvertFrom(srcRaw conversion.Hub) error {
 func Convert_v1beta2_KubeadmControlPlaneSpec_To_v1alpha3_KubeadmControlPlaneSpec(in *controlplanev1.KubeadmControlPlaneSpec, out *KubeadmControlPlaneSpec, s apimachineryconversion.Scope) error {
 	out.UpgradeAfter = in.RolloutAfter
 	out.InfrastructureTemplate = in.MachineTemplate.InfrastructureRef
-	out.NodeDrainTimeout = in.MachineTemplate.NodeDrainTimeout
+	out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.MachineTemplate.NodeDrainTimeoutSeconds)
 	return autoConvert_v1beta2_KubeadmControlPlaneSpec_To_v1alpha3_KubeadmControlPlaneSpec(in, out, s)
 }
@@ -154,7 +155,7 @@ func Convert_v1beta2_KubeadmControlPlaneStatus_To_v1alpha3_KubeadmControlPlaneSt
 func Convert_v1alpha3_KubeadmControlPlaneSpec_To_v1beta2_KubeadmControlPlaneSpec(in *KubeadmControlPlaneSpec, out *controlplanev1.KubeadmControlPlaneSpec, s apimachineryconversion.Scope) error {
 	out.RolloutAfter = in.UpgradeAfter
 	out.MachineTemplate.InfrastructureRef = in.InfrastructureTemplate
-	out.MachineTemplate.NodeDrainTimeout = in.NodeDrainTimeout
+	out.MachineTemplate.NodeDrainTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDrainTimeout)
 	return autoConvert_v1alpha3_KubeadmControlPlaneSpec_To_v1beta2_KubeadmControlPlaneSpec(in, out, s)
 }
diff --git a/internal/api/controlplane/kubeadm/v1alpha3/conversion_test.go b/internal/api/controlplane/kubeadm/v1alpha3/conversion_test.go
index ad2a888b4c5e..998d193b5242 100644
--- a/internal/api/controlplane/kubeadm/v1alpha3/conversion_test.go
+++ b/internal/api/controlplane/kubeadm/v1alpha3/conversion_test.go
@@ -32,7 +32,6 @@ import (
 	bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2"
 	controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2"
 	bootstrapv1alpha3 "sigs.k8s.io/cluster-api/internal/api/bootstrap/kubeadm/v1alpha3"
-	bootstrapv1alpha4 "sigs.k8s.io/cluster-api/internal/api/bootstrap/kubeadm/v1alpha4"
 	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
 )
@@ -54,15 +53,16 @@ func TestFuzzyConversion(t *testing.T) {
 func KubeadmControlPlaneFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} {
 	return []interface{}{
 		hubKubeadmControlPlaneStatus,
+		spokeKubeadmControlPlaneSpec,
 		spokeKubeadmControlPlaneStatus,
 		spokeDNS,
 		spokeKubeadmClusterConfiguration,
 		hubBootstrapTokenString,
-		spokeBootstrapTokenString,
 		spokeKubeadmConfigSpec,
 		spokeAPIServer,
 		spokeDiscovery,
 		hubKubeadmConfigSpec,
+		spokeBootstrapToken,
 	}
 }
@@ -109,6 +109,14 @@ func hubKubeadmControlPlaneStatus(in *controlplanev1.KubeadmControlPlaneStatus,
 	}
 }

+func spokeKubeadmControlPlaneSpec(in *KubeadmControlPlaneSpec, c randfill.Continue) {
+	c.FillNoCustom(in)
+
+	if in.NodeDrainTimeout != nil {
+		in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second})
+	}
+}
+
 func spokeKubeadmControlPlaneStatus(in *KubeadmControlPlaneStatus, c randfill.Continue) {
 	c.FillNoCustom(in)
@@ -121,11 +129,6 @@ func hubBootstrapTokenString(in *bootstrapv1.BootstrapTokenString, _ randfill.Co
 	in.Secret = fakeSecret
 }

-func spokeBootstrapTokenString(in *bootstrapv1alpha4.BootstrapTokenString, _ randfill.Continue) {
-	in.ID = fakeID
-	in.Secret = fakeSecret
-}
-
 func spokeDNS(obj *bootstrapv1alpha3.DNS, c randfill.Continue) {
 	c.FillNoCustom(obj)
@@ -163,6 +166,14 @@ func spokeAPIServer(in *bootstrapv1alpha3.APIServer, c randfill.Continue) {
 	}
 }

+func spokeBootstrapToken(in *bootstrapv1alpha3.BootstrapToken, c randfill.Continue) {
+	c.FillNoCustom(in)
+
+	if in.TTL != nil {
+		in.TTL = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second})
+	}
+}
+
 func spokeDiscovery(in *bootstrapv1alpha3.Discovery, c randfill.Continue) {
 	c.FillNoCustom(in)
diff --git a/internal/api/controlplane/kubeadm/v1alpha4/conversion.go b/internal/api/controlplane/kubeadm/v1alpha4/conversion.go
index 8e8073c9b098..d1c0a4d544ca 100644
--- a/internal/api/controlplane/kubeadm/v1alpha4/conversion.go
+++ b/internal/api/controlplane/kubeadm/v1alpha4/conversion.go
@@ -68,8 +68,8 @@ func (src *KubeadmControlPlane) ConvertTo(dstRaw conversion.Hub) error {
 	}
 	if ok {
 		dst.Spec.MachineTemplate.ReadinessGates = restored.Spec.MachineTemplate.ReadinessGates
-		dst.Spec.MachineTemplate.NodeDeletionTimeout = restored.Spec.MachineTemplate.NodeDeletionTimeout
-		dst.Spec.MachineTemplate.NodeVolumeDetachTimeout = restored.Spec.MachineTemplate.NodeVolumeDetachTimeout
+		dst.Spec.MachineTemplate.NodeDeletionTimeoutSeconds = restored.Spec.MachineTemplate.NodeDeletionTimeoutSeconds
+		dst.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds = restored.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds
 		dst.Spec.RolloutBefore = restored.Spec.RolloutBefore

 		if restored.Spec.RemediationStrategy != nil {
@@ -160,8 +160,8 @@ func (src *KubeadmControlPlaneTemplate) ConvertTo(dstRaw conversion.Hub) error {
 		if dst.Spec.Template.Spec.MachineTemplate == nil {
 			dst.Spec.Template.Spec.MachineTemplate = restored.Spec.Template.Spec.MachineTemplate
 		} else if restored.Spec.Template.Spec.MachineTemplate != nil {
-			dst.Spec.Template.Spec.MachineTemplate.NodeDeletionTimeout = restored.Spec.Template.Spec.MachineTemplate.NodeDeletionTimeout
-			dst.Spec.Template.Spec.MachineTemplate.NodeVolumeDetachTimeout = restored.Spec.Template.Spec.MachineTemplate.NodeVolumeDetachTimeout
+			dst.Spec.Template.Spec.MachineTemplate.NodeDeletionTimeoutSeconds = restored.Spec.Template.Spec.MachineTemplate.NodeDeletionTimeoutSeconds
+			dst.Spec.Template.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds = restored.Spec.Template.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds
 		}

 		dst.Spec.Template.Spec.RolloutBefore = restored.Spec.Template.Spec.RolloutBefore
@@ -197,7 +197,7 @@ func (dst *KubeadmControlPlaneTemplate) ConvertFrom(srcRaw conversion.Hub) error
 func Convert_v1alpha4_KubeadmControlPlaneSpec_To_v1beta2_KubeadmControlPlaneTemplateResourceSpec(in *KubeadmControlPlaneSpec, out *controlplanev1.KubeadmControlPlaneTemplateResourceSpec, s apimachineryconversion.Scope) error {
 	out.MachineTemplate = &controlplanev1.KubeadmControlPlaneTemplateMachineTemplate{
-		NodeDrainTimeout: in.MachineTemplate.NodeDrainTimeout,
+		NodeDrainTimeoutSeconds: clusterv1.ConvertToSeconds(in.MachineTemplate.NodeDrainTimeout),
 	}

 	if err := bootstrapv1alpha4.Convert_v1alpha4_KubeadmConfigSpec_To_v1beta2_KubeadmConfigSpec(&in.KubeadmConfigSpec, &out.KubeadmConfigSpec, s); err != nil {
@@ -225,7 +225,7 @@ func Convert_v1alpha4_KubeadmControlPlaneSpec_To_v1beta2_KubeadmControlPlaneTemp
 func Convert_v1beta2_KubeadmControlPlaneTemplateResourceSpec_To_v1alpha4_KubeadmControlPlaneSpec(in *controlplanev1.KubeadmControlPlaneTemplateResourceSpec, out *KubeadmControlPlaneSpec, s apimachineryconversion.Scope) error {
 	if in.MachineTemplate != nil {
-		out.MachineTemplate.NodeDrainTimeout = in.MachineTemplate.NodeDrainTimeout
+		out.MachineTemplate.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.MachineTemplate.NodeDrainTimeoutSeconds)
 	}

 	if err := bootstrapv1alpha4.Convert_v1beta2_KubeadmConfigSpec_To_v1alpha4_KubeadmConfigSpec(&in.KubeadmConfigSpec, &out.KubeadmConfigSpec, s); err != nil {
@@ -253,7 +253,11 @@ func Convert_v1beta2_KubeadmControlPlaneTemplateResourceSpec_To_v1alpha4_Kubeadm
 func Convert_v1beta2_KubeadmControlPlaneMachineTemplate_To_v1alpha4_KubeadmControlPlaneMachineTemplate(in *controlplanev1.KubeadmControlPlaneMachineTemplate, out *KubeadmControlPlaneMachineTemplate, s apimachineryconversion.Scope) error {
 	// .NodeDrainTimeout was added in v1beta1.
-	return autoConvert_v1beta2_KubeadmControlPlaneMachineTemplate_To_v1alpha4_KubeadmControlPlaneMachineTemplate(in, out, s)
+	if err := autoConvert_v1beta2_KubeadmControlPlaneMachineTemplate_To_v1alpha4_KubeadmControlPlaneMachineTemplate(in, out, s); err != nil {
+		return err
+	}
+	out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.NodeDrainTimeoutSeconds)
+	return nil
 }

 func Convert_v1beta2_KubeadmControlPlaneSpec_To_v1alpha4_KubeadmControlPlaneSpec(in *controlplanev1.KubeadmControlPlaneSpec, out *KubeadmControlPlaneSpec, scope apimachineryconversion.Scope) error {
@@ -273,6 +277,14 @@ func Convert_v1beta2_KubeadmControlPlaneTemplateResource_To_v1alpha4_KubeadmCont
 	return autoConvert_v1beta2_KubeadmControlPlaneTemplateResource_To_v1alpha4_KubeadmControlPlaneTemplateResource(in, out, scope)
 }

+func Convert_v1alpha4_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate(in *KubeadmControlPlaneMachineTemplate, out *controlplanev1.KubeadmControlPlaneMachineTemplate, s apimachineryconversion.Scope) error {
+	if err := autoConvert_v1alpha4_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate(in, out, s); err != nil {
+		return err
+	}
+	out.NodeDrainTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDrainTimeout)
+	return nil
+}
+
 func Convert_v1alpha4_KubeadmControlPlaneStatus_To_v1beta2_KubeadmControlPlaneStatus(in *KubeadmControlPlaneStatus, out *controlplanev1.KubeadmControlPlaneStatus, scope apimachineryconversion.Scope) error {
 	return autoConvert_v1alpha4_KubeadmControlPlaneStatus_To_v1beta2_KubeadmControlPlaneStatus(in, out, scope)
 }
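The `restored` variables in the ConvertTo funcs above come from Cluster API's round-trip annotation pattern: ConvertFrom stashes the full hub object in an annotation so that hub-only fields (here, the new *Seconds timeouts) survive a hub -> spoke -> hub round trip. A hedged sketch of the read-back side, based on sigs.k8s.io/cluster-api/util/conversion as used in this diff; the exact helper signatures may differ:

// convertToSketch illustrates the pattern; it is not a function in this PR.
func (src *KubeadmControlPlane) convertToSketch(dst *controlplanev1.KubeadmControlPlane) error {
	// Read back the hub object previously stashed by ConvertFrom.
	restored := &controlplanev1.KubeadmControlPlane{}
	ok, err := utilconversion.UnmarshalData(src, restored)
	if err != nil {
		return err
	}
	if ok {
		// Re-apply hub-only fields that the spoke cannot represent.
		dst.Spec.MachineTemplate.NodeDeletionTimeoutSeconds = restored.Spec.MachineTemplate.NodeDeletionTimeoutSeconds
		dst.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds = restored.Spec.MachineTemplate.NodeVolumeDetachTimeoutSeconds
	}
	return nil
}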
diff --git a/internal/api/controlplane/kubeadm/v1alpha4/conversion_test.go b/internal/api/controlplane/kubeadm/v1alpha4/conversion_test.go
index 27a491952135..159db46e7930 100644
--- a/internal/api/controlplane/kubeadm/v1alpha4/conversion_test.go
+++ b/internal/api/controlplane/kubeadm/v1alpha4/conversion_test.go
@@ -63,6 +63,7 @@ func KubeadmControlPlaneFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{
 		hubKubeadmControlPlaneStatus,
 		spokeKubeadmControlPlaneStatus,
 		spokeKubeadmControlPlaneTemplateResource,
+		spokeKubeadmControlPlaneMachineTemplate,
 		hubBootstrapTokenString,
 		spokeBootstrapTokenString,
 		spokeKubeadmConfigSpec,
@@ -70,12 +71,14 @@ func KubeadmControlPlaneFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{
 		spokeAPIServer,
 		spokeDiscovery,
 		hubKubeadmConfigSpec,
+		spokeBootstrapToken,
 	}
 }

 func KubeadmControlPlaneTemplateFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} {
 	return []interface{}{
 		spokeKubeadmControlPlaneTemplateResource,
+		spokeKubeadmControlPlaneMachineTemplate,
 		hubBootstrapTokenString,
 		spokeBootstrapTokenString,
 		spokeKubeadmConfigSpec,
@@ -83,6 +86,7 @@ func KubeadmControlPlaneTemplateFuzzFuncs(_ runtimeserializer.CodecFactory) []in
 		spokeAPIServer,
 		spokeDiscovery,
 		hubKubeadmConfigSpec,
+		spokeBootstrapToken,
 	}
 }
@@ -156,6 +160,14 @@ func spokeKubeadmControlPlaneTemplateResource(in *KubeadmControlPlaneTemplateRes
 	in.Spec.MachineTemplate.InfrastructureRef = corev1.ObjectReference{}
 }

+func spokeKubeadmControlPlaneMachineTemplate(in *KubeadmControlPlaneMachineTemplate, c randfill.Continue) {
+	c.FillNoCustom(in)
+
+	if in.NodeDrainTimeout != nil {
+		in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second})
+	}
+}
+
 func spokeKubeadmConfigSpec(in *bootstrapv1alpha4.KubeadmConfigSpec, c randfill.Continue) {
 	c.FillNoCustom(in)
@@ -183,6 +195,14 @@ func spokeAPIServer(in *bootstrapv1alpha4.APIServer, c randfill.Continue) {
 	}
 }

+func spokeBootstrapToken(in *bootstrapv1alpha4.BootstrapToken, c randfill.Continue) {
+	c.FillNoCustom(in)
+
+	if in.TTL != nil {
+		in.TTL = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second})
+	}
+}
+
 func spokeDiscovery(in *bootstrapv1alpha4.Discovery, c randfill.Continue) {
 	c.FillNoCustom(in)
diff --git a/internal/api/controlplane/kubeadm/v1alpha4/zz_generated.conversion.go b/internal/api/controlplane/kubeadm/v1alpha4/zz_generated.conversion.go
index 41fa5066a8bd..b25b81abb2c1 100644
--- a/internal/api/controlplane/kubeadm/v1alpha4/zz_generated.conversion.go
+++ b/internal/api/controlplane/kubeadm/v1alpha4/zz_generated.conversion.go
@@ -62,11 +62,6 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*KubeadmControlPlaneMachineTemplate)(nil), (*v1beta2.KubeadmControlPlaneMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha4_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate(a.(*KubeadmControlPlaneMachineTemplate), b.(*v1beta2.KubeadmControlPlaneMachineTemplate), scope)
-	}); err != nil {
-		return err
-	}
 	if err := s.AddGeneratedConversionFunc((*KubeadmControlPlaneSpec)(nil), (*v1beta2.KubeadmControlPlaneSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1alpha4_KubeadmControlPlaneSpec_To_v1beta2_KubeadmControlPlaneSpec(a.(*KubeadmControlPlaneSpec), b.(*v1beta2.KubeadmControlPlaneSpec), scope)
 	}); err != nil {
@@ -142,6 +137,11 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddConversionFunc((*KubeadmControlPlaneMachineTemplate)(nil), (*v1beta2.KubeadmControlPlaneMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha4_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate(a.(*KubeadmControlPlaneMachineTemplate), b.(*v1beta2.KubeadmControlPlaneMachineTemplate), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddConversionFunc((*KubeadmControlPlaneSpec)(nil), (*v1beta2.KubeadmControlPlaneTemplateResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1alpha4_KubeadmControlPlaneSpec_To_v1beta2_KubeadmControlPlaneTemplateResourceSpec(a.(*KubeadmControlPlaneSpec), b.(*v1beta2.KubeadmControlPlaneTemplateResourceSpec), scope)
 	}); err != nil {
@@ -274,24 +274,19 @@ func autoConvert_v1alpha4_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmC
 		return err
 	}
 	out.InfrastructureRef = in.InfrastructureRef
-	out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout))
+	// WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type
 	return nil
 }

-// Convert_v1alpha4_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate is an autogenerated conversion function.
-func Convert_v1alpha4_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate(in *KubeadmControlPlaneMachineTemplate, out *v1beta2.KubeadmControlPlaneMachineTemplate, s conversion.Scope) error {
-	return autoConvert_v1alpha4_KubeadmControlPlaneMachineTemplate_To_v1beta2_KubeadmControlPlaneMachineTemplate(in, out, s)
-}
-
 func autoConvert_v1beta2_KubeadmControlPlaneMachineTemplate_To_v1alpha4_KubeadmControlPlaneMachineTemplate(in *v1beta2.KubeadmControlPlaneMachineTemplate, out *KubeadmControlPlaneMachineTemplate, s conversion.Scope) error {
 	if err := Convert_v1beta2_ObjectMeta_To_v1alpha4_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
 		return err
 	}
 	out.InfrastructureRef = in.InfrastructureRef
 	// WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type
-	out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout))
-	// WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type
-	// WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type
+	// WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type
+	// WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type
+	// WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type
 	return nil
 }
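A note on the recurring spoke*() fuzz funcs in the conversion tests above and below: they all pin fuzzed durations to a whole number of seconds. The hub types store int32 seconds, so any sub-second component would be truncated on the way in and could not be restored on the way out, and the spoke -> hub -> spoke fuzz round trip would report a diff. A small self-contained illustration of the loss (this snippet is the reviewer's own, not from the PR):

package main

import (
	"fmt"
	"time"
)

func main() {
	// A value randfill could produce without the custom fuzz funcs.
	d := 1500 * time.Millisecond
	seconds := int32(d / time.Second)            // 1; the 500ms is dropped
	back := time.Duration(seconds) * time.Second // 1s
	fmt.Println(d, "->", seconds, "->", back)    // 1.5s -> 1 -> 1s: not lossless
}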
diff --git a/internal/api/core/v1alpha3/conversion.go b/internal/api/core/v1alpha3/conversion.go
index 18385de0f709..8b2e55452130 100644
--- a/internal/api/core/v1alpha3/conversion.go
+++ b/internal/api/core/v1alpha3/conversion.go
@@ -165,8 +165,8 @@ func (src *Machine) ConvertTo(dstRaw conversion.Hub) error {
 	dst.Spec.MinReadySeconds = restored.Spec.MinReadySeconds
 	dst.Spec.ReadinessGates = restored.Spec.ReadinessGates
-	dst.Spec.NodeDeletionTimeout = restored.Spec.NodeDeletionTimeout
-	dst.Spec.NodeVolumeDetachTimeout = restored.Spec.NodeVolumeDetachTimeout
+	dst.Spec.NodeDeletionTimeoutSeconds = restored.Spec.NodeDeletionTimeoutSeconds
+	dst.Spec.NodeVolumeDetachTimeoutSeconds = restored.Spec.NodeVolumeDetachTimeoutSeconds
 	dst.Status.NodeInfo = restored.Status.NodeInfo
 	dst.Status.CertificatesExpiryDate = restored.Status.CertificatesExpiryDate
 	dst.Status.Deletion = restored.Status.Deletion
@@ -243,8 +243,8 @@ func (src *MachineSet) ConvertTo(dstRaw conversion.Hub) error {
 		return err
 	}
 	dst.Spec.Template.Spec.ReadinessGates = restored.Spec.Template.Spec.ReadinessGates
-	dst.Spec.Template.Spec.NodeDeletionTimeout = restored.Spec.Template.Spec.NodeDeletionTimeout
-	dst.Spec.Template.Spec.NodeVolumeDetachTimeout = restored.Spec.Template.Spec.NodeVolumeDetachTimeout
+	dst.Spec.Template.Spec.NodeDeletionTimeoutSeconds = restored.Spec.Template.Spec.NodeDeletionTimeoutSeconds
+	dst.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = restored.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds
 	if restored.Status.Deprecated != nil && restored.Status.Deprecated.V1Beta1 != nil {
 		dst.Status.Deprecated.V1Beta1.Conditions = restored.Status.Deprecated.V1Beta1.Conditions
 	}
@@ -336,8 +336,8 @@ func (src *MachineDeployment) ConvertTo(dstRaw conversion.Hub) error {
 	}
 	dst.Spec.Template.Spec.ReadinessGates = restored.Spec.Template.Spec.ReadinessGates
-	dst.Spec.Template.Spec.NodeDeletionTimeout = restored.Spec.Template.Spec.NodeDeletionTimeout
-	dst.Spec.Template.Spec.NodeVolumeDetachTimeout = restored.Spec.Template.Spec.NodeVolumeDetachTimeout
+	dst.Spec.Template.Spec.NodeDeletionTimeoutSeconds = restored.Spec.Template.Spec.NodeDeletionTimeoutSeconds
+	dst.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = restored.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds
 	dst.Spec.RolloutAfter = restored.Spec.RolloutAfter
 	if restored.Status.Deprecated != nil && restored.Status.Deprecated.V1Beta1 != nil {
 		dst.Status.Deprecated.V1Beta1.Conditions = restored.Status.Deprecated.V1Beta1.Conditions
@@ -483,8 +483,8 @@ func (src *MachinePool) ConvertTo(dstRaw conversion.Hub) error {
 	}
 	dst.Spec.Template.Spec.ReadinessGates = restored.Spec.Template.Spec.ReadinessGates
-	dst.Spec.Template.Spec.NodeDeletionTimeout = restored.Spec.Template.Spec.NodeDeletionTimeout
-	dst.Spec.Template.Spec.NodeVolumeDetachTimeout = restored.Spec.Template.Spec.NodeVolumeDetachTimeout
+	dst.Spec.Template.Spec.NodeDeletionTimeoutSeconds = restored.Spec.Template.Spec.NodeDeletionTimeoutSeconds
+	dst.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = restored.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds
 	dst.Status.Conditions = restored.Status.Conditions
 	dst.Status.AvailableReplicas = restored.Status.AvailableReplicas
 	dst.Status.ReadyReplicas = restored.Status.ReadyReplicas
@@ -581,11 +581,12 @@ func Convert_v1alpha3_MachineHealthCheckSpec_To_v1beta2_MachineHealthCheckSpec(i
 	for _, c := range in.UnhealthyConditions {
 		out.UnhealthyNodeConditions = append(out.UnhealthyNodeConditions, clusterv1.UnhealthyNodeCondition{
-			Type:    c.Type,
-			Status:  c.Status,
-			Timeout: c.Timeout,
+			Type:           c.Type,
+			Status:         c.Status,
+			TimeoutSeconds: ptr.Deref(clusterv1.ConvertToSeconds(&c.Timeout), 0),
 		})
 	}
+	out.NodeStartupTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeStartupTimeout)
 	return nil
 }
@@ -599,9 +600,10 @@ func Convert_v1beta2_MachineHealthCheckSpec_To_v1alpha3_MachineHealthCheckSpec(i
 		out.UnhealthyConditions = append(out.UnhealthyConditions, UnhealthyCondition{
 			Type:    c.Type,
 			Status:  c.Status,
-			Timeout: c.Timeout,
+			Timeout: ptr.Deref(clusterv1.ConvertFromSeconds(&c.TimeoutSeconds), metav1.Duration{}),
 		})
 	}
+	out.NodeStartupTimeout = clusterv1.ConvertFromSeconds(in.NodeStartupTimeoutSeconds)
 	return nil
 }
@@ -643,7 +645,11 @@ func Convert_v1beta2_MachineStatus_To_v1alpha3_MachineStatus(in *clusterv1.Machi
 func Convert_v1beta2_MachineSpec_To_v1alpha3_MachineSpec(in *clusterv1.MachineSpec, out *MachineSpec, s apimachineryconversion.Scope) error {
 	// spec.nodeDeletionTimeout was added in v1beta1.
 	// ReadinessGates was added in v1beta1.
-	return autoConvert_v1beta2_MachineSpec_To_v1alpha3_MachineSpec(in, out, s)
+	if err := autoConvert_v1beta2_MachineSpec_To_v1alpha3_MachineSpec(in, out, s); err != nil {
+		return err
+	}
+	out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.NodeDrainTimeoutSeconds)
+	return nil
 }

 func Convert_v1beta2_MachineDeploymentSpec_To_v1alpha3_MachineDeploymentSpec(in *clusterv1.MachineDeploymentSpec, out *MachineDeploymentSpec, s apimachineryconversion.Scope) error {
@@ -750,3 +756,11 @@ func Convert_v1beta2_MachinePoolStatus_To_v1alpha3_MachinePoolStatus(in *cluster
 func Convert_v1alpha3_MachinePoolStatus_To_v1beta2_MachinePoolStatus(in *MachinePoolStatus, out *clusterv1.MachinePoolStatus, s apimachineryconversion.Scope) error {
 	return autoConvert_v1alpha3_MachinePoolStatus_To_v1beta2_MachinePoolStatus(in, out, s)
 }
+
+func Convert_v1alpha3_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, out *clusterv1.MachineSpec, s apimachineryconversion.Scope) error {
+	if err := autoConvert_v1alpha3_MachineSpec_To_v1beta2_MachineSpec(in, out, s); err != nil {
+		return err
+	}
+	out.NodeDrainTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDrainTimeout)
+	return nil
+}
diff --git a/internal/api/core/v1alpha3/conversion_test.go b/internal/api/core/v1alpha3/conversion_test.go
index 52d1dd546105..ea88e16d0c3e 100644
--- a/internal/api/core/v1alpha3/conversion_test.go
+++ b/internal/api/core/v1alpha3/conversion_test.go
@@ -21,9 +21,11 @@ package v1alpha3
 import (
 	"reflect"
 	"testing"
+	"time"

 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/conversion"
@@ -77,6 +79,7 @@ func TestFuzzyConversion(t *testing.T) {
 func MachineFuzzFunc(_ runtimeserializer.CodecFactory) []interface{} {
 	return []interface{}{
 		hubMachineStatus,
+		spokeMachineSpec,
 		spokeMachineStatus,
 		spokeBootstrap,
 	}
 }
@@ -99,6 +102,14 @@ func hubMachineStatus(in *clusterv1.MachineStatus, c randfill.Continue) {
 	}
 }

+func spokeMachineSpec(in *MachineSpec, c randfill.Continue) {
+	c.FillNoCustom(in)
+
+	if in.NodeDrainTimeout != nil {
+		in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second})
+	}
+}
+
 func spokeMachineStatus(in *MachineStatus, c randfill.Continue) {
 	c.FillNoCustom(in)
@@ -112,6 +123,7 @@ func MachineSetFuzzFunc(_ runtimeserializer.CodecFactory) []interface{} {
 		hubMachineSetStatus,
 		spokeObjectMeta,
 		spokeBootstrap,
+		spokeMachineSpec,
 	}
 }
@@ -137,6 +149,7 @@ func MachineDeploymentFuzzFunc(_ runtimeserializer.CodecFactory) []interface{} {
 		spokeMachineDeploymentSpec,
 		spokeObjectMeta,
 		spokeBootstrap,
+		spokeMachineSpec,
 	}
 }
@@ -240,6 +253,8 @@ func hubClusterVariable(in *clusterv1.ClusterVariable, c randfill.Continue) {
 func MachineHealthCheckFuzzFunc(_ runtimeserializer.CodecFactory) []interface{} {
 	return []interface{}{
 		hubMachineHealthCheckStatus,
+		spokeMachineHealthCheckSpec,
+		spokeUnhealthyCondition,
 	}
 }
@@ -259,6 +274,7 @@ func MachinePoolFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} {
 		spokeObjectMeta,
 		spokeMachinePoolSpec,
 		hubMachinePoolStatus,
+		spokeMachineSpec,
 	}
 }
@@ -293,3 +309,17 @@ func spokeMachinePoolSpec(in *MachinePoolSpec, c randfill.Continue) {
 	// data is going to be lost, so we're forcing zero values here.
 	in.Strategy = nil
 }
+
+func spokeMachineHealthCheckSpec(in *MachineHealthCheckSpec, c randfill.Continue) {
+	c.FillNoCustom(in)
+
+	if in.NodeStartupTimeout != nil {
+		in.NodeStartupTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second})
+	}
+}
+
+func spokeUnhealthyCondition(in *UnhealthyCondition, c randfill.Continue) {
+	c.FillNoCustom(in)
+
+	in.Timeout = metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}
+}
diff --git a/internal/api/core/v1alpha3/zz_generated.conversion.go b/internal/api/core/v1alpha3/zz_generated.conversion.go
index e0672900bb85..ffd830dd8926 100644
--- a/internal/api/core/v1alpha3/zz_generated.conversion.go
+++ b/internal/api/core/v1alpha3/zz_generated.conversion.go
@@ -219,11 +219,6 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*MachineSpec)(nil), (*v1beta2.MachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha3_MachineSpec_To_v1beta2_MachineSpec(a.(*MachineSpec), b.(*v1beta2.MachineSpec), scope)
-	}); err != nil {
-		return err
-	}
 	if err := s.AddGeneratedConversionFunc((*MachineTemplateSpec)(nil), (*v1beta2.MachineTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1alpha3_MachineTemplateSpec_To_v1beta2_MachineTemplateSpec(a.(*MachineTemplateSpec), b.(*v1beta2.MachineTemplateSpec), scope)
 	}); err != nil {
@@ -309,6 +304,11 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddConversionFunc((*MachineSpec)(nil), (*v1beta2.MachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha3_MachineSpec_To_v1beta2_MachineSpec(a.(*MachineSpec), b.(*v1beta2.MachineSpec), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddConversionFunc((*MachineStatus)(nil), (*v1beta2.MachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1alpha3_MachineStatus_To_v1beta2_MachineStatus(a.(*MachineStatus), b.(*v1beta2.MachineStatus), scope)
 	}); err != nil {
@@ -959,7 +959,7 @@ func autoConvert_v1alpha3_MachineHealthCheckSpec_To_v1beta2_MachineHealthCheckSp
 	out.Selector = in.Selector
 	// WARNING: in.UnhealthyConditions requires manual conversion: does not exist in peer-type
 	out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy))
-	out.NodeStartupTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeStartupTimeout))
+	// WARNING: in.NodeStartupTimeout requires manual conversion: does not exist in peer-type
 	out.RemediationTemplate = (*corev1.ObjectReference)(unsafe.Pointer(in.RemediationTemplate))
 	return nil
 }
@@ -970,7 +970,7 @@ func autoConvert_v1beta2_MachineHealthCheckSpec_To_v1alpha3_MachineHealthCheckSp
 	// WARNING: in.UnhealthyNodeConditions requires manual conversion: does not exist in peer-type
 	out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy))
 	// WARNING: in.UnhealthyRange requires manual conversion: does not exist in peer-type
-	out.NodeStartupTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeStartupTimeout))
+	// WARNING: in.NodeStartupTimeoutSeconds requires manual conversion: does not exist in peer-type
 	out.RemediationTemplate = (*corev1.ObjectReference)(unsafe.Pointer(in.RemediationTemplate))
 	return nil
 }
@@ -1379,15 +1379,10 @@ func autoConvert_v1alpha3_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, ou
 	out.Version = (*string)(unsafe.Pointer(in.Version))
 	out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID))
 	out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain))
-	out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout))
+	// WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type
 	return nil
 }

-// Convert_v1alpha3_MachineSpec_To_v1beta2_MachineSpec is an autogenerated conversion function.
-func Convert_v1alpha3_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, out *v1beta2.MachineSpec, s conversion.Scope) error {
-	return autoConvert_v1alpha3_MachineSpec_To_v1beta2_MachineSpec(in, out, s)
-}
-
 func autoConvert_v1beta2_MachineSpec_To_v1alpha3_MachineSpec(in *v1beta2.MachineSpec, out *MachineSpec, s conversion.Scope) error {
 	out.ClusterName = in.ClusterName
 	if err := Convert_v1beta2_Bootstrap_To_v1alpha3_Bootstrap(&in.Bootstrap, &out.Bootstrap, s); err != nil {
@@ -1399,9 +1394,9 @@ func autoConvert_v1beta2_MachineSpec_To_v1alpha3_MachineSpec(in *v1beta2.Machine
 	out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain))
 	// WARNING: in.MinReadySeconds requires manual conversion: does not exist in peer-type
 	// WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type
-	out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout))
-	// WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type
-	// WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type
+	// WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type
+	// WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type
+	// WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type
 	return nil
 }
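One asymmetry worth noting in the MachineHealthCheckSpec conversions above: UnhealthyCondition.Timeout is a value type in the spokes, while the clusterv1 helpers work on pointers, so the code takes an address on the way in and uses ptr.Deref (k8s.io/utils/ptr) to supply a default on the way out. An excerpt-style sketch of the two directions, mirroring the diff under the helper semantics assumed earlier; c stands for one loop element:

// Hub-bound: c.Timeout is a metav1.Duration value, so &c.Timeout is never nil;
// ptr.Deref still guards the pointer result with an explicit 0 default.
timeoutSeconds := ptr.Deref(clusterv1.ConvertToSeconds(&c.Timeout), 0)

// Spoke-bound: TimeoutSeconds is a plain int32; ptr.Deref collapses the
// pointer result back into the value-typed field, defaulting to the zero
// metav1.Duration{}.
timeout := ptr.Deref(clusterv1.ConvertFromSeconds(&timeoutSeconds), metav1.Duration{})
_ = timeout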
diff --git a/internal/api/core/v1alpha4/conversion.go b/internal/api/core/v1alpha4/conversion.go
index b192fc750fff..11832238f73c 100644
--- a/internal/api/core/v1alpha4/conversion.go
+++ b/internal/api/core/v1alpha4/conversion.go
@@ -78,16 +78,16 @@ func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error {
 			dst.Spec.Topology.ControlPlane.MachineHealthCheck = restored.Spec.Topology.ControlPlane.MachineHealthCheck
 		}

-		if restored.Spec.Topology.ControlPlane.NodeDrainTimeout != nil {
-			dst.Spec.Topology.ControlPlane.NodeDrainTimeout = restored.Spec.Topology.ControlPlane.NodeDrainTimeout
+		if restored.Spec.Topology.ControlPlane.NodeDrainTimeoutSeconds != nil {
+			dst.Spec.Topology.ControlPlane.NodeDrainTimeoutSeconds = restored.Spec.Topology.ControlPlane.NodeDrainTimeoutSeconds
 		}

-		if restored.Spec.Topology.ControlPlane.NodeVolumeDetachTimeout != nil {
-			dst.Spec.Topology.ControlPlane.NodeVolumeDetachTimeout = restored.Spec.Topology.ControlPlane.NodeVolumeDetachTimeout
+		if restored.Spec.Topology.ControlPlane.NodeVolumeDetachTimeoutSeconds != nil {
+			dst.Spec.Topology.ControlPlane.NodeVolumeDetachTimeoutSeconds = restored.Spec.Topology.ControlPlane.NodeVolumeDetachTimeoutSeconds
 		}

-		if restored.Spec.Topology.ControlPlane.NodeDeletionTimeout != nil {
-			dst.Spec.Topology.ControlPlane.NodeDeletionTimeout = restored.Spec.Topology.ControlPlane.NodeDeletionTimeout
+		if restored.Spec.Topology.ControlPlane.NodeDeletionTimeoutSeconds != nil {
+			dst.Spec.Topology.ControlPlane.NodeDeletionTimeoutSeconds = restored.Spec.Topology.ControlPlane.NodeDeletionTimeoutSeconds
 		}

 		dst.Spec.Topology.ControlPlane.ReadinessGates = restored.Spec.Topology.ControlPlane.ReadinessGates
@@ -99,9 +99,9 @@ func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error {
 			dst.Spec.Topology.Workers.MachineDeployments[i].FailureDomain = restored.Spec.Topology.Workers.MachineDeployments[i].FailureDomain
 			dst.Spec.Topology.Workers.MachineDeployments[i].Variables = restored.Spec.Topology.Workers.MachineDeployments[i].Variables
 			dst.Spec.Topology.Workers.MachineDeployments[i].ReadinessGates = restored.Spec.Topology.Workers.MachineDeployments[i].ReadinessGates
-			dst.Spec.Topology.Workers.MachineDeployments[i].NodeDrainTimeout = restored.Spec.Topology.Workers.MachineDeployments[i].NodeDrainTimeout
-			dst.Spec.Topology.Workers.MachineDeployments[i].NodeVolumeDetachTimeout = restored.Spec.Topology.Workers.MachineDeployments[i].NodeVolumeDetachTimeout
-			dst.Spec.Topology.Workers.MachineDeployments[i].NodeDeletionTimeout = restored.Spec.Topology.Workers.MachineDeployments[i].NodeDeletionTimeout
+			dst.Spec.Topology.Workers.MachineDeployments[i].NodeDrainTimeoutSeconds = restored.Spec.Topology.Workers.MachineDeployments[i].NodeDrainTimeoutSeconds
+			dst.Spec.Topology.Workers.MachineDeployments[i].NodeVolumeDetachTimeoutSeconds = restored.Spec.Topology.Workers.MachineDeployments[i].NodeVolumeDetachTimeoutSeconds
+			dst.Spec.Topology.Workers.MachineDeployments[i].NodeDeletionTimeoutSeconds = restored.Spec.Topology.Workers.MachineDeployments[i].NodeDeletionTimeoutSeconds
 			dst.Spec.Topology.Workers.MachineDeployments[i].MinReadySeconds = restored.Spec.Topology.Workers.MachineDeployments[i].MinReadySeconds
 			dst.Spec.Topology.Workers.MachineDeployments[i].Strategy = restored.Spec.Topology.Workers.MachineDeployments[i].Strategy
 			dst.Spec.Topology.Workers.MachineDeployments[i].MachineHealthCheck = restored.Spec.Topology.Workers.MachineDeployments[i].MachineHealthCheck
@@ -177,9 +177,9 @@ func (src *ClusterClass) ConvertTo(dstRaw conversion.Hub) error {
 	dst.Spec.ControlPlane.ReadinessGates = restored.Spec.ControlPlane.ReadinessGates
 	dst.Spec.ControlPlane.NamingStrategy = restored.Spec.ControlPlane.NamingStrategy
 	dst.Spec.Infrastructure.NamingStrategy = restored.Spec.Infrastructure.NamingStrategy
-	dst.Spec.ControlPlane.NodeDrainTimeout = restored.Spec.ControlPlane.NodeDrainTimeout
-	dst.Spec.ControlPlane.NodeVolumeDetachTimeout = restored.Spec.ControlPlane.NodeVolumeDetachTimeout
-	dst.Spec.ControlPlane.NodeDeletionTimeout = restored.Spec.ControlPlane.NodeDeletionTimeout
+	dst.Spec.ControlPlane.NodeDrainTimeoutSeconds = restored.Spec.ControlPlane.NodeDrainTimeoutSeconds
+	dst.Spec.ControlPlane.NodeVolumeDetachTimeoutSeconds = restored.Spec.ControlPlane.NodeVolumeDetachTimeoutSeconds
+	dst.Spec.ControlPlane.NodeDeletionTimeoutSeconds = restored.Spec.ControlPlane.NodeDeletionTimeoutSeconds
 	dst.Spec.Workers.MachinePools = restored.Spec.Workers.MachinePools

 	for i := range restored.Spec.Workers.MachineDeployments {
@@ -187,9 +187,9 @@ func (src *ClusterClass) ConvertTo(dstRaw conversion.Hub) error {
 		dst.Spec.Workers.MachineDeployments[i].ReadinessGates = restored.Spec.Workers.MachineDeployments[i].ReadinessGates
 		dst.Spec.Workers.MachineDeployments[i].FailureDomain = restored.Spec.Workers.MachineDeployments[i].FailureDomain
 		dst.Spec.Workers.MachineDeployments[i].NamingStrategy = restored.Spec.Workers.MachineDeployments[i].NamingStrategy
-		dst.Spec.Workers.MachineDeployments[i].NodeDrainTimeout = restored.Spec.Workers.MachineDeployments[i].NodeDrainTimeout
-		dst.Spec.Workers.MachineDeployments[i].NodeVolumeDetachTimeout = restored.Spec.Workers.MachineDeployments[i].NodeVolumeDetachTimeout
-		dst.Spec.Workers.MachineDeployments[i].NodeDeletionTimeout = restored.Spec.Workers.MachineDeployments[i].NodeDeletionTimeout
+		dst.Spec.Workers.MachineDeployments[i].NodeDrainTimeoutSeconds = restored.Spec.Workers.MachineDeployments[i].NodeDrainTimeoutSeconds
+		dst.Spec.Workers.MachineDeployments[i].NodeVolumeDetachTimeoutSeconds = restored.Spec.Workers.MachineDeployments[i].NodeVolumeDetachTimeoutSeconds
+		dst.Spec.Workers.MachineDeployments[i].NodeDeletionTimeoutSeconds = restored.Spec.Workers.MachineDeployments[i].NodeDeletionTimeoutSeconds
 		dst.Spec.Workers.MachineDeployments[i].MinReadySeconds = restored.Spec.Workers.MachineDeployments[i].MinReadySeconds
 		dst.Spec.Workers.MachineDeployments[i].Strategy = restored.Spec.Workers.MachineDeployments[i].Strategy
 	}
@@ -252,9 +252,9 @@ func (src *Machine) ConvertTo(dstRaw conversion.Hub) error {
 	dst.Spec.MinReadySeconds = restored.Spec.MinReadySeconds
 	dst.Spec.ReadinessGates = restored.Spec.ReadinessGates
-	dst.Spec.NodeDeletionTimeout = restored.Spec.NodeDeletionTimeout
+	dst.Spec.NodeDeletionTimeoutSeconds = restored.Spec.NodeDeletionTimeoutSeconds
 	dst.Status.CertificatesExpiryDate = restored.Status.CertificatesExpiryDate
-	dst.Spec.NodeVolumeDetachTimeout = restored.Spec.NodeVolumeDetachTimeout
+	dst.Spec.NodeVolumeDetachTimeoutSeconds = restored.Spec.NodeVolumeDetachTimeoutSeconds
 	dst.Status.Deletion = restored.Status.Deletion
 	dst.Status.Conditions = restored.Status.Conditions
@@ -333,8 +333,8 @@ func (src *MachineSet) ConvertTo(dstRaw conversion.Hub) error {
 	}
 	dst.Spec.Template.Spec.ReadinessGates = restored.Spec.Template.Spec.ReadinessGates
-	dst.Spec.Template.Spec.NodeDeletionTimeout = restored.Spec.Template.Spec.NodeDeletionTimeout
-	dst.Spec.Template.Spec.NodeVolumeDetachTimeout = restored.Spec.Template.Spec.NodeVolumeDetachTimeout
+	dst.Spec.Template.Spec.NodeDeletionTimeoutSeconds = restored.Spec.Template.Spec.NodeDeletionTimeoutSeconds
+	dst.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = restored.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds
 	dst.Status.Conditions = restored.Status.Conditions
 	dst.Status.AvailableReplicas = restored.Status.AvailableReplicas
 	dst.Status.ReadyReplicas = restored.Status.ReadyReplicas
@@ -414,8 +414,8 @@ func (src *MachineDeployment) ConvertTo(dstRaw conversion.Hub) error {
 	}
 	dst.Spec.Template.Spec.ReadinessGates = restored.Spec.Template.Spec.ReadinessGates
-	dst.Spec.Template.Spec.NodeDeletionTimeout = restored.Spec.Template.Spec.NodeDeletionTimeout
-	dst.Spec.Template.Spec.NodeVolumeDetachTimeout = restored.Spec.Template.Spec.NodeVolumeDetachTimeout
+	dst.Spec.Template.Spec.NodeDeletionTimeoutSeconds = restored.Spec.Template.Spec.NodeDeletionTimeoutSeconds
+	dst.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = restored.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds
 	dst.Spec.RolloutAfter = restored.Spec.RolloutAfter

 	if restored.Spec.Strategy != nil {
@@ -562,8 +562,8 @@ func (src *MachinePool) ConvertTo(dstRaw conversion.Hub) error {
 		return err
 	}
 	dst.Spec.Template.Spec.ReadinessGates = restored.Spec.Template.Spec.ReadinessGates
-	dst.Spec.Template.Spec.NodeDeletionTimeout = restored.Spec.Template.Spec.NodeDeletionTimeout
-	dst.Spec.Template.Spec.NodeVolumeDetachTimeout = restored.Spec.Template.Spec.NodeVolumeDetachTimeout
+	dst.Spec.Template.Spec.NodeDeletionTimeoutSeconds = restored.Spec.Template.Spec.NodeDeletionTimeoutSeconds
+	dst.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = restored.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds
 	dst.Status.Conditions = restored.Status.Conditions
 	dst.Status.AvailableReplicas = restored.Status.AvailableReplicas
 	dst.Status.ReadyReplicas = restored.Status.ReadyReplicas
@@ -643,7 +643,11 @@ func Convert_v1alpha4_LocalObjectTemplate_To_v1beta2_InfrastructureClass(in *Loc
 func Convert_v1beta2_MachineSpec_To_v1alpha4_MachineSpec(in *clusterv1.MachineSpec, out *MachineSpec, s apimachineryconversion.Scope) error {
 	// spec.nodeDeletionTimeout was added in v1beta1.
 	// ReadinessGates was added in v1beta1.
-	return autoConvert_v1beta2_MachineSpec_To_v1alpha4_MachineSpec(in, out, s)
+	if err := autoConvert_v1beta2_MachineSpec_To_v1alpha4_MachineSpec(in, out, s); err != nil {
+		return err
+	}
+	out.NodeDrainTimeout = clusterv1.ConvertFromSeconds(in.NodeDrainTimeoutSeconds)
+	return nil
 }

 func Convert_v1beta2_MachineDeploymentSpec_To_v1alpha4_MachineDeploymentSpec(in *clusterv1.MachineDeploymentSpec, out *MachineDeploymentSpec, s apimachineryconversion.Scope) error {
@@ -761,11 +765,12 @@ func Convert_v1alpha4_MachineHealthCheckSpec_To_v1beta2_MachineHealthCheckSpec(i
 	for _, c := range in.UnhealthyConditions {
 		out.UnhealthyNodeConditions = append(out.UnhealthyNodeConditions, clusterv1.UnhealthyNodeCondition{
-			Type:    c.Type,
-			Status:  c.Status,
-			Timeout: c.Timeout,
+			Type:           c.Type,
+			Status:         c.Status,
+			TimeoutSeconds: ptr.Deref(clusterv1.ConvertToSeconds(&c.Timeout), 0),
 		})
 	}
+	out.NodeStartupTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeStartupTimeout)
 	return nil
 }
@@ -779,9 +784,10 @@ func Convert_v1beta2_MachineHealthCheckSpec_To_v1alpha4_MachineHealthCheckSpec(i
 		out.UnhealthyConditions = append(out.UnhealthyConditions, UnhealthyCondition{
 			Type:    c.Type,
 			Status:  c.Status,
-			Timeout: c.Timeout,
+			Timeout: ptr.Deref(clusterv1.ConvertFromSeconds(&c.TimeoutSeconds), metav1.Duration{}),
 		})
 	}
+	out.NodeStartupTimeout = clusterv1.ConvertFromSeconds(in.NodeStartupTimeoutSeconds)
 	return nil
 }
@@ -861,6 +867,14 @@ func Convert_v1alpha4_MachinePoolStatus_To_v1beta2_MachinePoolStatus(in *Machine
 	return autoConvert_v1alpha4_MachinePoolStatus_To_v1beta2_MachinePoolStatus(in, out, scope)
 }

+func Convert_v1alpha4_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, out *clusterv1.MachineSpec, s apimachineryconversion.Scope) error {
+	if err := autoConvert_v1alpha4_MachineSpec_To_v1beta2_MachineSpec(in, out, s); err != nil {
+		return err
+	}
+	out.NodeDrainTimeoutSeconds = clusterv1.ConvertToSeconds(in.NodeDrainTimeout)
+	return nil
+}
+
 // Implement local conversion func because conversion-gen is not aware of conversion func in other packages (see https://github.com/kubernetes/code-generator/issues/94)
 func Convert_v1alpha4_MachinePoolSpec_To_v1beta2_MachinePoolSpec(in *MachinePoolSpec, out *clusterv1.MachinePoolSpec, s apimachineryconversion.Scope) error {
diff --git a/internal/api/core/v1alpha4/conversion_test.go b/internal/api/core/v1alpha4/conversion_test.go
index 31f98fff4702..d6180a22a133 100644
--- a/internal/api/core/v1alpha4/conversion_test.go
+++ b/internal/api/core/v1alpha4/conversion_test.go
@@ -22,9 +22,11 @@ import (
 	"reflect"
 	"strconv"
 	"testing"
+	"time"

 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/api/apitesting/fuzzer"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/utils/ptr"
 	"sigs.k8s.io/randfill"
@@ -76,6 +78,7 @@ func TestFuzzyConversion(t *testing.T) {
 func MachineFuzzFunc(_ runtimeserializer.CodecFactory) []interface{} {
 	return []interface{}{
 		hubMachineStatus,
+		spokeMachineSpec,
 		spokeMachineStatus,
 	}
 }
@@ -97,6 +100,14 @@ func
hubMachineStatus(in *clusterv1.MachineStatus, c randfill.Continue) { } } +func spokeMachineSpec(in *MachineSpec, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeDrainTimeout != nil { + in.NodeDrainTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + func spokeMachineStatus(in *MachineStatus, c randfill.Continue) { c.FillNoCustom(in) @@ -203,6 +214,7 @@ func hubJSONSchemaProps(in *clusterv1.JSONSchemaProps, c randfill.Continue) { func MachineSetFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { return []interface{}{ + spokeMachineSpec, hubMachineSetStatus, } } @@ -227,6 +239,7 @@ func MachineDeploymentFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} return []interface{}{ hubMachineDeploymentStatus, spokeMachineDeploymentSpec, + spokeMachineSpec, } } @@ -259,6 +272,8 @@ func spokeMachineDeploymentSpec(in *MachineDeploymentSpec, c randfill.Continue) func MachineHealthCheckFuzzFunc(_ runtimeserializer.CodecFactory) []interface{} { return []interface{}{ hubMachineHealthCheckStatus, + spokeMachineHealthCheckSpec, + spokeUnhealthyCondition, } } @@ -275,6 +290,7 @@ func hubMachineHealthCheckStatus(in *clusterv1.MachineHealthCheckStatus, c randf func MachinePoolFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { return []interface{}{ hubMachinePoolStatus, + spokeMachineSpec, } } @@ -301,3 +317,17 @@ func hubMachinePoolStatus(in *clusterv1.MachinePoolStatus, c randfill.Continue) in.Replicas = ptr.To(int32(0)) } } + +func spokeMachineHealthCheckSpec(in *MachineHealthCheckSpec, c randfill.Continue) { + c.FillNoCustom(in) + + if in.NodeStartupTimeout != nil { + in.NodeStartupTimeout = ptr.To[metav1.Duration](metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second}) + } +} + +func spokeUnhealthyCondition(in *UnhealthyCondition, c randfill.Continue) { + c.FillNoCustom(in) + + in.Timeout = metav1.Duration{Duration: time.Duration(c.Int31()) * time.Second} +} diff --git a/internal/api/core/v1alpha4/zz_generated.conversion.go b/internal/api/core/v1alpha4/zz_generated.conversion.go index 1188d68118b5..a78fdc00aaf6 100644 --- a/internal/api/core/v1alpha4/zz_generated.conversion.go +++ b/internal/api/core/v1alpha4/zz_generated.conversion.go @@ -299,11 +299,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*MachineSpec)(nil), (*v1beta2.MachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_MachineSpec_To_v1beta2_MachineSpec(a.(*MachineSpec), b.(*v1beta2.MachineSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*MachineTemplateSpec)(nil), (*v1beta2.MachineTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_MachineTemplateSpec_To_v1beta2_MachineTemplateSpec(a.(*MachineTemplateSpec), b.(*v1beta2.MachineTemplateSpec), scope) }); err != nil { @@ -399,6 +394,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*MachineSpec)(nil), (*v1beta2.MachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_MachineSpec_To_v1beta2_MachineSpec(a.(*MachineSpec), b.(*v1beta2.MachineSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*MachineStatus)(nil), (*v1beta2.MachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return 
Convert_v1alpha4_MachineStatus_To_v1beta2_MachineStatus(a.(*MachineStatus), b.(*v1beta2.MachineStatus), scope) }); err != nil { @@ -910,9 +910,9 @@ func autoConvert_v1beta2_ControlPlaneClass_To_v1alpha4_ControlPlaneClass(in *v1b out.MachineInfrastructure = (*LocalObjectTemplate)(unsafe.Pointer(in.MachineInfrastructure)) // WARNING: in.MachineHealthCheck requires manual conversion: does not exist in peer-type // WARNING: in.NamingStrategy requires manual conversion: does not exist in peer-type - // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type - // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type - // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type // WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type return nil } @@ -936,9 +936,9 @@ func autoConvert_v1beta2_ControlPlaneTopology_To_v1alpha4_ControlPlaneTopology(i } out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) // WARNING: in.MachineHealthCheck requires manual conversion: does not exist in peer-type - // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type - // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type - // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type // WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type // WARNING: in.Variables requires manual conversion: does not exist in peer-type return nil @@ -1071,9 +1071,9 @@ func autoConvert_v1beta2_MachineDeploymentClass_To_v1alpha4_MachineDeploymentCla // WARNING: in.MachineHealthCheck requires manual conversion: does not exist in peer-type // WARNING: in.FailureDomain requires manual conversion: does not exist in peer-type // WARNING: in.NamingStrategy requires manual conversion: does not exist in peer-type - // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type - // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type - // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type // WARNING: in.MinReadySeconds requires manual conversion: does not exist in peer-type // WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type // WARNING: in.Strategy requires manual conversion: does not exist in peer-type @@ -1303,9 +1303,9 @@ func autoConvert_v1beta2_MachineDeploymentTopology_To_v1alpha4_MachineDeployment // WARNING: in.FailureDomain requires manual conversion: does not 
exist in peer-type out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) // WARNING: in.MachineHealthCheck requires manual conversion: does not exist in peer-type - // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type - // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type - // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type // WARNING: in.MinReadySeconds requires manual conversion: does not exist in peer-type // WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type // WARNING: in.Strategy requires manual conversion: does not exist in peer-type @@ -1393,7 +1393,7 @@ func autoConvert_v1alpha4_MachineHealthCheckSpec_To_v1beta2_MachineHealthCheckSp // WARNING: in.UnhealthyConditions requires manual conversion: does not exist in peer-type out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy)) out.UnhealthyRange = (*string)(unsafe.Pointer(in.UnhealthyRange)) - out.NodeStartupTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeStartupTimeout)) + // WARNING: in.NodeStartupTimeout requires manual conversion: does not exist in peer-type out.RemediationTemplate = (*corev1.ObjectReference)(unsafe.Pointer(in.RemediationTemplate)) return nil } @@ -1404,7 +1404,7 @@ func autoConvert_v1beta2_MachineHealthCheckSpec_To_v1alpha4_MachineHealthCheckSp // WARNING: in.UnhealthyNodeConditions requires manual conversion: does not exist in peer-type out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy)) out.UnhealthyRange = (*string)(unsafe.Pointer(in.UnhealthyRange)) - out.NodeStartupTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeStartupTimeout)) + // WARNING: in.NodeStartupTimeoutSeconds requires manual conversion: does not exist in peer-type out.RemediationTemplate = (*corev1.ObjectReference)(unsafe.Pointer(in.RemediationTemplate)) return nil } @@ -1849,15 +1849,10 @@ func autoConvert_v1alpha4_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, ou out.Version = (*string)(unsafe.Pointer(in.Version)) out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) - out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) + // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type return nil } -// Convert_v1alpha4_MachineSpec_To_v1beta2_MachineSpec is an autogenerated conversion function. 
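// Editor's note: the manual conversions in this patch all funnel through the
// clusterv1.ConvertToSeconds/ConvertFromSeconds helpers, whose definitions sit
// outside the hunks shown here. A minimal sketch of what they plausibly look
// like, assuming nil passthrough and whole-second truncation (hypothetical
// code, not part of this diff):
//
//	func ConvertToSeconds(in *metav1.Duration) *int32 {
//		if in == nil {
//			return nil
//		}
//		return ptr.To(int32(in.Duration / time.Second))
//	}
//
//	func ConvertFromSeconds(in *int32) *metav1.Duration {
//		if in == nil {
//			return nil
//		}
//		return &metav1.Duration{Duration: time.Duration(*in) * time.Second}
//	}
//
// Under these assumptions sub-second precision is lost on conversion, which is
// why the fuzz functions above pin spoke durations to whole seconds before
// round-tripping.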
@@ -1849,15 +1849,10 @@ func autoConvert_v1alpha4_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, ou
 	out.Version = (*string)(unsafe.Pointer(in.Version))
 	out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID))
 	out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain))
-	out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout))
+	// WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type
 	return nil
 }
 
-// Convert_v1alpha4_MachineSpec_To_v1beta2_MachineSpec is an autogenerated conversion function.
-func Convert_v1alpha4_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, out *v1beta2.MachineSpec, s conversion.Scope) error {
-	return autoConvert_v1alpha4_MachineSpec_To_v1beta2_MachineSpec(in, out, s)
-}
-
 func autoConvert_v1beta2_MachineSpec_To_v1alpha4_MachineSpec(in *v1beta2.MachineSpec, out *MachineSpec, s conversion.Scope) error {
 	out.ClusterName = in.ClusterName
 	if err := Convert_v1beta2_Bootstrap_To_v1alpha4_Bootstrap(&in.Bootstrap, &out.Bootstrap, s); err != nil {
@@ -1869,9 +1864,9 @@ func autoConvert_v1beta2_MachineSpec_To_v1alpha4_MachineSpec(in *v1beta2.Machine
 	out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain))
 	// WARNING: in.MinReadySeconds requires manual conversion: does not exist in peer-type
 	// WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type
-	out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout))
-	// WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type
-	// WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type
+	// WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type
+	// WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type
+	// WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type
 	return nil
 }
diff --git a/internal/contract/controlplane.go b/internal/contract/controlplane.go
index a0d9c8c31669..480c69d03361 100644
--- a/internal/contract/controlplane.go
+++ b/internal/contract/controlplane.go
@@ -362,6 +362,27 @@ func (c *ControlPlaneMachineTemplate) NodeDeletionTimeout() *Duration {
 	}
 }
 
+// NodeDrainTimeoutSeconds provides access to the nodeDrainTimeoutSeconds of a MachineTemplate.
+func (c *ControlPlaneMachineTemplate) NodeDrainTimeoutSeconds() *Int32 {
+	return &Int32{
+		path: Path{"spec", "machineTemplate", "nodeDrainTimeoutSeconds"},
+	}
+}
+
+// NodeVolumeDetachTimeoutSeconds provides access to the nodeVolumeDetachTimeoutSeconds of a MachineTemplate.
+func (c *ControlPlaneMachineTemplate) NodeVolumeDetachTimeoutSeconds() *Int32 {
+	return &Int32{
+		path: Path{"spec", "machineTemplate", "nodeVolumeDetachTimeoutSeconds"},
+	}
+}
+
+// NodeDeletionTimeoutSeconds provides access to the nodeDeletionTimeoutSeconds of a MachineTemplate.
+func (c *ControlPlaneMachineTemplate) NodeDeletionTimeoutSeconds() *Int32 {
+	return &Int32{
+		path: Path{"spec", "machineTemplate", "nodeDeletionTimeoutSeconds"},
+	}
+}
+
 // ReadinessGates provides access to control plane's ReadinessGates.
 func (c *ControlPlaneMachineTemplate) ReadinessGates() *ReadinessGates {
 	return &ReadinessGates{}
diff --git a/internal/contract/version.go b/internal/contract/version.go
index a69cf9e2eb77..59df450095bf 100644
--- a/internal/contract/version.go
+++ b/internal/contract/version.go
@@ -121,3 +121,26 @@ func getLatestAPIVersionFromContract(metadata metav1.Object, currentContractVers
 
 	return "", "", errors.Errorf("cannot find any versions matching contract versions %q for CRD %v as contract version label(s) are either missing or empty (see https://cluster-api.sigs.k8s.io/developer/providers/contracts.html#api-version-labels)", sortedCompatibleContractVersions, metadata.GetName())
 }
+
+// GetContractVersionForVersion gets the contract version for a specific apiVersion.
+func GetContractVersionForVersion(ctx context.Context, c client.Client, gvk schema.GroupVersionKind, version string) (string, error) {
+	crdMetadata, err := util.GetGVKMetadata(ctx, c, gvk)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to get contract version")
+	}
+
+	contractPrefix := fmt.Sprintf("%s/", clusterv1.GroupVersion.Group)
+	for labelKey, labelValue := range crdMetadata.GetLabels() {
+		if !strings.HasPrefix(labelKey, contractPrefix) {
+			continue
+		}
+
+		for _, v := range strings.Split(labelValue, "_") {
+			if v == version {
+				return strings.TrimPrefix(labelKey, contractPrefix), nil
+			}
+		}
+	}
+
+	return "", errors.Errorf("cannot find any contract version matching version %q for CRD %v", version, crdMetadata.GetName())
+}
diff --git a/internal/contract/version_test.go b/internal/contract/version_test.go
index 51b5c966ee3c..1219af21dc4d 100644
--- a/internal/contract/version_test.go
+++ b/internal/contract/version_test.go
@@ -193,3 +193,94 @@ func TestGetLatestAPIVersionFromContract(t *testing.T) {
 		})
 	}
 }
+
+func TestGetContractVersionForVersion(t *testing.T) {
+	testCases := []struct {
+		name                    string
+		crdLabels               map[string]string
+		version                 string
+		expectedContractVersion string
+		expectError             bool
+	}{
+		{
+			name:                    "no contract labels",
+			crdLabels:               nil,
+			version:                 "v1alpha3",
+			expectedContractVersion: "",
+			expectError:             true,
+		},
+		{
+			name: "pick v1beta1 for v1alpha1",
+			crdLabels: map[string]string{
+				"cluster.x-k8s.io/v1beta1": "v1alpha1_v1alpha2",
+				"cluster.x-k8s.io/v1beta2": "v1alpha3_v1alpha4",
+			},
+			version:                 "v1alpha1",
+			expectedContractVersion: "v1beta1",
+			expectError:             false,
+		},
+		{
+			name: "pick v1beta1 for v1alpha2",
+			crdLabels: map[string]string{
+				"cluster.x-k8s.io/v1beta1": "v1alpha1_v1alpha2",
+				"cluster.x-k8s.io/v1beta2": "v1alpha3_v1alpha4",
+			},
+			version:                 "v1alpha2",
+			expectedContractVersion: "v1beta1",
+			expectError:             false,
+		},
+		{
+			name: "pick v1beta2 for v1alpha3",
+			crdLabels: map[string]string{
+				"cluster.x-k8s.io/v1beta1": "v1alpha1_v1alpha2",
+				"cluster.x-k8s.io/v1beta2": "v1alpha3_v1alpha4",
+			},
+			version:                 "v1alpha3",
+			expectedContractVersion: "v1beta2",
+			expectError:             false,
+		},
+		{
+			name: "pick v1beta2 for v1alpha4",
+			crdLabels: map[string]string{
+				"cluster.x-k8s.io/v1beta1": "v1alpha1_v1alpha2",
+				"cluster.x-k8s.io/v1beta2": "v1alpha3_v1alpha4",
+			},
+			version:                 "v1alpha4",
+			expectedContractVersion: "v1beta2",
+			expectError:             false,
+		},
+		{
+			name: "error when the version matches no contract label",
+			crdLabels: map[string]string{
+				"cluster.x-k8s.io/v1beta1": "v1alpha1_v1alpha2",
+				"cluster.x-k8s.io/v1beta2": "v1alpha3_v1alpha4",
+			},
+			version:     "v1alpha5",
+			expectError: true,
+		},
+	}
+
+	for _, tt := range testCases {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			gvk := clusterv1.GroupVersionBootstrap.WithKind("TestBootstrapConfig")
+
+			u := &unstructured.Unstructured{}
+			u.SetName(contract.CalculateCRDName(gvk.Group, gvk.Kind))
+			u.SetGroupVersionKind(apiextensionsv1.SchemeGroupVersion.WithKind("CustomResourceDefinition"))
+			u.SetLabels(tt.crdLabels)
+
+			fakeClient := fake.NewClientBuilder().WithObjects(u).Build()
+
+			contractVersion, err := GetContractVersionForVersion(t.Context(), fakeClient, gvk, tt.version)
+
+			if tt.expectError {
+				g.Expect(err).To(HaveOccurred())
+			} else {
+				g.Expect(err).ToNot(HaveOccurred())
+			}
+			g.Expect(contractVersion).To(Equal(tt.expectedContractVersion))
+		})
+	}
+}
diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go
index 074604b27683..a5b7765b1847 100644
--- a/internal/controllers/machine/machine_controller.go
+++ b/internal/controllers/machine/machine_controller.go
@@ -616,7 +616,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result
 			r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDeleteNode", "error deleting Machine's node: %v", deleteNodeErr)
 
 			// If the node deletion timeout is not expired yet, requeue the Machine for reconciliation.
-			if m.Spec.NodeDeletionTimeout == nil || m.Spec.NodeDeletionTimeout.Nanoseconds() == 0 || m.DeletionTimestamp.Add(m.Spec.NodeDeletionTimeout.Duration).After(time.Now()) {
+			if m.Spec.NodeDeletionTimeoutSeconds == nil || *m.Spec.NodeDeletionTimeoutSeconds == 0 || m.DeletionTimestamp.Add(time.Duration(*m.Spec.NodeDeletionTimeoutSeconds)*time.Second).After(time.Now()) {
 				s.deletingReason = clusterv1.MachineDeletingDeletingNodeReason
 				s.deletingMessage = "Error deleting Node, please check controller logs for errors"
 				return ctrl.Result{}, deleteNodeErr
@@ -676,8 +676,8 @@ func (r *Reconciler) isNodeVolumeDetachingAllowed(m *clusterv1.Machine) bool {
 }
 
 func (r *Reconciler) nodeDrainTimeoutExceeded(machine *clusterv1.Machine) bool {
-	// if the NodeDrainTimeout type is not set by user
-	if machine.Status.Deletion == nil || machine.Spec.NodeDrainTimeout == nil || machine.Spec.NodeDrainTimeout.Seconds() <= 0 {
+	// if NodeDrainTimeoutSeconds is not set by the user
+	if machine.Status.Deletion == nil || machine.Spec.NodeDrainTimeoutSeconds == nil || *machine.Spec.NodeDrainTimeoutSeconds <= 0 {
 		return false
 	}
@@ -688,15 +688,15 @@ func (r *Reconciler) nodeDrainTimeoutExceeded(machine *clusterv1.Machine) bool {
 	now := time.Now()
 	diff := now.Sub(machine.Status.Deletion.NodeDrainStartTime.Time)
-	return diff.Seconds() >= machine.Spec.NodeDrainTimeout.Seconds()
+	return diff.Seconds() >= float64(*machine.Spec.NodeDrainTimeoutSeconds)
 }
 
-// nodeVolumeDetachTimeoutExceeded returns False if either NodeVolumeDetachTimeout is set to nil or <=0 OR
+// nodeVolumeDetachTimeoutExceeded returns false if NodeVolumeDetachTimeoutSeconds is nil or <= 0, or if
 // WaitForNodeVolumeDetachStartTime is not set on the Machine. Otherwise returns true if the timeout is expired
 // since the WaitForNodeVolumeDetachStartTime.
 func (r *Reconciler) nodeVolumeDetachTimeoutExceeded(machine *clusterv1.Machine) bool {
-	// if the NodeVolumeDetachTimeout type is not set by user
-	if machine.Status.Deletion == nil || machine.Spec.NodeVolumeDetachTimeout == nil || machine.Spec.NodeVolumeDetachTimeout.Seconds() <= 0 {
+	// if NodeVolumeDetachTimeoutSeconds is not set by the user
+	if machine.Status.Deletion == nil || machine.Spec.NodeVolumeDetachTimeoutSeconds == nil || *machine.Spec.NodeVolumeDetachTimeoutSeconds <= 0 {
 		return false
 	}
@@ -707,7 +707,7 @@ func (r *Reconciler) nodeVolumeDetachTimeoutExceeded(machine *clusterv1.Machine)
 	now := time.Now()
 	diff := now.Sub(machine.Status.Deletion.WaitForNodeVolumeDetachStartTime.Time)
-	return diff.Seconds() >= machine.Spec.NodeVolumeDetachTimeout.Seconds()
+	return diff.Seconds() >= float64(*machine.Spec.NodeVolumeDetachTimeoutSeconds)
 }
 
 // isDeleteNodeAllowed returns nil only if the Machine's NodeRef is not nil
diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go
index 9f72b1d29431..3cef9a2f5bdd 100644
--- a/internal/controllers/machine/machine_controller_test.go
+++ b/internal/controllers/machine/machine_controller_test.go
@@ -1485,10 +1485,10 @@ func TestIsNodeDrainedAllowed(t *testing.T) {
 					Finalizers: []string{clusterv1.MachineFinalizer},
 				},
 				Spec: clusterv1.MachineSpec{
-					ClusterName:       "test-cluster",
-					InfrastructureRef: corev1.ObjectReference{},
-					Bootstrap:         clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
-					NodeDrainTimeout:  &metav1.Duration{Duration: time.Second * 60},
+					ClusterName:             "test-cluster",
+					InfrastructureRef:       corev1.ObjectReference{},
+					Bootstrap:               clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
+					NodeDrainTimeoutSeconds: ptr.To(int32(60)),
 				},
 
 				Status: clusterv1.MachineStatus{
@@ -1508,10 +1508,10 @@ func TestIsNodeDrainedAllowed(t *testing.T) {
 					Finalizers: []string{clusterv1.MachineFinalizer},
 				},
 				Spec: clusterv1.MachineSpec{
-					ClusterName:       "test-cluster",
-					InfrastructureRef: corev1.ObjectReference{},
-					Bootstrap:         clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
-					NodeDrainTimeout:  &metav1.Duration{Duration: time.Second * 60},
+					ClusterName:             "test-cluster",
+					InfrastructureRef:       corev1.ObjectReference{},
+					Bootstrap:               clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
+					NodeDrainTimeoutSeconds: ptr.To(int32(60)),
 				},
 				Status: clusterv1.MachineStatus{
 					Deletion: &clusterv1.MachineDeletionStatus{
@@ -1522,7 +1522,7 @@ func TestIsNodeDrainedAllowed(t *testing.T) {
 			expected: true,
 		},
 		{
-			name: "NodeDrainTimeout option is set to its default value 0",
+			name: "NodeDrainTimeoutSeconds option is set to its default value 0",
 			machine: &clusterv1.Machine{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "test-machine",
@@ -2046,10 +2046,10 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) {
 					Finalizers: []string{clusterv1.MachineFinalizer},
 				},
 				Spec: clusterv1.MachineSpec{
-					ClusterName:             "test-cluster",
-					InfrastructureRef:       corev1.ObjectReference{},
-					Bootstrap:               clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
-					NodeVolumeDetachTimeout: &metav1.Duration{Duration: time.Second * 30},
+					ClusterName:                    "test-cluster",
+					InfrastructureRef:              corev1.ObjectReference{},
+					Bootstrap:                      clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
+					NodeVolumeDetachTimeoutSeconds: ptr.To(int32(30)),
 				},
 
 				Status: clusterv1.MachineStatus{
@@ -2069,10 +2069,10 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) {
 					Finalizers: []string{clusterv1.MachineFinalizer},
 				},
 				Spec: clusterv1.MachineSpec{
-					ClusterName:             "test-cluster",
-					InfrastructureRef:       corev1.ObjectReference{},
-					Bootstrap:               clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
-					NodeVolumeDetachTimeout: &metav1.Duration{Duration: time.Second * 60},
+					ClusterName:                    "test-cluster",
+					InfrastructureRef:              corev1.ObjectReference{},
+					Bootstrap:                      clusterv1.Bootstrap{DataSecretName: ptr.To("data")},
+					NodeVolumeDetachTimeoutSeconds: ptr.To(int32(60)),
 				},
 				Status: clusterv1.MachineStatus{
 					Deletion: &clusterv1.MachineDeletionStatus{
@@ -3366,20 +3366,20 @@ func TestNodeDeletion(t *testing.T) {
 	}
 
 	testCases := []struct {
-		name                 string
-		deletionTimeout      *metav1.Duration
-		resultErr            bool
-		clusterDeleted       bool
-		expectNodeDeletion   bool
-		expectDeletingReason string
-		createFakeClient     func(...client.Object) client.Client
+		name                   string
+		deletionTimeoutSeconds *int32
+		resultErr              bool
+		clusterDeleted         bool
+		expectNodeDeletion     bool
+		expectDeletingReason   string
+		createFakeClient       func(...client.Object) client.Client
 	}{
 		{
-			name:                 "should return no error when deletion is successful",
-			deletionTimeout:      &metav1.Duration{Duration: time.Second},
-			resultErr:            false,
-			expectNodeDeletion:   true,
-			expectDeletingReason: clusterv1.MachineDeletingDeletionCompletedReason,
+			name:                   "should return no error when deletion is successful",
+			deletionTimeoutSeconds: ptr.To(int32(1)),
+			resultErr:              false,
+			expectNodeDeletion:     true,
+			expectDeletingReason:   clusterv1.MachineDeletingDeletionCompletedReason,
 			createFakeClient: func(initObjs ...client.Object) client.Client {
 				return fake.NewClientBuilder().
 					WithObjects(initObjs...).
@@ -3388,11 +3388,11 @@ func TestNodeDeletion(t *testing.T) {
 			},
 		},
 		{
-			name:                 "should return an error when timeout is not expired and node deletion fails",
-			deletionTimeout:      &metav1.Duration{Duration: time.Hour},
-			resultErr:            true,
-			expectNodeDeletion:   false,
-			expectDeletingReason: clusterv1.MachineDeletingDeletingNodeReason,
+			name:                   "should return an error when timeout is not expired and node deletion fails",
+			deletionTimeoutSeconds: ptr.To(int32(60 * 60)),
+			resultErr:              true,
+			expectNodeDeletion:     false,
+			expectDeletingReason:   clusterv1.MachineDeletingDeletingNodeReason,
 			createFakeClient: func(initObjs ...client.Object) client.Client {
 				fc := fake.NewClientBuilder().
 					WithObjects(initObjs...).
@@ -3402,11 +3402,11 @@ func TestNodeDeletion(t *testing.T) {
 			},
 		},
 		{
-			name:                 "should return an error when timeout is infinite and node deletion fails",
-			deletionTimeout:      &metav1.Duration{Duration: 0}, // should lead to infinite timeout
-			resultErr:            true,
-			expectNodeDeletion:   false,
-			expectDeletingReason: clusterv1.MachineDeletingDeletingNodeReason,
+			name:                   "should return an error when timeout is infinite and node deletion fails",
+			deletionTimeoutSeconds: ptr.To(int32(0)), // should lead to infinite timeout
+			resultErr:              true,
+			expectNodeDeletion:     false,
+			expectDeletingReason:   clusterv1.MachineDeletingDeletingNodeReason,
 			createFakeClient: func(initObjs ...client.Object) client.Client {
 				fc := fake.NewClientBuilder().
 					WithObjects(initObjs...).
@@ -3416,11 +3416,11 @@ func TestNodeDeletion(t *testing.T) {
 			},
 		},
 		{
-			name:                 "should not return an error when timeout is expired and node deletion fails",
-			deletionTimeout:      &metav1.Duration{Duration: time.Millisecond},
-			resultErr:            false,
-			expectNodeDeletion:   false,
-			expectDeletingReason: clusterv1.DeletionCompletedReason,
+			name:                   "should not return an error when timeout is expired and node deletion fails",
+			deletionTimeoutSeconds: ptr.To(int32(1)),
+			resultErr:              false,
+			expectNodeDeletion:     false,
+			expectDeletingReason:   clusterv1.DeletionCompletedReason,
 			createFakeClient: func(initObjs ...client.Object) client.Client {
 				fc := fake.NewClientBuilder().
 					WithObjects(initObjs...).
@@ -3430,12 +3430,12 @@ func TestNodeDeletion(t *testing.T) {
 			},
 		},
 		{
-			name:                 "should not delete the node or return an error when the cluster is marked for deletion",
-			deletionTimeout:      nil, // should lead to infinite timeout
-			resultErr:            false,
-			clusterDeleted:       true,
-			expectNodeDeletion:   false,
-			expectDeletingReason: clusterv1.DeletionCompletedReason,
+			name:                   "should not delete the node or return an error when the cluster is marked for deletion",
+			deletionTimeoutSeconds: nil, // should lead to infinite timeout
+			resultErr:              false,
+			clusterDeleted:         true,
+			expectNodeDeletion:     false,
+			expectDeletingReason:   clusterv1.DeletionCompletedReason,
 			createFakeClient: func(initObjs ...client.Object) client.Client {
 				fc := fake.NewClientBuilder().
 					WithObjects(initObjs...).
@@ -3451,7 +3451,7 @@ func TestNodeDeletion(t *testing.T) {
 			g := NewWithT(t)
 
 			m := testMachine.DeepCopy()
-			m.Spec.NodeDeletionTimeout = tc.deletionTimeout
+			m.Spec.NodeDeletionTimeoutSeconds = tc.deletionTimeoutSeconds
 
 			fakeClient := tc.createFakeClient(node, m, cpmachine1)
 
@@ -3559,19 +3559,19 @@ func TestNodeDeletionWithoutNodeRefFallback(t *testing.T) {
 	}
 
 	testCases := []struct {
-		name                 string
-		deletionTimeout      *metav1.Duration
-		resultErr            bool
-		expectNodeDeletion   bool
-		expectDeletingReason string
-		createFakeClient     func(...client.Object) client.Client
+		name                   string
+		deletionTimeoutSeconds *int32
+		resultErr              bool
+		expectNodeDeletion     bool
+		expectDeletingReason   string
+		createFakeClient       func(...client.Object) client.Client
 	}{
 		{
-			name:                 "should return no error when the node exists and matches the provider id",
-			deletionTimeout:      &metav1.Duration{Duration: time.Second},
-			resultErr:            false,
-			expectNodeDeletion:   true,
-			expectDeletingReason: clusterv1.MachineDeletingDeletionCompletedReason,
+			name:                   "should return no error when the node exists and matches the provider id",
+			deletionTimeoutSeconds: ptr.To(int32(1)),
+			resultErr:              false,
+			expectNodeDeletion:     true,
+			expectDeletingReason:   clusterv1.MachineDeletingDeletionCompletedReason,
 			createFakeClient: func(initObjs ...client.Object) client.Client {
 				return fake.NewClientBuilder().
 					WithObjects(initObjs...).
@@ -3585,7 +3585,7 @@ func TestNodeDeletionWithoutNodeRefFallback(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(*testing.T) {
 			m := testMachine.DeepCopy()
-			m.Spec.NodeDeletionTimeout = tc.deletionTimeout
+			m.Spec.NodeDeletionTimeoutSeconds = tc.deletionTimeoutSeconds
 
 			fakeClient := tc.createFakeClient(node, m, cpmachine1)
 
diff --git a/internal/controllers/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/machinedeployment/machinedeployment_controller_test.go
index b1d9f2c67a9f..24775bd7dfa9 100644
--- a/internal/controllers/machinedeployment/machinedeployment_controller_test.go
+++ b/internal/controllers/machinedeployment/machinedeployment_controller_test.go
@@ -19,7 +19,6 @@ package machinedeployment
 import (
 	"context"
 	"testing"
-	"time"
 
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
@@ -342,40 +341,40 @@ func TestMachineDeploymentReconciler(t *testing.T) {
 			g.Expect(machineSets.Items[1].Spec.Template.Labels).ShouldNot(HaveKeyWithValue("updated", "true"))
 		}, timeout).Should(Succeed())
 
-		// Update the NodeDrainTimout, NodeDeletionTimeout, NodeVolumeDetachTimeout of the MachineDeployment,
+		// Update the NodeDrainTimeoutSeconds, NodeDeletionTimeoutSeconds, NodeVolumeDetachTimeoutSeconds of the MachineDeployment,
 		// expect the Reconcile to be called and the MachineSet to be updated in-place.
-		t.Log("Setting NodeDrainTimout, NodeDeletionTimeout, NodeVolumeDetachTimeout on the MachineDeployment")
-		duration10s := metav1.Duration{Duration: 10 * time.Second}
+		t.Log("Setting NodeDrainTimeoutSeconds, NodeDeletionTimeoutSeconds, NodeVolumeDetachTimeoutSeconds on the MachineDeployment")
+		duration10s := int32(10)
 		modifyFunc = func(d *clusterv1.MachineDeployment) {
-			d.Spec.Template.Spec.NodeDrainTimeout = &duration10s
-			d.Spec.Template.Spec.NodeDeletionTimeout = &duration10s
-			d.Spec.Template.Spec.NodeVolumeDetachTimeout = &duration10s
+			d.Spec.Template.Spec.NodeDrainTimeoutSeconds = &duration10s
+			d.Spec.Template.Spec.NodeDeletionTimeoutSeconds = &duration10s
+			d.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = &duration10s
 		}
 		g.Expect(updateMachineDeployment(ctx, env, deployment, modifyFunc)).To(Succeed())
 		g.Eventually(func(g Gomega) {
 			g.Expect(env.List(ctx, machineSets, msListOpts...)).Should(Succeed())
 			// Verify we still only have 2 MachineSets.
 			g.Expect(machineSets.Items).To(HaveLen(2))
-			// Verify the NodeDrainTimeout value is updated
-			g.Expect(machineSets.Items[0].Spec.Template.Spec.NodeDrainTimeout).Should(And(
+			// Verify the NodeDrainTimeoutSeconds value is updated
+			g.Expect(machineSets.Items[0].Spec.Template.Spec.NodeDrainTimeoutSeconds).Should(And(
 				Not(BeNil()),
 				HaveValue(Equal(duration10s)),
 			), "NodeDrainTimout value does not match expected")
-			// Verify the NodeDeletionTimeout value is updated
-			g.Expect(machineSets.Items[0].Spec.Template.Spec.NodeDeletionTimeout).Should(And(
+			// Verify the NodeDeletionTimeoutSeconds value is updated
+			g.Expect(machineSets.Items[0].Spec.Template.Spec.NodeDeletionTimeoutSeconds).Should(And(
 				Not(BeNil()),
 				HaveValue(Equal(duration10s)),
-			), "NodeDeletionTimeout value does not match expected")
-			// Verify the NodeVolumeDetachTimeout value is updated
-			g.Expect(machineSets.Items[0].Spec.Template.Spec.NodeVolumeDetachTimeout).Should(And(
+			), "NodeDeletionTimeoutSeconds value does not match expected")
+			// Verify the NodeVolumeDetachTimeoutSeconds value is updated
+			g.Expect(machineSets.Items[0].Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds).Should(And(
 				Not(BeNil()),
 				HaveValue(Equal(duration10s)),
-			), "NodeVolumeDetachTimeout value does not match expected")
+			), "NodeVolumeDetachTimeoutSeconds value does not match expected")
 			// Verify that the old machine set keeps the old values.
-			g.Expect(machineSets.Items[1].Spec.Template.Spec.NodeDrainTimeout).Should(BeNil())
-			g.Expect(machineSets.Items[1].Spec.Template.Spec.NodeDeletionTimeout).Should(BeNil())
-			g.Expect(machineSets.Items[1].Spec.Template.Spec.NodeVolumeDetachTimeout).Should(BeNil())
+			g.Expect(machineSets.Items[1].Spec.Template.Spec.NodeDrainTimeoutSeconds).Should(BeNil())
+			g.Expect(machineSets.Items[1].Spec.Template.Spec.NodeDeletionTimeoutSeconds).Should(BeNil())
+			g.Expect(machineSets.Items[1].Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds).Should(BeNil())
 		}).Should(Succeed())
 
 		// Update the DeletePolicy of the MachineDeployment,
diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go
index 30a096566238..f59f91bc8256 100644
--- a/internal/controllers/machinedeployment/machinedeployment_sync.go
+++ b/internal/controllers/machinedeployment/machinedeployment_sync.go
@@ -327,9 +327,9 @@ func (r *Reconciler) computeDesiredMachineSet(ctx context.Context, deployment *c
 		desiredMS.Spec.DeletePolicy = ""
 	}
 	desiredMS.Spec.Template.Spec.ReadinessGates = deployment.Spec.Template.Spec.ReadinessGates
-	desiredMS.Spec.Template.Spec.NodeDrainTimeout = deployment.Spec.Template.Spec.NodeDrainTimeout
-	desiredMS.Spec.Template.Spec.NodeDeletionTimeout = deployment.Spec.Template.Spec.NodeDeletionTimeout
-	desiredMS.Spec.Template.Spec.NodeVolumeDetachTimeout = deployment.Spec.Template.Spec.NodeVolumeDetachTimeout
+	desiredMS.Spec.Template.Spec.NodeDrainTimeoutSeconds = deployment.Spec.Template.Spec.NodeDrainTimeoutSeconds
+	desiredMS.Spec.Template.Spec.NodeDeletionTimeoutSeconds = deployment.Spec.Template.Spec.NodeDeletionTimeoutSeconds
+	desiredMS.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = deployment.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds
 	desiredMS.Spec.MachineNamingStrategy = deployment.Spec.MachineNamingStrategy
 
 	return desiredMS, nil
diff --git a/internal/controllers/machinedeployment/machinedeployment_sync_test.go b/internal/controllers/machinedeployment/machinedeployment_sync_test.go
index 1ecd8efdb83f..20fd6b4d6ce4 100644
--- a/internal/controllers/machinedeployment/machinedeployment_sync_test.go
+++ b/internal/controllers/machinedeployment/machinedeployment_sync_test.go
@@ -21,7 +21,6 @@ import (
 	"fmt"
 	"strings"
 	"testing"
-	"time"
 
 	. "github.com/onsi/gomega"
 	"github.com/pkg/errors"
@@ -517,8 +516,8 @@ func TestSyncDeploymentStatus(t *testing.T) {
 }
 
 func TestComputeDesiredMachineSet(t *testing.T) {
-	duration5s := &metav1.Duration{Duration: 5 * time.Second}
-	duration10s := &metav1.Duration{Duration: 10 * time.Second}
+	duration5s := ptr.To(int32(5))
+	duration10s := ptr.To(int32(10))
 	namingTemplateKey := "test"
 
 	infraRef := corev1.ObjectReference{
@@ -566,11 +565,11 @@ func TestComputeDesiredMachineSet(t *testing.T) {
 				Bootstrap: clusterv1.Bootstrap{
 					ConfigRef: &bootstrapRef,
 				},
-				MinReadySeconds:         ptr.To[int32](3),
-				ReadinessGates:          []clusterv1.MachineReadinessGate{{ConditionType: "foo"}},
-				NodeDrainTimeout:        duration10s,
-				NodeVolumeDetachTimeout: duration10s,
-				NodeDeletionTimeout:     duration10s,
+				MinReadySeconds:                ptr.To[int32](3),
+				ReadinessGates:                 []clusterv1.MachineReadinessGate{{ConditionType: "foo"}},
+				NodeDrainTimeoutSeconds:        duration10s,
+				NodeVolumeDetachTimeoutSeconds: duration10s,
+				NodeDeletionTimeoutSeconds:     duration10s,
 			},
 		},
 	},
@@ -638,9 +637,9 @@ func TestComputeDesiredMachineSet(t *testing.T) {
 			}
 			existingMS.Spec.Template.Annotations = nil
 			existingMS.Spec.Template.Spec.ReadinessGates = []clusterv1.MachineReadinessGate{{ConditionType: "bar"}}
-			existingMS.Spec.Template.Spec.NodeDrainTimeout = duration5s
-			existingMS.Spec.Template.Spec.NodeDeletionTimeout = duration5s
-			existingMS.Spec.Template.Spec.NodeVolumeDetachTimeout = duration5s
+			existingMS.Spec.Template.Spec.NodeDrainTimeoutSeconds = duration5s
+			existingMS.Spec.Template.Spec.NodeDeletionTimeoutSeconds = duration5s
+			existingMS.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = duration5s
 			existingMS.Spec.DeletePolicy = string(clusterv1.NewestMachineSetDeletePolicy)
 			existingMS.Spec.Template.Spec.MinReadySeconds = ptr.To[int32](0)
@@ -678,9 +677,9 @@ func TestComputeDesiredMachineSet(t *testing.T) {
 			}
 			existingMS.Spec.Template.Annotations = nil
 			existingMS.Spec.Template.Spec.ReadinessGates = []clusterv1.MachineReadinessGate{{ConditionType: "bar"}}
-			existingMS.Spec.Template.Spec.NodeDrainTimeout = duration5s
-			existingMS.Spec.Template.Spec.NodeDeletionTimeout = duration5s
-			existingMS.Spec.Template.Spec.NodeVolumeDetachTimeout = duration5s
+			existingMS.Spec.Template.Spec.NodeDrainTimeoutSeconds = duration5s
+			existingMS.Spec.Template.Spec.NodeDeletionTimeoutSeconds = duration5s
+			existingMS.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = duration5s
 			existingMS.Spec.DeletePolicy = string(clusterv1.NewestMachineSetDeletePolicy)
 			existingMS.Spec.Template.Spec.MinReadySeconds = ptr.To[int32](0)
@@ -732,9 +731,9 @@ func TestComputeDesiredMachineSet(t *testing.T) {
 			}
 			existingMS.Spec.Template.Annotations = nil
 			existingMS.Spec.Template.Spec.ReadinessGates = []clusterv1.MachineReadinessGate{{ConditionType: "bar"}}
-			existingMS.Spec.Template.Spec.NodeDrainTimeout = duration5s
-			existingMS.Spec.Template.Spec.NodeDeletionTimeout = duration5s
-			existingMS.Spec.Template.Spec.NodeVolumeDetachTimeout = duration5s
+			existingMS.Spec.Template.Spec.NodeDrainTimeoutSeconds = duration5s
+			existingMS.Spec.Template.Spec.NodeDeletionTimeoutSeconds = duration5s
+			existingMS.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = duration5s
 			existingMS.Spec.DeletePolicy = string(clusterv1.NewestMachineSetDeletePolicy)
 			existingMS.Spec.Template.Spec.MinReadySeconds = ptr.To[int32](0)
diff --git a/internal/controllers/machinedeployment/mdutil/util.go b/internal/controllers/machinedeployment/mdutil/util.go
index 00291a3b03d5..941b3e5e3221 100644
--- a/internal/controllers/machinedeployment/mdutil/util.go
+++ b/internal/controllers/machinedeployment/mdutil/util.go
@@ -439,9 +439,9 @@ func MachineTemplateDeepCopyRolloutFields(template *clusterv1.MachineTemplateSpe
 	// Drop node timeout values
 	templateCopy.Spec.MinReadySeconds = nil
 	templateCopy.Spec.ReadinessGates = nil
-	templateCopy.Spec.NodeDrainTimeout = nil
-	templateCopy.Spec.NodeDeletionTimeout = nil
-	templateCopy.Spec.NodeVolumeDetachTimeout = nil
+	templateCopy.Spec.NodeDrainTimeoutSeconds = nil
+	templateCopy.Spec.NodeDeletionTimeoutSeconds = nil
+	templateCopy.Spec.NodeVolumeDetachTimeoutSeconds = nil
 
 	// Remove the version part from the references APIVersion field,
 	// for more details see issue #2183 and #2140.
diff --git a/internal/controllers/machinedeployment/mdutil/util_test.go b/internal/controllers/machinedeployment/mdutil/util_test.go
index 752106557fb9..7c2adf4b459a 100644
--- a/internal/controllers/machinedeployment/mdutil/util_test.go
+++ b/internal/controllers/machinedeployment/mdutil/util_test.go
@@ -89,7 +89,7 @@ func generateDeployment(image string) clusterv1.MachineDeployment {
 					Labels: machineLabels,
 				},
 				Spec: clusterv1.MachineSpec{
-					NodeDrainTimeout: &metav1.Duration{Duration: 10 * time.Second},
+					NodeDrainTimeoutSeconds: ptr.To(int32(10)),
 				},
 			},
 		},
@@ -178,13 +178,13 @@ func TestMachineTemplateUpToDate(t *testing.T) {
 			Annotations: map[string]string{"a1": "v1"},
 		},
 		Spec: clusterv1.MachineSpec{
-			NodeDrainTimeout:        &metav1.Duration{Duration: 10 * time.Second},
-			NodeDeletionTimeout:     &metav1.Duration{Duration: 10 * time.Second},
-			NodeVolumeDetachTimeout: &metav1.Duration{Duration: 10 * time.Second},
-			ClusterName:             "cluster1",
-			Version:                 ptr.To("v1.25.0"),
-			FailureDomain:           ptr.To("failure-domain1"),
-			MinReadySeconds:         ptr.To[int32](10),
+			NodeDrainTimeoutSeconds:        ptr.To(int32(10)),
+			NodeDeletionTimeoutSeconds:     ptr.To(int32(10)),
+			NodeVolumeDetachTimeoutSeconds: ptr.To(int32(10)),
+			ClusterName:                    "cluster1",
+			Version:                        ptr.To("v1.25.0"),
+			FailureDomain:                  ptr.To("failure-domain1"),
+			MinReadySeconds:                ptr.To[int32](10),
 			InfrastructureRef: corev1.ObjectReference{
 				Name:      "infra1",
 				Namespace: "default",
@@ -218,9 +218,9 @@ func TestMachineTemplateUpToDate(t *testing.T) {
 	machineTemplateWithDifferentInPlaceMutableSpecFields := machineTemplate.DeepCopy()
 	machineTemplateWithDifferentInPlaceMutableSpecFields.Spec.ReadinessGates = []clusterv1.MachineReadinessGate{{ConditionType: "foo"}}
-	machineTemplateWithDifferentInPlaceMutableSpecFields.Spec.NodeDrainTimeout = &metav1.Duration{Duration: 20 * time.Second}
-	machineTemplateWithDifferentInPlaceMutableSpecFields.Spec.NodeDeletionTimeout = &metav1.Duration{Duration: 20 * time.Second}
-	machineTemplateWithDifferentInPlaceMutableSpecFields.Spec.NodeVolumeDetachTimeout = &metav1.Duration{Duration: 20 * time.Second}
+	machineTemplateWithDifferentInPlaceMutableSpecFields.Spec.NodeDrainTimeoutSeconds = ptr.To(int32(20))
+	machineTemplateWithDifferentInPlaceMutableSpecFields.Spec.NodeDeletionTimeoutSeconds = ptr.To(int32(20))
+	machineTemplateWithDifferentInPlaceMutableSpecFields.Spec.NodeVolumeDetachTimeoutSeconds = ptr.To(int32(20))
 	machineTemplateWithDifferentInPlaceMutableSpecFields.Spec.MinReadySeconds = ptr.To[int32](20)
 
 	machineTemplateWithDifferentClusterName := machineTemplate.DeepCopy()
@@ -412,7 +412,7 @@ func TestFindNewMachineSet(t *testing.T) {
 	matchingMSHigherReplicas.Spec.Replicas = ptr.To[int32](2)
 
 	matchingMSDiffersInPlaceMutableFields := generateMS(deployment)
-	matchingMSDiffersInPlaceMutableFields.Spec.Template.Spec.NodeDrainTimeout = &metav1.Duration{Duration: 20 * time.Second}
+	matchingMSDiffersInPlaceMutableFields.Spec.Template.Spec.NodeDrainTimeoutSeconds = ptr.To(int32(20))
 
 	oldMS := generateMS(deployment)
 	oldMS.Spec.Template.Spec.InfrastructureRef.Name = "old-infra-ref"
diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go
index 25da8ef1dc5b..23cc2744e74e 100644
--- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go
+++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go
@@ -232,13 +232,13 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster
 	// do sort to avoid keep changing m.Status as the returned machines are not in order
 	sort.Strings(m.Status.Targets)
 
-	nodeStartupTimeout := m.Spec.NodeStartupTimeout
+	nodeStartupTimeout := m.Spec.NodeStartupTimeoutSeconds
 	if nodeStartupTimeout == nil {
-		nodeStartupTimeout = &clusterv1.DefaultNodeStartupTimeout
+		nodeStartupTimeout = &clusterv1.DefaultNodeStartupTimeoutSeconds
 	}
 
 	// health check all targets and reconcile mhc status
-	healthy, unhealthy, nextCheckTimes := r.healthCheckTargets(targets, logger, *nodeStartupTimeout)
+	healthy, unhealthy, nextCheckTimes := r.healthCheckTargets(targets, logger, metav1.Duration{Duration: time.Duration(*nodeStartupTimeout) * time.Second})
 	m.Status.CurrentHealthy = int32(len(healthy))
 
 	// check MHC current health against MaxUnhealthy
diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go
index 19eaad11c2db..fe19c9906d93 100644
--- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go
+++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go
@@ -924,7 +924,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) {
 		assertMachinesOwnerRemediated(g, mhc, 0)
 	})
 
-	t.Run("when a Machine has no Node ref for less than the NodeStartupTimeout", func(t *testing.T) {
+	t.Run("when a Machine has no Node ref for less than the NodeStartupTimeoutSeconds", func(t *testing.T) {
 		g := NewWithT(t)
 
 		cluster := createCluster(g, ns.Name)
@@ -937,7 +937,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) {
 		g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())
 
 		mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name)
-		mhc.Spec.NodeStartupTimeout = &metav1.Duration{Duration: 5 * time.Hour}
+		mhc.Spec.NodeStartupTimeoutSeconds = ptr.To(int32(5 * 60 * 60))
 
 		g.Expect(env.Create(ctx, mhc)).To(Succeed())
 		defer func(do ...client.Object) {
@@ -1004,12 +1004,12 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) {
 		assertMachinesOwnerRemediated(g, mhc, 0)
 	})
 
-	t.Run("when a Machine has no Node ref for longer than the NodeStartupTimeout", func(t *testing.T) {
+	t.Run("when a Machine has no Node ref for longer than the NodeStartupTimeoutSeconds", func(t *testing.T) {
 		g := NewWithT(t)
 
 		cluster := createCluster(g, ns.Name)
 
 		mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name)
-		mhc.Spec.NodeStartupTimeout = &metav1.Duration{Duration: 10 * time.Second}
+		mhc.Spec.NodeStartupTimeoutSeconds = ptr.To(int32(10))
 
 		g.Expect(env.Create(ctx, mhc)).To(Succeed())
 		defer func(do ...client.Object) {
@@ -1084,7 +1084,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) {
 		cluster := createCluster(g, ns.Name)
 
 		mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name)
-		mhc.Spec.NodeStartupTimeout = &metav1.Duration{Duration: 10 * time.Second}
+		mhc.Spec.NodeStartupTimeoutSeconds = ptr.To(int32(10))
 
 		g.Expect(env.Create(ctx, mhc)).To(Succeed())
 		defer func(do ...client.Object) {
@@ -1406,7 +1406,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) {
 		}, timeout, 100*time.Millisecond).Should(Equal(1))
 
 		// Create the MachineHealthCheck instance.
-		mhc.Spec.NodeStartupTimeout = &metav1.Duration{Duration: time.Second}
+		mhc.Spec.NodeStartupTimeoutSeconds = ptr.To(int32(1))
 
 		g.Expect(env.Create(ctx, mhc)).To(Succeed())
 		// defer cleanup for all the objects that have been created
@@ -2340,8 +2340,8 @@ func TestIsAllowedRemediation(t *testing.T) {
 
 			mhc := &clusterv1.MachineHealthCheck{
 				Spec: clusterv1.MachineHealthCheckSpec{
-					MaxUnhealthy:       tc.maxUnhealthy,
-					NodeStartupTimeout: &metav1.Duration{Duration: 1 * time.Millisecond},
+					MaxUnhealthy:              tc.maxUnhealthy,
+					NodeStartupTimeoutSeconds: ptr.To(int32(0)),
 				},
 				Status: clusterv1.MachineHealthCheckStatus{
 					ExpectedMachines: tc.expectedMachines,
@@ -2750,13 +2750,13 @@ func newMachineHealthCheck(namespace, clusterName string) *clusterv1.MachineHeal
 				"selector": string(uuid.NewUUID()),
 			},
 		},
-		MaxUnhealthy:       &maxUnhealthy,
-		NodeStartupTimeout: &metav1.Duration{Duration: 1 * time.Millisecond},
+		MaxUnhealthy:              &maxUnhealthy,
+		NodeStartupTimeoutSeconds: ptr.To(int32(1)),
 		UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{
 			{
-				Type:    corev1.NodeReady,
-				Status:  corev1.ConditionUnknown,
-				Timeout: metav1.Duration{Duration: 5 * time.Minute},
+				Type:           corev1.NodeReady,
+				Status:         corev1.ConditionUnknown,
+				TimeoutSeconds: 5 * 60,
 			},
 		},
 	},
diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go
index 3ad6d6790d8d..f866365d0899 100644
--- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go
+++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go
@@ -50,7 +50,7 @@ const (
 
 var (
 	// We allow users to disable the nodeStartupTimeout by setting the duration to 0.
-	disabledNodeStartupTimeout = clusterv1.ZeroDuration
+	disabledNodeStartupTimeout = metav1.Duration{Duration: time.Duration(0)}
 )
 
 // healthCheckTarget contains the information required to perform a health check
@@ -192,21 +192,23 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi
 
 		// If the condition has been in the unhealthy state for longer than the
 		// timeout, return true with no requeue time.
-		if nodeCondition.LastTransitionTime.Add(c.Timeout.Duration).Before(now) {
-			v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededV1Beta1Condition, clusterv1.UnhealthyNodeConditionV1Beta1Reason, clusterv1.ConditionSeverityWarning, "Condition %s on node is reporting status %s for more than %s", c.Type, c.Status, c.Timeout.Duration.String())
-			logger.V(3).Info("Target is unhealthy: condition is in state longer than allowed timeout", "condition", c.Type, "state", c.Status, "timeout", c.Timeout.Duration.String())
+		timeoutSecondsDuration := time.Duration(c.TimeoutSeconds) * time.Second
+
+		if nodeCondition.LastTransitionTime.Add(timeoutSecondsDuration).Before(now) {
+			v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededV1Beta1Condition, clusterv1.UnhealthyNodeConditionV1Beta1Reason, clusterv1.ConditionSeverityWarning, "Condition %s on node is reporting status %s for more than %s", c.Type, c.Status, timeoutSecondsDuration.String())
+			logger.V(3).Info("Target is unhealthy: condition is in state longer than allowed timeout", "condition", c.Type, "state", c.Status, "timeout", timeoutSecondsDuration.String())
 
 			conditions.Set(t.Machine, metav1.Condition{
 				Type:   clusterv1.MachineHealthCheckSucceededCondition,
 				Status: metav1.ConditionFalse,
 				Reason: clusterv1.MachineHealthCheckUnhealthyNodeReason,
-				Message: fmt.Sprintf("Health check failed: Condition %s on Node is reporting status %s for more than %s", c.Type, c.Status, c.Timeout.Duration.String()),
+				Message: fmt.Sprintf("Health check failed: Condition %s on Node is reporting status %s for more than %s", c.Type, c.Status, timeoutSecondsDuration.String()),
 			})
 			return true, time.Duration(0)
 		}
 
 		durationUnhealthy := now.Sub(nodeCondition.LastTransitionTime.Time)
-		nextCheck := c.Timeout.Duration - durationUnhealthy + time.Second
+		nextCheck := timeoutSecondsDuration - durationUnhealthy + time.Second
 		if nextCheck > 0 {
 			nextCheckTimes = append(nextCheckTimes, nextCheck)
 		}
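// Editor's note: for MachineHealthCheck users, the net effect of the change
// above is that every timeout is now a plain integer number of seconds. A
// hedged example of configuring a v1beta2 spec with the new fields (values
// illustrative only, mirroring the test fixtures below):
//
//	mhc.Spec.NodeStartupTimeoutSeconds = ptr.To(int32(600)) // was: &metav1.Duration{Duration: 10 * time.Minute}
//	mhc.Spec.UnhealthyNodeConditions = []clusterv1.UnhealthyNodeCondition{{
//		Type:           corev1.NodeReady,
//		Status:         corev1.ConditionUnknown,
//		TimeoutSeconds: 300, // was: metav1.Duration{Duration: 5 * time.Minute}
//	}}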
conds := []metav1.Condition{} for _, condition := range cluster.GetConditions() { condition.LastTransitionTime = metav1.NewTime(condition.LastTransitionTime.Add(-1 * time.Hour)) @@ -211,7 +211,7 @@ func TestHealthCheckTargets(t *testing.T) { timeoutForMachineToHaveNode := 10 * time.Minute disabledTimeoutForMachineToHaveNode := time.Duration(0) - timeoutForUnhealthyNodeConditions := 5 * time.Minute + timeoutForUnhealthyNodeConditions := int32(5 * 60) // Create a test MHC testMHC := &clusterv1.MachineHealthCheck{ @@ -226,14 +226,14 @@ func TestHealthCheckTargets(t *testing.T) { ClusterName: clusterName, UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{ { - Type: corev1.NodeReady, - Status: corev1.ConditionUnknown, - Timeout: metav1.Duration{Duration: timeoutForUnhealthyNodeConditions}, + Type: corev1.NodeReady, + Status: corev1.ConditionUnknown, + TimeoutSeconds: timeoutForUnhealthyNodeConditions, }, { - Type: corev1.NodeReady, - Status: corev1.ConditionFalse, - Timeout: metav1.Duration{Duration: timeoutForUnhealthyNodeConditions}, + Type: corev1.NodeReady, + Status: corev1.ConditionFalse, + TimeoutSeconds: timeoutForUnhealthyNodeConditions, }, }, }, @@ -364,8 +364,8 @@ func TestHealthCheckTargets(t *testing.T) { Node: testNodeUnknown400, nodeMissing: false, } - nodeUnknown400Condition := newFailedHealthCheckV1Beta1Condition(clusterv1.UnhealthyNodeConditionV1Beta1Reason, "Condition Ready on node is reporting status Unknown for more than %s", timeoutForUnhealthyNodeConditions) - nodeUnknown400V1Beta2Condition := newFailedHealthCheckCondition(clusterv1.MachineHealthCheckUnhealthyNodeReason, "Health check failed: Condition Ready on Node is reporting status Unknown for more than %s", timeoutForUnhealthyNodeConditions) + nodeUnknown400Condition := newFailedHealthCheckV1Beta1Condition(clusterv1.UnhealthyNodeConditionV1Beta1Reason, "Condition Ready on node is reporting status Unknown for more than %s", (time.Duration(timeoutForUnhealthyNodeConditions) * time.Second).String()) + nodeUnknown400V1Beta2Condition := newFailedHealthCheckCondition(clusterv1.MachineHealthCheckUnhealthyNodeReason, "Health check failed: Condition Ready on Node is reporting status Unknown for more than %s", (time.Duration(timeoutForUnhealthyNodeConditions) * time.Second).String()) // Target for when a node is healthy testNodeHealthy := newTestNode("node1") diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index 9d649bde5327..4c3c074b4d60 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -513,9 +513,9 @@ func (r *Reconciler) syncMachines(ctx context.Context, s *scope) (ctrl.Result, e // Set all other in-place mutable fields that impact the ability to tear down existing machines. 
m.Spec.ReadinessGates = machineSet.Spec.Template.Spec.ReadinessGates - m.Spec.NodeDrainTimeout = machineSet.Spec.Template.Spec.NodeDrainTimeout - m.Spec.NodeDeletionTimeout = machineSet.Spec.Template.Spec.NodeDeletionTimeout - m.Spec.NodeVolumeDetachTimeout = machineSet.Spec.Template.Spec.NodeVolumeDetachTimeout + m.Spec.NodeDrainTimeoutSeconds = machineSet.Spec.Template.Spec.NodeDrainTimeoutSeconds + m.Spec.NodeDeletionTimeoutSeconds = machineSet.Spec.Template.Spec.NodeDeletionTimeoutSeconds + m.Spec.NodeVolumeDetachTimeoutSeconds = machineSet.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds m.Spec.MinReadySeconds = machineSet.Spec.Template.Spec.MinReadySeconds // Set machine's up to date condition @@ -927,9 +927,9 @@ func (r *Reconciler) computeDesiredMachine(machineSet *clusterv1.MachineSet, exi // Set all other in-place mutable fields. desiredMachine.Spec.ReadinessGates = machineSet.Spec.Template.Spec.ReadinessGates - desiredMachine.Spec.NodeDrainTimeout = machineSet.Spec.Template.Spec.NodeDrainTimeout - desiredMachine.Spec.NodeDeletionTimeout = machineSet.Spec.Template.Spec.NodeDeletionTimeout - desiredMachine.Spec.NodeVolumeDetachTimeout = machineSet.Spec.Template.Spec.NodeVolumeDetachTimeout + desiredMachine.Spec.NodeDrainTimeoutSeconds = machineSet.Spec.Template.Spec.NodeDrainTimeoutSeconds + desiredMachine.Spec.NodeDeletionTimeoutSeconds = machineSet.Spec.Template.Spec.NodeDeletionTimeoutSeconds + desiredMachine.Spec.NodeVolumeDetachTimeoutSeconds = machineSet.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds desiredMachine.Spec.MinReadySeconds = machineSet.Spec.Template.Spec.MinReadySeconds return desiredMachine, nil diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go index 5dc3c033e075..e7fd3028b932 100644 --- a/internal/controllers/machineset/machineset_controller_test.go +++ b/internal/controllers/machineset/machineset_controller_test.go @@ -100,8 +100,8 @@ func TestMachineSetReconciler(t *testing.T) { namespace, testCluster := setup(t, g) defer teardown(t, g, namespace, testCluster) - duration10m := &metav1.Duration{Duration: 10 * time.Minute} - duration5m := &metav1.Duration{Duration: 5 * time.Minute} + duration10m := ptr.To(int32(10 * 60)) + duration5m := ptr.To(int32(5 * 60)) replicas := int32(2) version := "v1.14.2" machineTemplateSpec := clusterv1.MachineTemplateSpec{ @@ -131,10 +131,10 @@ func TestMachineSetReconciler(t *testing.T) { Name: "ms-template", Namespace: namespace.Name, }, - NodeDrainTimeout: duration10m, - NodeDeletionTimeout: duration10m, - NodeVolumeDetachTimeout: duration10m, - MinReadySeconds: ptr.To[int32](0), + NodeDrainTimeoutSeconds: duration10m, + NodeDeletionTimeoutSeconds: duration10m, + NodeVolumeDetachTimeoutSeconds: duration10m, + MinReadySeconds: ptr.To[int32](0), }, } @@ -354,28 +354,28 @@ func TestMachineSetReconciler(t *testing.T) { } // Verify that in-place mutable fields propagate from MachineSet to Machines. 
- t.Log("Updating NodeDrainTimeout on MachineSet") + t.Log("Updating NodeDrainTimeoutSeconds on MachineSet") patchHelper, err := patch.NewHelper(instance, env) g.Expect(err).ToNot(HaveOccurred()) - instance.Spec.Template.Spec.NodeDrainTimeout = duration5m + instance.Spec.Template.Spec.NodeDrainTimeoutSeconds = duration5m g.Expect(patchHelper.Patch(ctx, instance)).Should(Succeed()) - t.Log("Verifying new NodeDrainTimeout value is set on Machines") + t.Log("Verifying new NodeDrainTimeoutSeconds value is set on Machines") g.Eventually(func() bool { if err := env.List(ctx, machines, client.InNamespace(namespace.Name)); err != nil { return false } // All the machines should have the new NodeDrainTimeoutValue for _, m := range machines.Items { - if m.Spec.NodeDrainTimeout == nil { + if m.Spec.NodeDrainTimeoutSeconds == nil { return false } - if m.Spec.NodeDrainTimeout.Duration != duration5m.Duration { + if *m.Spec.NodeDrainTimeoutSeconds != *duration5m { return false } } return true - }, timeout).Should(BeTrue(), "machine should have the updated NodeDrainTimeout value") + }, timeout).Should(BeTrue(), "machine should have the updated NodeDrainTimeoutSeconds value") // Try to delete 1 machine and check the MachineSet scales back up. machineToBeDeleted := machines.Items[0] @@ -1107,8 +1107,8 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { classicManager := "manager" replicas := int32(2) version := "v1.25.3" - duration10s := &metav1.Duration{Duration: 10 * time.Second} - duration11s := &metav1.Duration{Duration: 11 * time.Second} + duration10s := ptr.To(int32(10)) + duration11s := ptr.To(int32(11)) ms := &clusterv1.MachineSet{ ObjectMeta: metav1.ObjectMeta{ UID: "abc-123-ms-uid", @@ -1379,9 +1379,9 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { } readinessGates := []clusterv1.MachineReadinessGate{{ConditionType: "foo"}} ms.Spec.Template.Spec.ReadinessGates = readinessGates - ms.Spec.Template.Spec.NodeDrainTimeout = duration10s - ms.Spec.Template.Spec.NodeDeletionTimeout = duration10s - ms.Spec.Template.Spec.NodeVolumeDetachTimeout = duration10s + ms.Spec.Template.Spec.NodeDrainTimeoutSeconds = duration10s + ms.Spec.Template.Spec.NodeDeletionTimeoutSeconds = duration10s + ms.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = duration10s ms.Spec.Template.Spec.MinReadySeconds = ptr.To[int32](10) s = &scope{ machineSet: ms, @@ -1400,17 +1400,17 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { // Verify Annotations g.Expect(updatedInPlaceMutatingMachine.Annotations).Should(Equal(ms.Spec.Template.Annotations)) // Verify Node timeout values - g.Expect(updatedInPlaceMutatingMachine.Spec.NodeDrainTimeout).Should(And( + g.Expect(updatedInPlaceMutatingMachine.Spec.NodeDrainTimeoutSeconds).Should(And( Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.NodeDrainTimeout)), + HaveValue(Equal(*ms.Spec.Template.Spec.NodeDrainTimeoutSeconds)), )) - g.Expect(updatedInPlaceMutatingMachine.Spec.NodeDeletionTimeout).Should(And( + g.Expect(updatedInPlaceMutatingMachine.Spec.NodeDeletionTimeoutSeconds).Should(And( Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.NodeDeletionTimeout)), + HaveValue(Equal(*ms.Spec.Template.Spec.NodeDeletionTimeoutSeconds)), )) - g.Expect(updatedInPlaceMutatingMachine.Spec.NodeVolumeDetachTimeout).Should(And( + g.Expect(updatedInPlaceMutatingMachine.Spec.NodeVolumeDetachTimeoutSeconds).Should(And( Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.NodeVolumeDetachTimeout)), + 
HaveValue(Equal(*ms.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds)), )) g.Expect(updatedInPlaceMutatingMachine.Spec.MinReadySeconds).Should(And( Not(BeNil()), @@ -1464,16 +1464,16 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { // Verify in-place mutable fields are still the same. g.Expect(updatedDeletingMachine.Labels).Should(Equal(deletingMachine.Labels)) g.Expect(updatedDeletingMachine.Annotations).Should(Equal(deletingMachine.Annotations)) - g.Expect(updatedDeletingMachine.Spec.NodeDrainTimeout).Should(Equal(deletingMachine.Spec.NodeDrainTimeout)) - g.Expect(updatedDeletingMachine.Spec.NodeDeletionTimeout).Should(Equal(deletingMachine.Spec.NodeDeletionTimeout)) - g.Expect(updatedDeletingMachine.Spec.NodeVolumeDetachTimeout).Should(Equal(deletingMachine.Spec.NodeVolumeDetachTimeout)) + g.Expect(updatedDeletingMachine.Spec.NodeDrainTimeoutSeconds).Should(Equal(deletingMachine.Spec.NodeDrainTimeoutSeconds)) + g.Expect(updatedDeletingMachine.Spec.NodeDeletionTimeoutSeconds).Should(Equal(deletingMachine.Spec.NodeDeletionTimeoutSeconds)) + g.Expect(updatedDeletingMachine.Spec.NodeVolumeDetachTimeoutSeconds).Should(Equal(deletingMachine.Spec.NodeVolumeDetachTimeoutSeconds)) g.Expect(updatedDeletingMachine.Spec.MinReadySeconds).Should(Equal(deletingMachine.Spec.MinReadySeconds)) }, 5*time.Second).Should(Succeed()) // Verify in-place mutable fields are updated on the deleting machine - ms.Spec.Template.Spec.NodeDrainTimeout = duration11s - ms.Spec.Template.Spec.NodeDeletionTimeout = duration11s - ms.Spec.Template.Spec.NodeVolumeDetachTimeout = duration11s + ms.Spec.Template.Spec.NodeDrainTimeoutSeconds = duration11s + ms.Spec.Template.Spec.NodeDeletionTimeoutSeconds = duration11s + ms.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = duration11s ms.Spec.Template.Spec.MinReadySeconds = ptr.To[int32](11) s = &scope{ machineSet: ms, @@ -1486,17 +1486,17 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed()) // Verify Node timeout values - g.Expect(updatedDeletingMachine.Spec.NodeDrainTimeout).Should(And( + g.Expect(updatedDeletingMachine.Spec.NodeDrainTimeoutSeconds).Should(And( Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.NodeDrainTimeout)), + HaveValue(Equal(*ms.Spec.Template.Spec.NodeDrainTimeoutSeconds)), )) - g.Expect(updatedDeletingMachine.Spec.NodeDeletionTimeout).Should(And( + g.Expect(updatedDeletingMachine.Spec.NodeDeletionTimeoutSeconds).Should(And( Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.NodeDeletionTimeout)), + HaveValue(Equal(*ms.Spec.Template.Spec.NodeDeletionTimeoutSeconds)), )) - g.Expect(updatedDeletingMachine.Spec.NodeVolumeDetachTimeout).Should(And( + g.Expect(updatedDeletingMachine.Spec.NodeVolumeDetachTimeoutSeconds).Should(And( Not(BeNil()), - HaveValue(Equal(*ms.Spec.Template.Spec.NodeVolumeDetachTimeout)), + HaveValue(Equal(*ms.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds)), )) g.Expect(updatedDeletingMachine.Spec.MinReadySeconds).Should(And( Not(BeNil()), @@ -2285,7 +2285,7 @@ func TestMachineSetReconciler_syncReplicas_WithErrors(t *testing.T) { }, } - duration10m := &metav1.Duration{Duration: 10 * time.Minute} + duration10m := ptr.To(int32(10 * 60)) machineSet := &clusterv1.MachineSet{ ObjectMeta: metav1.ObjectMeta{ Name: "machineset1", @@ -2313,10 +2313,10 @@ func TestMachineSetReconciler_syncReplicas_WithErrors(t *testing.T) { Name: "ms-template", Namespace: metav1.NamespaceDefault, }, - 
NodeDrainTimeout: duration10m, - NodeDeletionTimeout: duration10m, - NodeVolumeDetachTimeout: duration10m, - MinReadySeconds: ptr.To[int32](10), + NodeDrainTimeoutSeconds: duration10m, + NodeDeletionTimeoutSeconds: duration10m, + NodeVolumeDetachTimeoutSeconds: duration10m, + MinReadySeconds: ptr.To[int32](10), }, }, }, @@ -2427,8 +2427,8 @@ type computeDesiredMachineTestCase struct { } func TestComputeDesiredMachine(t *testing.T) { - duration5s := &metav1.Duration{Duration: 5 * time.Second} - duration10s := &metav1.Duration{Duration: 10 * time.Second} + duration5s := ptr.To(int32(5)) + duration10s := ptr.To(int32(10)) namingTemplateKey := "-md" mdName := "testmd" @@ -2463,10 +2463,10 @@ func TestComputeDesiredMachine(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ ConfigRef: &bootstrapRef, }, - NodeDrainTimeout: duration10s, - NodeVolumeDetachTimeout: duration10s, - NodeDeletionTimeout: duration10s, - MinReadySeconds: ptr.To[int32](10), + NodeDrainTimeoutSeconds: duration10s, + NodeVolumeDetachTimeoutSeconds: duration10s, + NodeDeletionTimeoutSeconds: duration10s, + MinReadySeconds: ptr.To[int32](10), }, } @@ -2482,12 +2482,12 @@ func TestComputeDesiredMachine(t *testing.T) { Finalizers: []string{clusterv1.MachineFinalizer}, }, Spec: clusterv1.MachineSpec{ - ClusterName: testClusterName, - Version: ptr.To("v1.25.3"), - NodeDrainTimeout: duration10s, - NodeVolumeDetachTimeout: duration10s, - NodeDeletionTimeout: duration10s, - MinReadySeconds: ptr.To[int32](10), + ClusterName: testClusterName, + Version: ptr.To("v1.25.3"), + NodeDrainTimeoutSeconds: duration10s, + NodeVolumeDetachTimeoutSeconds: duration10s, + NodeDeletionTimeoutSeconds: duration10s, + MinReadySeconds: ptr.To[int32](10), }, } @@ -2512,9 +2512,9 @@ func TestComputeDesiredMachine(t *testing.T) { Name: "bootstrap-config-1", APIVersion: clusterv1.GroupVersionBootstrap.String(), } - existingMachine.Spec.NodeDrainTimeout = duration5s - existingMachine.Spec.NodeDeletionTimeout = duration5s - existingMachine.Spec.NodeVolumeDetachTimeout = duration5s + existingMachine.Spec.NodeDrainTimeoutSeconds = duration5s + existingMachine.Spec.NodeDeletionTimeoutSeconds = duration5s + existingMachine.Spec.NodeVolumeDetachTimeoutSeconds = duration5s existingMachine.Spec.MinReadySeconds = ptr.To[int32](5) expectedUpdatedMachine := skeletonMachine.DeepCopy() diff --git a/internal/controllers/topology/cluster/blueprint_test.go b/internal/controllers/topology/cluster/blueprint_test.go index 5ac95e594d80..82bd5b6b83b1 100644 --- a/internal/controllers/topology/cluster/blueprint_test.go +++ b/internal/controllers/topology/cluster/blueprint_test.go @@ -18,11 +18,11 @@ package cluster import ( "testing" - "time" "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" . "sigs.k8s.io/controller-runtime/pkg/envtest/komega" @@ -63,9 +63,7 @@ func TestGetBlueprint(t *testing.T) { workerBootstrapTemplate := builder.BootstrapTemplate(metav1.NamespaceDefault, "workerbootstraptemplate1"). Build() machineHealthCheck := &clusterv1.MachineHealthCheckClass{ - NodeStartupTimeout: &metav1.Duration{ - Duration: time.Duration(1), - }, + NodeStartupTimeoutSeconds: ptr.To(int32(1)), } machineDeployment := builder.MachineDeploymentClass("workerclass1"). 
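[Editor's note] Every hunk above applies the same mechanical migration: timeout fields move from *metav1.Duration to *int32 whole seconds, and call sites convert at the boundary exactly the way machinehealthcheck_targets.go now does with time.Duration(c.TimeoutSeconds) * time.Second. A minimal, self-contained sketch of that round-trip follows; the helper names toSeconds and fromSeconds are illustrative stand-ins, not the repository's own API.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// toSeconds converts an optional metav1.Duration into optional whole seconds.
// Sub-second precision is truncated, which is why a 1 * time.Millisecond test
// value had to become a 0 or 1 whole-seconds value in the tests above.
func toSeconds(d *metav1.Duration) *int32 {
	if d == nil {
		return nil
	}
	return ptr.To(int32(d.Duration / time.Second))
}

// fromSeconds converts optional whole seconds back into an optional metav1.Duration.
func fromSeconds(s *int32) *metav1.Duration {
	if s == nil {
		return nil
	}
	return &metav1.Duration{Duration: time.Duration(*s) * time.Second}
}

func main() {
	d := &metav1.Duration{Duration: 5 * time.Minute}
	s := toSeconds(d)
	fmt.Println(*s)                      // 300 - the 5 * 60 literal used in the tests above
	fmt.Println(fromSeconds(s).Duration) // 5m0s
}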
diff --git a/internal/controllers/topology/cluster/current_state_test.go b/internal/controllers/topology/cluster/current_state_test.go
index 9e9e8e75e974..f361b2e08a80 100644
--- a/internal/controllers/topology/cluster/current_state_test.go
+++ b/internal/controllers/topology/cluster/current_state_test.go
@@ -20,7 +20,6 @@ import (
 	"maps"
 	"slices"
 	"testing"
-	"time"
 	"github.com/google/go-cmp/cmp"
 	. "github.com/onsi/gomega"
@@ -137,14 +136,14 @@ func TestGetCurrentState(t *testing.T) {
 		WithSelector(*selectors.ForMachineDeploymentMHC(machineDeployment)).
 		WithUnhealthyNodeConditions([]clusterv1.UnhealthyNodeCondition{
 			{
-				Type:    corev1.NodeReady,
-				Status:  corev1.ConditionUnknown,
-				Timeout: metav1.Duration{Duration: 5 * time.Minute},
+				Type:           corev1.NodeReady,
+				Status:         corev1.ConditionUnknown,
+				TimeoutSeconds: 5 * 60,
 			},
 			{
-				Type:    corev1.NodeReady,
-				Status:  corev1.ConditionFalse,
-				Timeout: metav1.Duration{Duration: 5 * time.Minute},
+				Type:           corev1.NodeReady,
+				Status:         corev1.ConditionFalse,
+				TimeoutSeconds: 5 * 60,
 			},
 		}).
 		WithClusterName("cluster1").
@@ -154,14 +153,14 @@ func TestGetCurrentState(t *testing.T) {
 		WithSelector(*selectors.ForControlPlaneMHC()).
 		WithUnhealthyNodeConditions([]clusterv1.UnhealthyNodeCondition{
 			{
-				Type:    corev1.NodeReady,
-				Status:  corev1.ConditionUnknown,
-				Timeout: metav1.Duration{Duration: 5 * time.Minute},
+				Type:           corev1.NodeReady,
+				Status:         corev1.ConditionUnknown,
+				TimeoutSeconds: 5 * 60,
 			},
 			{
-				Type:    corev1.NodeReady,
-				Status:  corev1.ConditionFalse,
-				Timeout: metav1.Duration{Duration: 5 * time.Minute},
+				Type:           corev1.NodeReady,
+				Status:         corev1.ConditionFalse,
+				TimeoutSeconds: 5 * 60,
 			},
 		}).
 		WithClusterName("cluster1").
diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go
index 6d80df6810f3..2104f89111aa 100644
--- a/internal/controllers/topology/cluster/patches/engine.go
+++ b/internal/controllers/topology/cluster/patches/engine.go
@@ -529,9 +529,14 @@ func updateDesiredState(ctx context.Context, req *runtimehooksv1.GeneratePatches
 		contract.ControlPlane().MachineTemplate().Metadata().Path(),
 		contract.ControlPlane().MachineTemplate().ReadinessGates().Path(),
 		contract.ControlPlane().MachineTemplate().InfrastructureRef().Path(),
+		// Note: For simplicity we disallow patching these fields for both contract versions;
+		// this avoids requiring a client here to retrieve the contract version of the ControlPlane object.
 		contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path(),
 		contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path(),
 		contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path(),
+		contract.ControlPlane().MachineTemplate().NodeDrainTimeoutSeconds().Path(),
+		contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeoutSeconds().Path(),
+		contract.ControlPlane().MachineTemplate().NodeDeletionTimeoutSeconds().Path(),
 		contract.ControlPlane().Replicas().Path(),
 		contract.ControlPlane().Version().Path(),
 	}); err != nil {
diff --git a/internal/controllers/topology/cluster/reconcile_state_test.go b/internal/controllers/topology/cluster/reconcile_state_test.go
index 64f06e420d86..6aff2954b9ad 100644
--- a/internal/controllers/topology/cluster/reconcile_state_test.go
+++ b/internal/controllers/topology/cluster/reconcile_state_test.go
@@ -21,7 +21,6 @@ import (
 	"net/http"
 	"regexp"
 	"testing"
-	"time"
 	"github.com/google/go-cmp/cmp"
 	. "github.com/onsi/gomega"
@@ -1726,9 +1725,9 @@ func TestReconcileControlPlaneMachineHealthCheck(t *testing.T) {
 	mhcClass := &clusterv1.MachineHealthCheckClass{
 		UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{
 			{
-				Type:    corev1.NodeReady,
-				Status:  corev1.ConditionUnknown,
-				Timeout: metav1.Duration{Duration: 5 * time.Minute},
+				Type:           corev1.NodeReady,
+				Status:         corev1.ConditionUnknown,
+				TimeoutSeconds: 5 * 60,
 			},
 		},
 	}
@@ -2657,7 +2656,7 @@ func TestReconcileMachinePools(t *testing.T) {
 				wantMachinePoolState.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Name = gotMachinePool.Spec.Template.Spec.Bootstrap.ConfigRef.Name
 			}
 			// expect default value for the node deletion timeout.
-			wantMachinePoolState.Object.Spec.Template.Spec.NodeDeletionTimeout = &metav1.Duration{Duration: 10 * time.Second}
+			wantMachinePoolState.Object.Spec.Template.Spec.NodeDeletionTimeoutSeconds = ptr.To(int32(10))
 			// Compare MachinePool.
 			// Note: We're intentionally only comparing Spec as otherwise we would have to account for
@@ -3301,9 +3300,9 @@ func TestReconcileMachineDeploymentMachineHealthCheck(t *testing.T) {
 		WithSelector(*selectors.ForMachineDeploymentMHC(md)).
 		WithUnhealthyNodeConditions([]clusterv1.UnhealthyNodeCondition{
 			{
-				Type:    corev1.NodeReady,
-				Status:  corev1.ConditionUnknown,
-				Timeout: metav1.Duration{Duration: 5 * time.Minute},
+				Type:           corev1.NodeReady,
+				Status:         corev1.ConditionUnknown,
+				TimeoutSeconds: 5 * 60,
 			},
 		}).
 		WithClusterName("cluster1")
@@ -3705,9 +3704,9 @@ func TestReconciler_reconcileMachineHealthCheck(t *testing.T) {
 		WithSelector(*selectors.ForControlPlaneMHC()).
 		WithUnhealthyNodeConditions([]clusterv1.UnhealthyNodeCondition{
 			{
-				Type:    corev1.NodeReady,
-				Status:  corev1.ConditionUnknown,
-				Timeout: metav1.Duration{Duration: 5 * time.Minute},
+				Type:           corev1.NodeReady,
+				Status:         corev1.ConditionUnknown,
+				TimeoutSeconds: 5 * 60,
 			},
 		}).
 		WithClusterName("cluster1")
@@ -3730,16 +3729,16 @@ func TestReconciler_reconcileMachineHealthCheck(t *testing.T) {
 			// update the unhealthy conditions in the MachineHealthCheck
 			desired: mhcBuilder.DeepCopy().WithUnhealthyNodeConditions([]clusterv1.UnhealthyNodeCondition{
 				{
-					Type:    corev1.NodeReady,
-					Status:  corev1.ConditionUnknown,
-					Timeout: metav1.Duration{Duration: 1000 * time.Minute},
+					Type:           corev1.NodeReady,
+					Status:         corev1.ConditionUnknown,
+					TimeoutSeconds: 1000 * 60,
 				},
 			}).Build(),
 			want: mhcBuilder.DeepCopy().WithUnhealthyNodeConditions([]clusterv1.UnhealthyNodeCondition{
 				{
-					Type:    corev1.NodeReady,
-					Status:  corev1.ConditionUnknown,
-					Timeout: metav1.Duration{Duration: 1000 * time.Minute},
+					Type:           corev1.NodeReady,
+					Status:         corev1.ConditionUnknown,
+					TimeoutSeconds: 1000 * 60,
 				},
 			}).Build(),
 		},
diff --git a/internal/runtime/client/client.go b/internal/runtime/client/client.go
index 22401c8fe18c..ca107de30492 100644
--- a/internal/runtime/client/client.go
+++ b/internal/runtime/client/client.go
@@ -600,7 +600,7 @@ func defaultAndValidateDiscoveryResponse(cat *runtimecatalog.Catalog, discovery
 		errs = append(errs, errors.Errorf("handler name %s is not valid: %s", handler.Name, errStrings))
 	}
-	// Timeout should be a positive integer not greater than 30.
+	// TimeoutSeconds should be an integer between 0 and 30 inclusive.
 	if *handler.TimeoutSeconds < 0 || *handler.TimeoutSeconds > 30 {
 		errs = append(errs, errors.Errorf("handler %s timeoutSeconds %d must be between 0 and 30", handler.Name, *handler.TimeoutSeconds))
 	}
diff --git a/internal/runtime/client/client_test.go b/internal/runtime/client/client_test.go
index a592818af9aa..5c4828713284 100644
--- a/internal/runtime/client/client_test.go
+++ b/internal/runtime/client/client_test.go
@@ -398,7 +398,7 @@ func Test_defaultAndValidateDiscoveryResponse(t *testing.T) {
 			wantErr: true,
 		},
 		{
-			name: "error with Timeout of over 30 seconds",
+			name: "error with TimeoutSeconds of over 30 seconds",
 			discovery: &runtimehooksv1.DiscoveryResponse{
 				TypeMeta: metav1.TypeMeta{
 					Kind: "DiscoveryResponse",
@@ -416,7 +416,7 @@ func Test_defaultAndValidateDiscoveryResponse(t *testing.T) {
 			wantErr: true,
 		},
 		{
-			name: "error with Timeout of less than 0",
+			name: "error with TimeoutSeconds of less than 0",
 			discovery: &runtimehooksv1.DiscoveryResponse{
 				TypeMeta: metav1.TypeMeta{
 					Kind: "DiscoveryResponse",
diff --git a/internal/test/envtest/environment.go b/internal/test/envtest/environment.go
index 4e8887313c59..95093cb69a3b 100644
--- a/internal/test/envtest/environment.go
+++ b/internal/test/envtest/environment.go
@@ -345,7 +345,7 @@ func newEnvironment(scheme *runtime.Scheme, additionalCRDDirectoryPaths []string
 	}
 	// Set minNodeStartupTimeout for Test, so it does not need to be at least 30s
-	internalwebhooks.SetMinNodeStartupTimeout(metav1.Duration{Duration: 1 * time.Millisecond})
+	internalwebhooks.SetMinNodeStartupTimeoutSeconds(0)
 	if err := (&webhooks.Cluster{Client: mgr.GetClient()}).SetupWebhookWithManager(mgr); err != nil {
 		klog.Fatalf("unable to create webhook: %+v", err)
diff --git a/internal/util/ssa/patch_test.go b/internal/util/ssa/patch_test.go
index bf61dc0e2edd..a2340668cda2 100644
--- a/internal/util/ssa/patch_test.go
+++ b/internal/util/ssa/patch_test.go
@@ -106,9 +106,9 @@ func TestPatch(t *testing.T) {
 			},
 		},
 		Spec: clusterv1.MachineSpec{
-			ClusterName:      "cluster-1",
-			Version:          ptr.To("v1.25.0"),
-			NodeDrainTimeout: &metav1.Duration{Duration: 10 * time.Second},
+			ClusterName:             "cluster-1",
+			Version:                 ptr.To("v1.25.0"),
+			NodeDrainTimeoutSeconds: ptr.To(int32(10)),
 			Bootstrap: clusterv1.Bootstrap{
 				DataSecretName: ptr.To("data-secret"),
 			},
@@ -134,7 +134,7 @@ func TestPatch(t *testing.T) {
 		g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(originalObject), originalObject)).To(Succeed())
 		// Modify the object
 		modifiedObject := initialObject.DeepCopy()
-		modifiedObject.Spec.NodeDrainTimeout = &metav1.Duration{Duration: 5 * time.Second}
+		modifiedObject.Spec.NodeDrainTimeoutSeconds = ptr.To(int32(5))
 		// Compute request identifier, so we can later verify that the update call was not cached.
 		modifiedUnstructured, err := prepareModified(env.Scheme(), modifiedObject)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -161,7 +161,7 @@ func TestPatch(t *testing.T) {
 		g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(originalObject), originalObject)).To(Succeed())
 		// Modify the object
 		modifiedObject = initialObject.DeepCopy()
-		modifiedObject.Spec.NodeDrainTimeout = &metav1.Duration{Duration: 5 * time.Second}
+		modifiedObject.Spec.NodeDrainTimeoutSeconds = ptr.To(int32(5))
 		// Compute request identifier, so we can later verify that the update call was cached.
 		modifiedUnstructured, err = prepareModified(env.Scheme(), modifiedObject)
 		g.Expect(err).ToNot(HaveOccurred())
diff --git a/internal/webhooks/clusterclass.go b/internal/webhooks/clusterclass.go
index 888c2b6c6621..8e5deb75543f 100644
--- a/internal/webhooks/clusterclass.go
+++ b/internal/webhooks/clusterclass.go
@@ -523,11 +523,11 @@ func validateMachineHealthCheckClass(fldPath *field.Path, namepace string, m *cl
 			Namespace: namepace,
 		},
 		Spec: clusterv1.MachineHealthCheckSpec{
-			NodeStartupTimeout:      m.NodeStartupTimeout,
-			MaxUnhealthy:            m.MaxUnhealthy,
-			UnhealthyNodeConditions: m.UnhealthyNodeConditions,
-			UnhealthyRange:          m.UnhealthyRange,
-			RemediationTemplate:     m.RemediationTemplate,
+			NodeStartupTimeoutSeconds: m.NodeStartupTimeoutSeconds,
+			MaxUnhealthy:              m.MaxUnhealthy,
+			UnhealthyNodeConditions:   m.UnhealthyNodeConditions,
+			UnhealthyRange:            m.UnhealthyRange,
+			RemediationTemplate:       m.RemediationTemplate,
 		}}
 	return (&MachineHealthCheck{}).validateCommonFields(&mhc, fldPath)
diff --git a/internal/webhooks/clusterclass_test.go b/internal/webhooks/clusterclass_test.go
index 0fc4aa75da48..5c3e53ba4e0a 100644
--- a/internal/webhooks/clusterclass_test.go
+++ b/internal/webhooks/clusterclass_test.go
@@ -19,7 +19,6 @@ package webhooks
 import (
 	"strings"
 	"testing"
-	"time"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
@@ -892,14 +891,12 @@ func TestClusterClassValidation(t *testing.T) {
 				WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckClass{
 					UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{
 						{
-							Type:    corev1.NodeReady,
-							Status:  corev1.ConditionUnknown,
-							Timeout: metav1.Duration{Duration: 5 * time.Minute},
+							Type:           corev1.NodeReady,
+							Status:         corev1.ConditionUnknown,
+							TimeoutSeconds: 5 * 60,
 						},
 					},
-					NodeStartupTimeout: &metav1.Duration{
-						Duration: time.Duration(6000000000000),
-					},
+					NodeStartupTimeoutSeconds: ptr.To(int32(60)),
 				}).
 				Build(),
 		},
@@ -913,9 +910,7 @@ func TestClusterClassValidation(t *testing.T) {
 					Build()).
 				// No ControlPlaneMachineInfrastructure makes this an invalid creation request.
 				WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckClass{
-					NodeStartupTimeout: &metav1.Duration{
-						Duration: time.Duration(6000000000000),
-					},
+					NodeStartupTimeoutSeconds: ptr.To(int32(60)),
 				}).
 				Build(),
 			expectErr: true,
@@ -932,9 +927,7 @@ func TestClusterClassValidation(t *testing.T) {
 					builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cpInfra1").
 						Build()).
 				WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckClass{
-					NodeStartupTimeout: &metav1.Duration{
-						Duration: time.Duration(6000000000000),
-					},
+					NodeStartupTimeoutSeconds: ptr.To(int32(60)),
 				}).
 				Build(),
 			expectErr: false,
@@ -956,14 +949,12 @@ func TestClusterClassValidation(t *testing.T) {
 						WithMachineHealthCheckClass(&clusterv1.MachineHealthCheckClass{
 							UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{
 								{
-									Type:    corev1.NodeReady,
-									Status:  corev1.ConditionUnknown,
-									Timeout: metav1.Duration{Duration: 5 * time.Minute},
+									Type:           corev1.NodeReady,
+									Status:         corev1.ConditionUnknown,
+									TimeoutSeconds: 5 * 60,
 								},
 							},
-							NodeStartupTimeout: &metav1.Duration{
-								Duration: time.Duration(6000000000000),
-							},
+							NodeStartupTimeoutSeconds: ptr.To(int32(60)),
 						}).
 						Build()).
 				Build(),
@@ -985,15 +976,13 @@ func TestClusterClassValidation(t *testing.T) {
 						WithMachineHealthCheckClass(&clusterv1.MachineHealthCheckClass{
 							UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{
 								{
-									Type:    corev1.NodeReady,
-									Status:  corev1.ConditionUnknown,
-									Timeout: metav1.Duration{Duration: 5 * time.Minute},
+									Type:           corev1.NodeReady,
+									Status:         corev1.ConditionUnknown,
+									TimeoutSeconds: 5 * 60,
 								},
 							},
-							NodeStartupTimeout: &metav1.Duration{
-								// nodeStartupTimeout is too short here - 600ns.
-								Duration: time.Duration(600),
-							},
+							// nodeStartupTimeoutSeconds is too short here - 10s is below the 30s minimum.
+							NodeStartupTimeoutSeconds: ptr.To(int32(10)),
 						}).
 						Build()).
 				Build(),
@@ -1014,9 +1003,7 @@ func TestClusterClassValidation(t *testing.T) {
 						WithBootstrapTemplate(
 							builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap1").Build()).
 						WithMachineHealthCheckClass(&clusterv1.MachineHealthCheckClass{
-							NodeStartupTimeout: &metav1.Duration{
-								Duration: time.Duration(6000000000000),
-							},
+							NodeStartupTimeoutSeconds: ptr.To(int32(60)),
 						}).
 						Build()).
 				Build(),
@@ -2298,9 +2285,9 @@ func TestClusterClassValidationWithClusterAwareChecks(t *testing.T) {
 				WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckClass{
 					UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{
 						{
-							Type:    corev1.NodeReady,
-							Status:  corev1.ConditionUnknown,
-							Timeout: metav1.Duration{Duration: 5 * time.Minute},
+							Type:           corev1.NodeReady,
+							Status:         corev1.ConditionUnknown,
+							TimeoutSeconds: 5 * 60,
 						},
 					},
 				}).
@@ -2351,9 +2338,9 @@ func TestClusterClassValidationWithClusterAwareChecks(t *testing.T) {
 					MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{
 						UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{
 							{
-								Type:    corev1.NodeReady,
-								Status:  corev1.ConditionUnknown,
-								Timeout: metav1.Duration{Duration: 5 * time.Minute},
+								Type:           corev1.NodeReady,
+								Status:         corev1.ConditionUnknown,
+								TimeoutSeconds: 5 * 60,
 							},
 						},
 					},
@@ -2484,9 +2471,9 @@ func TestClusterClassValidationWithClusterAwareChecks(t *testing.T) {
 					MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{
 						UnhealthyNodeConditions: []clusterv1.UnhealthyNodeCondition{
 							{
-								Type:    corev1.NodeReady,
-								Status:  corev1.ConditionUnknown,
-								Timeout: metav1.Duration{Duration: 5 * time.Minute},
+								Type:           corev1.NodeReady,
+								Status:         corev1.ConditionUnknown,
+								TimeoutSeconds: 5 * 60,
 							},
 						},
 					},
diff --git a/internal/webhooks/machine.go b/internal/webhooks/machine.go
index d055d9737288..dd30faef7268 100644
--- a/internal/webhooks/machine.go
+++ b/internal/webhooks/machine.go
@@ -20,12 +20,11 @@ import (
 	"context"
 	"fmt"
 	"strings"
-	"time"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -35,7 +34,7 @@ import (
 	"sigs.k8s.io/cluster-api/util/version"
 )
-const defaultNodeDeletionTimeout = 10 * time.Second
+const defaultNodeDeletionTimeoutSeconds = int32(10)
 func (webhook *Machine) SetupWebhookWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewWebhookManagedBy(mgr).
@@ -79,8 +78,8 @@ func (webhook *Machine) Default(_ context.Context, obj runtime.Object) error {
 		m.Spec.Version = &normalizedVersion
 	}
-	if m.Spec.NodeDeletionTimeout == nil {
-		m.Spec.NodeDeletionTimeout = &metav1.Duration{Duration: defaultNodeDeletionTimeout}
+	if m.Spec.NodeDeletionTimeoutSeconds == nil {
+		m.Spec.NodeDeletionTimeoutSeconds = ptr.To(defaultNodeDeletionTimeoutSeconds)
 	}
 	return nil
diff --git a/internal/webhooks/machine_test.go b/internal/webhooks/machine_test.go
index 02329a3c66fc..95bdb2b08115 100644
--- a/internal/webhooks/machine_test.go
+++ b/internal/webhooks/machine_test.go
@@ -50,7 +50,7 @@ func TestMachineDefault(t *testing.T) {
 	g.Expect(m.Spec.Bootstrap.ConfigRef.Namespace).To(Equal(m.Namespace))
 	g.Expect(m.Spec.InfrastructureRef.Namespace).To(Equal(m.Namespace))
 	g.Expect(*m.Spec.Version).To(Equal("v1.17.5"))
-	g.Expect(m.Spec.NodeDeletionTimeout.Duration).To(Equal(defaultNodeDeletionTimeout))
+	g.Expect(*m.Spec.NodeDeletionTimeoutSeconds).To(Equal(defaultNodeDeletionTimeoutSeconds))
 }
 func TestMachineBootstrapValidation(t *testing.T) {
diff --git a/internal/webhooks/machinehealthcheck.go b/internal/webhooks/machinehealthcheck.go
index a5166636b374..ca0419496cfb 100644
--- a/internal/webhooks/machinehealthcheck.go
+++ b/internal/webhooks/machinehealthcheck.go
@@ -19,7 +19,6 @@ package webhooks
 import (
 	"context"
 	"fmt"
-	"time"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -35,18 +34,18 @@ import (
 var (
 	// Minimum time allowed for a node to start up.
-	minNodeStartupTimeout = metav1.Duration{Duration: 30 * time.Second}
+	minNodeStartupTimeoutSeconds = int32(30)
 	// We allow users to disable the nodeStartupTimeout by setting the duration to 0.
-	disabledNodeStartupTimeout = clusterv1.ZeroDuration
+	disabledNodeStartupTimeoutSeconds = int32(0)
 )
-// SetMinNodeStartupTimeout allows users to optionally set a custom timeout
+// SetMinNodeStartupTimeoutSeconds allows users to optionally set a custom timeout
 // for the validation webhook.
 //
 // This function is mostly used within envtest (integration tests), and should
 // never be used in a production environment.
-func SetMinNodeStartupTimeout(d metav1.Duration) {
-	minNodeStartupTimeout = d
+func SetMinNodeStartupTimeoutSeconds(d int32) {
+	minNodeStartupTimeoutSeconds = d
 }
 func (webhook *MachineHealthCheck) SetupWebhookWithManager(mgr ctrl.Manager) error {
@@ -83,8 +82,8 @@ func (webhook *MachineHealthCheck) Default(_ context.Context, obj runtime.Object
 		m.Spec.MaxUnhealthy = &defaultMaxUnhealthy
 	}
-	if m.Spec.NodeStartupTimeout == nil {
-		m.Spec.NodeStartupTimeout = &clusterv1.DefaultNodeStartupTimeout
+	if m.Spec.NodeStartupTimeoutSeconds == nil {
+		m.Spec.NodeStartupTimeoutSeconds = &clusterv1.DefaultNodeStartupTimeoutSeconds
 	}
 	if m.Spec.RemediationTemplate != nil && m.Spec.RemediationTemplate.Namespace == "" {
@@ -165,17 +164,17 @@ func (webhook *MachineHealthCheck) validate(oldMHC, newMHC *clusterv1.MachineHea
 	return apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("MachineHealthCheck").GroupKind(), newMHC.Name, allErrs)
 }
-// ValidateCommonFields validates NodeStartupTimeout, MaxUnhealthy, and RemediationTemplate of the MHC.
+// validateCommonFields validates NodeStartupTimeoutSeconds, MaxUnhealthy, and RemediationTemplate of the MHC.
 // These are the fields in common with other types which define MachineHealthChecks such as MachineHealthCheckClass and MachineHealthCheckTopology.
 func (webhook *MachineHealthCheck) validateCommonFields(m *clusterv1.MachineHealthCheck, fldPath *field.Path) field.ErrorList {
 	var allErrs field.ErrorList
-	if m.Spec.NodeStartupTimeout != nil &&
-		m.Spec.NodeStartupTimeout.Seconds() != disabledNodeStartupTimeout.Seconds() &&
-		m.Spec.NodeStartupTimeout.Seconds() < minNodeStartupTimeout.Seconds() {
+	if m.Spec.NodeStartupTimeoutSeconds != nil &&
+		*m.Spec.NodeStartupTimeoutSeconds != disabledNodeStartupTimeoutSeconds &&
+		*m.Spec.NodeStartupTimeoutSeconds < minNodeStartupTimeoutSeconds {
 		allErrs = append(
 			allErrs,
-			field.Invalid(fldPath.Child("nodeStartupTimeout"), m.Spec.NodeStartupTimeout.String(), "must be at least 30s"),
+			field.Invalid(fldPath.Child("nodeStartupTimeoutSeconds"), *m.Spec.NodeStartupTimeoutSeconds, "must be at least 30s"),
 		)
 	}
 	if m.Spec.MaxUnhealthy != nil {
diff --git a/internal/webhooks/machinehealthcheck_test.go b/internal/webhooks/machinehealthcheck_test.go
index f7df68b28c89..6b21cb9763c1 100644
--- a/internal/webhooks/machinehealthcheck_test.go
+++ b/internal/webhooks/machinehealthcheck_test.go
@@ -18,7 +18,6 @@ package webhooks
 import (
 	"testing"
-	"time"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
@@ -55,8 +54,8 @@ func TestMachineHealthCheckDefault(t *testing.T) {
 	g.Expect(mhc.Labels[clusterv1.ClusterNameLabel]).To(Equal(mhc.Spec.ClusterName))
 	g.Expect(mhc.Spec.MaxUnhealthy.String()).To(Equal("100%"))
-	g.Expect(mhc.Spec.NodeStartupTimeout).ToNot(BeNil())
-	g.Expect(*mhc.Spec.NodeStartupTimeout).To(BeComparableTo(metav1.Duration{Duration: 10 * time.Minute}))
+	g.Expect(mhc.Spec.NodeStartupTimeoutSeconds).ToNot(BeNil())
+	g.Expect(*mhc.Spec.NodeStartupTimeoutSeconds).To(Equal(int32(10 * 60)))
 	g.Expect(mhc.Spec.RemediationTemplate.Namespace).To(Equal(mhc.Namespace))
 }
@@ -247,15 +246,15 @@ func TestMachineHealthCheckUnhealthyNodeConditions(t *testing.T) {
 }
 func TestMachineHealthCheckNodeStartupTimeout(t *testing.T) {
-	zero := metav1.Duration{Duration: 0}
-	twentyNineSeconds := metav1.Duration{Duration: 29 * time.Second}
-	thirtySeconds := metav1.Duration{Duration: 30 * time.Second}
-	oneMinute := metav1.Duration{Duration: 1 * time.Minute}
-	minusOneMinute := metav1.Duration{Duration: -1 * time.Minute}
+	zero := int32(0)
+	twentyNineSeconds := int32(29)
+	thirtySeconds := int32(30)
+	oneMinute := int32(60)
+	minusOneMinute := int32(-60)
 	tests := []struct {
 		name      string
-		timeout   *metav1.Duration
+		timeout   *int32
 		expectErr bool
 	}{
 		{
@@ -295,7 +294,7 @@ func TestMachineHealthCheckNodeStartupTimeout(t *testing.T) {
 			mhc := &clusterv1.MachineHealthCheck{
 				Spec: clusterv1.MachineHealthCheckSpec{
-					NodeStartupTimeout: tt.timeout,
+					NodeStartupTimeoutSeconds: tt.timeout,
 					Selector: metav1.LabelSelector{
 						MatchLabels: map[string]string{
 							"test": "test",
diff --git a/test/e2e/clusterclass_changes.go b/test/e2e/clusterclass_changes.go
index 289dfbc150bf..d541bc52f7ef 100644
--- a/test/e2e/clusterclass_changes.go
+++ b/test/e2e/clusterclass_changes.go
@@ -22,11 +22,13 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
+	"time"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/klog/v2"
 	"k8s.io/utils/ptr"
@@ -350,7 +352,7 @@ func modifyControlPlaneViaClusterClassAndWait(ctx context.Context, input modifyC
 		g.Expect(err).ToNot(HaveOccurred())
 		// Verify that the fields from Cluster topology are set on the control plane.
-		assertControlPlaneTopologyFields(g, controlPlane, controlPlaneTopology)
+		assertControlPlaneTopologyFields(g, contractVersion, controlPlane, controlPlaneTopology)
 		// Verify that ModifyControlPlaneFields have been set.
 		for fieldPath, expectedValue := range input.ModifyControlPlaneFields {
@@ -378,7 +380,7 @@ func modifyControlPlaneViaClusterClassAndWait(ctx context.Context, input modifyC
 // assertControlPlaneTopologyFields asserts that all fields set in the ControlPlaneTopology have been set on the ControlPlane.
 // Note: We intentionally focus on the fields set in the ControlPlaneTopology and ignore the ones set through ClusterClass or
 // ControlPlane template as we want to validate that the fields of the ControlPlaneTopology have been propagated correctly.
-func assertControlPlaneTopologyFields(g Gomega, controlPlane *unstructured.Unstructured, controlPlaneTopology clusterv1.ControlPlaneTopology) {
+func assertControlPlaneTopologyFields(g Gomega, contractVersion string, controlPlane *unstructured.Unstructured, controlPlaneTopology clusterv1.ControlPlaneTopology) {
 	metadata, err := contract.ControlPlane().MachineTemplate().Metadata().Get(controlPlane)
 	g.Expect(err).ToNot(HaveOccurred())
 	for k, v := range controlPlaneTopology.Metadata.Labels {
@@ -388,22 +390,40 @@ func assertControlPlaneTopologyFields(g Gomega, controlPlane *unstructured.Unstr
 		g.Expect(metadata.Annotations).To(HaveKeyWithValue(k, v))
 	}
-	if controlPlaneTopology.NodeDrainTimeout != nil {
-		nodeDrainTimeout, err := contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Get(controlPlane)
-		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(nodeDrainTimeout).To(Equal(controlPlaneTopology.NodeDrainTimeout))
+	if controlPlaneTopology.NodeDrainTimeoutSeconds != nil {
+		if contractVersion == "v1beta1" {
+			nodeDrainTimeout, err := contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Get(controlPlane)
+			g.Expect(err).ToNot(HaveOccurred())
+			g.Expect(*nodeDrainTimeout).To(Equal(metav1.Duration{Duration: time.Duration(*controlPlaneTopology.NodeDrainTimeoutSeconds) * time.Second}))
+		} else {
+			nodeDrainTimeout, err := contract.ControlPlane().MachineTemplate().NodeDrainTimeoutSeconds().Get(controlPlane)
+			g.Expect(err).ToNot(HaveOccurred())
+			g.Expect(*nodeDrainTimeout).To(Equal(*controlPlaneTopology.NodeDrainTimeoutSeconds))
+		}
 	}
-	if controlPlaneTopology.NodeDeletionTimeout != nil {
-		nodeDeletionTimeout, err := contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Get(controlPlane)
-		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(nodeDeletionTimeout).To(Equal(controlPlaneTopology.NodeDeletionTimeout))
+	if controlPlaneTopology.NodeDeletionTimeoutSeconds != nil {
+		if contractVersion == "v1beta1" {
+			nodeDeletionTimeout, err := contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Get(controlPlane)
+			g.Expect(err).ToNot(HaveOccurred())
+			g.Expect(*nodeDeletionTimeout).To(Equal(metav1.Duration{Duration: time.Duration(*controlPlaneTopology.NodeDeletionTimeoutSeconds) * time.Second}))
+		} else {
+			nodeDeletionTimeout, err := contract.ControlPlane().MachineTemplate().NodeDeletionTimeoutSeconds().Get(controlPlane)
+			g.Expect(err).ToNot(HaveOccurred())
+			g.Expect(*nodeDeletionTimeout).To(Equal(*controlPlaneTopology.NodeDeletionTimeoutSeconds))
+		}
 	}
-	if controlPlaneTopology.NodeVolumeDetachTimeout != nil {
-		nodeVolumeDetachTimeout, err := contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Get(controlPlane)
-		g.Expect(err).ToNot(HaveOccurred())
-		g.Expect(nodeVolumeDetachTimeout).To(Equal(controlPlaneTopology.NodeVolumeDetachTimeout))
+	if controlPlaneTopology.NodeVolumeDetachTimeoutSeconds != nil {
+		if contractVersion == "v1beta1" {
+			nodeVolumeDetachTimeout, err := contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Get(controlPlane)
+			g.Expect(err).ToNot(HaveOccurred())
+			g.Expect(*nodeVolumeDetachTimeout).To(Equal(metav1.Duration{Duration: time.Duration(*controlPlaneTopology.NodeVolumeDetachTimeoutSeconds) * time.Second}))
+		} else {
+			nodeVolumeDetachTimeout, err := contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeoutSeconds().Get(controlPlane)
+			g.Expect(err).ToNot(HaveOccurred())
+			g.Expect(*nodeVolumeDetachTimeout).To(Equal(*controlPlaneTopology.NodeVolumeDetachTimeoutSeconds))
+		}
 	}
 }
@@ -669,16 +689,16 @@ func assertMachineDeploymentTopologyFields(g Gomega, md clusterv1.MachineDeploym
 		g.Expect(md.Annotations).To(HaveKeyWithValue(k, v))
 	}
-	if mdTopology.NodeDrainTimeout != nil {
-		g.Expect(md.Spec.Template.Spec.NodeDrainTimeout).To(Equal(mdTopology.NodeDrainTimeout))
+	if mdTopology.NodeDrainTimeoutSeconds != nil {
+		g.Expect(md.Spec.Template.Spec.NodeDrainTimeoutSeconds).To(Equal(mdTopology.NodeDrainTimeoutSeconds))
 	}
-	if mdTopology.NodeDeletionTimeout != nil {
-		g.Expect(md.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(mdTopology.NodeDeletionTimeout))
+	if mdTopology.NodeDeletionTimeoutSeconds != nil {
+		g.Expect(md.Spec.Template.Spec.NodeDeletionTimeoutSeconds).To(Equal(mdTopology.NodeDeletionTimeoutSeconds))
 	}
-	if mdTopology.NodeVolumeDetachTimeout != nil {
-		g.Expect(md.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(mdTopology.NodeVolumeDetachTimeout))
+	if mdTopology.NodeVolumeDetachTimeoutSeconds != nil {
+		g.Expect(md.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds).To(Equal(mdTopology.NodeVolumeDetachTimeoutSeconds))
 	}
 	if mdTopology.MinReadySeconds != nil {
@@ -707,16 +727,16 @@ func assertMachinePoolTopologyFields(g Gomega, mp clusterv1.MachinePool, mpTopol
 		g.Expect(mp.Annotations).To(HaveKeyWithValue(k, v))
 	}
-	if mpTopology.NodeDrainTimeout != nil {
-		g.Expect(mp.Spec.Template.Spec.NodeDrainTimeout).To(Equal(mpTopology.NodeDrainTimeout))
+	if mpTopology.NodeDrainTimeoutSeconds != nil {
+		g.Expect(mp.Spec.Template.Spec.NodeDrainTimeoutSeconds).To(Equal(mpTopology.NodeDrainTimeoutSeconds))
 	}
-	if mpTopology.NodeDeletionTimeout != nil {
-		g.Expect(mp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(mpTopology.NodeDeletionTimeout))
+	if mpTopology.NodeDeletionTimeoutSeconds != nil {
+		g.Expect(mp.Spec.Template.Spec.NodeDeletionTimeoutSeconds).To(Equal(mpTopology.NodeDeletionTimeoutSeconds))
 	}
-	if mpTopology.NodeVolumeDetachTimeout != nil {
-		g.Expect(mp.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(mpTopology.NodeVolumeDetachTimeout))
+	if mpTopology.NodeVolumeDetachTimeoutSeconds != nil {
+		g.Expect(mp.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds).To(Equal(mpTopology.NodeVolumeDetachTimeoutSeconds))
 	}
 	if mpTopology.MinReadySeconds != nil {
diff --git a/test/e2e/clusterclass_rollout.go b/test/e2e/clusterclass_rollout.go
index e709b4b9b2ff..0bd161e72f8b 100644
--- a/test/e2e/clusterclass_rollout.go
+++ b/test/e2e/clusterclass_rollout.go
@@ -172,9 +172,9 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas
 			topology.Metadata.Annotations = map[string]string{
 				"Cluster.topology.controlPlane.newAnnotation": "Cluster.topology.controlPlane.newAnnotationValue",
 			}
-			topology.NodeDrainTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second}        //nolint:gosec
-			topology.NodeDeletionTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second}     //nolint:gosec
-			topology.NodeVolumeDetachTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec
+			topology.NodeDrainTimeoutSeconds = ptr.To(rand.Int31n(20))        //nolint:gosec
+			topology.NodeDeletionTimeoutSeconds = ptr.To(rand.Int31n(20))     //nolint:gosec
+			topology.NodeVolumeDetachTimeoutSeconds = ptr.To(rand.Int31n(20)) //nolint:gosec
 		},
 		WaitForControlPlane: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
 	})
@@ -190,10 +190,10 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas
 			topology.Metadata.Annotations = map[string]string{
 				"Cluster.topology.machineDeployment.newAnnotation": "Cluster.topology.machineDeployment.newAnnotationValue",
 			}
-			topology.NodeDrainTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second}        //nolint:gosec
-			topology.NodeDeletionTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second}     //nolint:gosec
-			topology.NodeVolumeDetachTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec
-			topology.MinReadySeconds = ptr.To[int32](rand.Int31n(20))                                                 //nolint:gosec
+			topology.NodeDrainTimeoutSeconds = ptr.To(rand.Int31n(20))        //nolint:gosec
+			topology.NodeDeletionTimeoutSeconds = ptr.To(rand.Int31n(20))     //nolint:gosec
+			topology.NodeVolumeDetachTimeoutSeconds = ptr.To(rand.Int31n(20)) //nolint:gosec
+			topology.MinReadySeconds = ptr.To[int32](rand.Int31n(20))         //nolint:gosec
 			topology.Strategy = &clusterv1.MachineDeploymentStrategy{
 				Type: clusterv1.RollingUpdateMachineDeploymentStrategyType,
 				RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{
@@ -220,10 +220,10 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas
 			topology.Metadata.Annotations = map[string]string{
 				"Cluster.topology.machinePool.newAnnotation": "Cluster.topology.machinePool.newAnnotationValue",
 			}
-			topology.NodeDrainTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second}        //nolint:gosec
-			topology.NodeDeletionTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second}     //nolint:gosec
-			topology.NodeVolumeDetachTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec
-			topology.MinReadySeconds = ptr.To[int32](rand.Int31n(20))                                                 //nolint:gosec
+			topology.NodeDrainTimeoutSeconds = ptr.To(rand.Int31n(20))        //nolint:gosec
+			topology.NodeDeletionTimeoutSeconds = ptr.To(rand.Int31n(20))     //nolint:gosec
+			topology.NodeVolumeDetachTimeoutSeconds = ptr.To(rand.Int31n(20)) //nolint:gosec
+			topology.MinReadySeconds = ptr.To[int32](rand.Int31n(20))         //nolint:gosec
 		},
 		WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
 	})
@@ -1270,8 +1270,11 @@ func modifyControlPlaneViaClusterAndWait(ctx context.Context, input modifyContro
 		controlPlane, err := external.Get(ctx, mgmtClient, controlPlaneRef)
 		g.Expect(err).ToNot(HaveOccurred())
+		contractVersion, err := contract.GetContractVersion(ctx, mgmtClient, controlPlane.GroupVersionKind())
+		g.Expect(err).ToNot(HaveOccurred())
+
 		// Verify that the fields from Cluster topology are set on the control plane.
-		assertControlPlaneTopologyFields(g, controlPlane, controlPlaneTopology)
+		assertControlPlaneTopologyFields(g, contractVersion, controlPlane, controlPlaneTopology)
 	}, input.WaitForControlPlane...).Should(Succeed())
 }
diff --git a/test/e2e/node_drain.go b/test/e2e/node_drain.go
index ba2a5da2eb55..5e28b33c75a3 100644
--- a/test/e2e/node_drain.go
+++ b/test/e2e/node_drain.go
@@ -78,7 +78,7 @@ type NodeDrainTimeoutSpecInput struct {
 // NodeDrainTimeoutSpec goes through the following steps:
 // * Create cluster with 3 CP & 1 worker Machine
-// * Ensure Node label is set & NodeDrainTimeout is set to 0 (wait forever)
+// * Ensure Node label is set & NodeDrainTimeoutSeconds is set to 0 (wait forever)
 // * Deploy MachineDrainRules
 // * Deploy Deployment with unevictable Pods on CP & MD Nodes
 // * Deploy Deployment with unevictable Pods with `wait-completed` label on CP & MD Nodes
@@ -95,7 +95,7 @@ type NodeDrainTimeoutSpecInput struct {
 // * Verify Node drains for control plane and MachineDeployment Machines are blocked by PDBs
 // * Delete the unevictable pod PDBs
 // * Verify machine deletion is blocked by waiting for volume detachment (only if VerifyNodeVolumeDetach is enabled)
-// * Set NodeDrainTimeout to 1s to unblock Node drain
+// * Set NodeDrainTimeoutSeconds to 1s to unblock Node drain
 // * Unblocks waiting for volume detachment (only if VerifyNodeVolumeDetach is enabled)
 // * Verify scale down succeeded because Node drains were unblocked.
 func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeoutSpecInput) {
@@ -163,14 +163,14 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo
 	// This label will be added to all Machines so we can later create Pods on the right Nodes.
 	nodeOwnerLabelKey := "owner.node.cluster.x-k8s.io"
-	By("Ensure Node label is set & NodeDrainTimeout is set to 0 (wait forever) on ControlPlane and MachineDeployment topologies")
+	By("Ensure Node label is set & NodeDrainTimeoutSeconds is set to 0 (wait forever) on ControlPlane and MachineDeployment topologies")
 	modifyControlPlaneViaClusterAndWait(ctx, modifyControlPlaneViaClusterAndWaitInput{
 		ClusterProxy: input.BootstrapClusterProxy,
 		Cluster:      cluster,
 		ModifyControlPlaneTopology: func(topology *clusterv1.ControlPlaneTopology) {
-			topology.NodeDrainTimeout = &metav1.Duration{Duration: time.Duration(0)}
+			topology.NodeDrainTimeoutSeconds = ptr.To(int32(0))
 			if input.VerifyNodeVolumeDetach {
-				topology.NodeVolumeDetachTimeout = &metav1.Duration{Duration: time.Duration(0)}
+				topology.NodeVolumeDetachTimeoutSeconds = ptr.To(int32(0))
 			}
 			if topology.Metadata.Labels == nil {
 				topology.Metadata.Labels = map[string]string{}
@@ -183,9 +183,9 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo
 		ClusterProxy: input.BootstrapClusterProxy,
 		Cluster:      cluster,
 		ModifyMachineDeploymentTopology: func(topology *clusterv1.MachineDeploymentTopology) {
-			topology.NodeDrainTimeout = &metav1.Duration{Duration: time.Duration(0)}
+			topology.NodeDrainTimeoutSeconds = ptr.To(int32(0))
 			if input.VerifyNodeVolumeDetach {
-				topology.NodeVolumeDetachTimeout = &metav1.Duration{Duration: time.Duration(0)}
+				topology.NodeVolumeDetachTimeoutSeconds = ptr.To(int32(0))
 			}
 			if topology.Metadata.Labels == nil {
 				topology.Metadata.Labels = map[string]string{}
@@ -575,17 +575,17 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo
 		input.UnblockNodeVolumeDetachment(ctx, input.BootstrapClusterProxy, cluster)
 	}
-	// Set NodeDrainTimeout and NodeVolumeDetachTimeout to let the second ControlPlane Node get deleted without requiring manual intervention.
-	By("Set NodeDrainTimeout and NodeVolumeDetachTimeout for ControlPlanes to 1s to unblock Node drain")
-	// Note: This also verifies that KCP & MachineDeployments are still propagating changes to NodeDrainTimeout down to
+	// Set NodeDrainTimeoutSeconds and NodeVolumeDetachTimeoutSeconds to let the second ControlPlane Node get deleted without requiring manual intervention.
+	By("Set NodeDrainTimeoutSeconds and NodeVolumeDetachTimeoutSeconds for ControlPlanes to 1s to unblock Node drain")
+	// Note: This also verifies that KCP & MachineDeployments are still propagating changes to NodeDrainTimeoutSeconds down to
 	// Machines that already have a deletionTimestamp.
-	drainTimeout := &metav1.Duration{Duration: time.Duration(1) * time.Second}
+	drainTimeout := ptr.To(int32(1))
 	modifyControlPlaneViaClusterAndWait(ctx, modifyControlPlaneViaClusterAndWaitInput{
 		ClusterProxy: input.BootstrapClusterProxy,
 		Cluster:      cluster,
 		ModifyControlPlaneTopology: func(topology *clusterv1.ControlPlaneTopology) {
-			topology.NodeDrainTimeout = drainTimeout
-			topology.NodeVolumeDetachTimeout = drainTimeout
+			topology.NodeDrainTimeoutSeconds = drainTimeout
+			topology.NodeVolumeDetachTimeoutSeconds = drainTimeout
 		},
 		WaitForControlPlane: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
 	})
@@ -793,26 +793,26 @@ func verifyPodEvictedAndSucceeded(g Gomega, pod *corev1.Pod) {
 	g.Expect(podEvicted).To(BeTrue(), "Expected Pod to be evicted")
 }
-func getDrainAndDeleteInterval(deleteInterval []interface{}, drainTimeout *metav1.Duration, replicas int) []interface{} {
+func getDrainAndDeleteInterval(deleteInterval []interface{}, drainTimeout *int32, replicas int) []interface{} {
 	deleteTimeout, err := time.ParseDuration(deleteInterval[0].(string))
 	Expect(err).ToNot(HaveOccurred())
 	// We add the drain timeout to the specified delete timeout per replica.
-	intervalDuration := (drainTimeout.Duration + deleteTimeout) * time.Duration(replicas)
+	intervalDuration := (time.Duration(*drainTimeout)*time.Second + deleteTimeout) * time.Duration(replicas)
 	res := []interface{}{intervalDuration.String(), deleteInterval[1]}
 	return res
 }
 func unblockNodeVolumeDetachmentFunc(waitControlPlaneIntervals, waitWorkerNodeIntervals []interface{}) func(ctx context.Context, bootstrapClusterProxy framework.ClusterProxy, cluster *clusterv1.Cluster) {
 	return func(ctx context.Context, bootstrapClusterProxy framework.ClusterProxy, cluster *clusterv1.Cluster) {
-		By("Set NodeVolumeDetachTimeout to 1s to unblock waiting for volume detachments")
-		// Note: This also verifies that KCP & MachineDeployments are still propagating changes to NodeVolumeDetachTimeout down to
+		By("Set NodeVolumeDetachTimeoutSeconds to 1s to unblock waiting for volume detachments")
+		// Note: This also verifies that KCP & MachineDeployments are still propagating changes to NodeVolumeDetachTimeoutSeconds down to
 		// Machines that already have a deletionTimestamp.
- nodeVolumeDetachTimeout := &metav1.Duration{Duration: time.Duration(1) * time.Second} + nodeVolumeDetachTimeout := ptr.To(int32(1)) modifyControlPlaneViaClusterAndWait(ctx, modifyControlPlaneViaClusterAndWaitInput{ ClusterProxy: bootstrapClusterProxy, Cluster: cluster, ModifyControlPlaneTopology: func(topology *clusterv1.ControlPlaneTopology) { - topology.NodeVolumeDetachTimeout = nodeVolumeDetachTimeout + topology.NodeVolumeDetachTimeoutSeconds = nodeVolumeDetachTimeout }, WaitForControlPlane: waitControlPlaneIntervals, }) @@ -820,7 +820,7 @@ func unblockNodeVolumeDetachmentFunc(waitControlPlaneIntervals, waitWorkerNodeIn ClusterProxy: bootstrapClusterProxy, Cluster: cluster, ModifyMachineDeploymentTopology: func(topology *clusterv1.MachineDeploymentTopology) { - topology.NodeVolumeDetachTimeout = nodeVolumeDetachTimeout + topology.NodeVolumeDetachTimeoutSeconds = nodeVolumeDetachTimeout }, WaitForMachineDeployments: waitWorkerNodeIntervals, }) diff --git a/test/extension/handlers/topologymutation/handler_integration_test.go b/test/extension/handlers/topologymutation/handler_integration_test.go index 7345b72db645..4308800dd18b 100644 --- a/test/extension/handlers/topologymutation/handler_integration_test.go +++ b/test/extension/handlers/topologymutation/handler_integration_test.go @@ -21,6 +21,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "os" "testing" @@ -98,8 +99,23 @@ func TestHandler(t *testing.T) { err = clusterClassReconciler.SetupWithManager(ctx, mgr, controller.Options{}) g.Expect(err).ToNot(HaveOccurred()) + // computeControlPlane has to get the contract version to set timeout fields correctly + scheme := runtime.NewScheme() + _ = apiextensionsv1.AddToScheme(scheme) + crd := &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubeadmcontrolplanes.controlplane.cluster.x-k8s.io", + Labels: map[string]string{ + // Set contract label for tt.contract. + fmt.Sprintf("%s/%s", clusterv1.GroupVersion.Group, "v1beta1"): "v1beta1", + fmt.Sprintf("%s/%s", clusterv1.GroupVersion.Group, "v1beta2"): "v1beta2", + }, + }, + } + clientWithV1Beta2ContractCRD := fake.NewClientBuilder().WithScheme(scheme).WithObjects(crd).Build() + // Create a desired state generator. - desiredStateGenerator := desiredstate.NewGenerator(nil, nil, runtimeClient) + desiredStateGenerator := desiredstate.NewGenerator(clientWithV1Beta2ContractCRD, nil, runtimeClient) // Note: as of today we don't have to set any fields and also don't have to call // SetupWebhookWithManager because DefaultAndValidateVariables doesn't need any of that. diff --git a/test/framework/machinedeployment_helpers.go b/test/framework/machinedeployment_helpers.go index 5356026e3fee..abb0654d419d 100644 --- a/test/framework/machinedeployment_helpers.go +++ b/test/framework/machinedeployment_helpers.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "math/rand" - "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -392,9 +391,9 @@ func UpgradeMachineDeploymentInPlaceMutableFieldsAndWait(ctx context.Context, in deployment.Spec.Template.Annotations = map[string]string{} } deployment.Spec.Template.Annotations["new-annotation"] = "new-annotation-value" - deployment.Spec.Template.Spec.NodeDrainTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec - deployment.Spec.Template.Spec.NodeDeletionTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec - deployment.Spec.Template.Spec.NodeVolumeDetachTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec + deployment.Spec.Template.Spec.NodeDrainTimeoutSeconds = ptr.To(rand.Int31n(20)) //nolint:gosec + deployment.Spec.Template.Spec.NodeDeletionTimeoutSeconds = ptr.To(rand.Int31n(20)) //nolint:gosec + deployment.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds = ptr.To(rand.Int31n(20)) //nolint:gosec Eventually(func() error { return patchHelper.Patch(ctx, deployment) }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch in-place mutable fields of MachineDeployment %s", klog.KObj(deployment)) @@ -426,9 +425,9 @@ func UpgradeMachineDeploymentInPlaceMutableFieldsAndWait(ctx context.Context, in g.Expect(machineSetAfterUpgrade.Spec.Template.Labels).To(HaveKeyWithValue("new-label", "new-label-value")) g.Expect(machineSetAfterUpgrade.Spec.Template.Annotations).To(HaveKeyWithValue("new-annotation", "new-annotation-value")) // Timeouts should be propagated. - g.Expect(machineSetAfterUpgrade.Spec.Template.Spec.NodeDrainTimeout).To(Equal(deployment.Spec.Template.Spec.NodeDrainTimeout)) - g.Expect(machineSetAfterUpgrade.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(deployment.Spec.Template.Spec.NodeDeletionTimeout)) - g.Expect(machineSetAfterUpgrade.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(deployment.Spec.Template.Spec.NodeVolumeDetachTimeout)) + g.Expect(machineSetAfterUpgrade.Spec.Template.Spec.NodeDrainTimeoutSeconds).To(Equal(deployment.Spec.Template.Spec.NodeDrainTimeoutSeconds)) + g.Expect(machineSetAfterUpgrade.Spec.Template.Spec.NodeDeletionTimeoutSeconds).To(Equal(deployment.Spec.Template.Spec.NodeDeletionTimeoutSeconds)) + g.Expect(machineSetAfterUpgrade.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds).To(Equal(deployment.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds)) log.Logf("Verify fields have been propagated to Machines") for _, m := range machinesAfterUpgrade { @@ -436,9 +435,9 @@ func UpgradeMachineDeploymentInPlaceMutableFieldsAndWait(ctx context.Context, in g.Expect(m.Labels).To(HaveKeyWithValue("new-label", "new-label-value")) g.Expect(m.Annotations).To(HaveKeyWithValue("new-annotation", "new-annotation-value")) // Timeouts should be propagated. 
diff --git a/util/test/builder/builders.go b/util/test/builder/builders.go
index c1110c30bb6f..d5e5a6b443ef 100644
--- a/util/test/builder/builders.go
+++ b/util/test/builder/builders.go
@@ -344,9 +344,9 @@ type ClusterClassBuilder struct {
 	controlPlaneTemplate                      *unstructured.Unstructured
 	controlPlaneInfrastructureMachineTemplate *unstructured.Unstructured
 	controlPlaneMHC                           *clusterv1.MachineHealthCheckClass
-	controlPlaneNodeDrainTimeout              *metav1.Duration
-	controlPlaneNodeVolumeDetachTimeout       *metav1.Duration
-	controlPlaneNodeDeletionTimeout           *metav1.Duration
+	controlPlaneNodeDrainTimeout              *int32
+	controlPlaneNodeVolumeDetachTimeout       *int32
+	controlPlaneNodeDeletionTimeout           *int32
 	controlPlaneNamingStrategy                *clusterv1.ControlPlaneClassNamingStrategy
 	infraClusterNamingStrategy                *clusterv1.InfrastructureClassNamingStrategy
 	machineDeploymentClasses                  []clusterv1.MachineDeploymentClass
@@ -404,20 +404,20 @@ func (c *ClusterClassBuilder) WithControlPlaneMachineHealthCheck(mhc *clusterv1.
 	return c
 }
 
-// WithControlPlaneNodeDrainTimeout adds a NodeDrainTimeout for the ControlPlane to the ClusterClassBuilder.
-func (c *ClusterClassBuilder) WithControlPlaneNodeDrainTimeout(t *metav1.Duration) *ClusterClassBuilder {
+// WithControlPlaneNodeDrainTimeout adds a NodeDrainTimeoutSeconds for the ControlPlane to the ClusterClassBuilder.
+func (c *ClusterClassBuilder) WithControlPlaneNodeDrainTimeout(t *int32) *ClusterClassBuilder {
 	c.controlPlaneNodeDrainTimeout = t
 	return c
 }
 
-// WithControlPlaneNodeVolumeDetachTimeout adds a NodeVolumeDetachTimeout for the ControlPlane to the ClusterClassBuilder.
-func (c *ClusterClassBuilder) WithControlPlaneNodeVolumeDetachTimeout(t *metav1.Duration) *ClusterClassBuilder {
+// WithControlPlaneNodeVolumeDetachTimeout adds a NodeVolumeDetachTimeoutSeconds for the ControlPlane to the ClusterClassBuilder.
+func (c *ClusterClassBuilder) WithControlPlaneNodeVolumeDetachTimeout(t *int32) *ClusterClassBuilder {
 	c.controlPlaneNodeVolumeDetachTimeout = t
 	return c
 }
 
-// WithControlPlaneNodeDeletionTimeout adds a NodeDeletionTimeout for the ControlPlane to the ClusterClassBuilder.
-func (c *ClusterClassBuilder) WithControlPlaneNodeDeletionTimeout(t *metav1.Duration) *ClusterClassBuilder {
+// WithControlPlaneNodeDeletionTimeout adds a NodeDeletionTimeoutSeconds for the ControlPlane to the ClusterClassBuilder.
+func (c *ClusterClassBuilder) WithControlPlaneNodeDeletionTimeout(t *int32) *ClusterClassBuilder {
 	c.controlPlaneNodeDeletionTimeout = t
 	return c
 }
@@ -518,13 +518,13 @@ func (c *ClusterClassBuilder) Build() *clusterv1.ClusterClass {
 		obj.Spec.ControlPlane.MachineHealthCheck = c.controlPlaneMHC
 	}
 	if c.controlPlaneNodeDrainTimeout != nil {
-		obj.Spec.ControlPlane.NodeDrainTimeout = c.controlPlaneNodeDrainTimeout
+		obj.Spec.ControlPlane.NodeDrainTimeoutSeconds = c.controlPlaneNodeDrainTimeout
 	}
 	if c.controlPlaneNodeVolumeDetachTimeout != nil {
-		obj.Spec.ControlPlane.NodeVolumeDetachTimeout = c.controlPlaneNodeVolumeDetachTimeout
+		obj.Spec.ControlPlane.NodeVolumeDetachTimeoutSeconds = c.controlPlaneNodeVolumeDetachTimeout
 	}
 	if c.controlPlaneNodeDeletionTimeout != nil {
-		obj.Spec.ControlPlane.NodeDeletionTimeout = c.controlPlaneNodeDeletionTimeout
+		obj.Spec.ControlPlane.NodeDeletionTimeoutSeconds = c.controlPlaneNodeDeletionTimeout
 	}
 	if c.controlPlaneInfrastructureMachineTemplate != nil {
 		obj.Spec.ControlPlane.MachineInfrastructure = &clusterv1.LocalObjectTemplate{
@@ -553,9 +553,9 @@ type MachineDeploymentClassBuilder struct {
 	machineHealthCheckClass *clusterv1.MachineHealthCheckClass
 	readinessGates          []clusterv1.MachineReadinessGate
 	failureDomain           *string
-	nodeDrainTimeout        *metav1.Duration
-	nodeVolumeDetachTimeout *metav1.Duration
-	nodeDeletionTimeout     *metav1.Duration
+	nodeDrainTimeout        *int32
+	nodeVolumeDetachTimeout *int32
+	nodeDeletionTimeout     *int32
 	minReadySeconds         *int32
 	strategy                *clusterv1.MachineDeploymentStrategy
 	namingStrategy          *clusterv1.MachineDeploymentClassNamingStrategy
@@ -610,20 +610,20 @@ func (m *MachineDeploymentClassBuilder) WithFailureDomain(f *string) *MachineDep
 	return m
 }
 
-// WithNodeDrainTimeout sets the NodeDrainTimeout for the MachineDeploymentClassBuilder.
-func (m *MachineDeploymentClassBuilder) WithNodeDrainTimeout(t *metav1.Duration) *MachineDeploymentClassBuilder {
+// WithNodeDrainTimeout sets the NodeDrainTimeoutSeconds for the MachineDeploymentClassBuilder.
+func (m *MachineDeploymentClassBuilder) WithNodeDrainTimeout(t *int32) *MachineDeploymentClassBuilder {
 	m.nodeDrainTimeout = t
 	return m
 }
 
-// WithNodeVolumeDetachTimeout sets the NodeVolumeDetachTimeout for the MachineDeploymentClassBuilder.
-func (m *MachineDeploymentClassBuilder) WithNodeVolumeDetachTimeout(t *metav1.Duration) *MachineDeploymentClassBuilder {
+// WithNodeVolumeDetachTimeout sets the NodeVolumeDetachTimeoutSeconds for the MachineDeploymentClassBuilder.
+func (m *MachineDeploymentClassBuilder) WithNodeVolumeDetachTimeout(t *int32) *MachineDeploymentClassBuilder {
 	m.nodeVolumeDetachTimeout = t
 	return m
 }
 
-// WithNodeDeletionTimeout sets the NodeDeletionTimeout for the MachineDeploymentClassBuilder.
-func (m *MachineDeploymentClassBuilder) WithNodeDeletionTimeout(t *metav1.Duration) *MachineDeploymentClassBuilder {
+// WithNodeDeletionTimeout sets the NodeDeletionTimeoutSeconds for the MachineDeploymentClassBuilder.
+func (m *MachineDeploymentClassBuilder) WithNodeDeletionTimeout(t *int32) *MachineDeploymentClassBuilder {
 	m.nodeDeletionTimeout = t
 	return m
 }
@@ -673,13 +673,13 @@ func (m *MachineDeploymentClassBuilder) Build() *clusterv1.MachineDeploymentClas
 		obj.FailureDomain = m.failureDomain
 	}
 	if m.nodeDrainTimeout != nil {
-		obj.NodeDrainTimeout = m.nodeDrainTimeout
+		obj.NodeDrainTimeoutSeconds = m.nodeDrainTimeout
 	}
 	if m.nodeVolumeDetachTimeout != nil {
-		obj.NodeVolumeDetachTimeout = m.nodeVolumeDetachTimeout
+		obj.NodeVolumeDetachTimeoutSeconds = m.nodeVolumeDetachTimeout
 	}
 	if m.nodeDeletionTimeout != nil {
-		obj.NodeDeletionTimeout = m.nodeDeletionTimeout
+		obj.NodeDeletionTimeoutSeconds = m.nodeDeletionTimeout
 	}
 	if m.minReadySeconds != nil {
 		obj.MinReadySeconds = m.minReadySeconds
@@ -701,9 +701,9 @@ type MachinePoolClassBuilder struct {
 	labels                  map[string]string
 	annotations             map[string]string
 	failureDomains          []string
-	nodeDrainTimeout        *metav1.Duration
-	nodeVolumeDetachTimeout *metav1.Duration
-	nodeDeletionTimeout     *metav1.Duration
+	nodeDrainTimeout        *int32
+	nodeVolumeDetachTimeout *int32
+	nodeDeletionTimeout     *int32
 	minReadySeconds         *int32
 	namingStrategy          *clusterv1.MachinePoolClassNamingStrategy
 }
@@ -745,20 +745,20 @@ func (m *MachinePoolClassBuilder) WithFailureDomains(failureDomains ...string) *
 	return m
 }
 
-// WithNodeDrainTimeout sets the NodeDrainTimeout for the MachinePoolClassBuilder.
-func (m *MachinePoolClassBuilder) WithNodeDrainTimeout(t *metav1.Duration) *MachinePoolClassBuilder {
+// WithNodeDrainTimeout sets the NodeDrainTimeoutSeconds for the MachinePoolClassBuilder.
+func (m *MachinePoolClassBuilder) WithNodeDrainTimeout(t *int32) *MachinePoolClassBuilder {
 	m.nodeDrainTimeout = t
 	return m
 }
 
-// WithNodeVolumeDetachTimeout sets the NodeVolumeDetachTimeout for the MachinePoolClassBuilder.
-func (m *MachinePoolClassBuilder) WithNodeVolumeDetachTimeout(t *metav1.Duration) *MachinePoolClassBuilder {
+// WithNodeVolumeDetachTimeout sets the NodeVolumeDetachTimeoutSeconds for the MachinePoolClassBuilder.
+func (m *MachinePoolClassBuilder) WithNodeVolumeDetachTimeout(t *int32) *MachinePoolClassBuilder {
 	m.nodeVolumeDetachTimeout = t
 	return m
 }
 
-// WithNodeDeletionTimeout sets the NodeDeletionTimeout for the MachinePoolClassBuilder.
-func (m *MachinePoolClassBuilder) WithNodeDeletionTimeout(t *metav1.Duration) *MachinePoolClassBuilder {
+// WithNodeDeletionTimeout sets the NodeDeletionTimeoutSeconds for the MachinePoolClassBuilder.
+func (m *MachinePoolClassBuilder) WithNodeDeletionTimeout(t *int32) *MachinePoolClassBuilder {
 	m.nodeDeletionTimeout = t
 	return m
 }
@@ -796,13 +796,13 @@ func (m *MachinePoolClassBuilder) Build() *clusterv1.MachinePoolClass {
 		obj.FailureDomains = m.failureDomains
 	}
 	if m.nodeDrainTimeout != nil {
-		obj.NodeDrainTimeout = m.nodeDrainTimeout
+		obj.NodeDrainTimeoutSeconds = m.nodeDrainTimeout
 	}
 	if m.nodeVolumeDetachTimeout != nil {
-		obj.NodeVolumeDetachTimeout = m.nodeVolumeDetachTimeout
+		obj.NodeVolumeDetachTimeoutSeconds = m.nodeVolumeDetachTimeout
 	}
 	if m.nodeDeletionTimeout != nil {
-		obj.NodeDeletionTimeout = m.nodeDeletionTimeout
+		obj.NodeDeletionTimeoutSeconds = m.nodeDeletionTimeout
 	}
 	if m.minReadySeconds != nil {
 		obj.MinReadySeconds = m.minReadySeconds
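Editor's note: all three builders keep their `With*Timeout` method names and only swap the parameter type, and each `Build()` copies a timeout into the object only when it is non-nil, so an unset field stays distinguishable from an explicit 0. A self-contained miniature of that pattern (the types here are illustrative stand-ins, not the real clusterv1 types):

package main

import "fmt"

// machinePoolClass is a stand-in for clusterv1.MachinePoolClass.
type machinePoolClass struct {
	NodeDrainTimeoutSeconds *int32
}

// machinePoolClassBuilder mirrors the builder shape in util/test/builder.
type machinePoolClassBuilder struct {
	nodeDrainTimeout *int32
}

// WithNodeDrainTimeout sets the NodeDrainTimeoutSeconds for the builder.
func (m *machinePoolClassBuilder) WithNodeDrainTimeout(t *int32) *machinePoolClassBuilder {
	m.nodeDrainTimeout = t
	return m
}

// Build only assigns the field when it was explicitly set, so a nil
// pointer ("unset") never overwrites the zero value in the object.
func (m *machinePoolClassBuilder) Build() *machinePoolClass {
	obj := &machinePoolClass{}
	if m.nodeDrainTimeout != nil {
		obj.NodeDrainTimeoutSeconds = m.nodeDrainTimeout
	}
	return obj
}

func main() {
	t := int32(10)
	mpc := (&machinePoolClassBuilder{}).WithNodeDrainTimeout(&t).Build()
	fmt.Println(*mpc.NodeDrainTimeoutSeconds) // prints 10
}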
diff --git a/util/test/builder/zz_generated.deepcopy.go b/util/test/builder/zz_generated.deepcopy.go
index 410c0fafb3b4..0c7e610ffd93 100644
--- a/util/test/builder/zz_generated.deepcopy.go
+++ b/util/test/builder/zz_generated.deepcopy.go
@@ -144,17 +144,17 @@ func (in *ClusterClassBuilder) DeepCopyInto(out *ClusterClassBuilder) {
 	}
 	if in.controlPlaneNodeDrainTimeout != nil {
 		in, out := &in.controlPlaneNodeDrainTimeout, &out.controlPlaneNodeDrainTimeout
-		*out = new(v1.Duration)
+		*out = new(int32)
 		**out = **in
 	}
 	if in.controlPlaneNodeVolumeDetachTimeout != nil {
 		in, out := &in.controlPlaneNodeVolumeDetachTimeout, &out.controlPlaneNodeVolumeDetachTimeout
-		*out = new(v1.Duration)
+		*out = new(int32)
 		**out = **in
 	}
 	if in.controlPlaneNodeDeletionTimeout != nil {
 		in, out := &in.controlPlaneNodeDeletionTimeout, &out.controlPlaneNodeDeletionTimeout
-		*out = new(v1.Duration)
+		*out = new(int32)
 		**out = **in
 	}
 	if in.controlPlaneNamingStrategy != nil {
@@ -533,17 +533,17 @@ func (in *MachineDeploymentClassBuilder) DeepCopyInto(out *MachineDeploymentClas
 	}
 	if in.nodeDrainTimeout != nil {
 		in, out := &in.nodeDrainTimeout, &out.nodeDrainTimeout
-		*out = new(v1.Duration)
+		*out = new(int32)
 		**out = **in
 	}
 	if in.nodeVolumeDetachTimeout != nil {
 		in, out := &in.nodeVolumeDetachTimeout, &out.nodeVolumeDetachTimeout
-		*out = new(v1.Duration)
+		*out = new(int32)
 		**out = **in
 	}
 	if in.nodeDeletionTimeout != nil {
 		in, out := &in.nodeDeletionTimeout, &out.nodeDeletionTimeout
-		*out = new(v1.Duration)
+		*out = new(int32)
 		**out = **in
 	}
 	if in.minReadySeconds != nil {
@@ -734,17 +734,17 @@ func (in *MachinePoolClassBuilder) DeepCopyInto(out *MachinePoolClassBuilder) {
 	}
 	if in.nodeDrainTimeout != nil {
 		in, out := &in.nodeDrainTimeout, &out.nodeDrainTimeout
-		*out = new(v1.Duration)
+		*out = new(int32)
 		**out = **in
 	}
 	if in.nodeVolumeDetachTimeout != nil {
 		in, out := &in.nodeVolumeDetachTimeout, &out.nodeVolumeDetachTimeout
-		*out = new(v1.Duration)
+		*out = new(int32)
 		**out = **in
 	}
 	if in.nodeDeletionTimeout != nil {
 		in, out := &in.nodeDeletionTimeout, &out.nodeDeletionTimeout
-		*out = new(v1.Duration)
+		*out = new(int32)
 		**out = **in
 	}
 	if in.minReadySeconds != nil {
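Editor's note: in the regenerated deepcopy code only the allocated element type changes, from `v1.Duration` to `int32`; the copy idiom itself is untouched. A minimal hand-written sketch of that idiom (the type is a stand-in mirroring one builder field):

package main

import "fmt"

type machinePoolClassBuilder struct {
	nodeDrainTimeout *int32
}

// DeepCopyInto follows the generated code's idiom: allocate a fresh
// int32 and copy the value, so in and out never share the pointer.
func (in *machinePoolClassBuilder) DeepCopyInto(out *machinePoolClassBuilder) {
	*out = *in
	if in.nodeDrainTimeout != nil {
		in, out := &in.nodeDrainTimeout, &out.nodeDrainTimeout
		*out = new(int32)
		**out = **in
	}
}

func main() {
	t := int32(5)
	src := &machinePoolClassBuilder{nodeDrainTimeout: &t}
	dst := &machinePoolClassBuilder{}
	src.DeepCopyInto(dst)
	t = 99                             // mutating the source value...
	fmt.Println(*dst.nodeDrainTimeout) // ...leaves the copy at 5
}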