diff --git a/api/bootstrap/kubeadm/v1beta1/conversion.go b/api/bootstrap/kubeadm/v1beta1/conversion.go index 158e5eaea16e..6641118f2e89 100644 --- a/api/bootstrap/kubeadm/v1beta1/conversion.go +++ b/api/bootstrap/kubeadm/v1beta1/conversion.go @@ -262,6 +262,9 @@ func (dst *KubeadmConfig) ConvertFrom(srcRaw conversion.Hub) error { // Convert timeouts moved from one struct to another. dst.Spec.ConvertFrom(&src.Spec) + dropEmptyStringsKubeadmConfigSpec(&dst.Spec) + dropEmptyStringsKubeadmConfigStatus(&dst.Status) + // Preserve Hub data on down-conversion except for metadata. return utilconversion.MarshalData(src, dst) } @@ -324,6 +327,8 @@ func (dst *KubeadmConfigTemplate) ConvertFrom(srcRaw conversion.Hub) error { // Convert timeouts moved from one struct to another. dst.Spec.Template.Spec.ConvertFrom(&src.Spec.Template.Spec) + dropEmptyStringsKubeadmConfigSpec(&dst.Spec.Template.Spec) + // Preserve Hub data on down-conversion except for metadata. return utilconversion.MarshalData(src, dst) } @@ -494,3 +499,38 @@ func Convert_v1_Condition_To_v1beta1_Condition(in *metav1.Condition, out *cluste func Convert_v1beta1_Condition_To_v1_Condition(in *clusterv1beta1.Condition, out *metav1.Condition, s apimachineryconversion.Scope) error { return clusterv1beta1.Convert_v1beta1_Condition_To_v1_Condition(in, out, s) } + +func dropEmptyStringsKubeadmConfigSpec(dst *KubeadmConfigSpec) { + for i, u := range dst.Users { + dropEmptyString(&u.Gecos) + dropEmptyString(&u.Groups) + dropEmptyString(&u.HomeDir) + dropEmptyString(&u.Shell) + dropEmptyString(&u.Passwd) + dropEmptyString(&u.PrimaryGroup) + dropEmptyString(&u.Sudo) + dst.Users[i] = u + } + + if dst.DiskSetup != nil { + for i, p := range dst.DiskSetup.Partitions { + dropEmptyString(&p.TableType) + dst.DiskSetup.Partitions[i] = p + } + for i, f := range dst.DiskSetup.Filesystems { + dropEmptyString(&f.Partition) + dropEmptyString(&f.ReplaceFS) + dst.DiskSetup.Filesystems[i] = f + } + } +} + +func 
dropEmptyStringsKubeadmConfigStatus(dst *KubeadmConfigStatus) { + dropEmptyString(&dst.DataSecretName) +} + +func dropEmptyString(s **string) { + if *s != nil && **s == "" { + *s = nil + } +} diff --git a/api/bootstrap/kubeadm/v1beta1/conversion_test.go b/api/bootstrap/kubeadm/v1beta1/conversion_test.go index b706cdfb74c2..5ce4917ca713 100644 --- a/api/bootstrap/kubeadm/v1beta1/conversion_test.go +++ b/api/bootstrap/kubeadm/v1beta1/conversion_test.go @@ -133,6 +133,8 @@ func spokeKubeadmConfigSpec(in *KubeadmConfigSpec, c randfill.Continue) { // Drop UseExperimentalRetryJoin as we intentionally don't preserve it. in.UseExperimentalRetryJoin = false + + dropEmptyStringsKubeadmConfigSpec(in) } func spokeClusterConfiguration(in *ClusterConfiguration, c randfill.Continue) { @@ -178,4 +180,6 @@ func spokeKubeadmConfigStatus(in *KubeadmConfigStatus, c randfill.Continue) { in.V1Beta2 = nil } } + + dropEmptyStringsKubeadmConfigStatus(in) } diff --git a/api/bootstrap/kubeadm/v1beta1/zz_generated.conversion.go b/api/bootstrap/kubeadm/v1beta1/zz_generated.conversion.go index 93bed30a9062..17a8d2f0e8b7 100644 --- a/api/bootstrap/kubeadm/v1beta1/zz_generated.conversion.go +++ b/api/bootstrap/kubeadm/v1beta1/zz_generated.conversion.go @@ -841,8 +841,28 @@ func Convert_v1beta2_Discovery_To_v1beta1_Discovery(in *v1beta2.Discovery, out * } func autoConvert_v1beta1_DiskSetup_To_v1beta2_DiskSetup(in *DiskSetup, out *v1beta2.DiskSetup, s conversion.Scope) error { - out.Partitions = *(*[]v1beta2.Partition)(unsafe.Pointer(&in.Partitions)) - out.Filesystems = *(*[]v1beta2.Filesystem)(unsafe.Pointer(&in.Filesystems)) + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = make([]v1beta2.Partition, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Partition_To_v1beta2_Partition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Partitions = nil + } + if in.Filesystems != nil { + in, out := &in.Filesystems, &out.Filesystems + 
*out = make([]v1beta2.Filesystem, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Filesystem_To_v1beta2_Filesystem(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Filesystems = nil + } return nil } @@ -852,8 +872,28 @@ func Convert_v1beta1_DiskSetup_To_v1beta2_DiskSetup(in *DiskSetup, out *v1beta2. } func autoConvert_v1beta2_DiskSetup_To_v1beta1_DiskSetup(in *v1beta2.DiskSetup, out *DiskSetup, s conversion.Scope) error { - out.Partitions = *(*[]Partition)(unsafe.Pointer(&in.Partitions)) - out.Filesystems = *(*[]Filesystem)(unsafe.Pointer(&in.Filesystems)) + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = make([]Partition, len(*in)) + for i := range *in { + if err := Convert_v1beta2_Partition_To_v1beta1_Partition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Partitions = nil + } + if in.Filesystems != nil { + in, out := &in.Filesystems, &out.Filesystems + *out = make([]Filesystem, len(*in)) + for i := range *in { + if err := Convert_v1beta2_Filesystem_To_v1beta1_Filesystem(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Filesystems = nil + } return nil } @@ -1090,9 +1130,13 @@ func autoConvert_v1beta1_Filesystem_To_v1beta2_Filesystem(in *Filesystem, out *v out.Device = in.Device out.Filesystem = in.Filesystem out.Label = in.Label - out.Partition = (*string)(unsafe.Pointer(in.Partition)) + if err := v1.Convert_Pointer_string_To_string(&in.Partition, &out.Partition, s); err != nil { + return err + } out.Overwrite = (*bool)(unsafe.Pointer(in.Overwrite)) - out.ReplaceFS = (*string)(unsafe.Pointer(in.ReplaceFS)) + if err := v1.Convert_Pointer_string_To_string(&in.ReplaceFS, &out.ReplaceFS, s); err != nil { + return err + } out.ExtraOpts = *(*[]string)(unsafe.Pointer(&in.ExtraOpts)) return nil } @@ -1106,9 +1150,13 @@ func autoConvert_v1beta2_Filesystem_To_v1beta1_Filesystem(in *v1beta2.Filesystem out.Device = in.Device out.Filesystem = 
in.Filesystem out.Label = in.Label - out.Partition = (*string)(unsafe.Pointer(in.Partition)) + if err := v1.Convert_string_To_Pointer_string(&in.Partition, &out.Partition, s); err != nil { + return err + } out.Overwrite = (*bool)(unsafe.Pointer(in.Overwrite)) - out.ReplaceFS = (*string)(unsafe.Pointer(in.ReplaceFS)) + if err := v1.Convert_string_To_Pointer_string(&in.ReplaceFS, &out.ReplaceFS, s); err != nil { + return err + } out.ExtraOpts = *(*[]string)(unsafe.Pointer(&in.ExtraOpts)) return nil } @@ -1569,12 +1617,30 @@ func autoConvert_v1beta1_KubeadmConfigSpec_To_v1beta2_KubeadmConfigSpec(in *Kube } else { out.Files = nil } - out.DiskSetup = (*v1beta2.DiskSetup)(unsafe.Pointer(in.DiskSetup)) + if in.DiskSetup != nil { + in, out := &in.DiskSetup, &out.DiskSetup + *out = new(v1beta2.DiskSetup) + if err := Convert_v1beta1_DiskSetup_To_v1beta2_DiskSetup(*in, *out, s); err != nil { + return err + } + } else { + out.DiskSetup = nil + } out.Mounts = *(*[]v1beta2.MountPoints)(unsafe.Pointer(&in.Mounts)) out.BootCommands = *(*[]string)(unsafe.Pointer(&in.BootCommands)) out.PreKubeadmCommands = *(*[]string)(unsafe.Pointer(&in.PreKubeadmCommands)) out.PostKubeadmCommands = *(*[]string)(unsafe.Pointer(&in.PostKubeadmCommands)) - out.Users = *(*[]v1beta2.User)(unsafe.Pointer(&in.Users)) + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]v1beta2.User, len(*in)) + for i := range *in { + if err := Convert_v1beta1_User_To_v1beta2_User(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Users = nil + } out.NTP = (*v1beta2.NTP)(unsafe.Pointer(in.NTP)) out.Format = v1beta2.Format(in.Format) out.Verbosity = (*int32)(unsafe.Pointer(in.Verbosity)) @@ -1630,12 +1696,30 @@ func autoConvert_v1beta2_KubeadmConfigSpec_To_v1beta1_KubeadmConfigSpec(in *v1be } else { out.Files = nil } - out.DiskSetup = (*DiskSetup)(unsafe.Pointer(in.DiskSetup)) + if in.DiskSetup != nil { + in, out := &in.DiskSetup, &out.DiskSetup + *out = new(DiskSetup) + if 
err := Convert_v1beta2_DiskSetup_To_v1beta1_DiskSetup(*in, *out, s); err != nil { + return err + } + } else { + out.DiskSetup = nil + } out.Mounts = *(*[]MountPoints)(unsafe.Pointer(&in.Mounts)) out.BootCommands = *(*[]string)(unsafe.Pointer(&in.BootCommands)) out.PreKubeadmCommands = *(*[]string)(unsafe.Pointer(&in.PreKubeadmCommands)) out.PostKubeadmCommands = *(*[]string)(unsafe.Pointer(&in.PostKubeadmCommands)) - out.Users = *(*[]User)(unsafe.Pointer(&in.Users)) + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]User, len(*in)) + for i := range *in { + if err := Convert_v1beta2_User_To_v1beta1_User(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Users = nil + } out.NTP = (*NTP)(unsafe.Pointer(in.NTP)) out.Format = Format(in.Format) out.Verbosity = (*int32)(unsafe.Pointer(in.Verbosity)) @@ -1658,7 +1742,9 @@ func Convert_v1beta2_KubeadmConfigSpec_To_v1beta1_KubeadmConfigSpec(in *v1beta2. func autoConvert_v1beta1_KubeadmConfigStatus_To_v1beta2_KubeadmConfigStatus(in *KubeadmConfigStatus, out *v1beta2.KubeadmConfigStatus, s conversion.Scope) error { // WARNING: in.Ready requires manual conversion: does not exist in peer-type - out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName)) + if err := v1.Convert_Pointer_string_To_string(&in.DataSecretName, &out.DataSecretName, s); err != nil { + return err + } // WARNING: in.FailureReason requires manual conversion: does not exist in peer-type // WARNING: in.FailureMessage requires manual conversion: does not exist in peer-type out.ObservedGeneration = in.ObservedGeneration @@ -1690,7 +1776,9 @@ func autoConvert_v1beta2_KubeadmConfigStatus_To_v1beta1_KubeadmConfigStatus(in * out.Conditions = nil } // WARNING: in.Initialization requires manual conversion: does not exist in peer-type - out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName)) + if err := v1.Convert_string_To_Pointer_string(&in.DataSecretName, &out.DataSecretName, s); err != nil { + 
return err + } out.ObservedGeneration = in.ObservedGeneration // WARNING: in.Deprecated requires manual conversion: does not exist in peer-type return nil @@ -1890,7 +1978,9 @@ func autoConvert_v1beta1_Partition_To_v1beta2_Partition(in *Partition, out *v1be out.Device = in.Device out.Layout = in.Layout out.Overwrite = (*bool)(unsafe.Pointer(in.Overwrite)) - out.TableType = (*string)(unsafe.Pointer(in.TableType)) + if err := v1.Convert_Pointer_string_To_string(&in.TableType, &out.TableType, s); err != nil { + return err + } return nil } @@ -1903,7 +1993,9 @@ func autoConvert_v1beta2_Partition_To_v1beta1_Partition(in *v1beta2.Partition, o out.Device = in.Device out.Layout = in.Layout out.Overwrite = (*bool)(unsafe.Pointer(in.Overwrite)) - out.TableType = (*string)(unsafe.Pointer(in.TableType)) + if err := v1.Convert_string_To_Pointer_string(&in.TableType, &out.TableType, s); err != nil { + return err + } return nil } @@ -2002,16 +2094,30 @@ func Convert_v1beta2_SecretPasswdSource_To_v1beta1_SecretPasswdSource(in *v1beta func autoConvert_v1beta1_User_To_v1beta2_User(in *User, out *v1beta2.User, s conversion.Scope) error { out.Name = in.Name - out.Gecos = (*string)(unsafe.Pointer(in.Gecos)) - out.Groups = (*string)(unsafe.Pointer(in.Groups)) - out.HomeDir = (*string)(unsafe.Pointer(in.HomeDir)) + if err := v1.Convert_Pointer_string_To_string(&in.Gecos, &out.Gecos, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.Groups, &out.Groups, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.HomeDir, &out.HomeDir, s); err != nil { + return err + } out.Inactive = (*bool)(unsafe.Pointer(in.Inactive)) - out.Shell = (*string)(unsafe.Pointer(in.Shell)) - out.Passwd = (*string)(unsafe.Pointer(in.Passwd)) + if err := v1.Convert_Pointer_string_To_string(&in.Shell, &out.Shell, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.Passwd, &out.Passwd, s); err != nil { + return err + } 
out.PasswdFrom = (*v1beta2.PasswdSource)(unsafe.Pointer(in.PasswdFrom)) - out.PrimaryGroup = (*string)(unsafe.Pointer(in.PrimaryGroup)) + if err := v1.Convert_Pointer_string_To_string(&in.PrimaryGroup, &out.PrimaryGroup, s); err != nil { + return err + } out.LockPassword = (*bool)(unsafe.Pointer(in.LockPassword)) - out.Sudo = (*string)(unsafe.Pointer(in.Sudo)) + if err := v1.Convert_Pointer_string_To_string(&in.Sudo, &out.Sudo, s); err != nil { + return err + } out.SSHAuthorizedKeys = *(*[]string)(unsafe.Pointer(&in.SSHAuthorizedKeys)) return nil } @@ -2023,16 +2129,30 @@ func Convert_v1beta1_User_To_v1beta2_User(in *User, out *v1beta2.User, s convers func autoConvert_v1beta2_User_To_v1beta1_User(in *v1beta2.User, out *User, s conversion.Scope) error { out.Name = in.Name - out.Gecos = (*string)(unsafe.Pointer(in.Gecos)) - out.Groups = (*string)(unsafe.Pointer(in.Groups)) - out.HomeDir = (*string)(unsafe.Pointer(in.HomeDir)) + if err := v1.Convert_string_To_Pointer_string(&in.Gecos, &out.Gecos, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.Groups, &out.Groups, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.HomeDir, &out.HomeDir, s); err != nil { + return err + } out.Inactive = (*bool)(unsafe.Pointer(in.Inactive)) - out.Shell = (*string)(unsafe.Pointer(in.Shell)) - out.Passwd = (*string)(unsafe.Pointer(in.Passwd)) + if err := v1.Convert_string_To_Pointer_string(&in.Shell, &out.Shell, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.Passwd, &out.Passwd, s); err != nil { + return err + } out.PasswdFrom = (*PasswdSource)(unsafe.Pointer(in.PasswdFrom)) - out.PrimaryGroup = (*string)(unsafe.Pointer(in.PrimaryGroup)) + if err := v1.Convert_string_To_Pointer_string(&in.PrimaryGroup, &out.PrimaryGroup, s); err != nil { + return err + } out.LockPassword = (*bool)(unsafe.Pointer(in.LockPassword)) - out.Sudo = (*string)(unsafe.Pointer(in.Sudo)) + if err := 
v1.Convert_string_To_Pointer_string(&in.Sudo, &out.Sudo, s); err != nil { + return err + } out.SSHAuthorizedKeys = *(*[]string)(unsafe.Pointer(&in.SSHAuthorizedKeys)) return nil } diff --git a/api/bootstrap/kubeadm/v1beta2/kubeadmconfig_types.go b/api/bootstrap/kubeadm/v1beta2/kubeadmconfig_types.go index de67bfd79ad8..45f0320ae81a 100644 --- a/api/bootstrap/kubeadm/v1beta2/kubeadmconfig_types.go +++ b/api/bootstrap/kubeadm/v1beta2/kubeadmconfig_types.go @@ -274,7 +274,7 @@ func (c *KubeadmConfigSpec) validateUsers(pathPrefix *field.Path) field.ErrorLis for i := range c.Users { user := c.Users[i] - if user.Passwd != nil && user.PasswdFrom != nil { + if user.Passwd != "" && user.PasswdFrom != nil { allErrs = append( allErrs, field.Invalid( @@ -383,12 +383,12 @@ func (c *KubeadmConfigSpec) validateIgnition(pathPrefix *field.Path) field.Error } for i, partition := range c.DiskSetup.Partitions { - if partition.TableType != nil && *partition.TableType != "gpt" { + if partition.TableType != "" && partition.TableType != "gpt" { allErrs = append( allErrs, field.Invalid( pathPrefix.Child("diskSetup", "partitions").Index(i).Child("tableType"), - *partition.TableType, + partition.TableType, fmt.Sprintf( "only partition type %q is supported when spec.format is set to %q", "gpt", @@ -400,7 +400,7 @@ func (c *KubeadmConfigSpec) validateIgnition(pathPrefix *field.Path) field.Error } for i, fs := range c.DiskSetup.Filesystems { - if fs.ReplaceFS != nil { + if fs.ReplaceFS != "" { allErrs = append( allErrs, field.Forbidden( @@ -410,7 +410,7 @@ func (c *KubeadmConfigSpec) validateIgnition(pathPrefix *field.Path) field.Error ) } - if fs.Partition != nil { + if fs.Partition != "" { allErrs = append( allErrs, field.Forbidden( @@ -468,7 +468,7 @@ type KubeadmConfigStatus struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=253 - DataSecretName *string `json:"dataSecretName,omitempty"` + DataSecretName string `json:"dataSecretName,omitempty"` 
// observedGeneration is the latest generation observed by the controller. // +optional @@ -712,19 +712,19 @@ type User struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - Gecos *string `json:"gecos,omitempty"` + Gecos string `json:"gecos,omitempty"` // groups specifies the additional groups for the user // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - Groups *string `json:"groups,omitempty"` + Groups string `json:"groups,omitempty"` // homeDir specifies the home directory to use for the user // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - HomeDir *string `json:"homeDir,omitempty"` + HomeDir string `json:"homeDir,omitempty"` // inactive specifies whether to mark the user as inactive // +optional @@ -734,13 +734,13 @@ type User struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - Shell *string `json:"shell,omitempty"` + Shell string `json:"shell,omitempty"` // passwd specifies a hashed password for the user // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - Passwd *string `json:"passwd,omitempty"` + Passwd string `json:"passwd,omitempty"` // passwdFrom is a referenced source of passwd to populate the passwd. 
// +optional @@ -750,7 +750,7 @@ type User struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - PrimaryGroup *string `json:"primaryGroup,omitempty"` + PrimaryGroup string `json:"primaryGroup,omitempty"` // lockPassword specifies if password login should be disabled // +optional @@ -760,7 +760,7 @@ type User struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - Sudo *string `json:"sudo,omitempty"` + Sudo string `json:"sudo,omitempty"` // sshAuthorizedKeys specifies a list of ssh authorized keys for the user // +optional @@ -818,7 +818,7 @@ type Partition struct { // 'gpt': setups a GPT partition table // +optional // +kubebuilder:validation:Enum=mbr;gpt - TableType *string `json:"tableType,omitempty"` + TableType string `json:"tableType,omitempty"` } // Filesystem defines the file systems to be created. @@ -845,7 +845,7 @@ type Filesystem struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 - Partition *string `json:"partition,omitempty"` + Partition string `json:"partition,omitempty"` // overwrite defines whether or not to overwrite any existing filesystem. // If true, any pre-existing file system will be destroyed. Use with Caution. @@ -857,7 +857,7 @@ type Filesystem struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 - ReplaceFS *string `json:"replaceFS,omitempty"` + ReplaceFS string `json:"replaceFS,omitempty"` // extraOpts defined extra options to add to the command for creating the file system. 
// +optional diff --git a/api/bootstrap/kubeadm/v1beta2/zz_generated.deepcopy.go b/api/bootstrap/kubeadm/v1beta2/zz_generated.deepcopy.go index eac71e826668..d9f391299523 100644 --- a/api/bootstrap/kubeadm/v1beta2/zz_generated.deepcopy.go +++ b/api/bootstrap/kubeadm/v1beta2/zz_generated.deepcopy.go @@ -454,21 +454,11 @@ func (in *FileSource) DeepCopy() *FileSource { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Filesystem) DeepCopyInto(out *Filesystem) { *out = *in - if in.Partition != nil { - in, out := &in.Partition, &out.Partition - *out = new(string) - **out = **in - } if in.Overwrite != nil { in, out := &in.Overwrite, &out.Overwrite *out = new(bool) **out = **in } - if in.ReplaceFS != nil { - in, out := &in.ReplaceFS, &out.ReplaceFS - *out = new(string) - **out = **in - } if in.ExtraOpts != nil { in, out := &in.ExtraOpts, &out.ExtraOpts *out = make([]string, len(*in)) @@ -954,11 +944,6 @@ func (in *KubeadmConfigStatus) DeepCopyInto(out *KubeadmConfigStatus) { *out = new(KubeadmConfigInitializationStatus) (*in).DeepCopyInto(*out) } - if in.DataSecretName != nil { - in, out := &in.DataSecretName, &out.DataSecretName - *out = new(string) - **out = **in - } if in.Deprecated != nil { in, out := &in.Deprecated, &out.Deprecated *out = new(KubeadmConfigDeprecatedStatus) @@ -1216,11 +1201,6 @@ func (in *Partition) DeepCopyInto(out *Partition) { *out = new(bool) **out = **in } - if in.TableType != nil { - in, out := &in.TableType, &out.TableType - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Partition. @@ -1342,56 +1322,21 @@ func (in *Timeouts) DeepCopy() *Timeouts { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *User) DeepCopyInto(out *User) { *out = *in - if in.Gecos != nil { - in, out := &in.Gecos, &out.Gecos - *out = new(string) - **out = **in - } - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = new(string) - **out = **in - } - if in.HomeDir != nil { - in, out := &in.HomeDir, &out.HomeDir - *out = new(string) - **out = **in - } if in.Inactive != nil { in, out := &in.Inactive, &out.Inactive *out = new(bool) **out = **in } - if in.Shell != nil { - in, out := &in.Shell, &out.Shell - *out = new(string) - **out = **in - } - if in.Passwd != nil { - in, out := &in.Passwd, &out.Passwd - *out = new(string) - **out = **in - } if in.PasswdFrom != nil { in, out := &in.PasswdFrom, &out.PasswdFrom *out = new(PasswdSource) **out = **in } - if in.PrimaryGroup != nil { - in, out := &in.PrimaryGroup, &out.PrimaryGroup - *out = new(string) - **out = **in - } if in.LockPassword != nil { in, out := &in.LockPassword, &out.LockPassword *out = new(bool) **out = **in } - if in.Sudo != nil { - in, out := &in.Sudo, &out.Sudo - *out = new(string) - **out = **in - } if in.SSHAuthorizedKeys != nil { in, out := &in.SSHAuthorizedKeys, &out.SSHAuthorizedKeys *out = make([]string, len(*in)) diff --git a/api/controlplane/kubeadm/v1beta1/conversion.go b/api/controlplane/kubeadm/v1beta1/conversion.go index 085f3cd600c8..3db02c793605 100644 --- a/api/controlplane/kubeadm/v1beta1/conversion.go +++ b/api/controlplane/kubeadm/v1beta1/conversion.go @@ -103,6 +103,9 @@ func (dst *KubeadmControlPlane) ConvertFrom(srcRaw conversion.Hub) error { // Convert timeouts moved from one struct to another. dst.Spec.KubeadmConfigSpec.ConvertFrom(&src.Spec.KubeadmConfigSpec) + dropEmptyStringsKubeadmConfigSpec(&dst.Spec.KubeadmConfigSpec) + dropEmptyStringsKubeadmControlPlaneStatus(&dst.Status) + // Preserve Hub data on down-conversion except for metadata. 
return utilconversion.MarshalData(src, dst) } @@ -144,6 +147,8 @@ func (dst *KubeadmControlPlaneTemplate) ConvertFrom(srcRaw conversion.Hub) error // Convert timeouts moved from one struct to another. dst.Spec.Template.Spec.KubeadmConfigSpec.ConvertFrom(&src.Spec.Template.Spec.KubeadmConfigSpec) + dropEmptyStringsKubeadmConfigSpec(&dst.Spec.Template.Spec.KubeadmConfigSpec) + // Preserve Hub data on down-conversion except for metadata. return utilconversion.MarshalData(src, dst) } @@ -356,3 +361,38 @@ func convertToObjectReference(ref *clusterv1.ContractVersionedObjectReference, n Name: ref.Name, }, nil } + +func dropEmptyStringsKubeadmConfigSpec(dst *bootstrapv1beta1.KubeadmConfigSpec) { + for i, u := range dst.Users { + dropEmptyString(&u.Gecos) + dropEmptyString(&u.Groups) + dropEmptyString(&u.HomeDir) + dropEmptyString(&u.Shell) + dropEmptyString(&u.Passwd) + dropEmptyString(&u.PrimaryGroup) + dropEmptyString(&u.Sudo) + dst.Users[i] = u + } + + if dst.DiskSetup != nil { + for i, p := range dst.DiskSetup.Partitions { + dropEmptyString(&p.TableType) + dst.DiskSetup.Partitions[i] = p + } + for i, f := range dst.DiskSetup.Filesystems { + dropEmptyString(&f.Partition) + dropEmptyString(&f.ReplaceFS) + dst.DiskSetup.Filesystems[i] = f + } + } +} + +func dropEmptyStringsKubeadmControlPlaneStatus(dst *KubeadmControlPlaneStatus) { + dropEmptyString(&dst.Version) +} + +func dropEmptyString(s **string) { + if *s != nil && **s == "" { + *s = nil + } +} diff --git a/api/controlplane/kubeadm/v1beta1/conversion_test.go b/api/controlplane/kubeadm/v1beta1/conversion_test.go index f8d7873cdfd1..3252c3118d64 100644 --- a/api/controlplane/kubeadm/v1beta1/conversion_test.go +++ b/api/controlplane/kubeadm/v1beta1/conversion_test.go @@ -189,6 +189,8 @@ func spokeKubeadmControlPlaneStatus(in *KubeadmControlPlaneStatus, c randfill.Co // Make sure ready is consistent with ready replicas, so we can rebuild the info after the round trip. 
in.Ready = in.ReadyReplicas > 0 + + dropEmptyStringsKubeadmControlPlaneStatus(in) } func spokeAPIServer(in *bootstrapv1beta1.APIServer, c randfill.Continue) { @@ -212,6 +214,8 @@ func spokeKubeadmConfigSpec(in *bootstrapv1beta1.KubeadmConfigSpec, c randfill.C // Drop UseExperimentalRetryJoin as we intentionally don't preserve it. in.UseExperimentalRetryJoin = false + + dropEmptyStringsKubeadmConfigSpec(in) } func spokeClusterConfiguration(in *bootstrapv1beta1.ClusterConfiguration, c randfill.Continue) { diff --git a/api/controlplane/kubeadm/v1beta1/zz_generated.conversion.go b/api/controlplane/kubeadm/v1beta1/zz_generated.conversion.go index f39013a1a2a7..05344e2f8827 100644 --- a/api/controlplane/kubeadm/v1beta1/zz_generated.conversion.go +++ b/api/controlplane/kubeadm/v1beta1/zz_generated.conversion.go @@ -423,7 +423,9 @@ func autoConvert_v1beta1_KubeadmControlPlaneStatus_To_v1beta2_KubeadmControlPlan if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } - out.Version = (*string)(unsafe.Pointer(in.Version)) + if err := v1.Convert_Pointer_string_To_string(&in.Version, &out.Version, s); err != nil { + return err + } // WARNING: in.UpdatedReplicas requires manual conversion: does not exist in peer-type if err := v1.Convert_int32_To_Pointer_int32(&in.ReadyReplicas, &out.ReadyReplicas, s); err != nil { return err @@ -472,7 +474,9 @@ func autoConvert_v1beta2_KubeadmControlPlaneStatus_To_v1beta1_KubeadmControlPlan } // WARNING: in.AvailableReplicas requires manual conversion: does not exist in peer-type // WARNING: in.UpToDateReplicas requires manual conversion: does not exist in peer-type - out.Version = (*string)(unsafe.Pointer(in.Version)) + if err := v1.Convert_string_To_Pointer_string(&in.Version, &out.Version, s); err != nil { + return err + } out.ObservedGeneration = in.ObservedGeneration out.LastRemediation = (*LastRemediationStatus)(unsafe.Pointer(in.LastRemediation)) // WARNING: in.Deprecated requires manual 
conversion: does not exist in peer-type diff --git a/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go b/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go index 215df63758b1..fdd8bfed88b2 100644 --- a/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go +++ b/api/controlplane/kubeadm/v1beta2/kubeadm_control_plane_types.go @@ -671,7 +671,7 @@ type KubeadmControlPlaneStatus struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - Version *string `json:"version,omitempty"` + Version string `json:"version,omitempty"` // observedGeneration is the latest generation observed by the controller. // +optional @@ -732,7 +732,7 @@ type KubeadmControlPlaneV1Beta1DeprecatedStatus struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=10240 - FailureMessage *string `json:"failureMessage,omitempty"` + FailureMessage *string `json:"failureMessage,omitempty"` //nolint:kubeapilinter // field will be removed when v1beta1 is removed // updatedReplicas is the total number of non-terminated machines targeted by this control plane // that have the desired template spec. 
diff --git a/api/controlplane/kubeadm/v1beta2/zz_generated.deepcopy.go b/api/controlplane/kubeadm/v1beta2/zz_generated.deepcopy.go index d91dab027411..f4d1c52306e2 100644 --- a/api/controlplane/kubeadm/v1beta2/zz_generated.deepcopy.go +++ b/api/controlplane/kubeadm/v1beta2/zz_generated.deepcopy.go @@ -244,11 +244,6 @@ func (in *KubeadmControlPlaneStatus) DeepCopyInto(out *KubeadmControlPlaneStatus *out = new(int32) **out = **in } - if in.Version != nil { - in, out := &in.Version, &out.Version - *out = new(string) - **out = **in - } if in.LastRemediation != nil { in, out := &in.LastRemediation, &out.LastRemediation *out = new(LastRemediationStatus) diff --git a/api/core/v1beta1/conversion.go b/api/core/v1beta1/conversion.go index 5f4eb6e750d1..edbb3b624554 100644 --- a/api/core/v1beta1/conversion.go +++ b/api/core/v1beta1/conversion.go @@ -125,6 +125,8 @@ func (dst *Cluster) ConvertFrom(srcRaw conversion.Hub) error { } } + dropEmptyStringsCluster(dst) + return utilconversion.MarshalData(src, dst) } @@ -355,6 +357,7 @@ func (dst *ClusterClass) ConvertFrom(srcRaw conversion.Hub) error { md.MachineHealthCheck.RemediationTemplate.Namespace = dst.Namespace } } + dropEmptyStringsClusterClass(dst) return utilconversion.MarshalData(src, dst) } @@ -408,6 +411,8 @@ func (dst *Machine) ConvertFrom(srcRaw conversion.Hub) error { return err } + dropEmptyStringsMachineSpec(&dst.Spec) + return utilconversion.MarshalData(src, dst) } @@ -444,6 +449,7 @@ func (dst *MachineSet) ConvertFrom(srcRaw conversion.Hub) error { dst.Spec.MinReadySeconds = ptr.Deref(src.Spec.Template.Spec.MinReadySeconds, 0) + dropEmptyStringsMachineSpec(&dst.Spec.Template.Spec) return nil } @@ -485,6 +491,8 @@ func (dst *MachineDeployment) ConvertFrom(srcRaw conversion.Hub) error { dst.Spec.MinReadySeconds = src.Spec.Template.Spec.MinReadySeconds + dropEmptyStringsMachineSpec(&dst.Spec.Template.Spec) + return utilconversion.MarshalData(src, dst) } @@ -503,6 +511,8 @@ func (dst *MachineHealthCheck) 
ConvertFrom(srcRaw conversion.Hub) error { if dst.Spec.RemediationTemplate != nil { dst.Spec.RemediationTemplate.Namespace = src.Namespace } + + dropEmptyStringsMachineHealthCheck(dst) return nil } @@ -554,6 +564,8 @@ func (dst *MachinePool) ConvertFrom(srcRaw conversion.Hub) error { dst.Spec.MinReadySeconds = src.Spec.Template.Spec.MinReadySeconds + dropEmptyStringsMachineSpec(&dst.Spec.Template.Spec) + return utilconversion.MarshalData(src, dst) } @@ -606,8 +618,12 @@ func Convert_v1beta2_ClusterClassSpec_To_v1beta1_ClusterClassSpec(in *clusterv1. } if in.Infrastructure.NamingStrategy != nil { + var template *string + if in.Infrastructure.NamingStrategy.Template != "" { + template = ptr.To(in.Infrastructure.NamingStrategy.Template) + } out.InfrastructureNamingStrategy = &InfrastructureNamingStrategy{ - Template: in.Infrastructure.NamingStrategy.Template, + Template: template, } } return nil @@ -628,7 +644,7 @@ func Convert_v1beta1_ClusterClassSpec_To_v1beta2_ClusterClassSpec(in *ClusterCla if in.InfrastructureNamingStrategy != nil { out.Infrastructure.NamingStrategy = &clusterv1.InfrastructureClassNamingStrategy{ - Template: in.InfrastructureNamingStrategy.Template, + Template: ptr.Deref(in.InfrastructureNamingStrategy.Template, ""), } } return nil @@ -1496,8 +1512,8 @@ func Convert_v1beta1_ExternalPatchDefinition_To_v1beta2_ExternalPatchDefinition( return err } - out.GeneratePatchesExtension = in.GenerateExtension - out.ValidateTopologyExtension = in.ValidateExtension + out.GeneratePatchesExtension = ptr.Deref(in.GenerateExtension, "") + out.ValidateTopologyExtension = ptr.Deref(in.ValidateExtension, "") return nil } @@ -1506,8 +1522,12 @@ func Convert_v1beta2_ExternalPatchDefinition_To_v1beta1_ExternalPatchDefinition( return err } - out.GenerateExtension = in.GeneratePatchesExtension - out.ValidateExtension = in.ValidateTopologyExtension + if in.GeneratePatchesExtension != "" { + out.GenerateExtension = ptr.To(in.GeneratePatchesExtension) + } + if 
in.ValidateTopologyExtension != "" { + out.ValidateExtension = ptr.To(in.ValidateTopologyExtension) + } return nil } @@ -1646,3 +1666,91 @@ func Convert_v1beta1_JSONSchemaProps_To_v1beta2_JSONSchemaProps(in *JSONSchemaPr // By implementing this func, autoConvert_v1beta1_JSONSchemaProps_To_v1beta2_JSONSchemaProps is generated properly. return autoConvert_v1beta1_JSONSchemaProps_To_v1beta2_JSONSchemaProps(in, out, s) } + +func dropEmptyStringsCluster(dst *Cluster) { + if dst.Spec.Topology != nil { + if dst.Spec.Topology.ControlPlane.MachineHealthCheck != nil { + dropEmptyString(&dst.Spec.Topology.ControlPlane.MachineHealthCheck.UnhealthyRange) + } + + if dst.Spec.Topology.Workers != nil { + for i, md := range dst.Spec.Topology.Workers.MachineDeployments { + dropEmptyString(&md.FailureDomain) + if md.MachineHealthCheck != nil { + dropEmptyString(&md.MachineHealthCheck.UnhealthyRange) + } + dst.Spec.Topology.Workers.MachineDeployments[i] = md + } + } + } +} + +func dropEmptyStringsClusterClass(dst *ClusterClass) { + if dst.Spec.InfrastructureNamingStrategy != nil { + dropEmptyString(&dst.Spec.InfrastructureNamingStrategy.Template) + } + + if dst.Spec.ControlPlane.NamingStrategy != nil { + dropEmptyString(&dst.Spec.ControlPlane.NamingStrategy.Template) + } + if dst.Spec.ControlPlane.MachineHealthCheck != nil { + dropEmptyString(&dst.Spec.ControlPlane.MachineHealthCheck.UnhealthyRange) + } + + for i, md := range dst.Spec.Workers.MachineDeployments { + if md.NamingStrategy != nil { + dropEmptyString(&md.NamingStrategy.Template) + } + dropEmptyString(&md.FailureDomain) + if md.MachineHealthCheck != nil { + dropEmptyString(&md.MachineHealthCheck.UnhealthyRange) + } + dst.Spec.Workers.MachineDeployments[i] = md + } + + for i, mp := range dst.Spec.Workers.MachinePools { + if mp.NamingStrategy != nil { + dropEmptyString(&mp.NamingStrategy.Template) + } + + dst.Spec.Workers.MachinePools[i] = mp + } + + for i, p := range dst.Spec.Patches { + dropEmptyString(&p.EnabledIf) + if 
p.External != nil { + dropEmptyString(&p.External.GenerateExtension) + dropEmptyString(&p.External.ValidateExtension) + dropEmptyString(&p.External.DiscoverVariablesExtension) + } + + for j, d := range p.Definitions { + for k, jp := range d.JSONPatches { + if jp.ValueFrom != nil { + dropEmptyString(&jp.ValueFrom.Variable) + dropEmptyString(&jp.ValueFrom.Template) + } + d.JSONPatches[k] = jp + } + p.Definitions[j] = d + } + + dst.Spec.Patches[i] = p + } +} + +func dropEmptyStringsMachineSpec(spec *MachineSpec) { + dropEmptyString(&spec.Version) + dropEmptyString(&spec.ProviderID) + dropEmptyString(&spec.FailureDomain) +} + +func dropEmptyStringsMachineHealthCheck(dst *MachineHealthCheck) { + dropEmptyString(&dst.Spec.UnhealthyRange) +} + +func dropEmptyString(s **string) { + if *s != nil && **s == "" { + *s = nil + } +} diff --git a/api/core/v1beta1/conversion_test.go b/api/core/v1beta1/conversion_test.go index a1c7c2ebb591..8e311faf0661 100644 --- a/api/core/v1beta1/conversion_test.go +++ b/api/core/v1beta1/conversion_test.go @@ -199,6 +199,8 @@ func spokeCluster(in *Cluster, c randfill.Continue) { in.Spec.InfrastructureRef.ResourceVersion = "" in.Spec.InfrastructureRef.FieldPath = "" } + + dropEmptyStringsCluster(in) } func spokeClusterTopology(in *Topology, c randfill.Continue) { @@ -319,6 +321,8 @@ func spokeClusterClass(in *ClusterClass, c randfill.Continue) { c.FillNoCustom(in) in.Namespace = "foo" + + dropEmptyStringsClusterClass(in) } func spokeClusterClassStatus(in *ClusterClassStatus, c randfill.Continue) { @@ -447,6 +451,8 @@ func spokeMachine(in *Machine, c randfill.Continue) { c.FillNoCustom(in) fillMachineSpec(&in.Spec, c, in.Namespace) + + dropEmptyStringsMachineSpec(&in.Spec) } func fillMachineSpec(spec *MachineSpec, c randfill.Continue, namespace string) { @@ -532,6 +538,8 @@ func spokeMachineSet(in *MachineSet, c randfill.Continue) { c.FillNoCustom(in) fillMachineSpec(&in.Spec.Template.Spec, c, in.Namespace) + + 
dropEmptyStringsMachineSpec(&in.Spec.Template.Spec) } func spokeMachineSetStatus(in *MachineSetStatus, c randfill.Continue) { @@ -575,6 +583,8 @@ func spokeMachineDeployment(in *MachineDeployment, c randfill.Continue) { c.FillNoCustom(in) fillMachineSpec(&in.Spec.Template.Spec, c, in.Namespace) + + dropEmptyStringsMachineSpec(&in.Spec.Template.Spec) } func spokeMachineDeploymentSpec(in *MachineDeploymentSpec, c randfill.Continue) { @@ -622,6 +632,8 @@ func spokeMachineHealthCheck(in *MachineHealthCheck, c randfill.Continue) { c.FillNoCustom(in) in.Namespace = "foo" + + dropEmptyStringsMachineHealthCheck(in) } func spokeMachineHealthCheckStatus(in *MachineHealthCheckStatus, c randfill.Continue) { @@ -685,6 +697,8 @@ func spokeMachinePool(in *MachinePool, c randfill.Continue) { c.FillNoCustom(in) fillMachineSpec(&in.Spec.Template.Spec, c, in.Namespace) + + dropEmptyStringsMachineSpec(&in.Spec.Template.Spec) } func spokeMachinePoolStatus(in *MachinePoolStatus, c randfill.Continue) { diff --git a/api/core/v1beta1/zz_generated.conversion.go b/api/core/v1beta1/zz_generated.conversion.go index a77e64d27af6..34675bf583f5 100644 --- a/api/core/v1beta1/zz_generated.conversion.go +++ b/api/core/v1beta1/zz_generated.conversion.go @@ -1169,7 +1169,9 @@ func Convert_v1beta2_ClusterClassList_To_v1beta1_ClusterClassList(in *v1beta2.Cl func autoConvert_v1beta1_ClusterClassPatch_To_v1beta2_ClusterClassPatch(in *ClusterClassPatch, out *v1beta2.ClusterClassPatch, s conversion.Scope) error { out.Name = in.Name out.Description = in.Description - out.EnabledIf = (*string)(unsafe.Pointer(in.EnabledIf)) + if err := v1.Convert_Pointer_string_To_string(&in.EnabledIf, &out.EnabledIf, s); err != nil { + return err + } if in.Definitions != nil { in, out := &in.Definitions, &out.Definitions *out = make([]v1beta2.PatchDefinition, len(*in)) @@ -1201,7 +1203,9 @@ func Convert_v1beta1_ClusterClassPatch_To_v1beta2_ClusterClassPatch(in *ClusterC func 
autoConvert_v1beta2_ClusterClassPatch_To_v1beta1_ClusterClassPatch(in *v1beta2.ClusterClassPatch, out *ClusterClassPatch, s conversion.Scope) error { out.Name = in.Name out.Description = in.Description - out.EnabledIf = (*string)(unsafe.Pointer(in.EnabledIf)) + if err := v1.Convert_string_To_Pointer_string(&in.EnabledIf, &out.EnabledIf, s); err != nil { + return err + } if in.Definitions != nil { in, out := &in.Definitions, &out.Definitions *out = make([]PatchDefinition, len(*in)) @@ -1769,7 +1773,15 @@ func autoConvert_v1beta1_ControlPlaneClass_To_v1beta2_ControlPlaneClass(in *Cont } else { out.MachineHealthCheck = nil } - out.NamingStrategy = (*v1beta2.ControlPlaneClassNamingStrategy)(unsafe.Pointer(in.NamingStrategy)) + if in.NamingStrategy != nil { + in, out := &in.NamingStrategy, &out.NamingStrategy + *out = new(v1beta2.ControlPlaneClassNamingStrategy) + if err := Convert_v1beta1_ControlPlaneClassNamingStrategy_To_v1beta2_ControlPlaneClassNamingStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.NamingStrategy = nil + } // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type @@ -1800,7 +1812,15 @@ func autoConvert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(in *v1be } else { out.MachineHealthCheck = nil } - out.NamingStrategy = (*ControlPlaneClassNamingStrategy)(unsafe.Pointer(in.NamingStrategy)) + if in.NamingStrategy != nil { + in, out := &in.NamingStrategy, &out.NamingStrategy + *out = new(ControlPlaneClassNamingStrategy) + if err := Convert_v1beta2_ControlPlaneClassNamingStrategy_To_v1beta1_ControlPlaneClassNamingStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.NamingStrategy = nil + } // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type // WARNING: 
in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type @@ -1809,7 +1829,9 @@ func autoConvert_v1beta2_ControlPlaneClass_To_v1beta1_ControlPlaneClass(in *v1be } func autoConvert_v1beta1_ControlPlaneClassNamingStrategy_To_v1beta2_ControlPlaneClassNamingStrategy(in *ControlPlaneClassNamingStrategy, out *v1beta2.ControlPlaneClassNamingStrategy, s conversion.Scope) error { - out.Template = (*string)(unsafe.Pointer(in.Template)) + if err := v1.Convert_Pointer_string_To_string(&in.Template, &out.Template, s); err != nil { + return err + } return nil } @@ -1819,7 +1841,9 @@ func Convert_v1beta1_ControlPlaneClassNamingStrategy_To_v1beta2_ControlPlaneClas } func autoConvert_v1beta2_ControlPlaneClassNamingStrategy_To_v1beta1_ControlPlaneClassNamingStrategy(in *v1beta2.ControlPlaneClassNamingStrategy, out *ControlPlaneClassNamingStrategy, s conversion.Scope) error { - out.Template = (*string)(unsafe.Pointer(in.Template)) + if err := v1.Convert_string_To_Pointer_string(&in.Template, &out.Template, s); err != nil { + return err + } return nil } @@ -1931,7 +1955,9 @@ func Convert_v1beta2_ControlPlaneVariables_To_v1beta1_ControlPlaneVariables(in * func autoConvert_v1beta1_ExternalPatchDefinition_To_v1beta2_ExternalPatchDefinition(in *ExternalPatchDefinition, out *v1beta2.ExternalPatchDefinition, s conversion.Scope) error { // WARNING: in.GenerateExtension requires manual conversion: does not exist in peer-type // WARNING: in.ValidateExtension requires manual conversion: does not exist in peer-type - out.DiscoverVariablesExtension = (*string)(unsafe.Pointer(in.DiscoverVariablesExtension)) + if err := v1.Convert_Pointer_string_To_string(&in.DiscoverVariablesExtension, &out.DiscoverVariablesExtension, s); err != nil { + return err + } out.Settings = *(*map[string]string)(unsafe.Pointer(&in.Settings)) return nil } @@ -1939,7 +1965,9 @@ func 
autoConvert_v1beta1_ExternalPatchDefinition_To_v1beta2_ExternalPatchDefinit func autoConvert_v1beta2_ExternalPatchDefinition_To_v1beta1_ExternalPatchDefinition(in *v1beta2.ExternalPatchDefinition, out *ExternalPatchDefinition, s conversion.Scope) error { // WARNING: in.GeneratePatchesExtension requires manual conversion: does not exist in peer-type // WARNING: in.ValidateTopologyExtension requires manual conversion: does not exist in peer-type - out.DiscoverVariablesExtension = (*string)(unsafe.Pointer(in.DiscoverVariablesExtension)) + if err := v1.Convert_string_To_Pointer_string(&in.DiscoverVariablesExtension, &out.DiscoverVariablesExtension, s); err != nil { + return err + } out.Settings = *(*map[string]string)(unsafe.Pointer(&in.Settings)) return nil } @@ -1948,7 +1976,15 @@ func autoConvert_v1beta1_JSONPatch_To_v1beta2_JSONPatch(in *JSONPatch, out *v1be out.Op = in.Op out.Path = in.Path out.Value = (*apiextensionsv1.JSON)(unsafe.Pointer(in.Value)) - out.ValueFrom = (*v1beta2.JSONPatchValue)(unsafe.Pointer(in.ValueFrom)) + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(v1beta2.JSONPatchValue) + if err := Convert_v1beta1_JSONPatchValue_To_v1beta2_JSONPatchValue(*in, *out, s); err != nil { + return err + } + } else { + out.ValueFrom = nil + } return nil } @@ -1961,7 +1997,15 @@ func autoConvert_v1beta2_JSONPatch_To_v1beta1_JSONPatch(in *v1beta2.JSONPatch, o out.Op = in.Op out.Path = in.Path out.Value = (*apiextensionsv1.JSON)(unsafe.Pointer(in.Value)) - out.ValueFrom = (*JSONPatchValue)(unsafe.Pointer(in.ValueFrom)) + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(JSONPatchValue) + if err := Convert_v1beta2_JSONPatchValue_To_v1beta1_JSONPatchValue(*in, *out, s); err != nil { + return err + } + } else { + out.ValueFrom = nil + } return nil } @@ -1971,8 +2015,12 @@ func Convert_v1beta2_JSONPatch_To_v1beta1_JSONPatch(in *v1beta2.JSONPatch, out * } func 
autoConvert_v1beta1_JSONPatchValue_To_v1beta2_JSONPatchValue(in *JSONPatchValue, out *v1beta2.JSONPatchValue, s conversion.Scope) error { - out.Variable = (*string)(unsafe.Pointer(in.Variable)) - out.Template = (*string)(unsafe.Pointer(in.Template)) + if err := v1.Convert_Pointer_string_To_string(&in.Variable, &out.Variable, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.Template, &out.Template, s); err != nil { + return err + } return nil } @@ -1982,8 +2030,12 @@ func Convert_v1beta1_JSONPatchValue_To_v1beta2_JSONPatchValue(in *JSONPatchValue } func autoConvert_v1beta2_JSONPatchValue_To_v1beta1_JSONPatchValue(in *v1beta2.JSONPatchValue, out *JSONPatchValue, s conversion.Scope) error { - out.Variable = (*string)(unsafe.Pointer(in.Variable)) - out.Template = (*string)(unsafe.Pointer(in.Template)) + if err := v1.Convert_string_To_Pointer_string(&in.Variable, &out.Variable, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.Template, &out.Template, s); err != nil { + return err + } return nil } @@ -2339,8 +2391,18 @@ func autoConvert_v1beta1_MachineDeploymentClass_To_v1beta2_MachineDeploymentClas } else { out.MachineHealthCheck = nil } - out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) - out.NamingStrategy = (*v1beta2.MachineDeploymentClassNamingStrategy)(unsafe.Pointer(in.NamingStrategy)) + if err := v1.Convert_Pointer_string_To_string(&in.FailureDomain, &out.FailureDomain, s); err != nil { + return err + } + if in.NamingStrategy != nil { + in, out := &in.NamingStrategy, &out.NamingStrategy + *out = new(v1beta2.MachineDeploymentClassNamingStrategy) + if err := Convert_v1beta1_MachineDeploymentClassNamingStrategy_To_v1beta2_MachineDeploymentClassNamingStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.NamingStrategy = nil + } // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type // WARNING: in.NodeVolumeDetachTimeout requires 
manual conversion: does not exist in peer-type // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type @@ -2364,8 +2426,18 @@ func autoConvert_v1beta2_MachineDeploymentClass_To_v1beta1_MachineDeploymentClas } else { out.MachineHealthCheck = nil } - out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) - out.NamingStrategy = (*MachineDeploymentClassNamingStrategy)(unsafe.Pointer(in.NamingStrategy)) + if err := v1.Convert_string_To_Pointer_string(&in.FailureDomain, &out.FailureDomain, s); err != nil { + return err + } + if in.NamingStrategy != nil { + in, out := &in.NamingStrategy, &out.NamingStrategy + *out = new(MachineDeploymentClassNamingStrategy) + if err := Convert_v1beta2_MachineDeploymentClassNamingStrategy_To_v1beta1_MachineDeploymentClassNamingStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.NamingStrategy = nil + } // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: does not exist in peer-type @@ -2376,7 +2448,9 @@ func autoConvert_v1beta2_MachineDeploymentClass_To_v1beta1_MachineDeploymentClas } func autoConvert_v1beta1_MachineDeploymentClassNamingStrategy_To_v1beta2_MachineDeploymentClassNamingStrategy(in *MachineDeploymentClassNamingStrategy, out *v1beta2.MachineDeploymentClassNamingStrategy, s conversion.Scope) error { - out.Template = (*string)(unsafe.Pointer(in.Template)) + if err := v1.Convert_Pointer_string_To_string(&in.Template, &out.Template, s); err != nil { + return err + } return nil } @@ -2386,7 +2460,9 @@ func Convert_v1beta1_MachineDeploymentClassNamingStrategy_To_v1beta2_MachineDepl } func autoConvert_v1beta2_MachineDeploymentClassNamingStrategy_To_v1beta1_MachineDeploymentClassNamingStrategy(in *v1beta2.MachineDeploymentClassNamingStrategy, out 
*MachineDeploymentClassNamingStrategy, s conversion.Scope) error { - out.Template = (*string)(unsafe.Pointer(in.Template)) + if err := v1.Convert_string_To_Pointer_string(&in.Template, &out.Template, s); err != nil { + return err + } return nil } @@ -2602,7 +2678,9 @@ func autoConvert_v1beta1_MachineDeploymentTopology_To_v1beta2_MachineDeploymentT } out.Class = in.Class out.Name = in.Name - out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) + if err := v1.Convert_Pointer_string_To_string(&in.FailureDomain, &out.FailureDomain, s); err != nil { + return err + } out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) if in.MachineHealthCheck != nil { in, out := &in.MachineHealthCheck, &out.MachineHealthCheck @@ -2637,7 +2715,9 @@ func autoConvert_v1beta2_MachineDeploymentTopology_To_v1beta1_MachineDeploymentT } out.Class = in.Class out.Name = in.Name - out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) + if err := v1.Convert_string_To_Pointer_string(&in.FailureDomain, &out.FailureDomain, s); err != nil { + return err + } out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) if in.MachineHealthCheck != nil { in, out := &in.MachineHealthCheck, &out.MachineHealthCheck @@ -2883,7 +2963,9 @@ func Convert_v1beta2_MachineHealthCheck_To_v1beta1_MachineHealthCheck(in *v1beta func autoConvert_v1beta1_MachineHealthCheckClass_To_v1beta2_MachineHealthCheckClass(in *MachineHealthCheckClass, out *v1beta2.MachineHealthCheckClass, s conversion.Scope) error { // WARNING: in.UnhealthyConditions requires manual conversion: does not exist in peer-type out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy)) - out.UnhealthyRange = (*string)(unsafe.Pointer(in.UnhealthyRange)) + if err := v1.Convert_Pointer_string_To_string(&in.UnhealthyRange, &out.UnhealthyRange, s); err != nil { + return err + } // WARNING: in.NodeStartupTimeout requires manual conversion: does not exist in peer-type if in.RemediationTemplate != nil { in, out := 
&in.RemediationTemplate, &out.RemediationTemplate @@ -2900,7 +2982,9 @@ func autoConvert_v1beta1_MachineHealthCheckClass_To_v1beta2_MachineHealthCheckCl func autoConvert_v1beta2_MachineHealthCheckClass_To_v1beta1_MachineHealthCheckClass(in *v1beta2.MachineHealthCheckClass, out *MachineHealthCheckClass, s conversion.Scope) error { // WARNING: in.UnhealthyNodeConditions requires manual conversion: does not exist in peer-type out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy)) - out.UnhealthyRange = (*string)(unsafe.Pointer(in.UnhealthyRange)) + if err := v1.Convert_string_To_Pointer_string(&in.UnhealthyRange, &out.UnhealthyRange, s); err != nil { + return err + } // WARNING: in.NodeStartupTimeoutSeconds requires manual conversion: does not exist in peer-type if in.RemediationTemplate != nil { in, out := &in.RemediationTemplate, &out.RemediationTemplate @@ -2961,7 +3045,9 @@ func autoConvert_v1beta1_MachineHealthCheckSpec_To_v1beta2_MachineHealthCheckSpe out.Selector = in.Selector // WARNING: in.UnhealthyConditions requires manual conversion: does not exist in peer-type out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy)) - out.UnhealthyRange = (*string)(unsafe.Pointer(in.UnhealthyRange)) + if err := v1.Convert_Pointer_string_To_string(&in.UnhealthyRange, &out.UnhealthyRange, s); err != nil { + return err + } // WARNING: in.NodeStartupTimeout requires manual conversion: does not exist in peer-type if in.RemediationTemplate != nil { in, out := &in.RemediationTemplate, &out.RemediationTemplate @@ -2980,7 +3066,9 @@ func autoConvert_v1beta2_MachineHealthCheckSpec_To_v1beta1_MachineHealthCheckSpe out.Selector = in.Selector // WARNING: in.UnhealthyNodeConditions requires manual conversion: does not exist in peer-type out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy)) - out.UnhealthyRange = (*string)(unsafe.Pointer(in.UnhealthyRange)) + if err := v1.Convert_string_To_Pointer_string(&in.UnhealthyRange, 
&out.UnhealthyRange, s); err != nil { + return err + } // WARNING: in.NodeStartupTimeoutSeconds requires manual conversion: does not exist in peer-type if in.RemediationTemplate != nil { in, out := &in.RemediationTemplate, &out.RemediationTemplate @@ -3162,7 +3250,15 @@ func autoConvert_v1beta1_MachinePoolClass_To_v1beta2_MachinePoolClass(in *Machin return err } out.FailureDomains = *(*[]string)(unsafe.Pointer(&in.FailureDomains)) - out.NamingStrategy = (*v1beta2.MachinePoolClassNamingStrategy)(unsafe.Pointer(in.NamingStrategy)) + if in.NamingStrategy != nil { + in, out := &in.NamingStrategy, &out.NamingStrategy + *out = new(v1beta2.MachinePoolClassNamingStrategy) + if err := Convert_v1beta1_MachinePoolClassNamingStrategy_To_v1beta2_MachinePoolClassNamingStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.NamingStrategy = nil + } // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type @@ -3176,7 +3272,15 @@ func autoConvert_v1beta2_MachinePoolClass_To_v1beta1_MachinePoolClass(in *v1beta return err } out.FailureDomains = *(*[]string)(unsafe.Pointer(&in.FailureDomains)) - out.NamingStrategy = (*MachinePoolClassNamingStrategy)(unsafe.Pointer(in.NamingStrategy)) + if in.NamingStrategy != nil { + in, out := &in.NamingStrategy, &out.NamingStrategy + *out = new(MachinePoolClassNamingStrategy) + if err := Convert_v1beta2_MachinePoolClassNamingStrategy_To_v1beta1_MachinePoolClassNamingStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.NamingStrategy = nil + } // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type // WARNING: in.NodeVolumeDetachTimeoutSeconds requires manual conversion: does not exist in peer-type // WARNING: in.NodeDeletionTimeoutSeconds requires manual conversion: 
does not exist in peer-type @@ -3185,7 +3289,9 @@ func autoConvert_v1beta2_MachinePoolClass_To_v1beta1_MachinePoolClass(in *v1beta } func autoConvert_v1beta1_MachinePoolClassNamingStrategy_To_v1beta2_MachinePoolClassNamingStrategy(in *MachinePoolClassNamingStrategy, out *v1beta2.MachinePoolClassNamingStrategy, s conversion.Scope) error { - out.Template = (*string)(unsafe.Pointer(in.Template)) + if err := v1.Convert_Pointer_string_To_string(&in.Template, &out.Template, s); err != nil { + return err + } return nil } @@ -3195,7 +3301,9 @@ func Convert_v1beta1_MachinePoolClassNamingStrategy_To_v1beta2_MachinePoolClassN } func autoConvert_v1beta2_MachinePoolClassNamingStrategy_To_v1beta1_MachinePoolClassNamingStrategy(in *v1beta2.MachinePoolClassNamingStrategy, out *MachinePoolClassNamingStrategy, s conversion.Scope) error { - out.Template = (*string)(unsafe.Pointer(in.Template)) + if err := v1.Convert_string_To_Pointer_string(&in.Template, &out.Template, s); err != nil { + return err + } return nil } @@ -3677,9 +3785,15 @@ func autoConvert_v1beta1_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, out if err := Convert_v1_ObjectReference_To_v1beta2_ContractVersionedObjectReference(&in.InfrastructureRef, &out.InfrastructureRef, s); err != nil { return err } - out.Version = (*string)(unsafe.Pointer(in.Version)) - out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) - out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) + if err := v1.Convert_Pointer_string_To_string(&in.Version, &out.Version, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.ProviderID, &out.ProviderID, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.FailureDomain, &out.FailureDomain, s); err != nil { + return err + } out.ReadinessGates = *(*[]v1beta2.MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type // WARNING: 
in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type @@ -3695,9 +3809,15 @@ func autoConvert_v1beta2_MachineSpec_To_v1beta1_MachineSpec(in *v1beta2.MachineS if err := Convert_v1beta2_ContractVersionedObjectReference_To_v1_ObjectReference(&in.InfrastructureRef, &out.InfrastructureRef, s); err != nil { return err } - out.Version = (*string)(unsafe.Pointer(in.Version)) - out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) - out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) + if err := v1.Convert_string_To_Pointer_string(&in.Version, &out.Version, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.ProviderID, &out.ProviderID, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.FailureDomain, &out.FailureDomain, s); err != nil { + return err + } // WARNING: in.MinReadySeconds requires manual conversion: does not exist in peer-type out.ReadinessGates = *(*[]MachineReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type @@ -3851,7 +3971,17 @@ func autoConvert_v1beta1_PatchDefinition_To_v1beta2_PatchDefinition(in *PatchDef if err := Convert_v1beta1_PatchSelector_To_v1beta2_PatchSelector(&in.Selector, &out.Selector, s); err != nil { return err } - out.JSONPatches = *(*[]v1beta2.JSONPatch)(unsafe.Pointer(&in.JSONPatches)) + if in.JSONPatches != nil { + in, out := &in.JSONPatches, &out.JSONPatches + *out = make([]v1beta2.JSONPatch, len(*in)) + for i := range *in { + if err := Convert_v1beta1_JSONPatch_To_v1beta2_JSONPatch(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.JSONPatches = nil + } return nil } @@ -3864,7 +3994,17 @@ func autoConvert_v1beta2_PatchDefinition_To_v1beta1_PatchDefinition(in *v1beta2. 
if err := Convert_v1beta2_PatchSelector_To_v1beta1_PatchSelector(&in.Selector, &out.Selector, s); err != nil { return err } - out.JSONPatches = *(*[]JSONPatch)(unsafe.Pointer(&in.JSONPatches)) + if in.JSONPatches != nil { + in, out := &in.JSONPatches, &out.JSONPatches + *out = make([]JSONPatch, len(*in)) + for i := range *in { + if err := Convert_v1beta2_JSONPatch_To_v1beta1_JSONPatch(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.JSONPatches = nil + } return nil } diff --git a/api/core/v1beta2/cluster_types.go b/api/core/v1beta2/cluster_types.go index 6bf71d126c7e..e74055d4f872 100644 --- a/api/core/v1beta2/cluster_types.go +++ b/api/core/v1beta2/cluster_types.go @@ -698,7 +698,7 @@ type MachineDeploymentTopology struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - FailureDomain *string `json:"failureDomain,omitempty"` + FailureDomain string `json:"failureDomain,omitempty"` // replicas is the number of worker nodes belonging to this set. // If the value is nil, the MachineDeployment is created without the number of Replicas (defaulting to 1) @@ -1060,7 +1060,7 @@ type ClusterV1Beta1DeprecatedStatus struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=10240 - FailureMessage *string `json:"failureMessage,omitempty"` + FailureMessage *string `json:"failureMessage,omitempty"` //nolint:kubeapilinter // field will be removed when v1beta1 is removed } // ClusterControlPlaneStatus groups all the observations about control plane current state. 
diff --git a/api/core/v1beta2/clusterclass_types.go b/api/core/v1beta2/clusterclass_types.go index 14c7a98b373a..e9255d8a9a74 100644 --- a/api/core/v1beta2/clusterclass_types.go +++ b/api/core/v1beta2/clusterclass_types.go @@ -226,7 +226,7 @@ type ControlPlaneClassNamingStrategy struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=1024 - Template *string `json:"template,omitempty"` + Template string `json:"template,omitempty"` } // InfrastructureClassNamingStrategy defines the naming strategy for infrastructure objects. @@ -241,7 +241,7 @@ type InfrastructureClassNamingStrategy struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=1024 - Template *string `json:"template,omitempty"` + Template string `json:"template,omitempty"` } // WorkersClass is a collection of deployment classes. @@ -289,7 +289,7 @@ type MachineDeploymentClass struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - FailureDomain *string `json:"failureDomain,omitempty"` + FailureDomain string `json:"failureDomain,omitempty"` // namingStrategy allows changing the naming pattern used when creating the MachineDeployment. // +optional @@ -379,7 +379,7 @@ type MachineDeploymentClassNamingStrategy struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=1024 - Template *string `json:"template,omitempty"` + Template string `json:"template,omitempty"` } // MachineHealthCheckClass defines a MachineHealthCheck for a group of Machines. 
@@ -408,7 +408,7 @@ type MachineHealthCheckClass struct { // +kubebuilder:validation:Pattern=^\[[0-9]+-[0-9]+\]$ // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=32 - UnhealthyRange *string `json:"unhealthyRange,omitempty"` + UnhealthyRange string `json:"unhealthyRange,omitempty"` // nodeStartupTimeoutSeconds allows to set the maximum time for MachineHealthCheck // to consider a Machine unhealthy if a corresponding Node isn't associated @@ -529,7 +529,7 @@ type MachinePoolClassNamingStrategy struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=1024 - Template *string `json:"template,omitempty"` + Template string `json:"template,omitempty"` } // IsZero returns true if none of the values of MachineHealthCheckClass are defined. @@ -961,7 +961,7 @@ type ClusterClassPatch struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - EnabledIf *string `json:"enabledIf,omitempty"` + EnabledIf string `json:"enabledIf,omitempty"` // definitions define inline patches. // Note: Patches will be applied in the order of the array. @@ -1107,7 +1107,7 @@ type JSONPatchValue struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - Variable *string `json:"variable,omitempty"` + Variable string `json:"variable,omitempty"` // template is the Go template to be used to calculate the value. // A template can reference variables defined in .spec.variables and builtin variables. @@ -1115,7 +1115,7 @@ type JSONPatchValue struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=10240 - Template *string `json:"template,omitempty"` + Template string `json:"template,omitempty"` } // ExternalPatchDefinition defines an external patch. 
@@ -1125,19 +1125,19 @@ type ExternalPatchDefinition struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=512 - GeneratePatchesExtension *string `json:"generatePatchesExtension,omitempty"` + GeneratePatchesExtension string `json:"generatePatchesExtension,omitempty"` // validateTopologyExtension references an extension which is called to validate the topology. // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=512 - ValidateTopologyExtension *string `json:"validateTopologyExtension,omitempty"` + ValidateTopologyExtension string `json:"validateTopologyExtension,omitempty"` // discoverVariablesExtension references an extension which is called to discover variables. // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=512 - DiscoverVariablesExtension *string `json:"discoverVariablesExtension,omitempty"` + DiscoverVariablesExtension string `json:"discoverVariablesExtension,omitempty"` // settings defines key value pairs to be passed to the extensions. 
// Values defined here take precedence over the values defined in the diff --git a/api/core/v1beta2/index/machine.go b/api/core/v1beta2/index/machine.go index 4c7f23495b11..e44ea66e3727 100644 --- a/api/core/v1beta2/index/machine.go +++ b/api/core/v1beta2/index/machine.go @@ -21,7 +21,6 @@ import ( "fmt" "github.com/pkg/errors" - "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -81,11 +80,9 @@ func machineByProviderID(o client.Object) []string { panic(fmt.Sprintf("Expected a Machine but got a %T", o)) } - providerID := ptr.Deref(machine.Spec.ProviderID, "") - - if providerID == "" { + if machine.Spec.ProviderID == "" { return nil } - return []string{providerID} + return []string{machine.Spec.ProviderID} } diff --git a/api/core/v1beta2/index/machine_test.go b/api/core/v1beta2/index/machine_test.go index 67a699f43afc..e4e6df728797 100644 --- a/api/core/v1beta2/index/machine_test.go +++ b/api/core/v1beta2/index/machine_test.go @@ -20,7 +20,6 @@ import ( "testing" . 
"github.com/onsi/gomega" - "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" @@ -76,7 +75,7 @@ func TestIndexMachineByProviderID(t *testing.T) { name: "Machine has invalid providerID", object: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - ProviderID: ptr.To(""), + ProviderID: "", }, }, expected: nil, @@ -85,7 +84,7 @@ func TestIndexMachineByProviderID(t *testing.T) { name: "Machine has valid providerID", object: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - ProviderID: ptr.To(validProviderID), + ProviderID: validProviderID, }, }, expected: []string{validProviderID}, diff --git a/api/core/v1beta2/machine_types.go b/api/core/v1beta2/machine_types.go index f4bd7347dc51..930511c7cba6 100644 --- a/api/core/v1beta2/machine_types.go +++ b/api/core/v1beta2/machine_types.go @@ -400,7 +400,7 @@ type MachineSpec struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - Version *string `json:"version,omitempty"` + Version string `json:"version,omitempty"` // providerID is the identification ID of the machine provided by the provider. // This field must match the provider ID as seen on the node object corresponding to this machine. @@ -415,14 +415,14 @@ type MachineSpec struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=512 - ProviderID *string `json:"providerID,omitempty"` + ProviderID string `json:"providerID,omitempty"` // failureDomain is the failure domain the machine will be created in. // Must match the name of a FailureDomain from the Cluster status. // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - FailureDomain *string `json:"failureDomain,omitempty"` + FailureDomain string `json:"failureDomain,omitempty"` // minReadySeconds is the minimum number of seconds for which a Machine should be ready before considering it available. 
// Defaults to 0 (Machine will be considered available as soon as the Machine is ready) @@ -649,7 +649,7 @@ type MachineV1Beta1DeprecatedStatus struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=10240 - FailureMessage *string `json:"failureMessage,omitempty"` + FailureMessage *string `json:"failureMessage,omitempty"` //nolint:kubeapilinter // field will be removed when v1beta1 is removed } // ANCHOR_END: MachineStatus diff --git a/api/core/v1beta2/machinehealthcheck_types.go b/api/core/v1beta2/machinehealthcheck_types.go index 28223d692f1c..6e72b24fc37c 100644 --- a/api/core/v1beta2/machinehealthcheck_types.go +++ b/api/core/v1beta2/machinehealthcheck_types.go @@ -90,7 +90,7 @@ type MachineHealthCheckSpec struct { // +kubebuilder:validation:Pattern=^\[[0-9]+-[0-9]+\]$ // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=32 - UnhealthyRange *string `json:"unhealthyRange,omitempty"` + UnhealthyRange string `json:"unhealthyRange,omitempty"` // nodeStartupTimeoutSeconds allows to set the maximum time for MachineHealthCheck // to consider a Machine unhealthy if a corresponding Node isn't associated diff --git a/api/core/v1beta2/machinepool_types.go b/api/core/v1beta2/machinepool_types.go index 745b019465d4..96078232875b 100644 --- a/api/core/v1beta2/machinepool_types.go +++ b/api/core/v1beta2/machinepool_types.go @@ -205,7 +205,7 @@ type MachinePoolV1Beta1DeprecatedStatus struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=10240 - FailureMessage *string `json:"failureMessage,omitempty"` + FailureMessage *string `json:"failureMessage,omitempty"` //nolint:kubeapilinter // field will be removed when v1beta1 is removed // readyReplicas is the number of ready replicas for this MachinePool. A machine is considered ready when the node has been created and is "Ready". 
// diff --git a/api/core/v1beta2/machineset_types.go b/api/core/v1beta2/machineset_types.go index baafc877a71a..090fe8d17950 100644 --- a/api/core/v1beta2/machineset_types.go +++ b/api/core/v1beta2/machineset_types.go @@ -375,7 +375,7 @@ type MachineSetV1Beta1DeprecatedStatus struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=10240 - FailureMessage *string `json:"failureMessage,omitempty"` + FailureMessage *string `json:"failureMessage,omitempty"` //nolint:kubeapilinter // field will be removed when v1beta1 is removed // fullyLabeledReplicas is the number of replicas that have labels matching the labels of the machine template of the MachineSet. // diff --git a/api/core/v1beta2/zz_generated.deepcopy.go b/api/core/v1beta2/zz_generated.deepcopy.go index a97fb5ea0e3c..bf7560392fe9 100644 --- a/api/core/v1beta2/zz_generated.deepcopy.go +++ b/api/core/v1beta2/zz_generated.deepcopy.go @@ -193,11 +193,6 @@ func (in *ClusterClassList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterClassPatch) DeepCopyInto(out *ClusterClassPatch) { *out = *in - if in.EnabledIf != nil { - in, out := &in.EnabledIf, &out.EnabledIf - *out = new(string) - **out = **in - } if in.Definitions != nil { in, out := &in.Definitions, &out.Definitions *out = make([]PatchDefinition, len(*in)) @@ -815,7 +810,7 @@ func (in *ControlPlaneClass) DeepCopyInto(out *ControlPlaneClass) { if in.NamingStrategy != nil { in, out := &in.NamingStrategy, &out.NamingStrategy *out = new(ControlPlaneClassNamingStrategy) - (*in).DeepCopyInto(*out) + **out = **in } if in.NodeDrainTimeoutSeconds != nil { in, out := &in.NodeDrainTimeoutSeconds, &out.NodeDrainTimeoutSeconds @@ -852,11 +847,6 @@ func (in *ControlPlaneClass) DeepCopy() *ControlPlaneClass { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *ControlPlaneClassNamingStrategy) DeepCopyInto(out *ControlPlaneClassNamingStrategy) { *out = *in - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneClassNamingStrategy. @@ -945,21 +935,6 @@ func (in *ControlPlaneVariables) DeepCopy() *ControlPlaneVariables { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalPatchDefinition) DeepCopyInto(out *ExternalPatchDefinition) { *out = *in - if in.GeneratePatchesExtension != nil { - in, out := &in.GeneratePatchesExtension, &out.GeneratePatchesExtension - *out = new(string) - **out = **in - } - if in.ValidateTopologyExtension != nil { - in, out := &in.ValidateTopologyExtension, &out.ValidateTopologyExtension - *out = new(string) - **out = **in - } - if in.DiscoverVariablesExtension != nil { - in, out := &in.DiscoverVariablesExtension, &out.DiscoverVariablesExtension - *out = new(string) - **out = **in - } if in.Settings != nil { in, out := &in.Settings, &out.Settings *out = make(map[string]string, len(*in)) @@ -1013,7 +988,7 @@ func (in *InfrastructureClass) DeepCopyInto(out *InfrastructureClass) { if in.NamingStrategy != nil { in, out := &in.NamingStrategy, &out.NamingStrategy *out = new(InfrastructureClassNamingStrategy) - (*in).DeepCopyInto(*out) + **out = **in } } @@ -1030,11 +1005,6 @@ func (in *InfrastructureClass) DeepCopy() *InfrastructureClass { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *InfrastructureClassNamingStrategy) DeepCopyInto(out *InfrastructureClassNamingStrategy) { *out = *in - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureClassNamingStrategy. @@ -1058,7 +1028,7 @@ func (in *JSONPatch) DeepCopyInto(out *JSONPatch) { if in.ValueFrom != nil { in, out := &in.ValueFrom, &out.ValueFrom *out = new(JSONPatchValue) - (*in).DeepCopyInto(*out) + **out = **in } } @@ -1075,16 +1045,6 @@ func (in *JSONPatch) DeepCopy() *JSONPatch { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *JSONPatchValue) DeepCopyInto(out *JSONPatchValue) { *out = *in - if in.Variable != nil { - in, out := &in.Variable, &out.Variable - *out = new(string) - **out = **in - } - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatchValue. @@ -1372,15 +1332,10 @@ func (in *MachineDeploymentClass) DeepCopyInto(out *MachineDeploymentClass) { *out = new(MachineHealthCheckClass) (*in).DeepCopyInto(*out) } - if in.FailureDomain != nil { - in, out := &in.FailureDomain, &out.FailureDomain - *out = new(string) - **out = **in - } if in.NamingStrategy != nil { in, out := &in.NamingStrategy, &out.NamingStrategy *out = new(MachineDeploymentClassNamingStrategy) - (*in).DeepCopyInto(*out) + **out = **in } if in.NodeDrainTimeoutSeconds != nil { in, out := &in.NodeDrainTimeoutSeconds, &out.NodeDrainTimeoutSeconds @@ -1427,11 +1382,6 @@ func (in *MachineDeploymentClass) DeepCopy() *MachineDeploymentClass { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MachineDeploymentClassNamingStrategy) DeepCopyInto(out *MachineDeploymentClassNamingStrategy) { *out = *in - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentClassNamingStrategy. @@ -1631,11 +1581,6 @@ func (in *MachineDeploymentStrategy) DeepCopy() *MachineDeploymentStrategy { func (in *MachineDeploymentTopology) DeepCopyInto(out *MachineDeploymentTopology) { *out = *in in.Metadata.DeepCopyInto(&out.Metadata) - if in.FailureDomain != nil { - in, out := &in.FailureDomain, &out.FailureDomain - *out = new(string) - **out = **in - } if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) @@ -1955,11 +1900,6 @@ func (in *MachineHealthCheckClass) DeepCopyInto(out *MachineHealthCheckClass) { *out = new(intstr.IntOrString) **out = **in } - if in.UnhealthyRange != nil { - in, out := &in.UnhealthyRange, &out.UnhealthyRange - *out = new(string) - **out = **in - } if in.NodeStartupTimeoutSeconds != nil { in, out := &in.NodeStartupTimeoutSeconds, &out.NodeStartupTimeoutSeconds *out = new(int32) @@ -2063,11 +2003,6 @@ func (in *MachineHealthCheckSpec) DeepCopyInto(out *MachineHealthCheckSpec) { *out = new(intstr.IntOrString) **out = **in } - if in.UnhealthyRange != nil { - in, out := &in.UnhealthyRange, &out.UnhealthyRange - *out = new(string) - **out = **in - } if in.NodeStartupTimeoutSeconds != nil { in, out := &in.NodeStartupTimeoutSeconds, &out.NodeStartupTimeoutSeconds *out = new(int32) @@ -2291,7 +2226,7 @@ func (in *MachinePoolClass) DeepCopyInto(out *MachinePoolClass) { if in.NamingStrategy != nil { in, out := &in.NamingStrategy, &out.NamingStrategy *out = new(MachinePoolClassNamingStrategy) - (*in).DeepCopyInto(*out) + **out = **in } if in.NodeDrainTimeoutSeconds != nil { in, out := &in.NodeDrainTimeoutSeconds, &out.NodeDrainTimeoutSeconds @@ -2328,11 +2263,6 @@ func (in 
*MachinePoolClass) DeepCopy() *MachinePoolClass { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachinePoolClassNamingStrategy) DeepCopyInto(out *MachinePoolClassNamingStrategy) { *out = *in - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(string) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolClassNamingStrategy. @@ -2868,21 +2798,6 @@ func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { *out = *in in.Bootstrap.DeepCopyInto(&out.Bootstrap) out.InfrastructureRef = in.InfrastructureRef - if in.Version != nil { - in, out := &in.Version, &out.Version - *out = new(string) - **out = **in - } - if in.ProviderID != nil { - in, out := &in.ProviderID, &out.ProviderID - *out = new(string) - **out = **in - } - if in.FailureDomain != nil { - in, out := &in.FailureDomain, &out.FailureDomain - *out = new(string) - **out = **in - } if in.MinReadySeconds != nil { in, out := &in.MinReadySeconds, &out.MinReadySeconds *out = new(int32) diff --git a/api/runtime/v1alpha1/conversion.go b/api/runtime/v1alpha1/conversion.go index f5a0228c041a..71053462a04a 100644 --- a/api/runtime/v1alpha1/conversion.go +++ b/api/runtime/v1alpha1/conversion.go @@ -34,7 +34,12 @@ func (src *ExtensionConfig) ConvertTo(dstRaw conversion.Hub) error { func (dst *ExtensionConfig) ConvertFrom(srcRaw conversion.Hub) error { src := srcRaw.(*runtimev1.ExtensionConfig) - return Convert_v1beta2_ExtensionConfig_To_v1alpha1_ExtensionConfig(src, dst, nil) + if err := Convert_v1beta2_ExtensionConfig_To_v1alpha1_ExtensionConfig(src, dst, nil); err != nil { + return err + } + + dropEmptyStringsExtensionConfig(dst) + return nil } func Convert_v1beta2_ExtensionConfigStatus_To_v1alpha1_ExtensionConfigStatus(in *runtimev1.ExtensionConfigStatus, out *ExtensionConfigStatus, s apimachineryconversion.Scope) error { @@ -100,3 +105,16 @@ func 
Convert_v1_Condition_To_v1beta1_Condition(in *metav1.Condition, out *cluste func Convert_v1beta1_Condition_To_v1_Condition(in *clusterv1beta1.Condition, out *metav1.Condition, s apimachineryconversion.Scope) error { return clusterv1beta1.Convert_v1beta1_Condition_To_v1_Condition(in, out, s) } + +func dropEmptyStringsExtensionConfig(dst *ExtensionConfig) { + dropEmptyString(&dst.Spec.ClientConfig.URL) + if dst.Spec.ClientConfig.Service != nil { + dropEmptyString(&dst.Spec.ClientConfig.Service.Path) + } +} + +func dropEmptyString(s **string) { + if *s != nil && **s == "" { + *s = nil + } +} diff --git a/api/runtime/v1alpha1/conversion_test.go b/api/runtime/v1alpha1/conversion_test.go index 357962b392b3..674dcca64dd7 100644 --- a/api/runtime/v1alpha1/conversion_test.go +++ b/api/runtime/v1alpha1/conversion_test.go @@ -43,6 +43,7 @@ func TestFuzzyConversion(t *testing.T) { func ExtensionConfigFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { return []interface{}{ hubExtensionConfigStatus, + spokeExtensionConfig, spokeExtensionConfigStatus, } } @@ -57,6 +58,12 @@ func hubExtensionConfigStatus(in *runtimev1.ExtensionConfigStatus, c randfill.Co } } +func spokeExtensionConfig(in *ExtensionConfig, c randfill.Continue) { + c.FillNoCustom(in) + + dropEmptyStringsExtensionConfig(in) +} + func spokeExtensionConfigStatus(in *ExtensionConfigStatus, c randfill.Continue) { c.FillNoCustom(in) // Drop empty structs with only omit empty fields. 
diff --git a/api/runtime/v1alpha1/zz_generated.conversion.go b/api/runtime/v1alpha1/zz_generated.conversion.go index 976acfeec3d3..c9d04fab3a8c 100644 --- a/api/runtime/v1alpha1/zz_generated.conversion.go +++ b/api/runtime/v1alpha1/zz_generated.conversion.go @@ -132,8 +132,18 @@ func RegisterConversions(s *runtime.Scheme) error { } func autoConvert_v1alpha1_ClientConfig_To_v1beta2_ClientConfig(in *ClientConfig, out *v1beta2.ClientConfig, s conversion.Scope) error { - out.URL = (*string)(unsafe.Pointer(in.URL)) - out.Service = (*v1beta2.ServiceReference)(unsafe.Pointer(in.Service)) + if err := v1.Convert_Pointer_string_To_string(&in.URL, &out.URL, s); err != nil { + return err + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(v1beta2.ServiceReference) + if err := Convert_v1alpha1_ServiceReference_To_v1beta2_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) return nil } @@ -144,8 +154,18 @@ func Convert_v1alpha1_ClientConfig_To_v1beta2_ClientConfig(in *ClientConfig, out } func autoConvert_v1beta2_ClientConfig_To_v1alpha1_ClientConfig(in *v1beta2.ClientConfig, out *ClientConfig, s conversion.Scope) error { - out.URL = (*string)(unsafe.Pointer(in.URL)) - out.Service = (*ServiceReference)(unsafe.Pointer(in.Service)) + if err := v1.Convert_string_To_Pointer_string(&in.URL, &out.URL, s); err != nil { + return err + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + if err := Convert_v1beta2_ServiceReference_To_v1alpha1_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) return nil } @@ -346,7 +366,9 @@ func Convert_v1beta2_GroupVersionHook_To_v1alpha1_GroupVersionHook(in *v1beta2.G func autoConvert_v1alpha1_ServiceReference_To_v1beta2_ServiceReference(in *ServiceReference, out 
*v1beta2.ServiceReference, s conversion.Scope) error { out.Namespace = in.Namespace out.Name = in.Name - out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := v1.Convert_Pointer_string_To_string(&in.Path, &out.Path, s); err != nil { + return err + } out.Port = (*int32)(unsafe.Pointer(in.Port)) return nil } @@ -359,7 +381,9 @@ func Convert_v1alpha1_ServiceReference_To_v1beta2_ServiceReference(in *ServiceRe func autoConvert_v1beta2_ServiceReference_To_v1alpha1_ServiceReference(in *v1beta2.ServiceReference, out *ServiceReference, s conversion.Scope) error { out.Namespace = in.Namespace out.Name = in.Name - out.Path = (*string)(unsafe.Pointer(in.Path)) + if err := v1.Convert_string_To_Pointer_string(&in.Path, &out.Path, s); err != nil { + return err + } out.Port = (*int32)(unsafe.Pointer(in.Port)) return nil } diff --git a/api/runtime/v1beta2/extensionconfig_types.go b/api/runtime/v1beta2/extensionconfig_types.go index 73d24106d34f..4f0316f6c60f 100644 --- a/api/runtime/v1beta2/extensionconfig_types.go +++ b/api/runtime/v1beta2/extensionconfig_types.go @@ -65,7 +65,7 @@ type ClientConfig struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=512 - URL *string `json:"url,omitempty"` + URL string `json:"url,omitempty"` // service is a reference to the Kubernetes service for the Extension server. // Note: Exactly one of `url` or `service` must be specified. @@ -101,7 +101,7 @@ type ServiceReference struct { // +optional // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=512 - Path *string `json:"path,omitempty"` + Path string `json:"path,omitempty"` // port is the port on the service that's hosting the Extension server. // Defaults to 443. 
diff --git a/api/runtime/v1beta2/zz_generated.deepcopy.go b/api/runtime/v1beta2/zz_generated.deepcopy.go index 64820d769401..b4d2fbe95776 100644 --- a/api/runtime/v1beta2/zz_generated.deepcopy.go +++ b/api/runtime/v1beta2/zz_generated.deepcopy.go @@ -29,11 +29,6 @@ import ( // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClientConfig) DeepCopyInto(out *ClientConfig) { *out = *in - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } if in.Service != nil { in, out := &in.Service, &out.Service *out = new(ServiceReference) @@ -263,11 +258,6 @@ func (in *GroupVersionHook) DeepCopy() *GroupVersionHook { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = *in - if in.Path != nil { - in, out := &in.Path, &out.Path - *out = new(string) - **out = **in - } if in.Port != nil { in, out := &in.Port, &out.Port *out = new(int32) diff --git a/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go b/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go index 943450c424e2..2d97f04801bb 100644 --- a/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go +++ b/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go @@ -150,7 +150,7 @@ func TestNewInitControlPlaneDiskMounts(t *testing.T) { Device: "test-device", Layout: true, Overwrite: ptr.To(false), - TableType: ptr.To("gpt"), + TableType: "gpt", }, }, Filesystems: []bootstrapv1.Filesystem{ diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index a1da5ee33098..f244be696968 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -304,12 +304,12 @@ func (r *KubeadmConfigReconciler) reconcile(ctx 
context.Context, scope *Scope, c return ctrl.Result{}, nil // Reconcile status for machines that already have a secret reference, but our status isn't up to date. // This case solves the pivoting scenario (or a backup restore) which doesn't preserve the status subresource on objects. - case configOwner.DataSecretName() != nil && (config.Status.Initialization == nil || !ptr.Deref(config.Status.Initialization.DataSecretCreated, false) || config.Status.DataSecretName == nil): + case configOwner.DataSecretName() != nil && (config.Status.Initialization == nil || !ptr.Deref(config.Status.Initialization.DataSecretCreated, false) || config.Status.DataSecretName == ""): if config.Status.Initialization == nil { config.Status.Initialization = &bootstrapv1.KubeadmConfigInitializationStatus{} } config.Status.Initialization.DataSecretCreated = ptr.To(true) - config.Status.DataSecretName = configOwner.DataSecretName() + config.Status.DataSecretName = *configOwner.DataSecretName() v1beta1conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableV1Beta1Condition) conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableCondition, @@ -1026,7 +1026,7 @@ func (r *KubeadmConfigReconciler) resolveUsers(ctx context.Context, cfg *bootstr } in.PasswdFrom = nil passwdContent := string(data) - in.Passwd = &passwdContent + in.Passwd = passwdContent } collected = append(collected, in) } @@ -1344,8 +1344,8 @@ func (r *KubeadmConfigReconciler) computeClusterConfigurationAndAdditionalData(c } // Use Version from machine, if defined - if machine.Spec.Version != nil { - data.KubernetesVersion = machine.Spec.Version + if machine.Spec.Version != "" { + data.KubernetesVersion = ptr.To(machine.Spec.Version) } // Use ControlPlaneComponentHealthCheckSeconds from init configuration @@ -1398,7 +1398,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope return errors.Wrapf(err, "failed to update bootstrap data secret for KubeadmConfig 
%s/%s", scope.Config.Namespace, scope.Config.Name) } } - scope.Config.Status.DataSecretName = ptr.To(secret.Name) + scope.Config.Status.DataSecretName = secret.Name if scope.Config.Status.Initialization == nil { scope.Config.Status.Initialization = &bootstrapv1.KubeadmConfigInitializationStatus{} } diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go index 004f193498f4..6492f74f05c8 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go @@ -530,7 +530,7 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T) g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Initialization).ToNot(BeNil()) g.Expect(ptr.Deref(cfg.Status.Initialization.DataSecretCreated, false)).To(BeTrue()) - g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) + g.Expect(cfg.Status.DataSecretName).NotTo(BeEmpty()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) assertHasTrueCondition(g, myclient, request, bootstrapv1.KubeadmConfigCertificatesAvailableCondition) assertHasTrueCondition(g, myclient, request, bootstrapv1.KubeadmConfigDataSecretAvailableCondition) @@ -712,7 +712,7 @@ func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsRe g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Initialization).ToNot(BeNil()) g.Expect(ptr.Deref(cfg.Status.Initialization.DataSecretCreated, false)).To(BeTrue()) - g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) + g.Expect(cfg.Status.DataSecretName).NotTo(BeEmpty()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) assertHasTrueCondition(g, myclient, request, bootstrapv1.KubeadmConfigDataSecretAvailableCondition) @@ -790,7 +790,7 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) 
g.Expect(cfg.Status.Initialization).ToNot(BeNil()) g.Expect(ptr.Deref(cfg.Status.Initialization.DataSecretCreated, false)).To(BeTrue()) - g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) + g.Expect(cfg.Status.DataSecretName).NotTo(BeEmpty()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) l := &corev1.SecretList{} @@ -892,13 +892,13 @@ func TestBootstrapDataFormat(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Initialization).ToNot(BeNil()) g.Expect(ptr.Deref(cfg.Status.Initialization.DataSecretCreated, false)).To(BeTrue()) - g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) + g.Expect(cfg.Status.DataSecretName).NotTo(BeEmpty()) // Read the secret containing the bootstrap data which was generated by the // KubeadmConfig controller. key := client.ObjectKey{ Namespace: metav1.NamespaceDefault, - Name: *cfg.Status.DataSecretName, + Name: cfg.Status.DataSecretName, } secret := &corev1.Secret{} err = myclient.Get(ctx, key, secret) @@ -998,7 +998,7 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Initialization).ToNot(BeNil()) g.Expect(ptr.Deref(cfg.Status.Initialization.DataSecretCreated, false)).To(BeTrue()) - g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) + g.Expect(cfg.Status.DataSecretName).NotTo(BeEmpty()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) } @@ -1053,7 +1053,7 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Initialization).ToNot(BeNil()) g.Expect(ptr.Deref(cfg.Status.Initialization.DataSecretCreated, false)).To(BeTrue()) - g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) + g.Expect(cfg.Status.DataSecretName).NotTo(BeEmpty()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) request = ctrl.Request{ @@ -1070,7 +1070,7 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Initialization).ToNot(BeNil()) 
g.Expect(ptr.Deref(cfg.Status.Initialization.DataSecretCreated, false)).To(BeTrue()) - g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) + g.Expect(cfg.Status.DataSecretName).NotTo(BeEmpty()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) l := &corev1.SecretList{} @@ -1301,7 +1301,7 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Initialization).ToNot(BeNil()) g.Expect(ptr.Deref(cfg.Status.Initialization.DataSecretCreated, false)).To(BeTrue()) - g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) + g.Expect(cfg.Status.DataSecretName).NotTo(BeEmpty()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) l := &corev1.SecretList{} @@ -1494,7 +1494,7 @@ func TestBootstrapTokenRefreshIfTokenSecretCleaned(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Initialization).ToNot(BeNil()) g.Expect(ptr.Deref(cfg.Status.Initialization.DataSecretCreated, false)).To(BeTrue()) - g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) + g.Expect(cfg.Status.DataSecretName).NotTo(BeEmpty()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) g.Expect(cfg.Spec.JoinConfiguration.Discovery.BootstrapToken.Token).ToNot(BeEmpty()) firstToken := cfg.Spec.JoinConfiguration.Discovery.BootstrapToken.Token @@ -1568,7 +1568,7 @@ func TestBootstrapTokenRefreshIfTokenSecretCleaned(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) g.Expect(cfg.Status.Initialization).ToNot(BeNil()) g.Expect(ptr.Deref(cfg.Status.Initialization.DataSecretCreated, false)).To(BeTrue()) - g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) + g.Expect(cfg.Status.DataSecretName).NotTo(BeEmpty()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) g.Expect(cfg.Spec.JoinConfiguration.Discovery.BootstrapToken.Token).ToNot(BeEmpty()) firstToken := cfg.Spec.JoinConfiguration.Discovery.BootstrapToken.Token @@ -1855,7 +1855,7 @@ func TestKubeadmConfigReconciler_computeClusterConfigurationAndAdditionalData(t }, machine: 
&clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.23.0"), + Version: "v1.23.0", }, }, initConfiguration: &bootstrapv1.InitConfiguration{ @@ -2111,10 +2111,10 @@ func TestKubeadmConfigReconciler_Reconcile_ExactlyOneControlPlaneMachineInitiali for _, c := range confList.Items { // Ensure the DataSecretName is only set for controlPlaneInitConfigFirst. if c.Name == controlPlaneInitConfigFirst.Name { - g.Expect(*c.Status.DataSecretName).To(Not(BeEmpty())) + g.Expect(c.Status.DataSecretName).To(Not(BeEmpty())) } if c.Name == controlPlaneInitConfigSecond.Name { - g.Expect(c.Status.DataSecretName).To(BeNil()) + g.Expect(c.Status.DataSecretName).To(BeEmpty()) } } } @@ -2438,7 +2438,7 @@ func TestKubeadmConfigReconciler_ResolveUsers(t *testing.T) { Users: []bootstrapv1.User{ { Name: "foo", - Passwd: &fakePasswd, + Passwd: fakePasswd, }, }, }, @@ -2446,7 +2446,7 @@ func TestKubeadmConfigReconciler_ResolveUsers(t *testing.T) { expect: []bootstrapv1.User{ { Name: "foo", - Passwd: &fakePasswd, + Passwd: fakePasswd, }, }, }, @@ -2469,7 +2469,7 @@ func TestKubeadmConfigReconciler_ResolveUsers(t *testing.T) { expect: []bootstrapv1.User{ { Name: "foo", - Passwd: &fakePasswd, + Passwd: fakePasswd, }, }, objects: []client.Object{testSecret}, @@ -2480,7 +2480,7 @@ func TestKubeadmConfigReconciler_ResolveUsers(t *testing.T) { Users: []bootstrapv1.User{ { Name: "foo", - Passwd: &fakePasswd, + Passwd: fakePasswd, }, { Name: "bar", @@ -2497,11 +2497,11 @@ func TestKubeadmConfigReconciler_ResolveUsers(t *testing.T) { expect: []bootstrapv1.User{ { Name: "foo", - Passwd: &fakePasswd, + Passwd: fakePasswd, }, { Name: "bar", - Passwd: &fakePasswd, + Passwd: fakePasswd, }, }, objects: []client.Object{testSecret}, @@ -2536,7 +2536,7 @@ func TestKubeadmConfigReconciler_ResolveUsers(t *testing.T) { for _, user := range tc.cfg.Spec.Users { if passwdFrom[user.Name] { g.Expect(user.PasswdFrom).NotTo(BeNil()) - g.Expect(user.Passwd).To(BeNil()) + g.Expect(user.Passwd).To(BeEmpty()) 
} } }) diff --git a/bootstrap/kubeadm/internal/ignition/clc/clc_test.go b/bootstrap/kubeadm/internal/ignition/clc/clc_test.go index 4c48167cf56e..fd8ee290e3a3 100644 --- a/bootstrap/kubeadm/internal/ignition/clc/clc_test.go +++ b/bootstrap/kubeadm/internal/ignition/clc/clc_test.go @@ -85,13 +85,13 @@ func TestRender(t *testing.T) { Users: []bootstrapv1.User{ { Name: "foo", - Gecos: ptr.To("Foo B. Bar"), - Groups: ptr.To("foo, bar"), - HomeDir: ptr.To("/home/foo"), - Shell: ptr.To("/bin/false"), - Passwd: ptr.To("$6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/"), - PrimaryGroup: ptr.To("foo"), - Sudo: ptr.To("ALL=(ALL) NOPASSWD:ALL"), + Gecos: "Foo B. Bar", + Groups: "foo, bar", + HomeDir: "/home/foo", + Shell: "/bin/false", + Passwd: "$6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/", + PrimaryGroup: "foo", + Sudo: "ALL=(ALL) NOPASSWD:ALL", SSHAuthorizedKeys: []string{ "foo", "bar", @@ -104,7 +104,7 @@ func TestRender(t *testing.T) { Device: "/dev/disk/azure/scsi1/lun0", Layout: true, Overwrite: ptr.To(true), - TableType: ptr.To("gpt"), + TableType: "gpt", }, }, Filesystems: []bootstrapv1.Filesystem{ diff --git a/bootstrap/kubeadm/internal/webhooks/kubeadmconfig_test.go b/bootstrap/kubeadm/internal/webhooks/kubeadmconfig_test.go index f5661124c917..4a7d86b9ba9a 100644 --- a/bootstrap/kubeadm/internal/webhooks/kubeadmconfig_test.go +++ b/bootstrap/kubeadm/internal/webhooks/kubeadmconfig_test.go @@ -192,7 +192,7 @@ func TestKubeadmConfigValidate(t *testing.T) { Spec: bootstrapv1.KubeadmConfigSpec{ Users: []bootstrapv1.User{ { - Passwd: ptr.To("foo"), + Passwd: "foo", }, }, }, @@ -228,7 +228,7 @@ func TestKubeadmConfigValidate(t *testing.T) { Users: []bootstrapv1.User{ { PasswdFrom: &bootstrapv1.PasswdSource{}, - Passwd: ptr.To("foo"), + Passwd: "foo", }, }, }, @@ -249,7 +249,7 @@ func TestKubeadmConfigValidate(t *testing.T) { Key: "bar", }, }, - Passwd: ptr.To("foo"), 
+ Passwd: "foo", }, }, }, @@ -270,7 +270,7 @@ func TestKubeadmConfigValidate(t *testing.T) { Name: "foo", }, }, - Passwd: ptr.To("foo"), + Passwd: "foo", }, }, }, @@ -332,7 +332,7 @@ func TestKubeadmConfigValidate(t *testing.T) { DiskSetup: &bootstrapv1.DiskSetup{ Partitions: []bootstrapv1.Partition{ { - TableType: ptr.To("MS-DOS"), + TableType: "MS-DOS", }, }, }, @@ -379,7 +379,7 @@ func TestKubeadmConfigValidate(t *testing.T) { DiskSetup: &bootstrapv1.DiskSetup{ Filesystems: []bootstrapv1.Filesystem{ { - ReplaceFS: ptr.To("ntfs"), + ReplaceFS: "ntfs", }, }, }, @@ -399,7 +399,7 @@ func TestKubeadmConfigValidate(t *testing.T) { DiskSetup: &bootstrapv1.DiskSetup{ Filesystems: []bootstrapv1.Filesystem{ { - Partition: ptr.To("1"), + Partition: "1", }, }, }, diff --git a/bootstrap/util/configowner_test.go b/bootstrap/util/configowner_test.go index 3fe33d7b7b8b..36e364163e4a 100644 --- a/bootstrap/util/configowner_test.go +++ b/bootstrap/util/configowner_test.go @@ -54,7 +54,7 @@ func TestGetConfigOwner(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To("my-data-secret"), }, - Version: ptr.To("v1.19.6"), + Version: "v1.19.6", }, Status: clusterv1.MachineStatus{ Initialization: &clusterv1.MachineInitializationStatus{ @@ -104,7 +104,7 @@ func TestGetConfigOwner(t *testing.T) { ClusterName: "my-cluster", Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.19.6"), + Version: "v1.19.6", }, }, }, diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index 70e76d20a269..21be4df59662 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -184,7 +184,7 @@ func (c *ControlPlane) MachineWithDeleteAnnotation(machines collections.Machines // FailureDomainWithMostMachines returns the fd with most machines in it and at least one eligible machine in it. 
// Note: if there are eligibleMachines machines in failure domain that do not exist anymore, cleaning up those failure domains takes precedence. -func (c *ControlPlane) FailureDomainWithMostMachines(ctx context.Context, eligibleMachines collections.Machines) *string { +func (c *ControlPlane) FailureDomainWithMostMachines(ctx context.Context, eligibleMachines collections.Machines) string { // See if there are any Machines that are not in currently defined failure domains first. notInFailureDomains := eligibleMachines.Filter( collections.Not(collections.InFailureDomains(getGetFailureDomainIDs(c.FailureDomains())...)), @@ -205,17 +205,17 @@ func (c *ControlPlane) FailureDomainWithMostMachines(ctx context.Context, eligib // // In case of tie (more failure domain with the same number of up-to-date, not deleted machines) the failure domain with the fewest number of // machine overall is picked to ensure a better spreading of machines while the rollout is performed. -func (c *ControlPlane) NextFailureDomainForScaleUp(ctx context.Context) (*string, error) { +func (c *ControlPlane) NextFailureDomainForScaleUp(ctx context.Context) (string, error) { if len(c.FailureDomains()) == 0 { - return nil, nil + return "", nil } return failuredomains.PickFewest(ctx, c.FailureDomains(), c.Machines, c.UpToDateMachines().Filter(collections.Not(collections.HasDeletionTimestamp))), nil } -func getGetFailureDomainIDs(failureDomains []clusterv1.FailureDomain) []*string { - ids := make([]*string, 0, len(failureDomains)) +func getGetFailureDomainIDs(failureDomains []clusterv1.FailureDomain) []string { + ids := make([]string, 0, len(failureDomains)) for _, fd := range failureDomains { - ids = append(ids, ptr.To(fd.Name)) + ids = append(ids, fd.Name) } return ids } diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go index cdfb1e68d8ee..d39654ad4c0e 100644 --- a/controlplane/kubeadm/internal/control_plane_test.go +++ 
b/controlplane/kubeadm/internal/control_plane_test.go @@ -54,13 +54,13 @@ func TestControlPlane(t *testing.T) { t.Run("With all machines in known failure domain, should return the FD with most number of machines", func(*testing.T) { g := NewWithT(t) - g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("two")) + g.Expect(controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("two")) }) t.Run("With some machines in non defined failure domains", func(*testing.T) { g := NewWithT(t) controlPlane.Machines.Insert(machine("machine-5", withFailureDomain("unknown"))) - g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("unknown")) + g.Expect(controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("unknown")) }) }) @@ -84,36 +84,36 @@ func TestControlPlane(t *testing.T) { "machine-1": &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), // up-to-date - FailureDomain: ptr.To("one"), + Version: "v1.31.0", // up-to-date + FailureDomain: "one", InfrastructureRef: clusterv1.ContractVersionedObjectReference{Kind: "GenericInfrastructureMachine", APIGroup: clusterv1.GroupVersionInfrastructure.Group, Name: "m1"}, }}, "machine-2": &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.29.0"), // not up-to-date - FailureDomain: ptr.To("two"), + Version: "v1.29.0", // not up-to-date + FailureDomain: "two", InfrastructureRef: clusterv1.ContractVersionedObjectReference{Kind: "GenericInfrastructureMachine", APIGroup: clusterv1.GroupVersionInfrastructure.Group, Name: "m2"}, }}, "machine-3": &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3", DeletionTimestamp: ptr.To(metav1.Now())}, // deleted Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.29.3"), // not up-to-date - FailureDomain: ptr.To("three"), + Version: "v1.29.3", // 
not up-to-date + FailureDomain: "three", InfrastructureRef: clusterv1.ContractVersionedObjectReference{Kind: "GenericInfrastructureMachine", APIGroup: clusterv1.GroupVersionInfrastructure.Group, Name: "m3"}, }}, "machine-4": &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m4", DeletionTimestamp: ptr.To(metav1.Now())}, // deleted Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), // up-to-date - FailureDomain: ptr.To("two"), + Version: "v1.31.0", // up-to-date + FailureDomain: "two", InfrastructureRef: clusterv1.ContractVersionedObjectReference{Kind: "GenericInfrastructureMachine", APIGroup: clusterv1.GroupVersionInfrastructure.Group, Name: "m4"}, }}, "machine-5": &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m5"}, Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), // up-to-date - FailureDomain: ptr.To("three"), + Version: "v1.31.0", // up-to-date + FailureDomain: "three", InfrastructureRef: clusterv1.ContractVersionedObjectReference{Kind: "GenericInfrastructureMachine", APIGroup: clusterv1.GroupVersionInfrastructure.Group, Name: "m5"}, }}, } @@ -140,7 +140,7 @@ func TestControlPlane(t *testing.T) { fd, err := controlPlane.NextFailureDomainForScaleUp(ctx) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(fd).To(Equal(ptr.To("two"))) // deleted up-to-date machines (m4) should not be counted when picking the next failure domain for scale up + g.Expect(fd).To(Equal("two")) // deleted up-to-date machines (m4) should not be counted when picking the next failure domain for scale up }) } @@ -367,7 +367,7 @@ func failureDomain(name string, controlPlane bool) clusterv1.FailureDomain { func withFailureDomain(fd string) machineOpt { return func(m *clusterv1.Machine) { - m.Spec.FailureDomain = &fd + m.Spec.FailureDomain = fd } } diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 4a519ee0da41..c03f9d2a1684 100644 --- 
a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -1311,18 +1311,18 @@ func (r *KubeadmControlPlaneReconciler) adoptMachines(ctx context.Context, kcp * return errors.Errorf("unable to adopt Machine %v/%v: expected a ConfigRef of kind KubeadmConfig but instead found %v", m.Namespace, m.Name, ref) } - if m.Spec.Version == nil { + if m.Spec.Version == "" { // if the machine's version is not immediately apparent, assume the operator knows what they're doing continue } - machineVersion, err := semver.ParseTolerant(*m.Spec.Version) + machineVersion, err := semver.ParseTolerant(m.Spec.Version) if err != nil { - return errors.Wrapf(err, "failed to parse kubernetes version %q", *m.Spec.Version) + return errors.Wrapf(err, "failed to parse kubernetes version %q", m.Spec.Version) } if !util.IsSupportedVersionSkew(kcpVersion, machineVersion) { - r.recorder.Eventf(kcp, corev1.EventTypeWarning, "AdoptionFailed", "Could not adopt Machine %s/%s: its version (%q) is outside supported +/- one minor version skew from KCP's (%q)", m.Namespace, m.Name, *m.Spec.Version, kcp.Spec.Version) + r.recorder.Eventf(kcp, corev1.EventTypeWarning, "AdoptionFailed", "Could not adopt Machine %s/%s: its version (%q) is outside supported +/- one minor version skew from KCP's (%q)", m.Namespace, m.Name, m.Spec.Version, kcp.Spec.Version) // avoid returning an error here so we don't cause the KCP controller to spin until the operator clarifies their intent return nil } @@ -1370,7 +1370,7 @@ func (r *KubeadmControlPlaneReconciler) adoptOwnedSecrets(ctx context.Context, k continue } // avoid taking ownership of the bootstrap data secret - if currentOwner.Status.DataSecretName != nil && s.Name == *currentOwner.Status.DataSecretName { + if s.Name == currentOwner.Status.DataSecretName { continue } diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 
79745331bd45..b6873adf7fd0 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -520,7 +520,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { Name: name, }, }, - Version: &version, + Version: version, }, } cfg := &bootstrapv1.KubeadmConfig{ @@ -588,7 +588,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { Name: name, }, }, - Version: &version, + Version: version, }, } cfg := &bootstrapv1.KubeadmConfig{ @@ -703,7 +703,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { Name: name, }, }, - Version: &version, + Version: version, }, } cfg := &bootstrapv1.KubeadmConfig{ @@ -760,7 +760,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { Kind: "KubeadmConfig", }, }, - Version: ptr.To("v1.15.0"), + Version: "v1.15.0", }, }, }, @@ -1737,7 +1737,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { g.Expect(env.Create(ctx, existingKubeadmConfig, client.FieldOwner("manager"))).To(Succeed()) // Existing Machine to validate in-place mutation - fd := ptr.To("fd1") + fd := "fd1" inPlaceMutatingMachine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ Kind: "Machine", @@ -1763,9 +1763,9 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { ConfigRef: bootstrapRef, }, InfrastructureRef: *infraMachineRef, - Version: ptr.To("v1.25.3"), + Version: "v1.25.3", FailureDomain: fd, - ProviderID: ptr.To("provider-id"), + ProviderID: "provider-id", NodeDrainTimeoutSeconds: duration5s, NodeVolumeDetachTimeoutSeconds: duration5s, NodeDeletionTimeoutSeconds: duration5s, @@ -2080,8 +2080,8 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition Namespace: metav1.NamespaceDefault, }, Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), - ProviderID: ptr.To("foo"), + Version: "v1.31.0", + ProviderID: "foo", InfrastructureRef: clusterv1.ContractVersionedObjectReference{ 
Kind: "GenericInfrastructureMachine", APIGroup: clusterv1.GroupVersionInfrastructure.Group, @@ -2090,7 +2090,7 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition }, } defaultMachine1NotUpToDate := defaultMachine1.DeepCopy() - defaultMachine1NotUpToDate.Spec.Version = ptr.To("v1.30.0") + defaultMachine1NotUpToDate.Spec.Version = "v1.30.0" defaultMachine2 := clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go index 93d3a5d053d8..0431535803ca 100644 --- a/controlplane/kubeadm/internal/controllers/helpers.go +++ b/controlplane/kubeadm/internal/controllers/helpers.go @@ -181,7 +181,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileExternalReference(ctx context.C return patchHelper.Patch(ctx, obj) } -func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, bootstrapSpec *bootstrapv1.KubeadmConfigSpec, failureDomain *string) (*clusterv1.Machine, error) { +func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, bootstrapSpec *bootstrapv1.KubeadmConfigSpec, failureDomain string) (*clusterv1.Machine, error) { var errs []error // Compute desired Machine @@ -355,10 +355,10 @@ func (r *KubeadmControlPlaneReconciler) updateMachine(ctx context.Context, machi // There are small differences in how we calculate the Machine depending on if it // is a create or update. Example: for a new Machine we have to calculate a new name, // while for an existing Machine we have to use the name of the existing Machine. 
-func (r *KubeadmControlPlaneReconciler) computeDesiredMachine(kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, failureDomain *string, existingMachine *clusterv1.Machine) (*clusterv1.Machine, error) { +func (r *KubeadmControlPlaneReconciler) computeDesiredMachine(kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, failureDomain string, existingMachine *clusterv1.Machine) (*clusterv1.Machine, error) { var machineName string var machineUID types.UID - var version *string + var version string annotations := map[string]string{} if existingMachine == nil { // Creating a new machine @@ -374,7 +374,7 @@ func (r *KubeadmControlPlaneReconciler) computeDesiredMachine(kcp *controlplanev return nil, errors.Wrap(err, "failed to generate Machine name") } machineName = generatedMachineName - version = &kcp.Spec.Version + version = kcp.Spec.Version // Machine's bootstrap config may be missing ClusterConfiguration if it is not the first machine in the control plane. // We store ClusterConfiguration as annotation here to detect any changes in KCP ClusterConfiguration and rollout the machine if any. 
diff --git a/controlplane/kubeadm/internal/controllers/helpers_test.go b/controlplane/kubeadm/internal/controllers/helpers_test.go index 25993f7b7161..f06c6d64fbca 100644 --- a/controlplane/kubeadm/internal/controllers/helpers_test.go +++ b/controlplane/kubeadm/internal/controllers/helpers_test.go @@ -373,7 +373,7 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{ JoinConfiguration: &bootstrapv1.JoinConfiguration{}, } - _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, nil) + _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, "") g.Expect(err).To(Succeed()) machineList := &clusterv1.MachineList{} @@ -460,7 +460,7 @@ func TestCloneConfigsAndGenerateMachineFail(t *testing.T) { // Try to break Infra Cloning kcp.Spec.MachineTemplate.InfrastructureRef.Name = "something_invalid" - _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, nil) + _, err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, "") g.Expect(err).To(HaveOccurred()) g.Expect(&kcp.GetV1Beta1Conditions()[0]).Should(v1beta1conditions.HaveSameStateOf(&clusterv1.Condition{ Type: controlplanev1.MachinesCreatedV1Beta1Condition, @@ -803,7 +803,7 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { g := NewWithT(t) var desiredMachine *clusterv1.Machine - failureDomain := ptr.To("fd-1") + failureDomain := "fd-1" var expectedMachineSpec clusterv1.MachineSpec var err error @@ -813,7 +813,7 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { // Use different ClusterConfiguration string than the information present in KCP // to verify that for an existing machine we do not override this information. 
remediationData := "remediation-data" - machineVersion := ptr.To("v1.25.3") + machineVersion := "v1.25.3" existingMachine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: machineName, @@ -890,7 +890,7 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { expectedMachineSpec = clusterv1.MachineSpec{ ClusterName: cluster.Name, - Version: ptr.To(tt.kcp.Spec.Version), + Version: tt.kcp.Spec.Version, FailureDomain: failureDomain, NodeDrainTimeoutSeconds: tt.kcp.Spec.MachineTemplate.NodeDrainTimeoutSeconds, NodeDeletionTimeoutSeconds: tt.kcp.Spec.MachineTemplate.NodeDeletionTimeoutSeconds, diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go index e1219d3a6390..beaed7c34aa4 100644 --- a/controlplane/kubeadm/internal/controllers/scale_test.go +++ b/controlplane/kubeadm/internal/controllers/scale_test.go @@ -784,7 +784,7 @@ func failureDomain(name string, controlPlane bool) clusterv1.FailureDomain { func withFailureDomain(fd string) machineOpt { return func(m *clusterv1.Machine) { - m.Spec.FailureDomain = &fd + m.Spec.FailureDomain = fd } } diff --git a/controlplane/kubeadm/internal/controllers/status.go b/controlplane/kubeadm/internal/controllers/status.go index d66dd055a9e2..b220ebf46611 100644 --- a/controlplane/kubeadm/internal/controllers/status.go +++ b/controlplane/kubeadm/internal/controllers/status.go @@ -126,7 +126,7 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, contro // Set status.version with the lowest K8s version from CP machines. 
lowestVersion := controlPlane.Machines.LowestVersion() - if lowestVersion != nil { + if lowestVersion != "" { controlPlane.KCP.Status.Version = lowestVersion } @@ -571,7 +571,7 @@ func setAvailableCondition(_ context.Context, kcp *controlplanev1.KubeadmControl // Note: this avoids some noise when a new machine is provisioning; it is not possible to delay further // because the etcd member might join the cluster / control plane components might start even before // kubelet registers the node to the API server (e.g. in case kubelet has issues to register itself). - if machine.Spec.ProviderID == nil { + if machine.Spec.ProviderID == "" { continue } diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go index fa138c65efc1..42f21aee5756 100644 --- a/controlplane/kubeadm/internal/controllers/status_test.go +++ b/controlplane/kubeadm/internal/controllers/status_test.go @@ -979,7 +979,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1011,7 +1011,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1019,7 +1019,7 @@ func 
Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m2"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1027,7 +1027,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m3"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1150,7 +1150,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1158,7 +1158,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m2"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1166,7 +1166,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: 
metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m3"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberNotHealthy}, @@ -1200,7 +1200,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1208,7 +1208,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m2"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1216,7 +1216,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m3"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberNotHealthy11s}, @@ -1252,7 +1252,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: 
clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1260,7 +1260,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m2"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberNotHealthy}, @@ -1268,7 +1268,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m3"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberNotHealthy}, @@ -1303,7 +1303,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1343,7 +1343,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: 
clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1351,7 +1351,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m2"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1359,7 +1359,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m3"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberNotHealthy11s}, @@ -1367,7 +1367,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m4"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m4")}, + Spec: clusterv1.MachineSpec{ProviderID: "m4"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m4"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberNotHealthy11s}, @@ -1404,7 +1404,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: 
&clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1412,7 +1412,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m2"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1420,7 +1420,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m3"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberNotHealthy11s}, @@ -1428,7 +1428,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m4"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m4")}, + Spec: clusterv1.MachineSpec{ProviderID: "m4"}, Status: clusterv1.MachineStatus{ NodeRef: nil, // Note this is not a real use case, but it helps to validate that machine m4 is bound to an etcd member and counted as healthy. 
@@ -1467,7 +1467,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1475,7 +1475,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m2"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1483,7 +1483,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m3"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1491,7 +1491,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m4"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m4")}, + Spec: clusterv1.MachineSpec{ProviderID: "m4"}, Status: clusterv1.MachineStatus{ NodeRef: nil, Conditions: []metav1.Condition{apiServerPodHealthyUnknown, controllerManagerPodHealthyUnknown, schedulerPodHealthyUnknown, etcdPodHealthyUnknown, etcdMemberHealthyUnknown11s}, @@ -1528,7 +1528,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: 
collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1536,7 +1536,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m2"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1544,7 +1544,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m3-does-not-exist"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1579,7 +1579,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1587,7 +1587,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: 
"m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m2"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1595,7 +1595,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m3"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberNotHealthy11s}, @@ -1603,7 +1603,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m4"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m4")}, + Spec: clusterv1.MachineSpec{ProviderID: "m4"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m4"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberNotHealthy11s}, @@ -1643,7 +1643,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1683,7 +1683,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: 
ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1691,7 +1691,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m2"}, Conditions: []metav1.Condition{apiServerPodNotHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1699,7 +1699,7 @@ func Test_setAvailableCondition(t *testing.T) { }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m3"}, Conditions: []metav1.Condition{apiServerPodNotHealthy11s, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1734,17 +1734,17 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{apiServerPodNotHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}}, }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{apiServerPodNotHealthy, controllerManagerPodHealthy, schedulerPodHealthy, 
etcdPodHealthy, etcdMemberHealthy}}, }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{apiServerPodNotHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}}, }, ), @@ -1782,17 +1782,17 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy}}, }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{apiServerPodNotHealthy, controllerManagerPodHealthy, schedulerPodHealthy}}, }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{apiServerPodNotHealthy11s, controllerManagerPodHealthy, schedulerPodHealthy}}, }, ), @@ -1827,17 +1827,17 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{apiServerPodNotHealthy, controllerManagerPodHealthy, schedulerPodHealthy}}, }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m2"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m2")}, + Spec: 
clusterv1.MachineSpec{ProviderID: "m2"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{apiServerPodNotHealthy, controllerManagerPodHealthy, schedulerPodHealthy}}, }, &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m3"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m3")}, + Spec: clusterv1.MachineSpec{ProviderID: "m3"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{apiServerPodNotHealthy, controllerManagerPodHealthy, schedulerPodHealthy}}, }, ), @@ -1868,7 +1868,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, @@ -1907,7 +1907,7 @@ func Test_setAvailableCondition(t *testing.T) { Machines: collections.FromMachines( &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "m1"}, - Spec: clusterv1.MachineSpec{ProviderID: ptr.To("m1")}, + Spec: clusterv1.MachineSpec{ProviderID: "m1"}, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{Name: "m1"}, Conditions: []metav1.Condition{apiServerPodHealthy, controllerManagerPodHealthy, schedulerPodHealthy, etcdPodHealthy, etcdMemberHealthy}, diff --git a/controlplane/kubeadm/internal/controllers/upgrade_test.go b/controlplane/kubeadm/internal/controllers/upgrade_test.go index 806d2fb43e75..5ecc4046f228 100644 --- a/controlplane/kubeadm/internal/controllers/upgrade_test.go +++ b/controlplane/kubeadm/internal/controllers/upgrade_test.go @@ -163,7 +163,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { machinesRequireUpgrade := collections.Machines{} for i := range bothMachines.Items { - if 
bothMachines.Items[i].Spec.Version != nil && *bothMachines.Items[i].Spec.Version != UpdatedVersion { + if bothMachines.Items[i].Spec.Version != "" && bothMachines.Items[i].Spec.Version != UpdatedVersion { machinesRequireUpgrade[bothMachines.Items[i].Name] = &bothMachines.Items[i] } } @@ -215,7 +215,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) { Name: name, }, }, - Version: &version, + Version: version, }, } cfg := &bootstrapv1.KubeadmConfig{ diff --git a/controlplane/kubeadm/internal/filters.go b/controlplane/kubeadm/internal/filters.go index 87ac60733fab..ce6ba53242ad 100644 --- a/controlplane/kubeadm/internal/filters.go +++ b/controlplane/kubeadm/internal/filters.go @@ -49,8 +49,8 @@ func matchesMachineSpec(infraConfigs map[string]*unstructured.Unstructured, mach if !collections.MatchesKubernetesVersion(kcp.Spec.Version)(machine) { machineVersion := "" - if machine != nil && machine.Spec.Version != nil { - machineVersion = *machine.Spec.Version + if machine != nil && machine.Spec.Version != "" { + machineVersion = machine.Spec.Version } logMessages = append(logMessages, fmt.Sprintf("Machine version %q is not equal to KCP version %q", machineVersion, kcp.Spec.Version)) // Note: the code computing the message for KCP's RolloutOut condition is making assumptions on the format/content of this message. 
diff --git a/controlplane/kubeadm/internal/filters_test.go b/controlplane/kubeadm/internal/filters_test.go index b16208105021..585fa4602a4c 100644 --- a/controlplane/kubeadm/internal/filters_test.go +++ b/controlplane/kubeadm/internal/filters_test.go @@ -1706,7 +1706,7 @@ func TestUpToDate(t *testing.T) { }, }, Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), + Version: "v1.31.0", InfrastructureRef: clusterv1.ContractVersionedObjectReference{APIGroup: clusterv1.GroupVersionInfrastructure.Group, Kind: "AWSMachine", Name: "infra-machine1"}, }, Status: clusterv1.MachineStatus{ diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions.go b/controlplane/kubeadm/internal/workload_cluster_conditions.go index e8c07cdb5d7a..2cff26d14eef 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions.go @@ -29,7 +29,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -97,10 +96,10 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane provisioningMachines := controlPlane.Machines.Filter(collections.Not(collections.HasNode())) for _, machine := range provisioningMachines { var msg string - if ptr.Deref(machine.Spec.ProviderID, "") != "" { + if machine.Spec.ProviderID != "" { // If the machine is at the end of the provisioning phase, with ProviderID set, but still waiting // for a matching Node to exists, surface this. - msg = fmt.Sprintf("Waiting for a Node with spec.providerID %s to exist", *machine.Spec.ProviderID) + msg = fmt.Sprintf("Waiting for a Node with spec.providerID %s to exist", machine.Spec.ProviderID) } else { // If the machine is at the beginning of the provisioning phase, with ProviderID not yet set, surface this. 
msg = fmt.Sprintf("Waiting for %s to report spec.providerID", machine.Spec.InfrastructureRef.Kind) @@ -526,10 +525,10 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * for _, machine := range provisioningMachines { for _, condition := range allMachinePodConditions { var msg string - if ptr.Deref(machine.Spec.ProviderID, "") != "" { + if machine.Spec.ProviderID != "" { // If the machine is at the end of the provisioning phase, with ProviderID set, but still waiting // for a matching Node to exists, surface this. - msg = fmt.Sprintf("Waiting for a Node with spec.providerID %s to exist", *machine.Spec.ProviderID) + msg = fmt.Sprintf("Waiting for a Node with spec.providerID %s to exist", machine.Spec.ProviderID) } else { // If the machine is at the beginning of the provisioning phase, with ProviderID not yet set, surface this. msg = fmt.Sprintf("Waiting for %s to report spec.providerID", machine.Spec.InfrastructureRef.Kind) @@ -1033,7 +1032,7 @@ func aggregateConditionsFromMachinesToKCP(input aggregateConditionsFromMachinesT // Note: this avoids some noise when a new machine is provisioning; it is not possible to delay further // because the etcd member might join the cluster / control plane components might start even before // kubelet registers the node to the API server (e.g. in case kubelet has issues to register itself). 
- if machine.Spec.ProviderID == nil { + if machine.Spec.ProviderID == "" { kcpMachinesWithInfo.Insert(machine.Name) break } diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go index b0577d789b56..7f9eb1904d2e 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go @@ -1479,7 +1479,7 @@ func withNodeRef(ref string) fakeMachineOption { func withProviderID(providerID string) fakeMachineOption { return func(machine *clusterv1.Machine) { - machine.Spec.ProviderID = ptr.To(providerID) + machine.Spec.ProviderID = providerID } } diff --git a/exp/internal/controllers/machinepool_controller_phases.go b/exp/internal/controllers/machinepool_controller_phases.go index eca7c6bbcbf0..93f970ccdbcb 100644 --- a/exp/internal/controllers/machinepool_controller_phases.go +++ b/exp/internal/controllers/machinepool_controller_phases.go @@ -491,9 +491,9 @@ func (r *MachinePoolReconciler) computeDesiredMachine(mp *clusterv1.MachinePool, Name: infraMachine.GetName(), } - var kubernetesVersion *string + var kubernetesVersion string if existingNode != nil && existingNode.Status.NodeInfo.KubeletVersion != "" { - kubernetesVersion = &existingNode.Status.NodeInfo.KubeletVersion + kubernetesVersion = existingNode.Status.NodeInfo.KubeletVersion } machine := &clusterv1.Machine{ diff --git a/exp/internal/webhooks/machinepool.go b/exp/internal/webhooks/machinepool.go index 8eb38099a24f..a335cf6a91ee 100644 --- a/exp/internal/webhooks/machinepool.go +++ b/exp/internal/webhooks/machinepool.go @@ -103,9 +103,9 @@ func (webhook *MachinePool) Default(ctx context.Context, obj runtime.Object) err } // tolerate version strings without a "v" prefix: prepend it if it's not there. 
- if m.Spec.Template.Spec.Version != nil && !strings.HasPrefix(*m.Spec.Template.Spec.Version, "v") { - normalizedVersion := "v" + *m.Spec.Template.Spec.Version - m.Spec.Template.Spec.Version = &normalizedVersion + if m.Spec.Template.Spec.Version != "" && !strings.HasPrefix(m.Spec.Template.Spec.Version, "v") { + normalizedVersion := "v" + m.Spec.Template.Spec.Version + m.Spec.Template.Spec.Version = normalizedVersion } return nil @@ -174,9 +174,9 @@ func (webhook *MachinePool) validate(oldObj, newObj *clusterv1.MachinePool) erro ) } - if newObj.Spec.Template.Spec.Version != nil { - if !version.KubeSemver.MatchString(*newObj.Spec.Template.Spec.Version) { - allErrs = append(allErrs, field.Invalid(specPath.Child("template", "spec", "version"), *newObj.Spec.Template.Spec.Version, "must be a valid semantic version")) + if newObj.Spec.Template.Spec.Version != "" { + if !version.KubeSemver.MatchString(newObj.Spec.Template.Spec.Version) { + allErrs = append(allErrs, field.Invalid(specPath.Child("template", "spec", "version"), newObj.Spec.Template.Spec.Version, "must be a valid semantic version")) } } diff --git a/exp/internal/webhooks/machinepool_test.go b/exp/internal/webhooks/machinepool_test.go index 065b1ae71401..e1316674630c 100644 --- a/exp/internal/webhooks/machinepool_test.go +++ b/exp/internal/webhooks/machinepool_test.go @@ -44,7 +44,7 @@ func TestMachinePoolDefault(t *testing.T) { Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ConfigRef: &clusterv1.ContractVersionedObjectReference{}}, - Version: ptr.To("1.20.0"), + Version: "1.20.0", }, }, }, @@ -56,7 +56,7 @@ func TestMachinePoolDefault(t *testing.T) { g.Expect(mp.Labels[clusterv1.ClusterNameLabel]).To(Equal(mp.Spec.ClusterName)) g.Expect(mp.Spec.Replicas).To(Equal(ptr.To[int32](1))) - g.Expect(mp.Spec.Template.Spec.Version).To(Equal(ptr.To("v1.20.0"))) + g.Expect(mp.Spec.Template.Spec.Version).To(Equal("v1.20.0")) 
g.Expect(*mp.Spec.Template.Spec.NodeDeletionTimeoutSeconds).To(Equal(defaultNodeDeletionTimeoutSeconds)) } @@ -376,7 +376,7 @@ func TestMachinePoolVersionValidation(t *testing.T) { Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ConfigRef: &clusterv1.ContractVersionedObjectReference{}}, - Version: &tt.version, + Version: tt.version, }, }, }, diff --git a/exp/runtime/internal/controllers/extensionconfig_controller_test.go b/exp/runtime/internal/controllers/extensionconfig_controller_test.go index cd7b508fc644..49bfba2b8a3d 100644 --- a/exp/runtime/internal/controllers/extensionconfig_controller_test.go +++ b/exp/runtime/internal/controllers/extensionconfig_controller_test.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission/plugin/webhook/testcerts" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -163,7 +162,7 @@ func TestExtensionReconciler_Reconcile(t *testing.T) { // Patch the extension with the new server endpoint. 
patch := client.MergeFrom(extensionConfig.DeepCopy()) - extensionConfig.Spec.ClientConfig.URL = &updatedServer.URL + extensionConfig.Spec.ClientConfig.URL = updatedServer.URL g.Expect(env.Patch(ctx, extensionConfig, patch)).To(Succeed()) @@ -174,8 +173,8 @@ func TestExtensionReconciler_Reconcile(t *testing.T) { if err != nil { return err } - if *conf.Spec.ClientConfig.URL != updatedServer.URL { - return errors.Errorf("URL not set on updated object: got: %s, want: %s", *conf.Spec.ClientConfig.URL, updatedServer.URL) + if conf.Spec.ClientConfig.URL != updatedServer.URL { + return errors.Errorf("URL not set on updated object: got: %s, want: %s", conf.Spec.ClientConfig.URL, updatedServer.URL) } return nil }, 30*time.Second, 100*time.Millisecond).Should(Succeed()) @@ -417,7 +416,7 @@ func fakeExtensionConfigForURL(namespace, name, url string) *runtimev1.Extension }, Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ - URL: ptr.To(url), + URL: url, }, NamespaceSelector: nil, }, diff --git a/exp/topology/desiredstate/desired_state.go b/exp/topology/desiredstate/desired_state.go index af281f4c0818..1d0f34af4399 100644 --- a/exp/topology/desiredstate/desired_state.go +++ b/exp/topology/desiredstate/desired_state.go @@ -194,8 +194,8 @@ func computeInfrastructureCluster(_ context.Context, s *scope.Scope) (*unstructu currentRef := cluster.Spec.InfrastructureRef nameTemplate := "{{ .cluster.name }}-{{ .random }}" - if s.Blueprint.ClusterClass.Spec.Infrastructure.NamingStrategy != nil && s.Blueprint.ClusterClass.Spec.Infrastructure.NamingStrategy.Template != nil { - nameTemplate = *s.Blueprint.ClusterClass.Spec.Infrastructure.NamingStrategy.Template + if s.Blueprint.ClusterClass.Spec.Infrastructure.NamingStrategy != nil && s.Blueprint.ClusterClass.Spec.Infrastructure.NamingStrategy.Template != "" { + nameTemplate = s.Blueprint.ClusterClass.Spec.Infrastructure.NamingStrategy.Template } infrastructureCluster, err := templateToObject(templateToInput{ @@ 
-292,8 +292,8 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf controlPlaneAnnotations := util.MergeMap(topologyMetadata.Annotations, clusterClassMetadata.Annotations) nameTemplate := "{{ .cluster.name }}-{{ .random }}" - if s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy != nil && s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy.Template != nil { - nameTemplate = *s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy.Template + if s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy != nil && s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy.Template != "" { + nameTemplate = s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy.Template } controlPlane, err := templateToObject(templateToInput{ @@ -798,7 +798,7 @@ func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope } failureDomain := machineDeploymentClass.FailureDomain - if machineDeploymentTopology.FailureDomain != nil { + if machineDeploymentTopology.FailureDomain != "" { failureDomain = machineDeploymentTopology.FailureDomain } @@ -827,8 +827,8 @@ func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope desiredInfraMachineTemplateRef := contract.ObjToContractVersionedObjectReference(desiredMachineDeployment.InfrastructureMachineTemplate) nameTemplate := "{{ .cluster.name }}-{{ .machineDeployment.topologyName }}-{{ .random }}" - if machineDeploymentClass.NamingStrategy != nil && machineDeploymentClass.NamingStrategy.Template != nil { - nameTemplate = *machineDeploymentClass.NamingStrategy.Template + if machineDeploymentClass.NamingStrategy != nil && machineDeploymentClass.NamingStrategy.Template != "" { + nameTemplate = machineDeploymentClass.NamingStrategy.Template } name, err := topologynames.MachineDeploymentNameGenerator(nameTemplate, s.Current.Cluster.Name, machineDeploymentTopology.Name).GenerateName() @@ -851,7 +851,7 @@ func (g *generator) computeMachineDeployment(ctx 
context.Context, s *scope.Scope Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: s.Current.Cluster.Name, - Version: ptr.To(version), + Version: version, Bootstrap: clusterv1.Bootstrap{ConfigRef: desiredBootstrapTemplateRef}, InfrastructureRef: *desiredInfraMachineTemplateRef, FailureDomain: failureDomain, @@ -939,7 +939,7 @@ func (g *generator) computeMachineDeploymentVersion(s *scope.Scope, machineDeplo } // Get the current version of the machine deployment. - currentVersion := *currentMDState.Object.Spec.Template.Spec.Version + currentVersion := currentMDState.Object.Spec.Template.Spec.Version // Return early if the currentVersion is already equal to the desiredVersion // no further checks required. @@ -1142,8 +1142,8 @@ func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin desiredInfraMachinePoolRef := contract.ObjToContractVersionedObjectReference(desiredMachinePool.InfrastructureMachinePoolObject) nameTemplate := "{{ .cluster.name }}-{{ .machinePool.topologyName }}-{{ .random }}" - if machinePoolClass.NamingStrategy != nil && machinePoolClass.NamingStrategy.Template != nil { - nameTemplate = *machinePoolClass.NamingStrategy.Template + if machinePoolClass.NamingStrategy != nil && machinePoolClass.NamingStrategy.Template != "" { + nameTemplate = machinePoolClass.NamingStrategy.Template } name, err := topologynames.MachinePoolNameGenerator(nameTemplate, s.Current.Cluster.Name, machinePoolTopology.Name).GenerateName() @@ -1166,7 +1166,7 @@ func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: s.Current.Cluster.Name, - Version: ptr.To(version), + Version: version, Bootstrap: clusterv1.Bootstrap{ConfigRef: desiredBootstrapConfigRef}, InfrastructureRef: *desiredInfraMachinePoolRef, NodeDrainTimeoutSeconds: nodeDrainTimeout, @@ -1234,7 +1234,7 @@ func (g *generator) computeMachinePoolVersion(s 
*scope.Scope, machinePoolTopolog } // Get the current version of the machine pool. - currentVersion := *currentMPState.Object.Spec.Template.Spec.Version + currentVersion := currentMPState.Object.Spec.Template.Spec.Version // Return early if the currentVersion is already equal to the desiredVersion // no further checks required. diff --git a/exp/topology/desiredstate/desired_state_test.go b/exp/topology/desiredstate/desired_state_test.go index 299dd395c3a8..7c051aef007d 100644 --- a/exp/topology/desiredstate/desired_state_test.go +++ b/exp/topology/desiredstate/desired_state_test.go @@ -1693,7 +1693,7 @@ func TestComputeMachineDeployment(t *testing.T) { NodeStartupTimeoutSeconds: nodeTimeoutDuration, }). WithReadinessGates(clusterClassReadinessGates). - WithFailureDomain(&clusterClassFailureDomain). + WithFailureDomain(clusterClassFailureDomain). WithNodeDrainTimeout(&clusterClassDuration). WithNodeVolumeDetachTimeout(&clusterClassDuration). WithNodeDeletionTimeout(&clusterClassDuration). 
@@ -1768,7 +1768,7 @@ func TestComputeMachineDeployment(t *testing.T) { Class: "linux-worker", Name: "big-pool-of-machines", Replicas: &replicas, - FailureDomain: &topologyFailureDomain, + FailureDomain: topologyFailureDomain, ReadinessGates: readinessGates, NodeDrainTimeoutSeconds: &topologyDuration, NodeVolumeDetachTimeoutSeconds: &topologyDuration, @@ -1805,7 +1805,7 @@ func TestComputeMachineDeployment(t *testing.T) { g.Expect(*actualMd.Spec.Replicas).To(Equal(replicas)) g.Expect(*actualMd.Spec.Strategy).To(BeComparableTo(topologyStrategy)) g.Expect(actualMd.Spec.Template.Spec.MinReadySeconds).To(HaveValue(Equal(topologyMinReadySeconds))) - g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(topologyFailureDomain)) + g.Expect(actualMd.Spec.Template.Spec.FailureDomain).To(Equal(topologyFailureDomain)) g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeoutSeconds).To(Equal(topologyDuration)) g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds).To(Equal(topologyDuration)) g.Expect(*actualMd.Spec.Template.Spec.NodeDeletionTimeoutSeconds).To(Equal(topologyDuration)) @@ -1863,7 +1863,7 @@ func TestComputeMachineDeployment(t *testing.T) { actualMd := actual.Object g.Expect(*actualMd.Spec.Strategy).To(BeComparableTo(clusterClassStrategy)) g.Expect(actualMd.Spec.Template.Spec.MinReadySeconds).To(HaveValue(Equal(clusterClassMinReadySeconds))) - g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(clusterClassFailureDomain)) + g.Expect(actualMd.Spec.Template.Spec.FailureDomain).To(Equal(clusterClassFailureDomain)) g.Expect(actualMd.Spec.Template.Spec.ReadinessGates).To(Equal(clusterClassReadinessGates)) g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeoutSeconds).To(Equal(clusterClassDuration)) g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeoutSeconds).To(Equal(clusterClassDuration)) @@ -1932,7 +1932,7 @@ func TestComputeMachineDeployment(t *testing.T) { Replicas: &currentReplicas, Template: clusterv1.MachineTemplateSpec{ Spec: 
clusterv1.MachineSpec{ - Version: ptr.To(version), + Version: version, Bootstrap: clusterv1.Bootstrap{ ConfigRef: contract.ObjToContractVersionedObjectReference(workerBootstrapTemplate), }, @@ -1957,7 +1957,7 @@ func TestComputeMachineDeployment(t *testing.T) { actualMd := actual.Object g.Expect(*actualMd.Spec.Replicas).NotTo(Equal(currentReplicas)) - g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(topologyFailureDomain)) + g.Expect(actualMd.Spec.Template.Spec.FailureDomain).To(Equal(topologyFailureDomain)) g.Expect(actualMd.Name).To(Equal("existing-deployment-1")) expectedAnnotations := util.MergeMap(mdTopology.Metadata.Annotations, md1.Template.Metadata.Annotations) @@ -2118,7 +2118,7 @@ func TestComputeMachineDeployment(t *testing.T) { obj, err := e.computeMachineDeployment(ctx, s, mdTopology) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(*obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion)) + g.Expect(obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion)) }) } }) @@ -2336,7 +2336,7 @@ func TestComputeMachinePool(t *testing.T) { Replicas: &currentReplicas, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To(version), + Version: version, Bootstrap: clusterv1.Bootstrap{ ConfigRef: contract.ObjToContractVersionedObjectReference(workerBootstrapConfig), }, @@ -2519,7 +2519,7 @@ func TestComputeMachinePool(t *testing.T) { obj, err := e.computeMachinePool(ctx, s, mpTopology) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(*obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion)) + g.Expect(obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion)) }) } }) diff --git a/internal/api/bootstrap/kubeadm/v1alpha3/conversion.go b/internal/api/bootstrap/kubeadm/v1alpha3/conversion.go index e991a2cfb4b6..4e070ad74f53 100644 --- a/internal/api/bootstrap/kubeadm/v1alpha3/conversion.go +++ b/internal/api/bootstrap/kubeadm/v1alpha3/conversion.go @@ -306,6 +306,9 @@ func (dst 
*KubeadmConfig) ConvertFrom(srcRaw conversion.Hub) error { // Convert timeouts moved from one struct to another. dst.Spec.ConvertFrom(&src.Spec) + dropEmptyStringsKubeadmConfigSpec(&dst.Spec) + dropEmptyStringsKubeadmConfigStatus(&dst.Status) + // Preserve Hub data on down-conversion except for metadata return utilconversion.MarshalData(src, dst) } @@ -372,6 +375,8 @@ func (dst *KubeadmConfigTemplate) ConvertFrom(srcRaw conversion.Hub) error { // Convert timeouts moved from one struct to another. dst.Spec.Template.Spec.ConvertFrom(&src.Spec.Template.Spec) + dropEmptyStringsKubeadmConfigSpec(&dst.Spec.Template.Spec) + // Preserve Hub data on down-conversion except for metadata. return utilconversion.MarshalData(src, dst) } @@ -441,3 +446,38 @@ func Convert_v1_Condition_To_v1alpha3_Condition(in *metav1.Condition, out *clust func Convert_v1alpha3_Condition_To_v1_Condition(in *clusterv1alpha3.Condition, out *metav1.Condition, s apimachineryconversion.Scope) error { return clusterv1alpha3.Convert_v1alpha3_Condition_To_v1_Condition(in, out, s) } + +func dropEmptyStringsKubeadmConfigSpec(dst *KubeadmConfigSpec) { + for i, u := range dst.Users { + dropEmptyString(&u.Gecos) + dropEmptyString(&u.Groups) + dropEmptyString(&u.HomeDir) + dropEmptyString(&u.Shell) + dropEmptyString(&u.Passwd) + dropEmptyString(&u.PrimaryGroup) + dropEmptyString(&u.Sudo) + dst.Users[i] = u + } + + if dst.DiskSetup != nil { + for i, p := range dst.DiskSetup.Partitions { + dropEmptyString(&p.TableType) + dst.DiskSetup.Partitions[i] = p + } + for i, f := range dst.DiskSetup.Filesystems { + dropEmptyString(&f.Partition) + dropEmptyString(&f.ReplaceFS) + dst.DiskSetup.Filesystems[i] = f + } + } +} + +func dropEmptyStringsKubeadmConfigStatus(dst *KubeadmConfigStatus) { + dropEmptyString(&dst.DataSecretName) +} + +func dropEmptyString(s **string) { + if *s != nil && **s == "" { + *s = nil + } +} diff --git a/internal/api/bootstrap/kubeadm/v1alpha3/conversion_test.go 
b/internal/api/bootstrap/kubeadm/v1alpha3/conversion_test.go index 1e4f3d2347eb..923217e9a6ef 100644 --- a/internal/api/bootstrap/kubeadm/v1alpha3/conversion_test.go +++ b/internal/api/bootstrap/kubeadm/v1alpha3/conversion_test.go @@ -72,7 +72,6 @@ func KubeadmConfigFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { func KubeadmConfigTemplateFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { return []interface{}{ spokeKubeadmConfigSpec, - spokeKubeadmConfigStatus, spokeDNS, spokeClusterConfiguration, hubBootstrapTokenString, @@ -126,6 +125,8 @@ func spokeKubeadmConfigSpec(in *KubeadmConfigSpec, c randfill.Continue) { // Drop UseExperimentalRetryJoin as we intentionally don't preserve it. in.UseExperimentalRetryJoin = false + + dropEmptyStringsKubeadmConfigSpec(in) } func spokeKubeadmConfigStatus(obj *KubeadmConfigStatus, c randfill.Continue) { @@ -133,6 +134,8 @@ func spokeKubeadmConfigStatus(obj *KubeadmConfigStatus, c randfill.Continue) { // KubeadmConfigStatus.BootstrapData has been removed in v1alpha4, so setting it to nil in order to avoid v1alpha3 --> --> v1alpha3 round trip errors. 
obj.BootstrapData = nil + + dropEmptyStringsKubeadmConfigStatus(obj) } func spokeDNS(obj *DNS, c randfill.Continue) { diff --git a/internal/api/bootstrap/kubeadm/v1alpha3/zz_generated.conversion.go b/internal/api/bootstrap/kubeadm/v1alpha3/zz_generated.conversion.go index f8c343aaf54e..3cd96dbc6eca 100644 --- a/internal/api/bootstrap/kubeadm/v1alpha3/zz_generated.conversion.go +++ b/internal/api/bootstrap/kubeadm/v1alpha3/zz_generated.conversion.go @@ -680,8 +680,28 @@ func Convert_v1beta2_Discovery_To_v1alpha3_Discovery(in *v1beta2.Discovery, out } func autoConvert_v1alpha3_DiskSetup_To_v1beta2_DiskSetup(in *DiskSetup, out *v1beta2.DiskSetup, s conversion.Scope) error { - out.Partitions = *(*[]v1beta2.Partition)(unsafe.Pointer(&in.Partitions)) - out.Filesystems = *(*[]v1beta2.Filesystem)(unsafe.Pointer(&in.Filesystems)) + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = make([]v1beta2.Partition, len(*in)) + for i := range *in { + if err := Convert_v1alpha3_Partition_To_v1beta2_Partition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Partitions = nil + } + if in.Filesystems != nil { + in, out := &in.Filesystems, &out.Filesystems + *out = make([]v1beta2.Filesystem, len(*in)) + for i := range *in { + if err := Convert_v1alpha3_Filesystem_To_v1beta2_Filesystem(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Filesystems = nil + } return nil } @@ -691,8 +711,28 @@ func Convert_v1alpha3_DiskSetup_To_v1beta2_DiskSetup(in *DiskSetup, out *v1beta2 } func autoConvert_v1beta2_DiskSetup_To_v1alpha3_DiskSetup(in *v1beta2.DiskSetup, out *DiskSetup, s conversion.Scope) error { - out.Partitions = *(*[]Partition)(unsafe.Pointer(&in.Partitions)) - out.Filesystems = *(*[]Filesystem)(unsafe.Pointer(&in.Filesystems)) + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = make([]Partition, len(*in)) + for i := range *in { + if err := 
Convert_v1beta2_Partition_To_v1alpha3_Partition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Partitions = nil + } + if in.Filesystems != nil { + in, out := &in.Filesystems, &out.Filesystems + *out = make([]Filesystem, len(*in)) + for i := range *in { + if err := Convert_v1beta2_Filesystem_To_v1alpha3_Filesystem(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Filesystems = nil + } return nil } @@ -835,9 +875,13 @@ func autoConvert_v1alpha3_Filesystem_To_v1beta2_Filesystem(in *Filesystem, out * out.Device = in.Device out.Filesystem = in.Filesystem out.Label = in.Label - out.Partition = (*string)(unsafe.Pointer(in.Partition)) + if err := v1.Convert_Pointer_string_To_string(&in.Partition, &out.Partition, s); err != nil { + return err + } out.Overwrite = (*bool)(unsafe.Pointer(in.Overwrite)) - out.ReplaceFS = (*string)(unsafe.Pointer(in.ReplaceFS)) + if err := v1.Convert_Pointer_string_To_string(&in.ReplaceFS, &out.ReplaceFS, s); err != nil { + return err + } out.ExtraOpts = *(*[]string)(unsafe.Pointer(&in.ExtraOpts)) return nil } @@ -851,9 +895,13 @@ func autoConvert_v1beta2_Filesystem_To_v1alpha3_Filesystem(in *v1beta2.Filesyste out.Device = in.Device out.Filesystem = in.Filesystem out.Label = in.Label - out.Partition = (*string)(unsafe.Pointer(in.Partition)) + if err := v1.Convert_string_To_Pointer_string(&in.Partition, &out.Partition, s); err != nil { + return err + } out.Overwrite = (*bool)(unsafe.Pointer(in.Overwrite)) - out.ReplaceFS = (*string)(unsafe.Pointer(in.ReplaceFS)) + if err := v1.Convert_string_To_Pointer_string(&in.ReplaceFS, &out.ReplaceFS, s); err != nil { + return err + } out.ExtraOpts = *(*[]string)(unsafe.Pointer(&in.ExtraOpts)) return nil } @@ -1128,7 +1176,15 @@ func autoConvert_v1alpha3_KubeadmConfigSpec_To_v1beta2_KubeadmConfigSpec(in *Kub } else { out.Files = nil } - out.DiskSetup = (*v1beta2.DiskSetup)(unsafe.Pointer(in.DiskSetup)) + if in.DiskSetup != nil { + in, out := 
&in.DiskSetup, &out.DiskSetup + *out = new(v1beta2.DiskSetup) + if err := Convert_v1alpha3_DiskSetup_To_v1beta2_DiskSetup(*in, *out, s); err != nil { + return err + } + } else { + out.DiskSetup = nil + } out.Mounts = *(*[]v1beta2.MountPoints)(unsafe.Pointer(&in.Mounts)) out.PreKubeadmCommands = *(*[]string)(unsafe.Pointer(&in.PreKubeadmCommands)) out.PostKubeadmCommands = *(*[]string)(unsafe.Pointer(&in.PostKubeadmCommands)) @@ -1189,7 +1245,15 @@ func autoConvert_v1beta2_KubeadmConfigSpec_To_v1alpha3_KubeadmConfigSpec(in *v1b } else { out.Files = nil } - out.DiskSetup = (*DiskSetup)(unsafe.Pointer(in.DiskSetup)) + if in.DiskSetup != nil { + in, out := &in.DiskSetup, &out.DiskSetup + *out = new(DiskSetup) + if err := Convert_v1beta2_DiskSetup_To_v1alpha3_DiskSetup(*in, *out, s); err != nil { + return err + } + } else { + out.DiskSetup = nil + } out.Mounts = *(*[]MountPoints)(unsafe.Pointer(&in.Mounts)) // WARNING: in.BootCommands requires manual conversion: does not exist in peer-type out.PreKubeadmCommands = *(*[]string)(unsafe.Pointer(&in.PreKubeadmCommands)) @@ -1214,7 +1278,9 @@ func autoConvert_v1beta2_KubeadmConfigSpec_To_v1alpha3_KubeadmConfigSpec(in *v1b func autoConvert_v1alpha3_KubeadmConfigStatus_To_v1beta2_KubeadmConfigStatus(in *KubeadmConfigStatus, out *v1beta2.KubeadmConfigStatus, s conversion.Scope) error { // WARNING: in.Ready requires manual conversion: does not exist in peer-type - out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName)) + if err := v1.Convert_Pointer_string_To_string(&in.DataSecretName, &out.DataSecretName, s); err != nil { + return err + } // WARNING: in.BootstrapData requires manual conversion: does not exist in peer-type // WARNING: in.FailureReason requires manual conversion: does not exist in peer-type // WARNING: in.FailureMessage requires manual conversion: does not exist in peer-type @@ -1246,7 +1312,9 @@ func autoConvert_v1beta2_KubeadmConfigStatus_To_v1alpha3_KubeadmConfigStatus(in out.Conditions = nil } // 
WARNING: in.Initialization requires manual conversion: does not exist in peer-type - out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName)) + if err := v1.Convert_string_To_Pointer_string(&in.DataSecretName, &out.DataSecretName, s); err != nil { + return err + } out.ObservedGeneration = in.ObservedGeneration // WARNING: in.Deprecated requires manual conversion: does not exist in peer-type return nil @@ -1432,7 +1500,9 @@ func autoConvert_v1alpha3_Partition_To_v1beta2_Partition(in *Partition, out *v1b out.Device = in.Device out.Layout = in.Layout out.Overwrite = (*bool)(unsafe.Pointer(in.Overwrite)) - out.TableType = (*string)(unsafe.Pointer(in.TableType)) + if err := v1.Convert_Pointer_string_To_string(&in.TableType, &out.TableType, s); err != nil { + return err + } return nil } @@ -1445,7 +1515,9 @@ func autoConvert_v1beta2_Partition_To_v1alpha3_Partition(in *v1beta2.Partition, out.Device = in.Device out.Layout = in.Layout out.Overwrite = (*bool)(unsafe.Pointer(in.Overwrite)) - out.TableType = (*string)(unsafe.Pointer(in.TableType)) + if err := v1.Convert_string_To_Pointer_string(&in.TableType, &out.TableType, s); err != nil { + return err + } return nil } @@ -1478,15 +1550,29 @@ func Convert_v1beta2_SecretFileSource_To_v1alpha3_SecretFileSource(in *v1beta2.S func autoConvert_v1alpha3_User_To_v1beta2_User(in *User, out *v1beta2.User, s conversion.Scope) error { out.Name = in.Name - out.Gecos = (*string)(unsafe.Pointer(in.Gecos)) - out.Groups = (*string)(unsafe.Pointer(in.Groups)) - out.HomeDir = (*string)(unsafe.Pointer(in.HomeDir)) + if err := v1.Convert_Pointer_string_To_string(&in.Gecos, &out.Gecos, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.Groups, &out.Groups, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.HomeDir, &out.HomeDir, s); err != nil { + return err + } out.Inactive = (*bool)(unsafe.Pointer(in.Inactive)) - out.Shell = (*string)(unsafe.Pointer(in.Shell)) - 
out.Passwd = (*string)(unsafe.Pointer(in.Passwd)) - out.PrimaryGroup = (*string)(unsafe.Pointer(in.PrimaryGroup)) + if err := v1.Convert_Pointer_string_To_string(&in.Shell, &out.Shell, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.Passwd, &out.Passwd, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.PrimaryGroup, &out.PrimaryGroup, s); err != nil { + return err + } out.LockPassword = (*bool)(unsafe.Pointer(in.LockPassword)) - out.Sudo = (*string)(unsafe.Pointer(in.Sudo)) + if err := v1.Convert_Pointer_string_To_string(&in.Sudo, &out.Sudo, s); err != nil { + return err + } out.SSHAuthorizedKeys = *(*[]string)(unsafe.Pointer(&in.SSHAuthorizedKeys)) return nil } @@ -1498,16 +1584,30 @@ func Convert_v1alpha3_User_To_v1beta2_User(in *User, out *v1beta2.User, s conver func autoConvert_v1beta2_User_To_v1alpha3_User(in *v1beta2.User, out *User, s conversion.Scope) error { out.Name = in.Name - out.Gecos = (*string)(unsafe.Pointer(in.Gecos)) - out.Groups = (*string)(unsafe.Pointer(in.Groups)) - out.HomeDir = (*string)(unsafe.Pointer(in.HomeDir)) + if err := v1.Convert_string_To_Pointer_string(&in.Gecos, &out.Gecos, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.Groups, &out.Groups, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.HomeDir, &out.HomeDir, s); err != nil { + return err + } out.Inactive = (*bool)(unsafe.Pointer(in.Inactive)) - out.Shell = (*string)(unsafe.Pointer(in.Shell)) - out.Passwd = (*string)(unsafe.Pointer(in.Passwd)) + if err := v1.Convert_string_To_Pointer_string(&in.Shell, &out.Shell, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.Passwd, &out.Passwd, s); err != nil { + return err + } // WARNING: in.PasswdFrom requires manual conversion: does not exist in peer-type - out.PrimaryGroup = (*string)(unsafe.Pointer(in.PrimaryGroup)) + if err := 
v1.Convert_string_To_Pointer_string(&in.PrimaryGroup, &out.PrimaryGroup, s); err != nil { + return err + } out.LockPassword = (*bool)(unsafe.Pointer(in.LockPassword)) - out.Sudo = (*string)(unsafe.Pointer(in.Sudo)) + if err := v1.Convert_string_To_Pointer_string(&in.Sudo, &out.Sudo, s); err != nil { + return err + } out.SSHAuthorizedKeys = *(*[]string)(unsafe.Pointer(&in.SSHAuthorizedKeys)) return nil } diff --git a/internal/api/bootstrap/kubeadm/v1alpha4/conversion.go b/internal/api/bootstrap/kubeadm/v1alpha4/conversion.go index 398addfb3a60..ae2e3f1223fe 100644 --- a/internal/api/bootstrap/kubeadm/v1alpha4/conversion.go +++ b/internal/api/bootstrap/kubeadm/v1alpha4/conversion.go @@ -304,6 +304,9 @@ func (dst *KubeadmConfig) ConvertFrom(srcRaw conversion.Hub) error { // Convert timeouts moved from one struct to another. dst.Spec.ConvertFrom(&src.Spec) + dropEmptyStringsKubeadmConfigSpec(&dst.Spec) + dropEmptyStringsKubeadmConfigStatus(&dst.Status) + // Preserve Hub data on down-conversion except for metadata. return utilconversion.MarshalData(src, dst) } @@ -370,6 +373,8 @@ func (dst *KubeadmConfigTemplate) ConvertFrom(srcRaw conversion.Hub) error { // Convert timeouts moved from one struct to another. dst.Spec.Template.Spec.ConvertFrom(&src.Spec.Template.Spec) + dropEmptyStringsKubeadmConfigSpec(&dst.Spec.Template.Spec) + // Preserve Hub data on down-conversion except for metadata. 
return utilconversion.MarshalData(src, dst) } @@ -509,3 +514,38 @@ func Convert_v1_Condition_To_v1alpha4_Condition(in *metav1.Condition, out *clust func Convert_v1alpha4_Condition_To_v1_Condition(in *clusterv1alpha4.Condition, out *metav1.Condition, s apimachineryconversion.Scope) error { return clusterv1alpha4.Convert_v1alpha4_Condition_To_v1_Condition(in, out, s) } + +func dropEmptyStringsKubeadmConfigSpec(dst *KubeadmConfigSpec) { + for i, u := range dst.Users { + dropEmptyString(&u.Gecos) + dropEmptyString(&u.Groups) + dropEmptyString(&u.HomeDir) + dropEmptyString(&u.Shell) + dropEmptyString(&u.Passwd) + dropEmptyString(&u.PrimaryGroup) + dropEmptyString(&u.Sudo) + dst.Users[i] = u + } + + if dst.DiskSetup != nil { + for i, p := range dst.DiskSetup.Partitions { + dropEmptyString(&p.TableType) + dst.DiskSetup.Partitions[i] = p + } + for i, f := range dst.DiskSetup.Filesystems { + dropEmptyString(&f.Partition) + dropEmptyString(&f.ReplaceFS) + dst.DiskSetup.Filesystems[i] = f + } + } +} + +func dropEmptyStringsKubeadmConfigStatus(dst *KubeadmConfigStatus) { + dropEmptyString(&dst.DataSecretName) +} + +func dropEmptyString(s **string) { + if *s != nil && **s == "" { + *s = nil + } +} diff --git a/internal/api/bootstrap/kubeadm/v1alpha4/conversion_test.go b/internal/api/bootstrap/kubeadm/v1alpha4/conversion_test.go index 2605bc86449f..6943587c2cdf 100644 --- a/internal/api/bootstrap/kubeadm/v1alpha4/conversion_test.go +++ b/internal/api/bootstrap/kubeadm/v1alpha4/conversion_test.go @@ -57,6 +57,7 @@ func KubeadmConfigFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { return []interface{}{ hubKubeadmConfigStatus, spokeKubeadmConfigSpec, + spokeKubeadmConfigStatus, spokeClusterConfiguration, hubBootstrapTokenString, spokeBootstrapTokenString, @@ -122,6 +123,15 @@ func spokeKubeadmConfigSpec(in *KubeadmConfigSpec, c randfill.Continue) { // Drop UseExperimentalRetryJoin as we intentionally don't preserve it. 
in.UseExperimentalRetryJoin = false + + dropEmptyStringsKubeadmConfigSpec(in) +} + + +func spokeKubeadmConfigStatus(obj *KubeadmConfigStatus, c randfill.Continue) { + c.FillNoCustom(obj) + + dropEmptyStringsKubeadmConfigStatus(obj) } func spokeClusterConfiguration(in *ClusterConfiguration, c randfill.Continue) { diff --git a/internal/api/bootstrap/kubeadm/v1alpha4/zz_generated.conversion.go b/internal/api/bootstrap/kubeadm/v1alpha4/zz_generated.conversion.go index 7f1c007ea69c..b86f42c35ab1 100644 --- a/internal/api/bootstrap/kubeadm/v1alpha4/zz_generated.conversion.go +++ b/internal/api/bootstrap/kubeadm/v1alpha4/zz_generated.conversion.go @@ -683,8 +683,28 @@ func Convert_v1beta2_Discovery_To_v1alpha4_Discovery(in *v1beta2.Discovery, out } func autoConvert_v1alpha4_DiskSetup_To_v1beta2_DiskSetup(in *DiskSetup, out *v1beta2.DiskSetup, s conversion.Scope) error { - out.Partitions = *(*[]v1beta2.Partition)(unsafe.Pointer(&in.Partitions)) - out.Filesystems = *(*[]v1beta2.Filesystem)(unsafe.Pointer(&in.Filesystems)) + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = make([]v1beta2.Partition, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_Partition_To_v1beta2_Partition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Partitions = nil + } + if in.Filesystems != nil { + in, out := &in.Filesystems, &out.Filesystems + *out = make([]v1beta2.Filesystem, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_Filesystem_To_v1beta2_Filesystem(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Filesystems = nil + } return nil } @@ -694,8 +714,28 @@ func Convert_v1alpha4_DiskSetup_To_v1beta2_DiskSetup(in *DiskSetup, out *v1beta2 } func autoConvert_v1beta2_DiskSetup_To_v1alpha4_DiskSetup(in *v1beta2.DiskSetup, out *DiskSetup, s conversion.Scope) error { - out.Partitions = *(*[]Partition)(unsafe.Pointer(&in.Partitions)) - out.Filesystems = 
*(*[]Filesystem)(unsafe.Pointer(&in.Filesystems)) + if in.Partitions != nil { + in, out := &in.Partitions, &out.Partitions + *out = make([]Partition, len(*in)) + for i := range *in { + if err := Convert_v1beta2_Partition_To_v1alpha4_Partition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Partitions = nil + } + if in.Filesystems != nil { + in, out := &in.Filesystems, &out.Filesystems + *out = make([]Filesystem, len(*in)) + for i := range *in { + if err := Convert_v1beta2_Filesystem_To_v1alpha4_Filesystem(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Filesystems = nil + } return nil } @@ -838,9 +878,13 @@ func autoConvert_v1alpha4_Filesystem_To_v1beta2_Filesystem(in *Filesystem, out * out.Device = in.Device out.Filesystem = in.Filesystem out.Label = in.Label - out.Partition = (*string)(unsafe.Pointer(in.Partition)) + if err := v1.Convert_Pointer_string_To_string(&in.Partition, &out.Partition, s); err != nil { + return err + } out.Overwrite = (*bool)(unsafe.Pointer(in.Overwrite)) - out.ReplaceFS = (*string)(unsafe.Pointer(in.ReplaceFS)) + if err := v1.Convert_Pointer_string_To_string(&in.ReplaceFS, &out.ReplaceFS, s); err != nil { + return err + } out.ExtraOpts = *(*[]string)(unsafe.Pointer(&in.ExtraOpts)) return nil } @@ -854,9 +898,13 @@ func autoConvert_v1beta2_Filesystem_To_v1alpha4_Filesystem(in *v1beta2.Filesyste out.Device = in.Device out.Filesystem = in.Filesystem out.Label = in.Label - out.Partition = (*string)(unsafe.Pointer(in.Partition)) + if err := v1.Convert_string_To_Pointer_string(&in.Partition, &out.Partition, s); err != nil { + return err + } out.Overwrite = (*bool)(unsafe.Pointer(in.Overwrite)) - out.ReplaceFS = (*string)(unsafe.Pointer(in.ReplaceFS)) + if err := v1.Convert_string_To_Pointer_string(&in.ReplaceFS, &out.ReplaceFS, s); err != nil { + return err + } out.ExtraOpts = *(*[]string)(unsafe.Pointer(&in.ExtraOpts)) return nil } @@ -1131,7 +1179,15 @@ func 
autoConvert_v1alpha4_KubeadmConfigSpec_To_v1beta2_KubeadmConfigSpec(in *Kub } else { out.Files = nil } - out.DiskSetup = (*v1beta2.DiskSetup)(unsafe.Pointer(in.DiskSetup)) + if in.DiskSetup != nil { + in, out := &in.DiskSetup, &out.DiskSetup + *out = new(v1beta2.DiskSetup) + if err := Convert_v1alpha4_DiskSetup_To_v1beta2_DiskSetup(*in, *out, s); err != nil { + return err + } + } else { + out.DiskSetup = nil + } out.Mounts = *(*[]v1beta2.MountPoints)(unsafe.Pointer(&in.Mounts)) out.PreKubeadmCommands = *(*[]string)(unsafe.Pointer(&in.PreKubeadmCommands)) out.PostKubeadmCommands = *(*[]string)(unsafe.Pointer(&in.PostKubeadmCommands)) @@ -1192,7 +1248,15 @@ func autoConvert_v1beta2_KubeadmConfigSpec_To_v1alpha4_KubeadmConfigSpec(in *v1b } else { out.Files = nil } - out.DiskSetup = (*DiskSetup)(unsafe.Pointer(in.DiskSetup)) + if in.DiskSetup != nil { + in, out := &in.DiskSetup, &out.DiskSetup + *out = new(DiskSetup) + if err := Convert_v1beta2_DiskSetup_To_v1alpha4_DiskSetup(*in, *out, s); err != nil { + return err + } + } else { + out.DiskSetup = nil + } out.Mounts = *(*[]MountPoints)(unsafe.Pointer(&in.Mounts)) // WARNING: in.BootCommands requires manual conversion: does not exist in peer-type out.PreKubeadmCommands = *(*[]string)(unsafe.Pointer(&in.PreKubeadmCommands)) @@ -1217,7 +1281,9 @@ func autoConvert_v1beta2_KubeadmConfigSpec_To_v1alpha4_KubeadmConfigSpec(in *v1b func autoConvert_v1alpha4_KubeadmConfigStatus_To_v1beta2_KubeadmConfigStatus(in *KubeadmConfigStatus, out *v1beta2.KubeadmConfigStatus, s conversion.Scope) error { // WARNING: in.Ready requires manual conversion: does not exist in peer-type - out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName)) + if err := v1.Convert_Pointer_string_To_string(&in.DataSecretName, &out.DataSecretName, s); err != nil { + return err + } // WARNING: in.FailureReason requires manual conversion: does not exist in peer-type // WARNING: in.FailureMessage requires manual conversion: does not exist in peer-type 
out.ObservedGeneration = in.ObservedGeneration @@ -1248,7 +1314,9 @@ func autoConvert_v1beta2_KubeadmConfigStatus_To_v1alpha4_KubeadmConfigStatus(in out.Conditions = nil } // WARNING: in.Initialization requires manual conversion: does not exist in peer-type - out.DataSecretName = (*string)(unsafe.Pointer(in.DataSecretName)) + if err := v1.Convert_string_To_Pointer_string(&in.DataSecretName, &out.DataSecretName, s); err != nil { + return err + } out.ObservedGeneration = in.ObservedGeneration // WARNING: in.Deprecated requires manual conversion: does not exist in peer-type return nil @@ -1435,7 +1503,9 @@ func autoConvert_v1alpha4_Partition_To_v1beta2_Partition(in *Partition, out *v1b out.Device = in.Device out.Layout = in.Layout out.Overwrite = (*bool)(unsafe.Pointer(in.Overwrite)) - out.TableType = (*string)(unsafe.Pointer(in.TableType)) + if err := v1.Convert_Pointer_string_To_string(&in.TableType, &out.TableType, s); err != nil { + return err + } return nil } @@ -1448,7 +1518,9 @@ func autoConvert_v1beta2_Partition_To_v1alpha4_Partition(in *v1beta2.Partition, out.Device = in.Device out.Layout = in.Layout out.Overwrite = (*bool)(unsafe.Pointer(in.Overwrite)) - out.TableType = (*string)(unsafe.Pointer(in.TableType)) + if err := v1.Convert_string_To_Pointer_string(&in.TableType, &out.TableType, s); err != nil { + return err + } return nil } @@ -1481,15 +1553,29 @@ func Convert_v1beta2_SecretFileSource_To_v1alpha4_SecretFileSource(in *v1beta2.S func autoConvert_v1alpha4_User_To_v1beta2_User(in *User, out *v1beta2.User, s conversion.Scope) error { out.Name = in.Name - out.Gecos = (*string)(unsafe.Pointer(in.Gecos)) - out.Groups = (*string)(unsafe.Pointer(in.Groups)) - out.HomeDir = (*string)(unsafe.Pointer(in.HomeDir)) + if err := v1.Convert_Pointer_string_To_string(&in.Gecos, &out.Gecos, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.Groups, &out.Groups, s); err != nil { + return err + } + if err := 
v1.Convert_Pointer_string_To_string(&in.HomeDir, &out.HomeDir, s); err != nil { + return err + } out.Inactive = (*bool)(unsafe.Pointer(in.Inactive)) - out.Shell = (*string)(unsafe.Pointer(in.Shell)) - out.Passwd = (*string)(unsafe.Pointer(in.Passwd)) - out.PrimaryGroup = (*string)(unsafe.Pointer(in.PrimaryGroup)) + if err := v1.Convert_Pointer_string_To_string(&in.Shell, &out.Shell, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.Passwd, &out.Passwd, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.PrimaryGroup, &out.PrimaryGroup, s); err != nil { + return err + } out.LockPassword = (*bool)(unsafe.Pointer(in.LockPassword)) - out.Sudo = (*string)(unsafe.Pointer(in.Sudo)) + if err := v1.Convert_Pointer_string_To_string(&in.Sudo, &out.Sudo, s); err != nil { + return err + } out.SSHAuthorizedKeys = *(*[]string)(unsafe.Pointer(&in.SSHAuthorizedKeys)) return nil } @@ -1501,16 +1587,30 @@ func Convert_v1alpha4_User_To_v1beta2_User(in *User, out *v1beta2.User, s conver func autoConvert_v1beta2_User_To_v1alpha4_User(in *v1beta2.User, out *User, s conversion.Scope) error { out.Name = in.Name - out.Gecos = (*string)(unsafe.Pointer(in.Gecos)) - out.Groups = (*string)(unsafe.Pointer(in.Groups)) - out.HomeDir = (*string)(unsafe.Pointer(in.HomeDir)) + if err := v1.Convert_string_To_Pointer_string(&in.Gecos, &out.Gecos, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.Groups, &out.Groups, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.HomeDir, &out.HomeDir, s); err != nil { + return err + } out.Inactive = (*bool)(unsafe.Pointer(in.Inactive)) - out.Shell = (*string)(unsafe.Pointer(in.Shell)) - out.Passwd = (*string)(unsafe.Pointer(in.Passwd)) + if err := v1.Convert_string_To_Pointer_string(&in.Shell, &out.Shell, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.Passwd, &out.Passwd, s); err != nil { 
+ return err + } // WARNING: in.PasswdFrom requires manual conversion: does not exist in peer-type - out.PrimaryGroup = (*string)(unsafe.Pointer(in.PrimaryGroup)) + if err := v1.Convert_string_To_Pointer_string(&in.PrimaryGroup, &out.PrimaryGroup, s); err != nil { + return err + } out.LockPassword = (*bool)(unsafe.Pointer(in.LockPassword)) - out.Sudo = (*string)(unsafe.Pointer(in.Sudo)) + if err := v1.Convert_string_To_Pointer_string(&in.Sudo, &out.Sudo, s); err != nil { + return err + } out.SSHAuthorizedKeys = *(*[]string)(unsafe.Pointer(&in.SSHAuthorizedKeys)) return nil } diff --git a/internal/api/controlplane/kubeadm/v1alpha3/conversion.go b/internal/api/controlplane/kubeadm/v1alpha3/conversion.go index 6d33cffff32b..5d7418a22d72 100644 --- a/internal/api/controlplane/kubeadm/v1alpha3/conversion.go +++ b/internal/api/controlplane/kubeadm/v1alpha3/conversion.go @@ -171,6 +171,8 @@ func (dst *KubeadmControlPlane) ConvertFrom(srcRaw conversion.Hub) error { // Convert timeouts moved from one struct to another. 
dst.Spec.KubeadmConfigSpec.ConvertFrom(&src.Spec.KubeadmConfigSpec) + dropEmptyStringsKubeadmConfigSpec(&dst.Spec.KubeadmConfigSpec) + // Preserve Hub data on down-conversion except for metadata return utilconversion.MarshalData(src, dst) } @@ -246,3 +248,34 @@ func convertToObjectReference(ref *clusterv1.ContractVersionedObjectReference, n Name: ref.Name, }, nil } + +func dropEmptyStringsKubeadmConfigSpec(dst *bootstrapv1alpha3.KubeadmConfigSpec) { + for i, u := range dst.Users { + dropEmptyString(&u.Gecos) + dropEmptyString(&u.Groups) + dropEmptyString(&u.HomeDir) + dropEmptyString(&u.Shell) + dropEmptyString(&u.Passwd) + dropEmptyString(&u.PrimaryGroup) + dropEmptyString(&u.Sudo) + dst.Users[i] = u + } + + if dst.DiskSetup != nil { + for i, p := range dst.DiskSetup.Partitions { + dropEmptyString(&p.TableType) + dst.DiskSetup.Partitions[i] = p + } + for i, f := range dst.DiskSetup.Filesystems { + dropEmptyString(&f.Partition) + dropEmptyString(&f.ReplaceFS) + dst.DiskSetup.Filesystems[i] = f + } + } +} + +func dropEmptyString(s **string) { + if *s != nil && **s == "" { + *s = nil + } +} diff --git a/internal/api/controlplane/kubeadm/v1alpha3/conversion_test.go b/internal/api/controlplane/kubeadm/v1alpha3/conversion_test.go index dea5c7044999..79edf651014d 100644 --- a/internal/api/controlplane/kubeadm/v1alpha3/conversion_test.go +++ b/internal/api/controlplane/kubeadm/v1alpha3/conversion_test.go @@ -193,6 +193,8 @@ func spokeKubeadmConfigSpec(in *bootstrapv1alpha3.KubeadmConfigSpec, c randfill. // Drop UseExperimentalRetryJoin as we intentionally don't preserve it. 
in.UseExperimentalRetryJoin = false + + dropEmptyStringsKubeadmConfigSpec(in) } func spokeAPIServer(in *bootstrapv1alpha3.APIServer, c randfill.Continue) { diff --git a/internal/api/controlplane/kubeadm/v1alpha4/conversion.go b/internal/api/controlplane/kubeadm/v1alpha4/conversion.go index e4730d7a4343..a5a4ecb671d8 100644 --- a/internal/api/controlplane/kubeadm/v1alpha4/conversion.go +++ b/internal/api/controlplane/kubeadm/v1alpha4/conversion.go @@ -168,6 +168,9 @@ func (dst *KubeadmControlPlane) ConvertFrom(srcRaw conversion.Hub) error { // Convert timeouts moved from one struct to another. dst.Spec.KubeadmConfigSpec.ConvertFrom(&src.Spec.KubeadmConfigSpec) + dropEmptyStringsKubeadmConfigSpec(&dst.Spec.KubeadmConfigSpec) + dropEmptyStringsKubeadmControlPlaneStatus(&dst.Status) + // Preserve Hub data on down-conversion except for metadata return utilconversion.MarshalData(src, dst) } @@ -234,6 +237,8 @@ func (dst *KubeadmControlPlaneTemplate) ConvertFrom(srcRaw conversion.Hub) error // Convert timeouts moved from one struct to another. dst.Spec.Template.Spec.KubeadmConfigSpec.ConvertFrom(&src.Spec.Template.Spec.KubeadmConfigSpec) + dropEmptyStringsKubeadmConfigSpec(&dst.Spec.Template.Spec.KubeadmConfigSpec) + // Preserve Hub data on down-conversion except for metadata. 
return utilconversion.MarshalData(src, dst) } @@ -389,3 +394,38 @@ func convertToObjectReference(ref *clusterv1.ContractVersionedObjectReference, n Name: ref.Name, }, nil } + +func dropEmptyStringsKubeadmConfigSpec(dst *bootstrapv1alpha4.KubeadmConfigSpec) { + for i, u := range dst.Users { + dropEmptyString(&u.Gecos) + dropEmptyString(&u.Groups) + dropEmptyString(&u.HomeDir) + dropEmptyString(&u.Shell) + dropEmptyString(&u.Passwd) + dropEmptyString(&u.PrimaryGroup) + dropEmptyString(&u.Sudo) + dst.Users[i] = u + } + + if dst.DiskSetup != nil { + for i, p := range dst.DiskSetup.Partitions { + dropEmptyString(&p.TableType) + dst.DiskSetup.Partitions[i] = p + } + for i, f := range dst.DiskSetup.Filesystems { + dropEmptyString(&f.Partition) + dropEmptyString(&f.ReplaceFS) + dst.DiskSetup.Filesystems[i] = f + } + } +} + +func dropEmptyStringsKubeadmControlPlaneStatus(dst *KubeadmControlPlaneStatus) { + dropEmptyString(&dst.Version) +} + +func dropEmptyString(s **string) { + if *s != nil && **s == "" { + *s = nil + } +} diff --git a/internal/api/controlplane/kubeadm/v1alpha4/conversion_test.go b/internal/api/controlplane/kubeadm/v1alpha4/conversion_test.go index ea60c13292cf..20ae6cbab84d 100644 --- a/internal/api/controlplane/kubeadm/v1alpha4/conversion_test.go +++ b/internal/api/controlplane/kubeadm/v1alpha4/conversion_test.go @@ -154,6 +154,8 @@ func spokeKubeadmControlPlaneStatus(in *KubeadmControlPlaneStatus, c randfill.Co // Make sure ready is consistent with ready replicas, so we can rebuild the info after the round trip. in.Ready = in.ReadyReplicas > 0 + + dropEmptyStringsKubeadmControlPlaneStatus(in) } func hubBootstrapTokenString(in *bootstrapv1.BootstrapTokenString, _ randfill.Continue) { @@ -211,6 +213,8 @@ func spokeKubeadmConfigSpec(in *bootstrapv1alpha4.KubeadmConfigSpec, c randfill. // Drop UseExperimentalRetryJoin as we intentionally don't preserve it. 
in.UseExperimentalRetryJoin = false + + dropEmptyStringsKubeadmConfigSpec(in) } func spokeClusterConfiguration(in *bootstrapv1alpha4.ClusterConfiguration, c randfill.Continue) { diff --git a/internal/api/controlplane/kubeadm/v1alpha4/zz_generated.conversion.go b/internal/api/controlplane/kubeadm/v1alpha4/zz_generated.conversion.go index d117a6eb9d95..6ad0f4c4e376 100644 --- a/internal/api/controlplane/kubeadm/v1alpha4/zz_generated.conversion.go +++ b/internal/api/controlplane/kubeadm/v1alpha4/zz_generated.conversion.go @@ -336,7 +336,9 @@ func autoConvert_v1alpha4_KubeadmControlPlaneStatus_To_v1beta2_KubeadmControlPla if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } - out.Version = (*string)(unsafe.Pointer(in.Version)) + if err := v1.Convert_Pointer_string_To_string(&in.Version, &out.Version, s); err != nil { + return err + } // WARNING: in.UpdatedReplicas requires manual conversion: does not exist in peer-type if err := v1.Convert_int32_To_Pointer_int32(&in.ReadyReplicas, &out.ReadyReplicas, s); err != nil { return err @@ -383,7 +385,9 @@ func autoConvert_v1beta2_KubeadmControlPlaneStatus_To_v1alpha4_KubeadmControlPla } // WARNING: in.AvailableReplicas requires manual conversion: does not exist in peer-type // WARNING: in.UpToDateReplicas requires manual conversion: does not exist in peer-type - out.Version = (*string)(unsafe.Pointer(in.Version)) + if err := v1.Convert_string_To_Pointer_string(&in.Version, &out.Version, s); err != nil { + return err + } out.ObservedGeneration = in.ObservedGeneration // WARNING: in.LastRemediation requires manual conversion: does not exist in peer-type // WARNING: in.Deprecated requires manual conversion: does not exist in peer-type diff --git a/internal/api/core/v1alpha3/conversion.go b/internal/api/core/v1alpha3/conversion.go index 915d26438d6e..1f95ad869fea 100644 --- a/internal/api/core/v1alpha3/conversion.go +++ b/internal/api/core/v1alpha3/conversion.go @@ -293,6 +293,8 @@ 
func (dst *Machine) ConvertFrom(srcRaw conversion.Hub) error { dst.Status.InfrastructureReady = ptr.Deref(src.Status.Initialization.InfrastructureProvisioned, false) } + dropEmptyStringsMachineSpec(&dst.Spec) + // Preserve Hub data on down-conversion except for metadata if err := utilconversion.MarshalData(src, dst); err != nil { return err @@ -382,6 +384,8 @@ func (dst *MachineSet) ConvertFrom(srcRaw conversion.Hub) error { dst.Spec.MinReadySeconds = ptr.Deref(src.Spec.Template.Spec.MinReadySeconds, 0) + dropEmptyStringsMachineSpec(&dst.Spec.Template.Spec) + // Preserve Hub data on down-conversion except for metadata if err := utilconversion.MarshalData(src, dst); err != nil { return err @@ -487,6 +491,8 @@ func (dst *MachineDeployment) ConvertFrom(srcRaw conversion.Hub) error { dst.Spec.MinReadySeconds = src.Spec.Template.Spec.MinReadySeconds + dropEmptyStringsMachineSpec(&dst.Spec.Template.Spec) + // Preserve Hub data on down-conversion except for metadata if err := utilconversion.MarshalData(src, dst); err != nil { return err @@ -519,7 +525,7 @@ func (src *MachineHealthCheck) ConvertTo(dstRaw conversion.Hub) error { return err } - if restored.Spec.UnhealthyRange != nil { + if restored.Spec.UnhealthyRange != "" { dst.Spec.UnhealthyRange = restored.Spec.UnhealthyRange } dst.Status.Conditions = restored.Status.Conditions @@ -663,6 +669,8 @@ func (dst *MachinePool) ConvertFrom(srcRaw conversion.Hub) error { dst.Spec.MinReadySeconds = src.Spec.Template.Spec.MinReadySeconds + dropEmptyStringsMachineSpec(&dst.Spec.Template.Spec) + return utilconversion.MarshalData(src, dst) } @@ -967,3 +975,15 @@ func convertToObjectReference(ref *clusterv1.ContractVersionedObjectReference, n Name: ref.Name, }, nil } + +func dropEmptyStringsMachineSpec(spec *MachineSpec) { + dropEmptyString(&spec.Version) + dropEmptyString(&spec.ProviderID) + dropEmptyString(&spec.FailureDomain) +} + +func dropEmptyString(s **string) { + if *s != nil && **s == "" { + *s = nil + } +} diff --git 
a/internal/api/core/v1alpha3/conversion_test.go b/internal/api/core/v1alpha3/conversion_test.go index 17b897734d30..80e85ad13d2e 100644 --- a/internal/api/core/v1alpha3/conversion_test.go +++ b/internal/api/core/v1alpha3/conversion_test.go @@ -140,6 +140,8 @@ func spokeMachine(in *Machine, c randfill.Continue) { c.FillNoCustom(in) fillMachineSpec(&in.Spec, c, in.Namespace) + + dropEmptyStringsMachineSpec(&in.Spec) } func fillMachineSpec(spec *MachineSpec, c randfill.Continue, namespace string) { @@ -218,6 +220,8 @@ func spokeMachineSet(in *MachineSet, c randfill.Continue) { c.FillNoCustom(in) fillMachineSpec(&in.Spec.Template.Spec, c, in.Namespace) + + dropEmptyStringsMachineSpec(&in.Spec.Template.Spec) } func MachineDeploymentFuzzFunc(_ runtimeserializer.CodecFactory) []interface{} { @@ -252,6 +256,8 @@ func spokeMachineDeployment(in *MachineDeployment, c randfill.Continue) { c.FillNoCustom(in) fillMachineSpec(&in.Spec.Template.Spec, c, in.Namespace) + + dropEmptyStringsMachineSpec(&in.Spec.Template.Spec) } func spokeMachineDeploymentSpec(in *MachineDeploymentSpec, c randfill.Continue) { @@ -475,6 +481,8 @@ func spokeMachinePool(in *MachinePool, c randfill.Continue) { c.FillNoCustom(in) fillMachineSpec(&in.Spec.Template.Spec, c, in.Namespace) + + dropEmptyStringsMachineSpec(&in.Spec.Template.Spec) } func spokeMachinePoolSpec(in *MachinePoolSpec, c randfill.Continue) { diff --git a/internal/api/core/v1alpha3/zz_generated.conversion.go b/internal/api/core/v1alpha3/zz_generated.conversion.go index 9b67fd37ae8c..4e1c0caf14b5 100644 --- a/internal/api/core/v1alpha3/zz_generated.conversion.go +++ b/internal/api/core/v1alpha3/zz_generated.conversion.go @@ -1451,9 +1451,15 @@ func autoConvert_v1alpha3_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, ou if err := v1beta1.Convert_v1_ObjectReference_To_v1beta2_ContractVersionedObjectReference(&in.InfrastructureRef, &out.InfrastructureRef, s); err != nil { return err } - out.Version = (*string)(unsafe.Pointer(in.Version)) 
- out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) - out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) + if err := v1.Convert_Pointer_string_To_string(&in.Version, &out.Version, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.ProviderID, &out.ProviderID, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.FailureDomain, &out.FailureDomain, s); err != nil { + return err + } // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type return nil } @@ -1466,9 +1472,15 @@ func autoConvert_v1beta2_MachineSpec_To_v1alpha3_MachineSpec(in *v1beta2.Machine if err := v1beta1.Convert_v1beta2_ContractVersionedObjectReference_To_v1_ObjectReference(&in.InfrastructureRef, &out.InfrastructureRef, s); err != nil { return err } - out.Version = (*string)(unsafe.Pointer(in.Version)) - out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) - out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) + if err := v1.Convert_string_To_Pointer_string(&in.Version, &out.Version, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.ProviderID, &out.ProviderID, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.FailureDomain, &out.FailureDomain, s); err != nil { + return err + } // WARNING: in.MinReadySeconds requires manual conversion: does not exist in peer-type // WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type diff --git a/internal/api/core/v1alpha4/conversion.go b/internal/api/core/v1alpha4/conversion.go index 1e85171336a8..eacc895314a9 100644 --- a/internal/api/core/v1alpha4/conversion.go +++ b/internal/api/core/v1alpha4/conversion.go @@ -381,6 +381,8 @@ func (dst *Machine) ConvertFrom(srcRaw conversion.Hub) error { dst.Status.InfrastructureReady = 
ptr.Deref(src.Status.Initialization.InfrastructureProvisioned, false) } + dropEmptyStringsMachineSpec(&dst.Spec) + // Preserve Hub data on down-conversion except for metadata if err := utilconversion.MarshalData(src, dst); err != nil { return err @@ -479,6 +481,8 @@ func (dst *MachineSet) ConvertFrom(srcRaw conversion.Hub) error { dst.Spec.MinReadySeconds = ptr.Deref(src.Spec.Template.Spec.MinReadySeconds, 0) + dropEmptyStringsMachineSpec(&dst.Spec.Template.Spec) + // Preserve Hub data on down-conversion except for metadata return utilconversion.MarshalData(src, dst) } @@ -582,6 +586,8 @@ func (dst *MachineDeployment) ConvertFrom(srcRaw conversion.Hub) error { dst.Spec.MinReadySeconds = src.Spec.Template.Spec.MinReadySeconds + dropEmptyStringsMachineSpec(&dst.Spec.Template.Spec) + // Preserve Hub data on down-conversion except for metadata return utilconversion.MarshalData(src, dst) } @@ -638,6 +644,8 @@ func (dst *MachineHealthCheck) ConvertFrom(srcRaw conversion.Hub) error { dst.Spec.RemediationTemplate.Namespace = src.Namespace } + dropEmptyStringsMachineHealthCheck(dst) + // Preserve Hub data on down-conversion except for metadata return utilconversion.MarshalData(src, dst) } @@ -746,6 +754,8 @@ func (dst *MachinePool) ConvertFrom(srcRaw conversion.Hub) error { dst.Spec.MinReadySeconds = src.Spec.Template.Spec.MinReadySeconds + dropEmptyStringsMachineSpec(&dst.Spec.Template.Spec) + // Preserve Hub data on down-conversion except for metadata return utilconversion.MarshalData(src, dst) } @@ -1144,3 +1154,19 @@ func convertToObjectReference(ref *clusterv1.ContractVersionedObjectReference, n Name: ref.Name, }, nil } + +func dropEmptyStringsMachineSpec(spec *MachineSpec) { + dropEmptyString(&spec.Version) + dropEmptyString(&spec.ProviderID) + dropEmptyString(&spec.FailureDomain) +} + +func dropEmptyStringsMachineHealthCheck(dst *MachineHealthCheck) { + dropEmptyString(&dst.Spec.UnhealthyRange) +} + +func dropEmptyString(s **string) { + if *s != nil && **s == "" { + 
*s = nil + } +} diff --git a/internal/api/core/v1alpha4/conversion_test.go b/internal/api/core/v1alpha4/conversion_test.go index 7236bc4828b5..2d2dba931f5c 100644 --- a/internal/api/core/v1alpha4/conversion_test.go +++ b/internal/api/core/v1alpha4/conversion_test.go @@ -138,6 +138,8 @@ func spokeMachine(in *Machine, c randfill.Continue) { c.FillNoCustom(in) fillMachineSpec(&in.Spec, c, in.Namespace) + + dropEmptyStringsMachineSpec(&in.Spec) } func fillMachineSpec(spec *MachineSpec, c randfill.Continue, namespace string) { @@ -403,6 +405,8 @@ func spokeMachineSet(in *MachineSet, c randfill.Continue) { c.FillNoCustom(in) fillMachineSpec(&in.Spec.Template.Spec, c, in.Namespace) + + dropEmptyStringsMachineSpec(&in.Spec.Template.Spec) } func MachineDeploymentFuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { @@ -435,6 +439,8 @@ func spokeMachineDeployment(in *MachineDeployment, c randfill.Continue) { c.FillNoCustom(in) fillMachineSpec(&in.Spec.Template.Spec, c, in.Namespace) + + dropEmptyStringsMachineSpec(&in.Spec.Template.Spec) } func spokeMachineDeploymentSpec(in *MachineDeploymentSpec, c randfill.Continue) { @@ -471,6 +477,8 @@ func spokeMachineHealthCheck(in *MachineHealthCheck, c randfill.Continue) { c.FillNoCustom(in) in.Namespace = "foo" + + dropEmptyStringsMachineHealthCheck(in) } func spokeObjectReference(in *corev1.ObjectReference, c randfill.Continue) { @@ -523,6 +531,8 @@ func spokeMachinePool(in *MachinePool, c randfill.Continue) { c.FillNoCustom(in) fillMachineSpec(&in.Spec.Template.Spec, c, in.Namespace) + + dropEmptyStringsMachineSpec(&in.Spec.Template.Spec) } func spokeMachineHealthCheckSpec(in *MachineHealthCheckSpec, c randfill.Continue) { diff --git a/internal/api/core/v1alpha4/zz_generated.conversion.go b/internal/api/core/v1alpha4/zz_generated.conversion.go index 3c66c9f509b2..c7bf882f67b7 100644 --- a/internal/api/core/v1alpha4/zz_generated.conversion.go +++ b/internal/api/core/v1alpha4/zz_generated.conversion.go @@ -1436,7 +1436,9 @@ 
func autoConvert_v1alpha4_MachineHealthCheckSpec_To_v1beta2_MachineHealthCheckSp out.Selector = in.Selector // WARNING: in.UnhealthyConditions requires manual conversion: does not exist in peer-type out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy)) - out.UnhealthyRange = (*string)(unsafe.Pointer(in.UnhealthyRange)) + if err := v1.Convert_Pointer_string_To_string(&in.UnhealthyRange, &out.UnhealthyRange, s); err != nil { + return err + } // WARNING: in.NodeStartupTimeout requires manual conversion: does not exist in peer-type if in.RemediationTemplate != nil { in, out := &in.RemediationTemplate, &out.RemediationTemplate @@ -1455,7 +1457,9 @@ func autoConvert_v1beta2_MachineHealthCheckSpec_To_v1alpha4_MachineHealthCheckSp out.Selector = in.Selector // WARNING: in.UnhealthyNodeConditions requires manual conversion: does not exist in peer-type out.MaxUnhealthy = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnhealthy)) - out.UnhealthyRange = (*string)(unsafe.Pointer(in.UnhealthyRange)) + if err := v1.Convert_string_To_Pointer_string(&in.UnhealthyRange, &out.UnhealthyRange, s); err != nil { + return err + } // WARNING: in.NodeStartupTimeoutSeconds requires manual conversion: does not exist in peer-type if in.RemediationTemplate != nil { in, out := &in.RemediationTemplate, &out.RemediationTemplate @@ -1908,9 +1912,15 @@ func autoConvert_v1alpha4_MachineSpec_To_v1beta2_MachineSpec(in *MachineSpec, ou if err := v1beta1.Convert_v1_ObjectReference_To_v1beta2_ContractVersionedObjectReference(&in.InfrastructureRef, &out.InfrastructureRef, s); err != nil { return err } - out.Version = (*string)(unsafe.Pointer(in.Version)) - out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) - out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) + if err := v1.Convert_Pointer_string_To_string(&in.Version, &out.Version, s); err != nil { + return err + } + if err := v1.Convert_Pointer_string_To_string(&in.ProviderID, &out.ProviderID, s); err != nil { + return 
err + } + if err := v1.Convert_Pointer_string_To_string(&in.FailureDomain, &out.FailureDomain, s); err != nil { + return err + } // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type return nil } @@ -1923,9 +1933,15 @@ func autoConvert_v1beta2_MachineSpec_To_v1alpha4_MachineSpec(in *v1beta2.Machine if err := v1beta1.Convert_v1beta2_ContractVersionedObjectReference_To_v1_ObjectReference(&in.InfrastructureRef, &out.InfrastructureRef, s); err != nil { return err } - out.Version = (*string)(unsafe.Pointer(in.Version)) - out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) - out.FailureDomain = (*string)(unsafe.Pointer(in.FailureDomain)) + if err := v1.Convert_string_To_Pointer_string(&in.Version, &out.Version, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.ProviderID, &out.ProviderID, s); err != nil { + return err + } + if err := v1.Convert_string_To_Pointer_string(&in.FailureDomain, &out.FailureDomain, s); err != nil { + return err + } // WARNING: in.MinReadySeconds requires manual conversion: does not exist in peer-type // WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type // WARNING: in.NodeDrainTimeoutSeconds requires manual conversion: does not exist in peer-type diff --git a/internal/controllers/cluster/cluster_controller_test.go b/internal/controllers/cluster/cluster_controller_test.go index 0709ecf39536..40508042777e 100644 --- a/internal/controllers/cluster/cluster_controller_test.go +++ b/internal/controllers/cluster/cluster_controller_test.go @@ -405,7 +405,7 @@ func TestClusterReconciler(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: cluster.Name, - ProviderID: ptr.To("aws:///id-node-1"), + ProviderID: "aws:///id-node-1", InfrastructureRef: clusterv1.ContractVersionedObjectReference{ APIGroup: builder.InfrastructureGroupVersion.Group, Kind: builder.TestInfrastructureMachineKind, diff --git 
a/internal/controllers/clusterclass/clusterclass_controller.go b/internal/controllers/clusterclass/clusterclass_controller.go index e28bdf65d260..4e39fd9cba94 100644 --- a/internal/controllers/clusterclass/clusterclass_controller.go +++ b/internal/controllers/clusterclass/clusterclass_controller.go @@ -309,7 +309,7 @@ func (r *Reconciler) reconcileVariables(ctx context.Context, s *scope) (ctrl.Res // to the ClusterClass status. if feature.Gates.Enabled(feature.RuntimeSDK) { for _, patch := range clusterClass.Spec.Patches { - if patch.External == nil || patch.External.DiscoverVariablesExtension == nil { + if patch.External == nil || patch.External.DiscoverVariablesExtension == "" { continue } req := &runtimehooksv1.DiscoverVariablesRequest{} @@ -320,7 +320,7 @@ func (r *Reconciler) reconcileVariables(ctx context.Context, s *scope) (ctrl.Res // This also mitigates spikes when ClusterClass re-syncs happen or when changes to the ExtensionConfig are applied. // DiscoverVariables is expected to return a "static" response and usually there are few ExtensionConfigs in a mgmt cluster. 
resp := &runtimehooksv1.DiscoverVariablesResponse{} - err := r.RuntimeClient.CallExtension(ctx, runtimehooksv1.DiscoverVariables, clusterClass, *patch.External.DiscoverVariablesExtension, req, resp, + err := r.RuntimeClient.CallExtension(ctx, runtimehooksv1.DiscoverVariables, clusterClass, patch.External.DiscoverVariablesExtension, req, resp, runtimeclient.WithCaching{Cache: r.discoverVariablesCache, CacheKeyFunc: cacheKeyFunc}) if err != nil { errs = append(errs, errors.Wrapf(err, "failed to call DiscoverVariables for patch %s", patch.Name)) @@ -538,8 +538,8 @@ func (r *Reconciler) extensionConfigToClusterClass(ctx context.Context, o client continue } for _, patch := range clusterClass.Spec.Patches { - if patch.External != nil && patch.External.DiscoverVariablesExtension != nil { - extName, err := internalruntimeclient.ExtensionNameFromHandlerName(*patch.External.DiscoverVariablesExtension) + if patch.External != nil && patch.External.DiscoverVariablesExtension != "" { + extName, err := internalruntimeclient.ExtensionNameFromHandlerName(patch.External.DiscoverVariablesExtension) if err != nil { log.Error(err, "failed to reconcile ClusterClass for ExtensionConfig") continue diff --git a/internal/controllers/clusterclass/clusterclass_controller_test.go b/internal/controllers/clusterclass/clusterclass_controller_test.go index 5df145043eb0..bda9c261491a 100644 --- a/internal/controllers/clusterclass/clusterclass_controller_test.go +++ b/internal/controllers/clusterclass/clusterclass_controller_test.go @@ -537,7 +537,7 @@ func TestReconciler_reconcileVariables(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - DiscoverVariablesExtension: ptr.To("variables-one"), + DiscoverVariablesExtension: "variables-one", }, }, }). 
@@ -749,7 +749,7 @@ func TestReconciler_reconcileVariables(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - DiscoverVariablesExtension: ptr.To("variables-one"), + DiscoverVariablesExtension: "variables-one", }, }, }). @@ -781,7 +781,7 @@ func TestReconciler_reconcileVariables(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - DiscoverVariablesExtension: ptr.To("variables-one"), + DiscoverVariablesExtension: "variables-one", }, }, }). @@ -951,7 +951,7 @@ func TestReconciler_reconcileVariables(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - DiscoverVariablesExtension: ptr.To("variables-one"), + DiscoverVariablesExtension: "variables-one", }, }, }). @@ -991,7 +991,7 @@ func TestReconciler_reconcileVariables(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - DiscoverVariablesExtension: ptr.To("variables-one"), + DiscoverVariablesExtension: "variables-one", }, }, }). @@ -1036,7 +1036,7 @@ func TestReconciler_reconcileVariables(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - DiscoverVariablesExtension: ptr.To("variables-one"), + DiscoverVariablesExtension: "variables-one", }, }, }). @@ -1105,7 +1105,7 @@ func TestReconciler_reconcileVariables(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - DiscoverVariablesExtension: ptr.To("variables-one"), + DiscoverVariablesExtension: "variables-one", }, }, }). @@ -1224,20 +1224,20 @@ func TestReconciler_extensionConfigToClusterClass(t *testing.T) { // These ClusterClasses will be reconciled as they both reference the passed ExtensionConfig `runtime1`. onePatchClusterClass := builder.ClusterClass(metav1.NamespaceDefault, "cc1"). 
WithPatches([]clusterv1.ClusterClassPatch{ - {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: ptr.To("discover-variables.runtime1")}}, + {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: "discover-variables.runtime1"}}, }). Build() twoPatchClusterClass := builder.ClusterClass(metav1.NamespaceDefault, "cc2"). WithPatches([]clusterv1.ClusterClassPatch{ - {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: ptr.To("discover-variables.runtime1")}}, - {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: ptr.To("discover-variables.runtime2")}}, + {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: "discover-variables.runtime1"}}, + {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: "discover-variables.runtime2"}}, }). Build() // This ClusterClasses will not be reconciled as it does not reference the passed ExtensionConfig `runtime1`. notReconciledClusterClass := builder.ClusterClass(metav1.NamespaceDefault, "cc3"). WithPatches([]clusterv1.ClusterClassPatch{ - {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: ptr.To("discover-variables.other-runtime-class")}}, + {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: "discover-variables.other-runtime-class"}}, }). 
Build() diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index f4155b5184e8..0577a96455bc 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -300,7 +300,7 @@ func patchMachine(ctx context.Context, patchHelper *patch.Helper, machine *clust clusterv1.MachineOwnerRemediatedV1Beta1Condition, clusterv1.DrainingSucceededV1Beta1Condition, ), - v1beta1conditions.WithStepCounterIf(machine.DeletionTimestamp.IsZero() && machine.Spec.ProviderID == nil), + v1beta1conditions.WithStepCounterIf(machine.DeletionTimestamp.IsZero() && machine.Spec.ProviderID == ""), v1beta1conditions.WithStepCounterIfOnly( clusterv1.BootstrapReadyV1Beta1Condition, clusterv1.InfrastructureReadyV1Beta1Condition, @@ -720,8 +720,8 @@ func (r *Reconciler) isDeleteNodeAllowed(ctx context.Context, cluster *clusterv1 } var providerID string - if machine.Spec.ProviderID != nil { - providerID = *machine.Spec.ProviderID + if machine.Spec.ProviderID != "" { + providerID = machine.Spec.ProviderID } else if infraMachine != nil { // Fallback to retrieve from infraMachine. if providerIDFromInfraMachine, err := contract.InfrastructureMachine().ProviderID().Get(infraMachine); err == nil { diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go index 92ab6cf5589a..0c9bd9ea5784 100644 --- a/internal/controllers/machine/machine_controller_noderef.go +++ b/internal/controllers/machine/machine_controller_noderef.go @@ -68,7 +68,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result, } // Check that the Machine has a valid ProviderID. 
- if machine.Spec.ProviderID == nil || *machine.Spec.ProviderID == "" { + if machine.Spec.ProviderID == "" { log.Info("Waiting for infrastructure provider to report spec.providerID", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Namespace, machine.Spec.InfrastructureRef.Name)) v1beta1conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyV1Beta1Condition, clusterv1.WaitingForNodeRefV1Beta1Reason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil @@ -81,7 +81,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result, } // Even if Status.NodeRef exists, continue to do the following checks to make sure Node is healthy - node, err := r.getNode(ctx, remoteClient, *machine.Spec.ProviderID) + node, err := r.getNode(ctx, remoteClient, machine.Spec.ProviderID) if err != nil { if err == ErrNodeNotFound { if !s.machine.DeletionTimestamp.IsZero() { @@ -96,7 +96,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result, return ctrl.Result{}, errors.Wrapf(err, "no matching Node for Machine %q in namespace %q", machine.Name, machine.Namespace) } v1beta1conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyV1Beta1Condition, clusterv1.NodeProvisioningV1Beta1Reason, clusterv1.ConditionSeverityWarning, "Waiting for a node with matching ProviderID to exist") - log.Info("Infrastructure provider reporting spec.providerID, matching Kubernetes node is not yet available", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", *machine.Spec.ProviderID) + log.Info("Infrastructure provider reporting spec.providerID, matching Kubernetes node is not yet available", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", machine.Spec.ProviderID) // No need to requeue here. Nodes emit an event that triggers reconciliation. 
return ctrl.Result{}, nil } @@ -112,7 +112,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result, machine.Status.NodeRef = &clusterv1.MachineNodeReference{ Name: s.node.Name, } - log.Info("Infrastructure provider reporting spec.providerID, Kubernetes node is now available", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", *machine.Spec.ProviderID, "Node", klog.KRef("", machine.Status.NodeRef.Name)) + log.Info("Infrastructure provider reporting spec.providerID, Kubernetes node is now available", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", machine.Spec.ProviderID, "Node", klog.KRef("", machine.Status.NodeRef.Name)) r.recorder.Event(machine, corev1.EventTypeNormal, "SuccessfulSetNodeRef", machine.Status.NodeRef.Name) } diff --git a/internal/controllers/machine/machine_controller_noderef_test.go b/internal/controllers/machine/machine_controller_noderef_test.go index b200c8ad7c40..a0d2b02087c8 100644 --- a/internal/controllers/machine/machine_controller_noderef_test.go +++ b/internal/controllers/machine/machine_controller_noderef_test.go @@ -55,7 +55,7 @@ func TestReconcileNode(t *testing.T) { }, }, Spec: clusterv1.MachineSpec{ - ProviderID: ptr.To("aws://us-east-1/test-node-1"), + ProviderID: "aws://us-east-1/test-node-1", }, } @@ -148,7 +148,7 @@ func TestReconcileNode(t *testing.T) { }, }, Spec: clusterv1.MachineSpec{ - ProviderID: ptr.To("aws://us-east-1/test-node-1"), + ProviderID: "aws://us-east-1/test-node-1", }, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{ @@ -174,7 +174,7 @@ func TestReconcileNode(t *testing.T) { Finalizers: []string{"foo"}, }, Spec: clusterv1.MachineSpec{ - ProviderID: ptr.To("aws://us-east-1/test-node-1"), + ProviderID: "aws://us-east-1/test-node-1", }, Status: clusterv1.MachineStatus{ NodeRef: &clusterv1.MachineNodeReference{ @@ -447,7 +447,7 
@@ func TestNodeLabelSync(t *testing.T) { machine := defaultMachine.DeepCopy() machine.Namespace = ns.Name - machine.Spec.ProviderID = ptr.To(nodeProviderID) + machine.Spec.ProviderID = nodeProviderID // Set Machine labels. machine.Labels = map[string]string{} diff --git a/internal/controllers/machine/machine_controller_phases.go b/internal/controllers/machine/machine_controller_phases.go index 1f52ba8cc5da..fa898fc45f92 100644 --- a/internal/controllers/machine/machine_controller_phases.go +++ b/internal/controllers/machine/machine_controller_phases.go @@ -351,13 +351,13 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctr return ctrl.Result{}, errors.Wrapf(err, "failed to read failureDomain from %s %s", s.infraMachine.GetKind(), klog.KObj(s.infraMachine)) default: - m.Spec.FailureDomain = failureDomain + m.Spec.FailureDomain = ptr.Deref(failureDomain, "") } // When we hit this point providerID is set, and either: // - the infra machine is reporting provisioned for the first time // - the infra machine already reported provisioned (and thus m.Status.InfrastructureReady is already true and it should not flip back) - m.Spec.ProviderID = providerID + m.Spec.ProviderID = *providerID if m.Status.Initialization == nil { m.Status.Initialization = &clusterv1.MachineInitializationStatus{} } diff --git a/internal/controllers/machine/machine_controller_phases_test.go b/internal/controllers/machine/machine_controller_phases_test.go index 2c75641cb495..a65fa3926965 100644 --- a/internal/controllers/machine/machine_controller_phases_test.go +++ b/internal/controllers/machine/machine_controller_phases_test.go @@ -468,8 +468,8 @@ func TestReconcileInfrastructure(t *testing.T) { expectError: false, expected: func(g *WithT, m *clusterv1.Machine) { g.Expect(m.Status.Initialization != nil && ptr.Deref(m.Status.Initialization.InfrastructureProvisioned, false)).To(BeFalse()) - g.Expect(m.Spec.ProviderID).To(BeNil()) - 
g.Expect(m.Spec.FailureDomain).To(BeNil()) + g.Expect(m.Spec.ProviderID).To(BeEmpty()) + g.Expect(m.Spec.FailureDomain).To(BeEmpty()) g.Expect(m.Status.Addresses).To(BeNil()) }, }, @@ -496,8 +496,8 @@ func TestReconcileInfrastructure(t *testing.T) { expectError: false, expected: func(g *WithT, m *clusterv1.Machine) { g.Expect(m.Status.Initialization != nil && ptr.Deref(m.Status.Initialization.InfrastructureProvisioned, false)).To(BeTrue()) - g.Expect(ptr.Deref(m.Spec.ProviderID, "")).To(Equal("test://id-1")) - g.Expect(m.Spec.FailureDomain).To(BeNil()) + g.Expect(m.Spec.ProviderID).To(Equal("test://id-1")) + g.Expect(m.Spec.FailureDomain).To(BeEmpty()) g.Expect(m.Status.Addresses).To(BeNil()) }, }, @@ -526,8 +526,8 @@ func TestReconcileInfrastructure(t *testing.T) { expectError: false, expected: func(g *WithT, m *clusterv1.Machine) { g.Expect(m.Status.Initialization != nil && ptr.Deref(m.Status.Initialization.InfrastructureProvisioned, false)).To(BeTrue()) - g.Expect(ptr.Deref(m.Spec.ProviderID, "")).To(Equal("test://id-1")) - g.Expect(m.Spec.FailureDomain).To(BeNil()) + g.Expect(m.Spec.ProviderID).To(Equal("test://id-1")) + g.Expect(m.Spec.FailureDomain).To(BeEmpty()) g.Expect(m.Status.Addresses).To(BeNil()) }, }, @@ -555,8 +555,8 @@ func TestReconcileInfrastructure(t *testing.T) { expectError: false, expected: func(g *WithT, m *clusterv1.Machine) { g.Expect(m.Status.Initialization != nil && ptr.Deref(m.Status.Initialization.InfrastructureProvisioned, false)).To(BeTrue()) - g.Expect(ptr.Deref(m.Spec.ProviderID, "")).To(Equal("test://id-1")) - g.Expect(ptr.Deref(m.Spec.FailureDomain, "")).To(Equal("foo")) + g.Expect(m.Spec.ProviderID).To(Equal("test://id-1")) + g.Expect(m.Spec.FailureDomain).To(Equal("foo")) g.Expect(m.Status.Addresses).To(BeNil()) }, }, @@ -593,8 +593,8 @@ func TestReconcileInfrastructure(t *testing.T) { expectError: false, expected: func(g *WithT, m *clusterv1.Machine) { g.Expect(m.Status.Initialization != nil && 
ptr.Deref(m.Status.Initialization.InfrastructureProvisioned, false)).To(BeTrue()) - g.Expect(ptr.Deref(m.Spec.ProviderID, "")).To(Equal("test://id-1")) - g.Expect(m.Spec.FailureDomain).To(BeNil()) + g.Expect(m.Spec.ProviderID).To(Equal("test://id-1")) + g.Expect(m.Spec.FailureDomain).To(BeEmpty()) g.Expect(m.Status.Addresses).To(HaveLen(2)) }, }, @@ -632,8 +632,8 @@ func TestReconcileInfrastructure(t *testing.T) { expectError: false, expected: func(g *WithT, m *clusterv1.Machine) { g.Expect(m.Status.Initialization != nil && ptr.Deref(m.Status.Initialization.InfrastructureProvisioned, false)).To(BeTrue()) - g.Expect(ptr.Deref(m.Spec.ProviderID, "")).To(Equal("test://id-1")) - g.Expect(ptr.Deref(m.Spec.FailureDomain, "")).To(Equal("foo")) + g.Expect(m.Spec.ProviderID).To(Equal("test://id-1")) + g.Expect(m.Spec.FailureDomain).To(Equal("foo")) g.Expect(m.Status.Addresses).To(HaveLen(2)) }, }, @@ -674,8 +674,8 @@ func TestReconcileInfrastructure(t *testing.T) { expectError: false, expected: func(g *WithT, m *clusterv1.Machine) { g.Expect(m.Status.Initialization != nil && ptr.Deref(m.Status.Initialization.InfrastructureProvisioned, false)).To(BeTrue()) - g.Expect(ptr.Deref(m.Spec.ProviderID, "")).To(Equal("test://id-1")) - g.Expect(ptr.Deref(m.Spec.FailureDomain, "")).To(Equal("foo")) + g.Expect(m.Spec.ProviderID).To(Equal("test://id-1")) + g.Expect(m.Spec.FailureDomain).To(Equal("foo")) g.Expect(m.Status.Addresses).To(HaveLen(2)) }, }, @@ -713,8 +713,8 @@ func TestReconcileInfrastructure(t *testing.T) { Kind: "GenericInfrastructureMachine", Name: "infra-config1", }, - ProviderID: ptr.To("test://something"), - FailureDomain: ptr.To("something"), + ProviderID: "test://something", + FailureDomain: "something", }, Status: clusterv1.MachineStatus{ Initialization: &clusterv1.MachineInitializationStatus{ @@ -758,8 +758,8 @@ func TestReconcileInfrastructure(t *testing.T) { expectError: false, expected: func(g *WithT, m *clusterv1.Machine) { g.Expect(m.Status.Initialization != 
nil && ptr.Deref(m.Status.Initialization.InfrastructureProvisioned, false)).To(BeTrue()) - g.Expect(ptr.Deref(m.Spec.ProviderID, "")).To(Equal("test://id-1")) - g.Expect(ptr.Deref(m.Spec.FailureDomain, "")).To(Equal("foo")) + g.Expect(m.Spec.ProviderID).To(Equal("test://id-1")) + g.Expect(m.Spec.FailureDomain).To(Equal("foo")) g.Expect(m.Status.Addresses).To(HaveLen(2)) }, }, @@ -777,8 +777,8 @@ func TestReconcileInfrastructure(t *testing.T) { Kind: "GenericInfrastructureMachine", Name: "infra-config1", }, - ProviderID: ptr.To("test://something"), - FailureDomain: ptr.To("something"), + ProviderID: "test://something", + FailureDomain: "something", }, Status: clusterv1.MachineStatus{ Initialization: &clusterv1.MachineInitializationStatus{ @@ -822,8 +822,8 @@ func TestReconcileInfrastructure(t *testing.T) { expectError: false, expected: func(g *WithT, m *clusterv1.Machine) { g.Expect(m.Status.Initialization != nil && ptr.Deref(m.Status.Initialization.InfrastructureProvisioned, false)).To(BeTrue()) - g.Expect(ptr.Deref(m.Spec.ProviderID, "")).To(Equal("test://id-1")) - g.Expect(ptr.Deref(m.Spec.FailureDomain, "")).To(Equal("foo")) + g.Expect(m.Spec.ProviderID).To(Equal("test://id-1")) + g.Expect(m.Spec.FailureDomain).To(Equal("foo")) g.Expect(m.Status.Addresses).To(HaveLen(2)) }, }, diff --git a/internal/controllers/machine/machine_controller_status.go b/internal/controllers/machine/machine_controller_status.go index 794aefd0f3d9..84f78674201f 100644 --- a/internal/controllers/machine/machine_controller_status.go +++ b/internal/controllers/machine/machine_controller_status.go @@ -382,10 +382,10 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl // If the machine is at the end of the provisioning phase, with ProviderID set, but still waiting // for a matching Node to exists, surface this. 
- if ptr.Deref(machine.Spec.ProviderID, "") != "" { + if machine.Spec.ProviderID != "" { setNodeConditions(machine, metav1.ConditionUnknown, clusterv1.MachineNodeInspectionFailedReason, - fmt.Sprintf("Waiting for a Node with spec.providerID %s to exist", *machine.Spec.ProviderID)) + fmt.Sprintf("Waiting for a Node with spec.providerID %s to exist", machine.Spec.ProviderID)) return } @@ -794,7 +794,7 @@ func setMachinePhaseAndLastUpdated(_ context.Context, m *clusterv1.Machine) { } // Set the phase to "provisioned" if there is a provider ID. - if m.Spec.ProviderID != nil { + if m.Spec.ProviderID != "" { m.Status.SetTypedPhase(clusterv1.MachinePhaseProvisioned) } diff --git a/internal/controllers/machine/machine_controller_status_test.go b/internal/controllers/machine/machine_controller_status_test.go index 87c9098c40aa..bf785c2e5d95 100644 --- a/internal/controllers/machine/machine_controller_status_test.go +++ b/internal/controllers/machine/machine_controller_status_test.go @@ -887,7 +887,7 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { cluster: defaultCluster, machine: func() *clusterv1.Machine { m := defaultMachine.DeepCopy() - m.Spec.ProviderID = ptr.To("foo://test-node-1") + m.Spec.ProviderID = "foo://test-node-1" return m }(), node: nil, @@ -2190,7 +2190,7 @@ func TestReconcileMachinePhases(t *testing.T) { return false } g.Expect(machine.Status.Addresses).To(HaveLen(2)) - g.Expect(*machine.Spec.FailureDomain).To(Equal("us-east-2a")) + g.Expect(machine.Spec.FailureDomain).To(Equal("us-east-2a")) g.Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseRunning)) // Verify that the LastUpdated timestamp was updated g.Expect(machine.Status.LastUpdated).NotTo(BeNil()) @@ -2376,7 +2376,7 @@ func TestReconcileMachinePhases(t *testing.T) { machine := defaultMachine.DeepCopy() machine.Namespace = ns.Name // Set Machine ProviderID. 
- machine.Spec.ProviderID = ptr.To(nodeProviderID) + machine.Spec.ProviderID = nodeProviderID g.Expect(env.Create(ctx, cluster)).To(Succeed()) defaultKubeconfigSecret = kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster)) diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index 1151acdd44d6..c932081aa141 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -1052,7 +1052,7 @@ func TestMachineV1Beta1Conditions(t *testing.T) { Finalizers: []string{clusterv1.MachineFinalizer}, }, Spec: clusterv1.MachineSpec{ - ProviderID: ptr.To("test://id-1"), + ProviderID: "test://id-1", ClusterName: "test-cluster", InfrastructureRef: clusterv1.ContractVersionedObjectReference{ APIGroup: clusterv1.GroupVersionInfrastructure.Group, @@ -3515,7 +3515,7 @@ func TestNodeDeletionWithoutNodeRefFallback(t *testing.T) { Name: "infra-config1", }, Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, - ProviderID: ptr.To("test://id-1"), + ProviderID: "test://id-1", }, } diff --git a/internal/controllers/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/machinedeployment/machinedeployment_controller_test.go index 029b9fe93ece..ef58b616cce0 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller_test.go @@ -127,7 +127,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: testCluster.Name, - Version: &version, + Version: version, InfrastructureRef: clusterv1.ContractVersionedObjectReference{ APIGroup: clusterv1.GroupVersionInfrastructure.Group, Kind: "GenericInfrastructureMachineTemplate", @@ -224,7 +224,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { t.Log("Verify MachineSet has expected replicas and version") 
firstMachineSet := machineSets.Items[0] g.Expect(*firstMachineSet.Spec.Replicas).To(BeEquivalentTo(2)) - g.Expect(*firstMachineSet.Spec.Template.Spec.Version).To(BeEquivalentTo("v1.10.3")) + g.Expect(firstMachineSet.Spec.Template.Spec.Version).To(BeEquivalentTo("v1.10.3")) t.Log("Verify MachineSet has expected ClusterNameLabel and MachineDeploymentNameLabel") g.Expect(firstMachineSet.Labels[clusterv1.ClusterNameLabel]).To(Equal(testCluster.Name)) @@ -550,7 +550,7 @@ func TestMachineDeploymentReconciler_CleanUpManagedFieldsForSSAAdoption(t *testi }, Spec: clusterv1.MachineSpec{ ClusterName: testCluster.Name, - Version: &version, + Version: version, InfrastructureRef: clusterv1.ContractVersionedObjectReference{ APIGroup: clusterv1.GroupVersionInfrastructure.Group, Kind: "GenericInfrastructureMachineTemplate", @@ -628,7 +628,7 @@ func TestMachineDeploymentReconciler_CleanUpManagedFieldsForSSAAdoption(t *testi Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To("data-secret-name"), }, - Version: &version, + Version: version, }, }, }, diff --git a/internal/controllers/machinedeployment/machinedeployment_sync_test.go b/internal/controllers/machinedeployment/machinedeployment_sync_test.go index e9c5d2e15a3b..0aa6041b45a4 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync_test.go @@ -560,7 +560,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { Annotations: map[string]string{"machine-annotation1": "machine-value1"}, }, Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.25.3"), + Version: "v1.25.3", InfrastructureRef: infraRef, Bootstrap: clusterv1.Bootstrap{ ConfigRef: &bootstrapRef, diff --git a/internal/controllers/machinedeployment/mdutil/util.go b/internal/controllers/machinedeployment/mdutil/util.go index 9eb0194c9303..6e41bb04112e 100644 --- a/internal/controllers/machinedeployment/mdutil/util.go +++ b/internal/controllers/machinedeployment/mdutil/util.go @@ 
-382,10 +382,10 @@ func MachineTemplateUpToDate(current, desired *clusterv1.MachineTemplateSpec) (u currentCopy := MachineTemplateDeepCopyRolloutFields(current) desiredCopy := MachineTemplateDeepCopyRolloutFields(desired) - if !reflect.DeepEqual(currentCopy.Spec.Version, desiredCopy.Spec.Version) { - logMessages = append(logMessages, fmt.Sprintf("spec.version %s, %s required", ptr.Deref(currentCopy.Spec.Version, "nil"), ptr.Deref(desiredCopy.Spec.Version, "nil"))) + if currentCopy.Spec.Version != desiredCopy.Spec.Version { + logMessages = append(logMessages, fmt.Sprintf("spec.version %s, %s required", currentCopy.Spec.Version, desiredCopy.Spec.Version)) // Note: the code computing the message for MachineDeployment's RolloutOut condition is making assumptions on the format/content of this message. - conditionMessages = append(conditionMessages, fmt.Sprintf("Version %s, %s required", ptr.Deref(currentCopy.Spec.Version, "nil"), ptr.Deref(desiredCopy.Spec.Version, "nil"))) + conditionMessages = append(conditionMessages, fmt.Sprintf("Version %s, %s required", currentCopy.Spec.Version, desiredCopy.Spec.Version)) } // Note: we return a message based on desired.bootstrap.ConfigRef != nil, but we always compare the entire bootstrap @@ -411,9 +411,9 @@ func MachineTemplateUpToDate(current, desired *clusterv1.MachineTemplateSpec) (u conditionMessages = append(conditionMessages, fmt.Sprintf("%s is not up-to-date", strings.TrimSuffix(currentCopy.Spec.InfrastructureRef.Kind, clusterv1.TemplateSuffix))) } - if !reflect.DeepEqual(currentCopy.Spec.FailureDomain, desiredCopy.Spec.FailureDomain) { - logMessages = append(logMessages, fmt.Sprintf("spec.failureDomain %s, %s required", ptr.Deref(currentCopy.Spec.FailureDomain, "nil"), ptr.Deref(desiredCopy.Spec.FailureDomain, "nil"))) - conditionMessages = append(conditionMessages, fmt.Sprintf("Failure domain %s, %s required", ptr.Deref(currentCopy.Spec.FailureDomain, "nil"), ptr.Deref(desiredCopy.Spec.FailureDomain, "nil"))) + if 
currentCopy.Spec.FailureDomain != desiredCopy.Spec.FailureDomain { + logMessages = append(logMessages, fmt.Sprintf("spec.failureDomain %s, %s required", currentCopy.Spec.FailureDomain, desiredCopy.Spec.FailureDomain)) + conditionMessages = append(conditionMessages, fmt.Sprintf("Failure domain %s, %s required", currentCopy.Spec.FailureDomain, desiredCopy.Spec.FailureDomain)) } if len(logMessages) > 0 || len(conditionMessages) > 0 { diff --git a/internal/controllers/machinedeployment/mdutil/util_test.go b/internal/controllers/machinedeployment/mdutil/util_test.go index 6d86a176ac76..b34c96475e27 100644 --- a/internal/controllers/machinedeployment/mdutil/util_test.go +++ b/internal/controllers/machinedeployment/mdutil/util_test.go @@ -182,8 +182,8 @@ func TestMachineTemplateUpToDate(t *testing.T) { NodeDeletionTimeoutSeconds: ptr.To(int32(10)), NodeVolumeDetachTimeoutSeconds: ptr.To(int32(10)), ClusterName: "cluster1", - Version: ptr.To("v1.25.0"), - FailureDomain: ptr.To("failure-domain1"), + Version: "v1.25.0", + FailureDomain: "failure-domain1", MinReadySeconds: ptr.To[int32](10), InfrastructureRef: clusterv1.ContractVersionedObjectReference{ Name: "infra1", @@ -225,10 +225,10 @@ func TestMachineTemplateUpToDate(t *testing.T) { machineTemplateWithDifferentClusterName.Spec.ClusterName = "cluster2" machineTemplateWithDifferentVersion := machineTemplate.DeepCopy() - machineTemplateWithDifferentVersion.Spec.Version = ptr.To("v1.26.0") + machineTemplateWithDifferentVersion.Spec.Version = "v1.26.0" machineTemplateWithDifferentFailureDomain := machineTemplate.DeepCopy() - machineTemplateWithDifferentFailureDomain.Spec.FailureDomain = ptr.To("failure-domain2") + machineTemplateWithDifferentFailureDomain.Spec.FailureDomain = "failure-domain2" machineTemplateWithDifferentInfraRef := machineTemplate.DeepCopy() machineTemplateWithDifferentInfraRef.Spec.InfrastructureRef.Name = "infra2" diff --git a/internal/controllers/machinedeployment/suite_test.go 
b/internal/controllers/machinedeployment/suite_test.go index 0ce72e31c823..5803d4032fd2 100644 --- a/internal/controllers/machinedeployment/suite_test.go +++ b/internal/controllers/machinedeployment/suite_test.go @@ -34,7 +34,6 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" @@ -208,7 +207,7 @@ func fakeMachineNodeRef(m *clusterv1.Machine, pid string, g *WithT) { // Patch the Machine. patchMachine := client.MergeFrom(m.DeepCopy()) - m.Spec.ProviderID = ptr.To(pid) + m.Spec.ProviderID = pid g.Expect(env.Patch(ctx, m, patchMachine)).To(Succeed()) patchMachine = client.MergeFrom(m.DeepCopy()) diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go index 11166e4edfb3..5639278fe6c7 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go @@ -250,7 +250,7 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster if !remediationAllowed { var message string - if m.Spec.UnhealthyRange == nil { + if m.Spec.UnhealthyRange == "" { logger.V(3).Info( "Short-circuiting remediation", totalTargetKeyLog, totalTargets, @@ -265,13 +265,13 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster logger.V(3).Info( "Short-circuiting remediation", totalTargetKeyLog, totalTargets, - unhealthyRangeKeyLog, *m.Spec.UnhealthyRange, + unhealthyRangeKeyLog, m.Spec.UnhealthyRange, unhealthyTargetsKeyLog, len(unhealthy), ) message = fmt.Sprintf("Remediation is not allowed, the number of not started or unhealthy machines does not fall within the range (total: %v, unhealthy: %v, unhealthyRange: %v)", 
totalTargets, len(unhealthy), - *m.Spec.UnhealthyRange) + m.Spec.UnhealthyRange) } // Remediation not allowed, the number of not started or unhealthy machines either exceeds maxUnhealthy (or) not within unhealthyRange @@ -324,7 +324,7 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster return reconcile.Result{Requeue: true}, nil } - if m.Spec.UnhealthyRange == nil { + if m.Spec.UnhealthyRange == "" { logger.V(3).Info( "Remediations are allowed", totalTargetKeyLog, totalTargets, @@ -335,7 +335,7 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster logger.V(3).Info( "Remediations are allowed", totalTargetKeyLog, totalTargets, - unhealthyRangeKeyLog, *m.Spec.UnhealthyRange, + unhealthyRangeKeyLog, m.Spec.UnhealthyRange, unhealthyTargetsKeyLog, len(unhealthy), ) } @@ -657,7 +657,7 @@ func machineNames(machines []*clusterv1.Machine) []string { func isAllowedRemediation(mhc *clusterv1.MachineHealthCheck) (bool, int32, error) { var remediationAllowed bool var remediationCount int32 - if mhc.Spec.UnhealthyRange != nil { + if mhc.Spec.UnhealthyRange != "" { minVal, maxVal, err := getUnhealthyRange(mhc) if err != nil { return false, 0, err @@ -684,7 +684,7 @@ func isAllowedRemediation(mhc *clusterv1.MachineHealthCheck) (bool, int32, error // Eg. [2-5] will return (2,5,nil). 
func getUnhealthyRange(mhc *clusterv1.MachineHealthCheck) (int, int, error) { // remove '[' and ']' - unhealthyRange := (*(mhc.Spec.UnhealthyRange))[1 : len(*mhc.Spec.UnhealthyRange)-1] + unhealthyRange := (mhc.Spec.UnhealthyRange)[1 : len(mhc.Spec.UnhealthyRange)-1] parts := strings.Split(unhealthyRange, "-") diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go index 28c8d17d8f02..13c2459f0b97 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go @@ -780,7 +780,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) unhealthyRange := "[1-3]" - mhc.Spec.UnhealthyRange = &unhealthyRange + mhc.Spec.UnhealthyRange = unhealthyRange g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { @@ -853,7 +853,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) unhealthyRange := "[3-5]" - mhc.Spec.UnhealthyRange = &unhealthyRange + mhc.Spec.UnhealthyRange = unhealthyRange g.Expect(env.Create(ctx, mhc)).To(Succeed()) defer func(do ...client.Object) { diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go index 0301eddc3a7d..141ea08a8892 100644 --- a/internal/controllers/machineset/machineset_controller_test.go +++ b/internal/controllers/machineset/machineset_controller_test.go @@ -117,7 +117,7 @@ func TestMachineSetReconciler(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: testCluster.Name, - Version: &version, + Version: version, Bootstrap: clusterv1.Bootstrap{ ConfigRef: &clusterv1.ContractVersionedObjectReference{ APIGroup: clusterv1.GroupVersionBootstrap.Group, @@ -412,8 +412,7 @@ func 
TestMachineSetReconciler(t *testing.T) { continue } - g.Expect(m.Spec.Version).ToNot(BeNil()) - g.Expect(*m.Spec.Version).To(BeEquivalentTo("v1.14.2")) + g.Expect(m.Spec.Version).To(BeEquivalentTo("v1.14.2")) fakeBootstrapRefDataSecretCreated(*m.Spec.Bootstrap.ConfigRef, m.Namespace, bootstrapResource, g) providerID := fakeInfrastructureRefProvisioned(m.Spec.InfrastructureRef, m.Namespace, infraResource, g) fakeMachineNodeRef(&m, providerID, g) @@ -967,7 +966,7 @@ func TestMachineSetReconcile_MachinesCreatedConditionFalseOnBadInfraRef(t *testi // Try to break Infra Cloning Name: "something_invalid", }, - Version: &version, + Version: version, }, }, Selector: metav1.LabelSelector{ @@ -1140,7 +1139,7 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: testCluster.Name, - Version: &version, + Version: version, Bootstrap: clusterv1.Bootstrap{ ConfigRef: &clusterv1.ContractVersionedObjectReference{ APIGroup: clusterv1.GroupVersionBootstrap.Group, @@ -2295,7 +2294,7 @@ func TestMachineSetReconciler_syncReplicas_WithErrors(t *testing.T) { Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: testCluster.Name, - Version: ptr.To("v1.14.2"), + Version: "v1.14.2", Bootstrap: clusterv1.Bootstrap{ ConfigRef: &clusterv1.ContractVersionedObjectReference{ APIGroup: clusterv1.GroupVersionBootstrap.Group, @@ -2454,7 +2453,7 @@ func TestComputeDesiredMachine(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: testClusterName, - Version: ptr.To("v1.25.3"), + Version: "v1.25.3", InfrastructureRef: infraRef, Bootstrap: clusterv1.Bootstrap{ ConfigRef: &bootstrapRef, @@ -2479,7 +2478,7 @@ func TestComputeDesiredMachine(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: testClusterName, - Version: ptr.To("v1.25.3"), + Version: "v1.25.3", NodeDrainTimeoutSeconds: duration10s, NodeVolumeDetachTimeoutSeconds: duration10s, NodeDeletionTimeoutSeconds: duration10s, @@ -2848,7 +2847,7 @@ func 
TestNewMachineUpToDateCondition(t *testing.T) { Spec: clusterv1.MachineDeploymentSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), + Version: "v1.31.0", }, }, }, @@ -2857,7 +2856,7 @@ func TestNewMachineUpToDateCondition(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), + Version: "v1.31.0", }, }, }, @@ -2874,7 +2873,7 @@ func TestNewMachineUpToDateCondition(t *testing.T) { Spec: clusterv1.MachineDeploymentSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), + Version: "v1.31.0", }, }, }, @@ -2883,7 +2882,7 @@ func TestNewMachineUpToDateCondition(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.30.0"), + Version: "v1.30.0", }, }, }, @@ -2902,7 +2901,7 @@ func TestNewMachineUpToDateCondition(t *testing.T) { RolloutAfter: &metav1.Time{Time: reconciliationTime.Add(1 * time.Hour)}, // rollout after not yet expired Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), + Version: "v1.31.0", }, }, }, @@ -2914,7 +2913,7 @@ func TestNewMachineUpToDateCondition(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), + Version: "v1.31.0", }, }, }, @@ -2932,7 +2931,7 @@ func TestNewMachineUpToDateCondition(t *testing.T) { RolloutAfter: &metav1.Time{Time: reconciliationTime.Add(-1 * time.Hour)}, // rollout after expired Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), + Version: "v1.31.0", }, }, }, @@ -2944,7 +2943,7 @@ func TestNewMachineUpToDateCondition(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: 
ptr.To("v1.31.0"), + Version: "v1.31.0", }, }, }, @@ -2963,7 +2962,7 @@ func TestNewMachineUpToDateCondition(t *testing.T) { RolloutAfter: &metav1.Time{Time: reconciliationTime.Add(-2 * time.Hour)}, // rollout after expired Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), + Version: "v1.31.0", }, }, }, @@ -2975,7 +2974,7 @@ func TestNewMachineUpToDateCondition(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), + Version: "v1.31.0", }, }, }, @@ -2993,7 +2992,7 @@ func TestNewMachineUpToDateCondition(t *testing.T) { RolloutAfter: &metav1.Time{Time: reconciliationTime.Add(-1 * time.Hour)}, // rollout after expired Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.30.0"), + Version: "v1.30.0", }, }, }, @@ -3005,7 +3004,7 @@ func TestNewMachineUpToDateCondition(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.31.0"), + Version: "v1.31.0", }, }, }, diff --git a/internal/controllers/machineset/machineset_preflight.go b/internal/controllers/machineset/machineset_preflight.go index 3a7d48465257..0be375c78367 100644 --- a/internal/controllers/machineset/machineset_preflight.go +++ b/internal/controllers/machineset/machineset_preflight.go @@ -98,8 +98,8 @@ func (r *Reconciler) runPreflightChecks(ctx context.Context, cluster *clusterv1. } // Check the version skew policies only if version is defined in the MachineSet. 
- if ms.Spec.Template.Spec.Version != nil { - msVersion := *ms.Spec.Template.Spec.Version + if ms.Spec.Template.Spec.Version != "" { + msVersion := ms.Spec.Template.Spec.Version msSemver, err := semver.ParseTolerant(msVersion) if err != nil { return nil, errors.Wrapf(err, "failed to perform %q: failed to perform preflight checks: failed to parse version %q of MachineSet %s", action, msVersion, klog.KObj(ms)) diff --git a/internal/controllers/machineset/machineset_preflight_test.go b/internal/controllers/machineset/machineset_preflight_test.go index 9e020237fc1d..5450c0a03a55 100644 --- a/internal/controllers/machineset/machineset_preflight_test.go +++ b/internal/controllers/machineset/machineset_preflight_test.go @@ -24,7 +24,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -211,7 +210,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.26.2"), + Version: "v1.26.2", Bootstrap: clusterv1.Bootstrap{ConfigRef: &clusterv1.ContractVersionedObjectReference{Kind: "KubeadmConfigTemplate"}}, }, }, @@ -273,7 +272,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.27.0.0"), + Version: "v1.27.0.0", }, }, }, @@ -299,7 +298,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.27.0"), + Version: "v1.27.0", }, }, }, @@ -327,7 +326,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: 
clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.24.0"), + Version: "v1.24.0", }, }, }, @@ -358,7 +357,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.27.0"), + Version: "v1.27.0", }, }, }, @@ -384,7 +383,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.25.0"), + Version: "v1.25.0", }, }, }, @@ -410,7 +409,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.25.5"), + Version: "v1.25.5", Bootstrap: clusterv1.Bootstrap{ConfigRef: &clusterv1.ContractVersionedObjectReference{ APIGroup: bootstrapv1.GroupVersion.Group, Kind: "KubeadmConfigTemplate", @@ -442,7 +441,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.25.0"), + Version: "v1.25.0", }, }, }, @@ -471,7 +470,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.25.0"), + Version: "v1.25.0", Bootstrap: clusterv1.Bootstrap{ConfigRef: &clusterv1.ContractVersionedObjectReference{ APIGroup: bootstrapv1.GroupVersion.Group, Kind: "KubeadmConfigTemplate", @@ -501,7 +500,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.26.2"), + Version: "v1.26.2", Bootstrap: clusterv1.Bootstrap{ConfigRef: &clusterv1.ContractVersionedObjectReference{ APIGroup: 
bootstrapv1.GroupVersion.Group, Kind: "KubeadmConfigTemplate", @@ -535,7 +534,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.26.0"), + Version: "v1.26.0", }, }, }, @@ -565,7 +564,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.26.0"), + Version: "v1.26.0", }, }, }, @@ -595,7 +594,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.26.0"), + Version: "v1.26.0", }, }, }, @@ -625,7 +624,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.26.2"), + Version: "v1.26.2", }, }, }, @@ -678,7 +677,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("v1.26.0"), + Version: "v1.26.0", Bootstrap: clusterv1.Bootstrap{ConfigRef: &clusterv1.ContractVersionedObjectReference{ APIGroup: bootstrapv1.GroupVersion.Group, Kind: "KubeadmConfigTemplate", diff --git a/internal/controllers/machineset/suite_test.go b/internal/controllers/machineset/suite_test.go index ddac83225946..588a999a269f 100644 --- a/internal/controllers/machineset/suite_test.go +++ b/internal/controllers/machineset/suite_test.go @@ -33,7 +33,6 @@ import ( "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/uuid" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" 
"sigs.k8s.io/controller-runtime/pkg/client" @@ -218,7 +217,7 @@ func fakeMachineNodeRef(m *clusterv1.Machine, pid string, g *WithT) { // Patch the Machine. patchMachine := client.MergeFrom(m.DeepCopy()) - m.Spec.ProviderID = ptr.To(pid) + m.Spec.ProviderID = pid g.Expect(env.Patch(ctx, m, patchMachine)).To(Succeed()) patchMachine = client.MergeFrom(m.DeepCopy()) diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go index 4f190c505504..bcf0d0f1170e 100644 --- a/internal/controllers/topology/cluster/cluster_controller_test.go +++ b/internal/controllers/topology/cluster/cluster_controller_test.go @@ -1074,8 +1074,8 @@ func assertMachineDeploymentsReconcile(cluster *clusterv1.Cluster) error { if *md.Spec.Replicas != *topologyMD.Replicas { return fmt.Errorf("replicas %v does not match expected %v", *md.Spec.Replicas, *topologyMD.Replicas) } - if *md.Spec.Template.Spec.Version != cluster.Spec.Topology.Version { - return fmt.Errorf("version %v does not match expected %v", *md.Spec.Template.Spec.Version, cluster.Spec.Topology.Version) + if md.Spec.Template.Spec.Version != cluster.Spec.Topology.Version { + return fmt.Errorf("version %v does not match expected %v", md.Spec.Template.Spec.Version, cluster.Spec.Topology.Version) } // Check if the InfrastructureReference exists. 
@@ -1160,8 +1160,8 @@ func assertMachinePoolsReconcile(cluster *clusterv1.Cluster) error { if *mp.Spec.Replicas != *topologyMP.Replicas { return fmt.Errorf("replicas %v does not match expected %v", mp.Spec.Replicas, topologyMP.Replicas) } - if *mp.Spec.Template.Spec.Version != cluster.Spec.Topology.Version { - return fmt.Errorf("version %v does not match expected %v", *mp.Spec.Template.Spec.Version, cluster.Spec.Topology.Version) + if mp.Spec.Template.Spec.Version != cluster.Spec.Topology.Version { + return fmt.Errorf("version %v does not match expected %v", mp.Spec.Template.Spec.Version, cluster.Spec.Topology.Version) } // Check if the InfrastructureReference exists. diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go index 2104f89111aa..7dba2bd717a2 100644 --- a/internal/controllers/topology/cluster/patches/engine.go +++ b/internal/controllers/topology/cluster/patches/engine.go @@ -124,7 +124,7 @@ func (e *engine) Apply(ctx context.Context, blueprint *scope.ClusterBlueprint, d for i := range blueprint.ClusterClass.Spec.Patches { clusterClassPatch := blueprint.ClusterClass.Spec.Patches[i] - if clusterClassPatch.External == nil || clusterClassPatch.External.ValidateTopologyExtension == nil { + if clusterClassPatch.External == nil || clusterClassPatch.External.ValidateTopologyExtension == "" { continue } @@ -408,7 +408,7 @@ func createPatchGenerator(runtimeClient runtimeclient.Client, patch *clusterv1.C return inline.NewGenerator(patch), nil } // Return an externalPatchGenerator if there is an external configuration in the patch. 
- if patch.External != nil && patch.External.GeneratePatchesExtension != nil { + if patch.External != nil && patch.External.GeneratePatchesExtension != "" { if !feature.Gates.Enabled(feature.RuntimeSDK) { return nil, errors.Errorf("can not use external patch %q if RuntimeSDK feature flag is disabled", patch.Name) } diff --git a/internal/controllers/topology/cluster/patches/engine_test.go b/internal/controllers/topology/cluster/patches/engine_test.go index 9371fe3d7498..9cd7ae20d5e3 100644 --- a/internal/controllers/topology/cluster/patches/engine_test.go +++ b/internal/controllers/topology/cluster/patches/engine_test.go @@ -407,8 +407,8 @@ func TestApply(t *testing.T) { { Name: "fake-patch1", External: &clusterv1.ExternalPatchDefinition{ - GeneratePatchesExtension: ptr.To("patch-infrastructureCluster"), - ValidateTopologyExtension: ptr.To("validate-infrastructureCluster"), + GeneratePatchesExtension: "patch-infrastructureCluster", + ValidateTopologyExtension: "validate-infrastructureCluster", }, }, }, @@ -444,8 +444,8 @@ func TestApply(t *testing.T) { { Name: "fake-patch1", External: &clusterv1.ExternalPatchDefinition{ - GeneratePatchesExtension: ptr.To("patch-infrastructureCluster"), - ValidateTopologyExtension: ptr.To("validate-infrastructureCluster"), + GeneratePatchesExtension: "patch-infrastructureCluster", + ValidateTopologyExtension: "validate-infrastructureCluster", }, }, }, @@ -478,13 +478,13 @@ func TestApply(t *testing.T) { { Name: "fake-patch1", External: &clusterv1.ExternalPatchDefinition{ - GeneratePatchesExtension: ptr.To("patch-infrastructureCluster"), + GeneratePatchesExtension: "patch-infrastructureCluster", }, }, { Name: "fake-patch2", External: &clusterv1.ExternalPatchDefinition{ - GeneratePatchesExtension: ptr.To("patch-controlPlane"), + GeneratePatchesExtension: "patch-controlPlane", }, }, }, @@ -551,7 +551,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/clusterName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: 
ptr.To("builtin.cluster.name"), + Variable: "builtin.cluster.name", }, }, }, @@ -569,7 +569,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/controlPlaneName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.controlPlane.name"), + Variable: "builtin.controlPlane.name", }, }, }, @@ -587,7 +587,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/controlPlaneName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.controlPlane.name"), + Variable: "builtin.controlPlane.name", }, }, }, @@ -607,7 +607,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/machineDeploymentTopologyName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.machineDeployment.topologyName"), + Variable: "builtin.machineDeployment.topologyName", }, }, }, @@ -627,7 +627,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/machineDeploymentTopologyName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.machineDeployment.topologyName"), + Variable: "builtin.machineDeployment.topologyName", }, }, }, @@ -647,7 +647,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/machinePoolTopologyName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.machinePool.topologyName"), + Variable: "builtin.machinePool.topologyName", }, }, }, @@ -667,7 +667,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/machinePoolTopologyName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.machinePool.topologyName"), + Variable: "builtin.machinePool.topologyName", }, }, }, @@ -761,7 +761,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/resource", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("infraCluster"), + Variable: "infraCluster", }, }, }, @@ -779,7 +779,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/controlPlaneField", ValueFrom: 
&clusterv1.JSONPatchValue{ - Variable: ptr.To("controlPlaneVariable"), + Variable: "controlPlaneVariable", }, }, }, @@ -797,7 +797,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/controlPlaneInfraMachineTemplateField", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("controlPlaneVariable"), + Variable: "controlPlaneVariable", }, }, }, @@ -817,7 +817,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/resource", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("defaultMDWorkerVariable"), + Variable: "defaultMDWorkerVariable", }, }, }, @@ -837,7 +837,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/resource", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("defaultMDWorkerVariable"), + Variable: "defaultMDWorkerVariable", }, }, }, @@ -857,7 +857,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/resource", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("defaultMPWorkerVariable"), + Variable: "defaultMPWorkerVariable", }, }, }, @@ -877,7 +877,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/resource", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("defaultMPWorkerVariable"), + Variable: "defaultMPWorkerVariable", }, }, }, diff --git a/internal/controllers/topology/cluster/patches/external/external_patch_generator.go b/internal/controllers/topology/cluster/patches/external/external_patch_generator.go index ea76c9c5cabd..5be3a1d4be77 100644 --- a/internal/controllers/topology/cluster/patches/external/external_patch_generator.go +++ b/internal/controllers/topology/cluster/patches/external/external_patch_generator.go @@ -46,7 +46,7 @@ func NewGenerator(runtimeClient runtimeclient.Client, patch *clusterv1.ClusterCl func (e externalPatchGenerator) Generate(ctx context.Context, forObject client.Object, req *runtimehooksv1.GeneratePatchesRequest) (*runtimehooksv1.GeneratePatchesResponse, error) { if 
!feature.Gates.Enabled(feature.RuntimeSDK) { - return nil, errors.Errorf("can not use external patch %q if RuntimeSDK feature flag is disabled", *e.patch.External.GeneratePatchesExtension) + return nil, errors.Errorf("can not use external patch %q if RuntimeSDK feature flag is disabled", e.patch.External.GeneratePatchesExtension) } // Set the settings defined in external patch definition on the request object. @@ -59,7 +59,7 @@ func (e externalPatchGenerator) Generate(ctx context.Context, forObject client.O }() resp := &runtimehooksv1.GeneratePatchesResponse{} - err := e.runtimeClient.CallExtension(ctx, runtimehooksv1.GeneratePatches, forObject, *e.patch.External.GeneratePatchesExtension, req, resp) + err := e.runtimeClient.CallExtension(ctx, runtimehooksv1.GeneratePatches, forObject, e.patch.External.GeneratePatchesExtension, req, resp) if err != nil { return nil, err } diff --git a/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go b/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go index c93f26328ca8..a298910dddda 100644 --- a/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go +++ b/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go @@ -23,7 +23,6 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" @@ -50,10 +49,10 @@ func TestExternalPatchGenerator_Generate(t *testing.T) { patch: &clusterv1.ClusterClassPatch{ Name: "", Description: "", - EnabledIf: nil, + EnabledIf: "", Definitions: nil, External: &clusterv1.ExternalPatchDefinition{ - GeneratePatchesExtension: ptr.To("test-generate-extension"), + GeneratePatchesExtension: "test-generate-extension", Settings: nil, }, }, @@ -69,10 +68,10 @@ func TestExternalPatchGenerator_Generate(t *testing.T) { patch: &clusterv1.ClusterClassPatch{ Name: "", Description: "", - EnabledIf: nil, + EnabledIf: "", Definitions: nil, External: &clusterv1.ExternalPatchDefinition{ - GeneratePatchesExtension: ptr.To("test-generate-extension"), + GeneratePatchesExtension: "test-generate-extension", Settings: map[string]string{ "key1": "value1", }, diff --git a/internal/controllers/topology/cluster/patches/external/external_validator.go b/internal/controllers/topology/cluster/patches/external/external_validator.go index 680fa1a3ec0d..686f9e61b189 100644 --- a/internal/controllers/topology/cluster/patches/external/external_validator.go +++ b/internal/controllers/topology/cluster/patches/external/external_validator.go @@ -46,7 +46,7 @@ func NewValidator(runtimeClient runtimeclient.Client, patch *clusterv1.ClusterCl func (e externalValidator) Validate(ctx context.Context, forObject client.Object, req *runtimehooksv1.ValidateTopologyRequest) (*runtimehooksv1.ValidateTopologyResponse, error) { if !feature.Gates.Enabled(feature.RuntimeSDK) { - return nil, errors.Errorf("can not use external patch %q if RuntimeSDK feature flag is disabled", *e.patch.External.ValidateTopologyExtension) + return nil, errors.Errorf("can not use external patch %q if RuntimeSDK feature flag is 
disabled", e.patch.External.ValidateTopologyExtension) } // Set the settings defined in external patch definition on the request object. @@ -59,7 +59,7 @@ func (e externalValidator) Validate(ctx context.Context, forObject client.Object }() resp := &runtimehooksv1.ValidateTopologyResponse{} - err := e.runtimeClient.CallExtension(ctx, runtimehooksv1.ValidateTopology, forObject, *e.patch.External.ValidateTopologyExtension, req, resp) + err := e.runtimeClient.CallExtension(ctx, runtimehooksv1.ValidateTopology, forObject, e.patch.External.ValidateTopologyExtension, req, resp) if err != nil { return nil, err } diff --git a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go index 926c5e64b4f9..1cf4af843786 100644 --- a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go +++ b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go @@ -220,14 +220,14 @@ func matchesSelector(req *runtimehooksv1.GeneratePatchesRequestItem, templateVar return false } -func patchIsEnabled(enabledIf *string, variables map[string]apiextensionsv1.JSON) (bool, error) { +func patchIsEnabled(enabledIf string, variables map[string]apiextensionsv1.JSON) (bool, error) { // If enabledIf is not set, patch is enabled. - if enabledIf == nil { + if enabledIf == "" { return true, nil } // Rendered template. 
- value, err := renderValueTemplate(*enabledIf, variables) + value, err := renderValueTemplate(enabledIf, variables) if err != nil { return false, errors.Wrapf(err, "failed to calculate value for enabledIf") } @@ -282,10 +282,10 @@ func calculateValue(patch clusterv1.JSONPatch, variables map[string]apiextension if patch.Value != nil && patch.ValueFrom != nil { return nil, errors.Errorf("failed to calculate value: both .value and .valueFrom are set") } - if patch.ValueFrom != nil && patch.ValueFrom.Variable == nil && patch.ValueFrom.Template == nil { + if patch.ValueFrom != nil && patch.ValueFrom.Variable == "" && patch.ValueFrom.Template == "" { return nil, errors.Errorf("failed to calculate value: .valueFrom is set, but neither .valueFrom.variable nor .valueFrom.template are set") } - if patch.ValueFrom != nil && patch.ValueFrom.Variable != nil && patch.ValueFrom.Template != nil { + if patch.ValueFrom != nil && patch.ValueFrom.Variable != "" && patch.ValueFrom.Template != "" { return nil, errors.Errorf("failed to calculate value: .valueFrom is set, but both .valueFrom.variable and .valueFrom.template are set") } @@ -295,8 +295,8 @@ func calculateValue(patch clusterv1.JSONPatch, variables map[string]apiextension } // Return variable. - if patch.ValueFrom.Variable != nil { - value, err := patchvariables.GetVariableValue(variables, *patch.ValueFrom.Variable) + if patch.ValueFrom.Variable != "" { + value, err := patchvariables.GetVariableValue(variables, patch.ValueFrom.Variable) if err != nil { return nil, errors.Wrapf(err, "failed to calculate value") } @@ -304,7 +304,7 @@ func calculateValue(patch clusterv1.JSONPatch, variables map[string]apiextension } // Return rendered value template. 
- value, err := renderValueTemplate(*patch.ValueFrom.Template, variables) + value, err := renderValueTemplate(patch.ValueFrom.Template, variables) if err != nil { return nil, errors.Wrapf(err, "failed to calculate value for template") } diff --git a/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go b/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go index d216a7cc728e..0ff15019a575 100644 --- a/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go +++ b/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go @@ -65,7 +65,7 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/valueFrom/variable", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableA"), + Variable: "variableA", }, }, // .valueFrom.template using sprig functions @@ -73,7 +73,7 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/valueFrom/template", ValueFrom: &clusterv1.JSONPatchValue{ - Template: ptr.To(`template {{ .variableB | lower | repeat 5 }}`), + Template: `template {{ .variableB | lower | repeat 5 }}`, }, }, // template-specific variable takes precedent, if the same variable exists @@ -82,7 +82,7 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/templatePrecedent", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableC"), + Variable: "variableC", }, }, // global builtin variable should work. @@ -91,7 +91,7 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/builtinClusterName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.cluster.name"), + Variable: "builtin.cluster.name", }, }, // template-specific builtin variable should work. 
@@ -100,7 +100,7 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/builtinControlPlaneReplicas", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.controlPlane.replicas"), + Variable: "builtin.controlPlane.replicas", }, }, // test .builtin.controlPlane.machineTemplate.InfrastructureRef.name var. @@ -108,7 +108,7 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/template/spec/files", ValueFrom: &clusterv1.JSONPatchValue{ - Template: ptr.To(`[{"contentFrom":{"secret":{"key":"control-plane-azure.json","name":"{{ .builtin.controlPlane.machineTemplate.infrastructureRef.name }}-azure-json"}}}]`), + Template: `[{"contentFrom":{"secret":{"key":"control-plane-azure.json","name":"{{ .builtin.controlPlane.machineTemplate.infrastructureRef.name }}-azure-json"}}}]`, }, }, }, @@ -207,20 +207,20 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/cluster-name", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.cluster.name"), + Variable: "builtin.cluster.name", }, }, { Op: "replace", Path: "/spec/template/spec/kubeadmConfigSpec/files", ValueFrom: &clusterv1.JSONPatchValue{ - Template: ptr.To(` + Template: ` - contentFrom: secret: key: control-plane-azure.json name: "{{ .builtin.cluster.name }}-control-plane-azure-json" owner: root:root -`), +`, }, }, { @@ -244,14 +244,14 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cluster-name", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.cluster.name"), + Variable: "builtin.cluster.name", }, }, { Op: "replace", Path: "/spec/template/spec/files", ValueFrom: &clusterv1.JSONPatchValue{ - Template: ptr.To(` + Template: ` [{ "contentFrom":{ "secret":{ @@ -260,7 +260,7 @@ func TestGenerate(t *testing.T) { } }, "owner":"root:root" -}]`), +}]`, }, }, }, @@ -280,14 +280,14 @@ func 
TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cluster-name", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.cluster.name"), + Variable: "builtin.cluster.name", }, }, { Op: "replace", Path: "/spec/template/spec/files", ValueFrom: &clusterv1.JSONPatchValue{ - Template: ptr.To(` + Template: ` [{ "contentFrom":{ "secret":{ @@ -296,7 +296,7 @@ func TestGenerate(t *testing.T) { } }, "owner":"root:root" -}]`), +}]`, }, }, }, @@ -1282,43 +1282,43 @@ func TestMatchesSelector(t *testing.T) { func TestPatchIsEnabled(t *testing.T) { tests := []struct { name string - enabledIf *string + enabledIf string variables map[string]apiextensionsv1.JSON want bool wantErr bool }{ { name: "Enabled if enabledIf is not set", - enabledIf: nil, + enabledIf: "", want: true, }, { name: "Fail if template is invalid", - enabledIf: ptr.To(`{{ variable }}`), // . is missing + enabledIf: `{{ variable }}`, // . is missing wantErr: true, }, // Hardcoded value. { name: "Enabled if template is true ", - enabledIf: ptr.To(`true`), + enabledIf: `true`, want: true, }, { name: "Enabled if template is true (even with leading and trailing new line)", - enabledIf: ptr.To(` + enabledIf: ` true -`), +`, want: true, }, { name: "Disabled if template is false", - enabledIf: ptr.To(`false`), + enabledIf: `false`, want: false, }, // Boolean variable. 
{ name: "Enabled if simple template with boolean variable evaluates to true", - enabledIf: ptr.To(`{{ .httpProxyEnabled }}`), + enabledIf: `{{ .httpProxyEnabled }}`, variables: map[string]apiextensionsv1.JSON{ "httpProxyEnabled": {Raw: []byte(`true`)}, }, @@ -1326,9 +1326,9 @@ true }, { name: "Enabled if simple template with boolean variable evaluates to true (even with leading and trailing new line", - enabledIf: ptr.To(` + enabledIf: ` {{ .httpProxyEnabled }} -`), +`, variables: map[string]apiextensionsv1.JSON{ "httpProxyEnabled": {Raw: []byte(`true`)}, }, @@ -1336,7 +1336,7 @@ true }, { name: "Disabled if simple template with boolean variable evaluates to false", - enabledIf: ptr.To(`{{ .httpProxyEnabled }}`), + enabledIf: `{{ .httpProxyEnabled }}`, variables: map[string]apiextensionsv1.JSON{ "httpProxyEnabled": {Raw: []byte(`false`)}, }, @@ -1346,7 +1346,7 @@ true { name: "Enabled if template with if evaluates to true", // Else is not needed because we check if the result is equal to true. 
- enabledIf: ptr.To(`{{ if eq "v1.21.1" .builtin.cluster.topology.version }}true{{end}}`), + enabledIf: `{{ if eq "v1.21.1" .builtin.cluster.topology.version }}true{{end}}`, variables: map[string]apiextensionsv1.JSON{ "builtin": {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, @@ -1354,7 +1354,7 @@ true }, { name: "Disabled if template with if evaluates to false", - enabledIf: ptr.To(`{{ if eq "v1.21.2" .builtin.cluster.topology.version }}true{{end}}`), + enabledIf: `{{ if eq "v1.21.2" .builtin.cluster.topology.version }}true{{end}}`, variables: map[string]apiextensionsv1.JSON{ "builtin": {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, @@ -1362,7 +1362,7 @@ true }, { name: "Enabled if template with if/else evaluates to true", - enabledIf: ptr.To(`{{ if eq "v1.21.1" .builtin.cluster.topology.version }}true{{else}}false{{end}}`), + enabledIf: `{{ if eq "v1.21.1" .builtin.cluster.topology.version }}true{{else}}false{{end}}`, variables: map[string]apiextensionsv1.JSON{ "builtin": {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, @@ -1370,7 +1370,7 @@ true }, { name: "Disabled if template with if/else evaluates to false", - enabledIf: ptr.To(`{{ if eq "v1.21.2" .builtin.cluster.topology.version }}true{{else}}false{{end}}`), + enabledIf: `{{ if eq "v1.21.2" .builtin.cluster.topology.version }}true{{else}}false{{end}}`, variables: map[string]apiextensionsv1.JSON{ "builtin": {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, @@ -1379,7 +1379,7 @@ true // Render value with if to check if var is not empty. 
{ name: "Enabled if template which checks if variable is set evaluates to true", - enabledIf: ptr.To(`{{ if .variableA }}true{{end}}`), + enabledIf: `{{ if .variableA }}true{{end}}`, variables: map[string]apiextensionsv1.JSON{ "variableA": {Raw: []byte(`"abc"`)}, }, @@ -1387,7 +1387,7 @@ true }, { name: "Disabled if template which checks if variable is set evaluates to false (variable empty)", - enabledIf: ptr.To(`{{ if .variableA }}true{{end}}`), + enabledIf: `{{ if .variableA }}true{{end}}`, variables: map[string]apiextensionsv1.JSON{ "variableA": {Raw: []byte(``)}, }, @@ -1395,7 +1395,7 @@ true }, { name: "Disabled if template which checks if variable is set evaluates to false (variable empty string)", - enabledIf: ptr.To(`{{ if .variableA }}true{{end}}`), + enabledIf: `{{ if .variableA }}true{{end}}`, variables: map[string]apiextensionsv1.JSON{ "variableA": {Raw: []byte(`""`)}, }, @@ -1403,7 +1403,7 @@ true }, { name: "Disabled if template which checks if variable is set evaluates to false (variable does not exist)", - enabledIf: ptr.To(`{{ if .variableA }}true{{end}}`), + enabledIf: `{{ if .variableA }}true{{end}}`, variables: map[string]apiextensionsv1.JSON{ "variableB": {Raw: []byte(``)}, }, @@ -1414,7 +1414,7 @@ true // test mostly exists to visualize how user-defined object variables can be used. 
{ name: "Enabled if template with complex variable evaluates to true", - enabledIf: ptr.To(`{{ if .httpProxy.enabled }}true{{end}}`), + enabledIf: `{{ if .httpProxy.enabled }}true{{end}}`, variables: map[string]apiextensionsv1.JSON{ "httpProxy": {Raw: []byte(`{"enabled": true, "url": "localhost:3128", "noProxy": "internal.example.com"}`)}, }, @@ -1422,7 +1422,7 @@ true }, { name: "Disabled if template with complex variable evaluates to false", - enabledIf: ptr.To(`{{ if .httpProxy.enabled }}true{{end}}`), + enabledIf: `{{ if .httpProxy.enabled }}true{{end}}`, variables: map[string]apiextensionsv1.JSON{ "httpProxy": {Raw: []byte(`{"enabled": false, "url": "localhost:3128", "noProxy": "internal.example.com"}`)}, }, @@ -1463,7 +1463,7 @@ func TestCalculateValue(t *testing.T) { patch: clusterv1.JSONPatch{ Value: &apiextensionsv1.JSON{Raw: []byte(`"value"`)}, ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableA"), + Variable: "variableA", }, }, wantErr: true, @@ -1472,8 +1472,8 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable and .valueFrom.template are set", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableA"), - Template: ptr.To("template"), + Variable: "variableA", + Template: "template", }, }, wantErr: true, @@ -1496,7 +1496,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableA"), + Variable: "variableA", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1508,7 +1508,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable is set but variable does not exist", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableA"), + Variable: "variableA", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1520,7 +1520,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return 
.valueFrom.variable if set: builtinVariable int", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.controlPlane.replicas"), + Variable: "builtin.controlPlane.replicas", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1532,7 +1532,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: builtinVariable string", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.cluster.topology.version"), + Variable: "builtin.cluster.topology.version", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1544,7 +1544,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: variable 'builtin'", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin"), + Variable: "builtin", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1556,7 +1556,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: variable 'builtin.cluster'", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.cluster"), + Variable: "builtin.cluster", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1568,7 +1568,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: variable 'builtin.cluster.topology'", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.cluster.topology"), + Variable: "builtin.cluster.topology", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1581,7 +1581,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return rendered .valueFrom.template if set", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Template: ptr.To("{{ .variableA }}"), + Template: "{{ .variableA }}", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1594,7 +1594,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return 
.valueFrom.variable if set: whole object", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableObject"), + Variable: "variableObject", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1606,7 +1606,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested bool property", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableObject.boolProperty"), + Variable: "variableObject.boolProperty", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1618,7 +1618,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested integer property", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableObject.integerProperty"), + Variable: "variableObject.integerProperty", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1630,7 +1630,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested string property", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableObject.enumProperty"), + Variable: "variableObject.enumProperty", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1642,7 +1642,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable object variable does not exist", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableObject.enumProperty"), + Variable: "variableObject.enumProperty", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1654,7 +1654,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable nested object property does not exist", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableObject.nonExistingProperty"), + Variable: "variableObject.nonExistingProperty", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1667,7 +1667,7 @@ func 
TestCalculateValue(t *testing.T) { patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ // NOTE: it's not possible to access a property of an array element without index. - Variable: ptr.To("variableObject.nonExistingProperty"), + Variable: "variableObject.nonExistingProperty", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1680,7 +1680,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested object property top-level", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableObject"), + Variable: "variableObject", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1692,7 +1692,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested object property firstLevel", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableObject.firstLevel"), + Variable: "variableObject.firstLevel", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1704,7 +1704,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested object property secondLevel", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableObject.firstLevel.secondLevel"), + Variable: "variableObject.firstLevel.secondLevel", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1716,7 +1716,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested object property leaf", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableObject.firstLevel.secondLevel.leaf"), + Variable: "variableObject.firstLevel.secondLevel.leaf", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1729,7 +1729,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: array", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: 
ptr.To("variableArray"), + Variable: "variableArray", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1741,7 +1741,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: array element", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableArray[0]"), + Variable: "variableArray[0]", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1753,7 +1753,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested array", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableArray.firstLevel"), + Variable: "variableArray.firstLevel", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1765,7 +1765,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested array element", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableArray.firstLevel[1]"), + Variable: "variableArray.firstLevel[1]", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1777,7 +1777,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested field of nested array element", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableArray.firstLevel[1].secondLevel"), + Variable: "variableArray.firstLevel[1].secondLevel", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1789,7 +1789,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: only left delimiter", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableArray.firstLevel["), + Variable: "variableArray.firstLevel[", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1801,7 +1801,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: only right delimiter", patch: clusterv1.JSONPatch{ 
ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableArray.firstLevel]"), + Variable: "variableArray.firstLevel]", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1813,7 +1813,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: no index", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableArray.firstLevel[]"), + Variable: "variableArray.firstLevel[]", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1825,7 +1825,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: text index", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableArray.firstLevel[someText]"), + Variable: "variableArray.firstLevel[someText]", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1837,7 +1837,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: negative index", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableArray.firstLevel[-1]"), + Variable: "variableArray.firstLevel[-1]", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1849,7 +1849,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: index out of bounds", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableArray.firstLevel[1]"), + Variable: "variableArray.firstLevel[1]", }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1861,7 +1861,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: variable is an object instead", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableArray.firstLevel[1]"), + Variable: "variableArray.firstLevel[1]", }, }, variables: map[string]apiextensionsv1.JSON{ diff --git 
a/internal/controllers/topology/cluster/patches/variables/variables.go b/internal/controllers/topology/cluster/patches/variables/variables.go index 05dab59a0762..2e4406047ae4 100644 --- a/internal/controllers/topology/cluster/patches/variables/variables.go +++ b/internal/controllers/topology/cluster/patches/variables/variables.go @@ -181,7 +181,7 @@ func MachineDeployment(mdTopology *clusterv1.MachineDeploymentTopology, md *clus // Construct builtin variable. builtin := runtimehooksv1.Builtins{ MachineDeployment: &runtimehooksv1.MachineDeploymentBuiltins{ - Version: *md.Spec.Template.Spec.Version, + Version: md.Spec.Template.Spec.Version, Class: mdTopology.Class, Name: md.Name, TopologyName: mdTopology.Name, @@ -237,7 +237,7 @@ func MachinePool(mpTopology *clusterv1.MachinePoolTopology, mp *clusterv1.Machin // Construct builtin variable. builtin := runtimehooksv1.Builtins{ MachinePool: &runtimehooksv1.MachinePoolBuiltins{ - Version: *mp.Spec.Template.Spec.Version, + Version: mp.Spec.Template.Spec.Version, Class: mpTopology.Class, Name: mp.Name, TopologyName: mpTopology.Name, diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go index 0a3cca7249c7..f841d4f0a948 100644 --- a/internal/controllers/topology/cluster/reconcile_state.go +++ b/internal/controllers/topology/cluster/reconcile_state.go @@ -826,12 +826,12 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, s *scope.Scope } func logMachineDeploymentVersionChange(current, desired *clusterv1.MachineDeployment) string { - if current.Spec.Template.Spec.Version == nil || desired.Spec.Template.Spec.Version == nil { + if current.Spec.Template.Spec.Version == "" || desired.Spec.Template.Spec.Version == "" { return "" } - if *current.Spec.Template.Spec.Version != *desired.Spec.Template.Spec.Version { - return fmt.Sprintf(" with version change from %s to %s", *current.Spec.Template.Spec.Version, *desired.Spec.Template.Spec.Version) 
+ if current.Spec.Template.Spec.Version != desired.Spec.Template.Spec.Version { + return fmt.Sprintf(" with version change from %s to %s", current.Spec.Template.Spec.Version, desired.Spec.Template.Spec.Version) } return "" } @@ -1108,12 +1108,12 @@ func (r *Reconciler) updateMachinePool(ctx context.Context, s *scope.Scope, mpTo } func logMachinePoolVersionChange(current, desired *clusterv1.MachinePool) string { - if current.Spec.Template.Spec.Version == nil || desired.Spec.Template.Spec.Version == nil { + if current.Spec.Template.Spec.Version == "" || desired.Spec.Template.Spec.Version == "" { return "" } - if *current.Spec.Template.Spec.Version != *desired.Spec.Template.Spec.Version { - return fmt.Sprintf(" with version change from %s to %s", *current.Spec.Template.Spec.Version, *desired.Spec.Template.Spec.Version) + if current.Spec.Template.Spec.Version != desired.Spec.Template.Spec.Version { + return fmt.Sprintf(" with version change from %s to %s", current.Spec.Template.Spec.Version, desired.Spec.Template.Spec.Version) } return "" } diff --git a/internal/runtime/client/client.go b/internal/runtime/client/client.go index ca107de30492..13062b072975 100644 --- a/internal/runtime/client/client.go +++ b/internal/runtime/client/client.go @@ -553,16 +553,16 @@ func urlForExtension(config runtimev1.ClientConfig, gvh runtimecatalog.GroupVers Scheme: "https", Host: host, } - if svc.Path != nil { - u.Path = *svc.Path + if svc.Path != "" { + u.Path = svc.Path } } else { - if config.URL == nil { + if config.URL == "" { return nil, errors.New("failed to compute URL: at least one of service and url should be defined in config") } var err error - u, err = url.Parse(*config.URL) + u, err = url.Parse(config.URL) if err != nil { return nil, errors.Wrap(err, "failed to compute URL: failed to parse url from clientConfig") } diff --git a/internal/runtime/client/client_test.go b/internal/runtime/client/client_test.go index 5c4828713284..edfb85a2b46f 100644 --- 
a/internal/runtime/client/client_test.go +++ b/internal/runtime/client/client_test.go @@ -196,7 +196,7 @@ func TestClient_httpCall(t *testing.T) { defer srv.Close() // set url to srv for in tt.opts - tt.opts.config.URL = ptr.To(srv.URL) + tt.opts.config.URL = srv.URL tt.opts.config.CABundle = testcerts.CACert } @@ -297,7 +297,7 @@ func TestURLForExtension(t *testing.T) { name: "ClientConfig using URL should have correct URL values", args: args{ config: runtimev1.ClientConfig{ - URL: ptr.To("https://extension-host.com"), + URL: "https://extension-host.com", }, gvh: gvh, extensionHandlerName: "test-handler", @@ -551,7 +551,7 @@ func TestClient_CallExtension(t *testing.T) { Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ // Set a fake URL, in test cases where we start the test server the URL will be overridden. - URL: ptr.To("https://127.0.0.1/"), + URL: "https://127.0.0.1/", CABundle: testcerts.CACert, }, NamespaceSelector: &metav1.LabelSelector{}, @@ -577,7 +577,7 @@ func TestClient_CallExtension(t *testing.T) { Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ // Set a fake URL, in test cases where we start the test server the URL will be overridden. - URL: ptr.To("https://127.0.0.1/"), + URL: "https://127.0.0.1/", CABundle: testcerts.CACert, }, NamespaceSelector: &metav1.LabelSelector{}}, @@ -776,7 +776,7 @@ func TestClient_CallExtension(t *testing.T) { // Set the URL to the real address of the test server. 
for i := range tt.registeredExtensionConfigs { - tt.registeredExtensionConfigs[i].Spec.ClientConfig.URL = ptr.To(fmt.Sprintf("https://%s/", srv.Listener.Addr().String())) + tt.registeredExtensionConfigs[i].Spec.ClientConfig.URL = fmt.Sprintf("https://%s/", srv.Listener.Addr().String()) } } @@ -946,7 +946,7 @@ func TestClient_CallAllExtensions(t *testing.T) { Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ // Set a fake URL, in test cases where we start the test server the URL will be overridden. - URL: ptr.To("https://127.0.0.1/"), + URL: "https://127.0.0.1/", CABundle: testcerts.CACert, }, NamespaceSelector: &metav1.LabelSelector{}, @@ -1102,7 +1102,7 @@ func TestClient_CallAllExtensions(t *testing.T) { // Set the URL to the real address of the test server. for i := range tt.registeredExtensionConfigs { - tt.registeredExtensionConfigs[i].Spec.ClientConfig.URL = ptr.To(fmt.Sprintf("https://%s/", srv.Listener.Addr().String())) + tt.registeredExtensionConfigs[i].Spec.ClientConfig.URL = fmt.Sprintf("https://%s/", srv.Listener.Addr().String()) } } diff --git a/internal/runtime/registry/registry_test.go b/internal/runtime/registry/registry_test.go index f9b0b3ea379d..ad3f4104ea11 100644 --- a/internal/runtime/registry/registry_test.go +++ b/internal/runtime/registry/registry_test.go @@ -24,7 +24,6 @@ import ( "github.com/onsi/gomega/types" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" runtimev1 "sigs.k8s.io/cluster-api/api/runtime/v1beta2" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" @@ -101,7 +100,7 @@ func TestRegistry(t *testing.T) { }, Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ - URL: ptr.To("https://extesions1.com/"), + URL: "https://extesions1.com/", }, }, Status: runtimev1.ExtensionConfigStatus{ @@ -137,7 +136,7 @@ func TestRegistry(t *testing.T) { }, Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ - URL: 
ptr.To("https://extesions2.com/"), + URL: "https://extesions2.com/", }, }, Status: runtimev1.ExtensionConfigStatus{ diff --git a/internal/topology/check/upgrade.go b/internal/topology/check/upgrade.go index 74c542c5e3a0..935de60d9539 100644 --- a/internal/topology/check/upgrade.go +++ b/internal/topology/check/upgrade.go @@ -34,7 +34,7 @@ import ( func IsMachineDeploymentUpgrading(ctx context.Context, c client.Reader, md *clusterv1.MachineDeployment) (bool, error) { // If the MachineDeployment has no version there is no definitive way to check if it is upgrading. Therefore, return false. // Note: This case should not happen. - if md.Spec.Template.Spec.Version == nil { + if md.Spec.Template.Spec.Version == "" { return false, nil } selectorMap, err := metav1.LabelSelectorAsMap(&md.Spec.Selector) @@ -45,14 +45,14 @@ func IsMachineDeploymentUpgrading(ctx context.Context, c client.Reader, md *clus if err := c.List(ctx, machines, client.InNamespace(md.Namespace), client.MatchingLabels(selectorMap)); err != nil { return false, errors.Wrapf(err, "failed to check if MachineDeployment %s is upgrading: failed to list Machines", md.Name) } - mdVersion := *md.Spec.Template.Spec.Version + mdVersion := md.Spec.Template.Spec.Version // Check if the versions of the all the Machines match the MachineDeployment version. for i := range machines.Items { machine := machines.Items[i] - if machine.Spec.Version == nil { + if machine.Spec.Version == "" { return false, fmt.Errorf("failed to check if MachineDeployment %s is upgrading: Machine %s has no version", md.Name, machine.Name) } - if *machine.Spec.Version != mdVersion { + if machine.Spec.Version != mdVersion { return true, nil } } @@ -65,10 +65,10 @@ func IsMachineDeploymentUpgrading(ctx context.Context, c client.Reader, md *clus func IsMachinePoolUpgrading(ctx context.Context, c client.Reader, mp *clusterv1.MachinePool) (bool, error) { // If the MachinePool has no version there is no definitive way to check if it is upgrading. 
Therefore, return false. // Note: This case should not happen. - if mp.Spec.Template.Spec.Version == nil { + if mp.Spec.Template.Spec.Version == "" { return false, nil } - mpVersion := *mp.Spec.Template.Spec.Version + mpVersion := mp.Spec.Template.Spec.Version // Check if the kubelet versions of the MachinePool noderefs match the MachinePool version. for _, nodeRef := range mp.Status.NodeRefs { node := &corev1.Node{} diff --git a/internal/util/ssa/patch_test.go b/internal/util/ssa/patch_test.go index e519ef67638c..fa579e2255a2 100644 --- a/internal/util/ssa/patch_test.go +++ b/internal/util/ssa/patch_test.go @@ -106,7 +106,7 @@ func TestPatch(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "cluster-1", - Version: ptr.To("v1.25.0"), + Version: "v1.25.0", NodeDrainTimeoutSeconds: ptr.To(int32(10)), Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To("data-secret"), diff --git a/internal/webhooks/cluster.go b/internal/webhooks/cluster.go index 1e04ebb956de..e998399c142a 100644 --- a/internal/webhooks/cluster.go +++ b/internal/webhooks/cluster.go @@ -545,10 +545,10 @@ func validateTopologyMachineDeploymentVersions(ctx context.Context, ctrlClient c for i := range mds.Items { md := &mds.Items[i] - mdVersion, err := semver.ParseTolerant(*md.Spec.Template.Spec.Version) + mdVersion, err := semver.ParseTolerant(md.Spec.Template.Spec.Version) if err != nil { // NOTE: this should never happen. Nevertheless, handling this for extra caution. - return errors.Wrapf(err, "failed to check if MachineDeployment %s is upgrading: failed to parse version %s", md.Name, *md.Spec.Template.Spec.Version) + return errors.Wrapf(err, "failed to check if MachineDeployment %s is upgrading: failed to parse version %s", md.Name, md.Spec.Template.Spec.Version) } if mdVersion.String() != oldVersion.String() { @@ -601,10 +601,10 @@ func validateTopologyMachinePoolVersions(ctx context.Context, ctrlClient client. 
for i := range mps.Items { mp := &mps.Items[i] - mpVersion, err := semver.ParseTolerant(*mp.Spec.Template.Spec.Version) + mpVersion, err := semver.ParseTolerant(mp.Spec.Template.Spec.Version) if err != nil { // NOTE: this should never happen. Nevertheless, handling this for extra caution. - return errors.Wrapf(err, "failed to check if MachinePool %s is upgrading: failed to parse version %s", mp.Name, *mp.Spec.Template.Spec.Version) + return errors.Wrapf(err, "failed to check if MachinePool %s is upgrading: failed to parse version %s", mp.Name, mp.Spec.Template.Spec.Version) } if mpVersion.String() != oldVersion.String() { diff --git a/internal/webhooks/clusterclass.go b/internal/webhooks/clusterclass.go index ff5ac032f6db..cc3da15ef946 100644 --- a/internal/webhooks/clusterclass.go +++ b/internal/webhooks/clusterclass.go @@ -394,76 +394,76 @@ func validateMachineHealthCheckClasses(clusterClass *clusterv1.ClusterClass) fie func validateNamingStrategies(clusterClass *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList - if clusterClass.Spec.Infrastructure.NamingStrategy != nil && clusterClass.Spec.Infrastructure.NamingStrategy.Template != nil { - name, err := topologynames.InfraClusterNameGenerator(*clusterClass.Spec.Infrastructure.NamingStrategy.Template, "cluster").GenerateName() + if clusterClass.Spec.Infrastructure.NamingStrategy != nil && clusterClass.Spec.Infrastructure.NamingStrategy.Template != "" { + name, err := topologynames.InfraClusterNameGenerator(clusterClass.Spec.Infrastructure.NamingStrategy.Template, "cluster").GenerateName() templateFldPath := field.NewPath("spec", "infrastructure", "namingStrategy", "template") if err != nil { allErrs = append(allErrs, field.Invalid( templateFldPath, - *clusterClass.Spec.Infrastructure.NamingStrategy.Template, + clusterClass.Spec.Infrastructure.NamingStrategy.Template, fmt.Sprintf("invalid InfraCluster name template: %v", err), )) } else { for _, err := range validation.IsDNS1123Subdomain(name) { 
- allErrs = append(allErrs, field.Invalid(templateFldPath, *clusterClass.Spec.Infrastructure.NamingStrategy.Template, err)) + allErrs = append(allErrs, field.Invalid(templateFldPath, clusterClass.Spec.Infrastructure.NamingStrategy.Template, err)) } } } - if clusterClass.Spec.ControlPlane.NamingStrategy != nil && clusterClass.Spec.ControlPlane.NamingStrategy.Template != nil { - name, err := topologynames.ControlPlaneNameGenerator(*clusterClass.Spec.ControlPlane.NamingStrategy.Template, "cluster").GenerateName() + if clusterClass.Spec.ControlPlane.NamingStrategy != nil && clusterClass.Spec.ControlPlane.NamingStrategy.Template != "" { + name, err := topologynames.ControlPlaneNameGenerator(clusterClass.Spec.ControlPlane.NamingStrategy.Template, "cluster").GenerateName() templateFldPath := field.NewPath("spec", "controlPlane", "namingStrategy", "template") if err != nil { allErrs = append(allErrs, field.Invalid( templateFldPath, - *clusterClass.Spec.ControlPlane.NamingStrategy.Template, + clusterClass.Spec.ControlPlane.NamingStrategy.Template, fmt.Sprintf("invalid ControlPlane name template: %v", err), )) } else { for _, err := range validation.IsDNS1123Subdomain(name) { - allErrs = append(allErrs, field.Invalid(templateFldPath, *clusterClass.Spec.ControlPlane.NamingStrategy.Template, err)) + allErrs = append(allErrs, field.Invalid(templateFldPath, clusterClass.Spec.ControlPlane.NamingStrategy.Template, err)) } } } for _, md := range clusterClass.Spec.Workers.MachineDeployments { - if md.NamingStrategy == nil || md.NamingStrategy.Template == nil { + if md.NamingStrategy == nil || md.NamingStrategy.Template == "" { continue } - name, err := topologynames.MachineDeploymentNameGenerator(*md.NamingStrategy.Template, "cluster", "mdtopology").GenerateName() + name, err := topologynames.MachineDeploymentNameGenerator(md.NamingStrategy.Template, "cluster", "mdtopology").GenerateName() templateFldPath := field.NewPath("spec", "workers", 
"machineDeployments").Key(md.Class).Child("namingStrategy", "template") if err != nil { allErrs = append(allErrs, field.Invalid( templateFldPath, - *md.NamingStrategy.Template, + md.NamingStrategy.Template, fmt.Sprintf("invalid MachineDeployment name template: %v", err), )) } else { for _, err := range validation.IsDNS1123Subdomain(name) { - allErrs = append(allErrs, field.Invalid(templateFldPath, *md.NamingStrategy.Template, err)) + allErrs = append(allErrs, field.Invalid(templateFldPath, md.NamingStrategy.Template, err)) } } } for _, mp := range clusterClass.Spec.Workers.MachinePools { - if mp.NamingStrategy == nil || mp.NamingStrategy.Template == nil { + if mp.NamingStrategy == nil || mp.NamingStrategy.Template == "" { continue } - name, err := topologynames.MachinePoolNameGenerator(*mp.NamingStrategy.Template, "cluster", "mptopology").GenerateName() + name, err := topologynames.MachinePoolNameGenerator(mp.NamingStrategy.Template, "cluster", "mptopology").GenerateName() templateFldPath := field.NewPath("spec", "workers", "machinePools").Key(mp.Class).Child("namingStrategy", "template") if err != nil { allErrs = append(allErrs, field.Invalid( templateFldPath, - *mp.NamingStrategy.Template, + mp.NamingStrategy.Template, fmt.Sprintf("invalid MachinePool name template: %v", err), )) } else { for _, err := range validation.IsDNS1123Subdomain(name) { - allErrs = append(allErrs, field.Invalid(templateFldPath, *mp.NamingStrategy.Template, err)) + allErrs = append(allErrs, field.Invalid(templateFldPath, mp.NamingStrategy.Template, err)) } } } diff --git a/internal/webhooks/clusterclass_test.go b/internal/webhooks/clusterclass_test.go index 08d06be3645d..6d1af42472de 100644 --- a/internal/webhooks/clusterclass_test.go +++ b/internal/webhooks/clusterclass_test.go @@ -1430,11 +1430,11 @@ func TestClusterClassValidation(t *testing.T) { in: builder.ClusterClass(metav1.NamespaceDefault, "class1"). 
WithInfrastructureClusterTemplate( builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infra1").Build()). - WithInfraClusterStrategy(&clusterv1.InfrastructureClassNamingStrategy{Template: ptr.To("{{ .cluster.name }}-infra-{{ .random }}")}). + WithInfraClusterStrategy(&clusterv1.InfrastructureClassNamingStrategy{Template: "{{ .cluster.name }}-infra-{{ .random }}"}). WithControlPlaneTemplate( builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1"). Build()). - WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: ptr.To("{{ .cluster.name }}-cp-{{ .random }}")}). + WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: "{{ .cluster.name }}-cp-{{ .random }}"}). WithControlPlaneInfrastructureMachineTemplate( builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cp-infra1"). Build()). @@ -1444,7 +1444,7 @@ func TestClusterClassValidation(t *testing.T) { builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").Build()). WithBootstrapTemplate( builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap1").Build()). - WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: ptr.To("{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}")}). + WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: "{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}"}). Build()). WithWorkerMachinePoolClasses( *builder.MachinePoolClass("bb"). @@ -1452,7 +1452,7 @@ func TestClusterClassValidation(t *testing.T) { builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "infra2").Build()). WithBootstrapTemplate( builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap2").Build()). - WithNamingStrategy(&clusterv1.MachinePoolClassNamingStrategy{Template: ptr.To("{{ .cluster.name }}-md-{{ .machinePool.topologyName }}-{{ .random }}")}). 
+ WithNamingStrategy(&clusterv1.MachinePoolClassNamingStrategy{Template: "{{ .cluster.name }}-md-{{ .machinePool.topologyName }}-{{ .random }}"}). Build()). Build(), expectErr: false, @@ -1462,7 +1462,7 @@ func TestClusterClassValidation(t *testing.T) { in: builder.ClusterClass(metav1.NamespaceDefault, "class1"). WithInfrastructureClusterTemplate( builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infra1").Build()). - WithInfraClusterStrategy(&clusterv1.InfrastructureClassNamingStrategy{Template: ptr.To("template-infra-{{ .invalidkey }}")}). + WithInfraClusterStrategy(&clusterv1.InfrastructureClassNamingStrategy{Template: "template-infra-{{ .invalidkey }}"}). WithControlPlaneTemplate( builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1"). Build()). @@ -1477,7 +1477,7 @@ func TestClusterClassValidation(t *testing.T) { in: builder.ClusterClass(metav1.NamespaceDefault, "class1"). WithInfrastructureClusterTemplate( builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infra1").Build()). - WithInfraClusterStrategy(&clusterv1.InfrastructureClassNamingStrategy{Template: ptr.To("template-infra-{{ .cluster.name }}-")}). + WithInfraClusterStrategy(&clusterv1.InfrastructureClassNamingStrategy{Template: "template-infra-{{ .cluster.name }}-"}). WithControlPlaneTemplate( builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1"). Build()). @@ -1495,7 +1495,7 @@ func TestClusterClassValidation(t *testing.T) { WithControlPlaneTemplate( builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1"). Build()). - WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: ptr.To("template-cp-{{ .invalidkey }}")}). + WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: "template-cp-{{ .invalidkey }}"}). WithControlPlaneInfrastructureMachineTemplate( builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cp-infra1"). Build()). 
@@ -1510,7 +1510,7 @@ func TestClusterClassValidation(t *testing.T) { WithControlPlaneTemplate( builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1"). Build()). - WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: ptr.To("template-cp-{{ .cluster.name }}-")}). + WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: "template-cp-{{ .cluster.name }}-"}). WithControlPlaneInfrastructureMachineTemplate( builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cp-infra1"). Build()). @@ -1534,7 +1534,7 @@ func TestClusterClassValidation(t *testing.T) { builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").Build()). WithBootstrapTemplate( builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap1").Build()). - WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: ptr.To("template-md-{{ .cluster.name")}). + WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: "template-md-{{ .cluster.name"}). Build()). Build(), expectErr: true, @@ -1556,7 +1556,7 @@ func TestClusterClassValidation(t *testing.T) { builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").Build()). WithBootstrapTemplate( builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap1").Build()). - WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: ptr.To("template-md-{{ .cluster.name }}-")}). + WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: "template-md-{{ .cluster.name }}-"}). Build()). Build(), expectErr: true, @@ -1578,7 +1578,7 @@ func TestClusterClassValidation(t *testing.T) { builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "infra2").Build()). WithBootstrapTemplate( builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap2").Build()). - WithNamingStrategy(&clusterv1.MachinePoolClassNamingStrategy{Template: ptr.To("template-mp-{{ .cluster.name")}). 
+ WithNamingStrategy(&clusterv1.MachinePoolClassNamingStrategy{Template: "template-mp-{{ .cluster.name"}). Build()). Build(), expectErr: true, @@ -1600,7 +1600,7 @@ func TestClusterClassValidation(t *testing.T) { builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "infra2").Build()). WithBootstrapTemplate( builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap2").Build()). - WithNamingStrategy(&clusterv1.MachinePoolClassNamingStrategy{Template: ptr.To("template-mp-{{ .cluster.name }}-")}). + WithNamingStrategy(&clusterv1.MachinePoolClassNamingStrategy{Template: "template-mp-{{ .cluster.name }}-"}). Build()). Build(), expectErr: true, diff --git a/internal/webhooks/machine.go b/internal/webhooks/machine.go index 6f5193b1ad60..38f51e51617c 100644 --- a/internal/webhooks/machine.go +++ b/internal/webhooks/machine.go @@ -65,9 +65,9 @@ func (webhook *Machine) Default(_ context.Context, obj runtime.Object) error { } m.Labels[clusterv1.ClusterNameLabel] = m.Spec.ClusterName - if m.Spec.Version != nil && !strings.HasPrefix(*m.Spec.Version, "v") { - normalizedVersion := "v" + *m.Spec.Version - m.Spec.Version = &normalizedVersion + if m.Spec.Version != "" && !strings.HasPrefix(m.Spec.Version, "v") { + normalizedVersion := "v" + m.Spec.Version + m.Spec.Version = normalizedVersion } if m.Spec.NodeDeletionTimeoutSeconds == nil { @@ -130,9 +130,9 @@ func (webhook *Machine) validate(oldM, newM *clusterv1.Machine) error { ) } - if newM.Spec.Version != nil { - if !version.KubeSemver.MatchString(*newM.Spec.Version) { - allErrs = append(allErrs, field.Invalid(specPath.Child("version"), *newM.Spec.Version, "must be a valid semantic version")) + if newM.Spec.Version != "" { + if !version.KubeSemver.MatchString(newM.Spec.Version) { + allErrs = append(allErrs, field.Invalid(specPath.Child("version"), newM.Spec.Version, "must be a valid semantic version")) } } diff --git a/internal/webhooks/machine_test.go b/internal/webhooks/machine_test.go index 
1f0894b55599..783c2e727f91 100644 --- a/internal/webhooks/machine_test.go +++ b/internal/webhooks/machine_test.go @@ -36,7 +36,7 @@ func TestMachineDefault(t *testing.T) { }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ConfigRef: &clusterv1.ContractVersionedObjectReference{}}, - Version: ptr.To("1.17.5"), + Version: "1.17.5", }, } @@ -46,7 +46,7 @@ func TestMachineDefault(t *testing.T) { g.Expect(webhook.Default(ctx, m)).To(Succeed()) g.Expect(m.Labels[clusterv1.ClusterNameLabel]).To(Equal(m.Spec.ClusterName)) - g.Expect(*m.Spec.Version).To(Equal("v1.17.5")) + g.Expect(m.Spec.Version).To(Equal("v1.17.5")) g.Expect(*m.Spec.NodeDeletionTimeoutSeconds).To(Equal(defaultNodeDeletionTimeoutSeconds)) } @@ -195,7 +195,7 @@ func TestMachineVersionValidation(t *testing.T) { m := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: &tt.version, + Version: tt.version, Bootstrap: clusterv1.Bootstrap{ConfigRef: nil, DataSecretName: ptr.To("test")}, }, } diff --git a/internal/webhooks/machinedeployment.go b/internal/webhooks/machinedeployment.go index 1c5ea9080cd1..0b50dad33932 100644 --- a/internal/webhooks/machinedeployment.go +++ b/internal/webhooks/machinedeployment.go @@ -142,9 +142,9 @@ func (webhook *MachineDeployment) Default(ctx context.Context, obj runtime.Objec m.Spec.Template.Labels[clusterv1.ClusterNameLabel] = m.Spec.ClusterName // tolerate version strings without a "v" prefix: prepend it if it's not there - if m.Spec.Template.Spec.Version != nil && !strings.HasPrefix(*m.Spec.Template.Spec.Version, "v") { - normalizedVersion := "v" + *m.Spec.Template.Spec.Version - m.Spec.Template.Spec.Version = &normalizedVersion + if m.Spec.Template.Spec.Version != "" && !strings.HasPrefix(m.Spec.Template.Spec.Version, "v") { + normalizedVersion := "v" + m.Spec.Template.Spec.Version + m.Spec.Template.Spec.Version = normalizedVersion } return nil @@ -274,9 +274,9 @@ func (webhook *MachineDeployment) validate(oldMD, newMD *clusterv1.MachineDeploy } } - if 
newMD.Spec.Template.Spec.Version != nil { - if !version.KubeSemver.MatchString(*newMD.Spec.Template.Spec.Version) { - allErrs = append(allErrs, field.Invalid(specPath.Child("template", "spec", "version"), *newMD.Spec.Template.Spec.Version, "must be a valid semantic version")) + if newMD.Spec.Template.Spec.Version != "" { + if !version.KubeSemver.MatchString(newMD.Spec.Template.Spec.Version) { + allErrs = append(allErrs, field.Invalid(specPath.Child("template", "spec", "version"), newMD.Spec.Template.Spec.Version, "must be a valid semantic version")) } } diff --git a/internal/webhooks/machinedeployment_test.go b/internal/webhooks/machinedeployment_test.go index aaaa6e6157f2..8b714d74827a 100644 --- a/internal/webhooks/machinedeployment_test.go +++ b/internal/webhooks/machinedeployment_test.go @@ -43,7 +43,7 @@ func TestMachineDeploymentDefault(t *testing.T) { ClusterName: "test-cluster", Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("1.19.10"), + Version: "1.19.10", }, }, }, @@ -79,7 +79,7 @@ func TestMachineDeploymentDefault(t *testing.T) { g.Expect(md.Spec.Strategy.RollingUpdate.MaxSurge.IntValue()).To(Equal(1)) g.Expect(md.Spec.Strategy.RollingUpdate.MaxUnavailable.IntValue()).To(Equal(0)) - g.Expect(*md.Spec.Template.Spec.Version).To(Equal("v1.19.10")) + g.Expect(md.Spec.Template.Spec.Version).To(Equal("v1.19.10")) } func TestMachineDeploymentReferenceDefault(t *testing.T) { @@ -92,7 +92,7 @@ func TestMachineDeploymentReferenceDefault(t *testing.T) { ClusterName: "test-cluster", Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("1.19.10"), + Version: "1.19.10", Bootstrap: clusterv1.Bootstrap{ ConfigRef: &clusterv1.ContractVersionedObjectReference{}, }, @@ -559,7 +559,7 @@ func TestMachineDeploymentVersionValidation(t *testing.T) { Spec: clusterv1.MachineDeploymentSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To(tt.version), + Version: 
tt.version, }, }, }, diff --git a/internal/webhooks/machineset.go b/internal/webhooks/machineset.go index 0b11579abb5f..6352065922b4 100644 --- a/internal/webhooks/machineset.go +++ b/internal/webhooks/machineset.go @@ -121,9 +121,9 @@ func (webhook *MachineSet) Default(ctx context.Context, obj runtime.Object) erro m.Spec.Template.Labels[clusterv1.MachineSetNameLabel] = format.MustFormatValue(m.Name) } - if m.Spec.Template.Spec.Version != nil && !strings.HasPrefix(*m.Spec.Template.Spec.Version, "v") { - normalizedVersion := "v" + *m.Spec.Template.Spec.Version - m.Spec.Template.Spec.Version = &normalizedVersion + if m.Spec.Template.Spec.Version != "" && !strings.HasPrefix(m.Spec.Template.Spec.Version, "v") { + normalizedVersion := "v" + m.Spec.Template.Spec.Version + m.Spec.Template.Spec.Version = normalizedVersion } return nil @@ -198,13 +198,13 @@ func (webhook *MachineSet) validate(oldMS, newMS *clusterv1.MachineSet) error { ) } - if newMS.Spec.Template.Spec.Version != nil { - if !version.KubeSemver.MatchString(*newMS.Spec.Template.Spec.Version) { + if newMS.Spec.Template.Spec.Version != "" { + if !version.KubeSemver.MatchString(newMS.Spec.Template.Spec.Version) { allErrs = append( allErrs, field.Invalid( specPath.Child("template", "spec", "version"), - *newMS.Spec.Template.Spec.Version, + newMS.Spec.Template.Spec.Version, "must be a valid semantic version", ), ) diff --git a/internal/webhooks/machineset_test.go b/internal/webhooks/machineset_test.go index 8da51aeb91d5..388648940636 100644 --- a/internal/webhooks/machineset_test.go +++ b/internal/webhooks/machineset_test.go @@ -40,7 +40,7 @@ func TestMachineSetDefault(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("1.19.10"), + Version: "1.19.10", }, }, }, @@ -60,7 +60,7 @@ func TestMachineSetDefault(t *testing.T) { g.Expect(ms.Spec.DeletePolicy).To(Equal(clusterv1.RandomMachineSetDeletePolicy)) 
g.Expect(ms.Spec.Selector.MatchLabels).To(HaveKeyWithValue(clusterv1.MachineSetNameLabel, "test-ms")) g.Expect(ms.Spec.Template.Labels).To(HaveKeyWithValue(clusterv1.MachineSetNameLabel, "test-ms")) - g.Expect(*ms.Spec.Template.Spec.Version).To(Equal("v1.19.10")) + g.Expect(ms.Spec.Template.Spec.Version).To(Equal("v1.19.10")) } func TestCalculateMachineSetReplicas(t *testing.T) { @@ -390,7 +390,7 @@ func TestMachineSetVersionValidation(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To(tt.version), + Version: tt.version, }, }, }, diff --git a/internal/webhooks/patch_validation.go b/internal/webhooks/patch_validation.go index 45a17beeaff0..97a9c109cb05 100644 --- a/internal/webhooks/patch_validation.go +++ b/internal/webhooks/patch_validation.go @@ -128,7 +128,7 @@ func validatePatchDefinitions(patch clusterv1.ClusterClassPatch, clusterClass *c "patch.external can be used only if the RuntimeSDK feature flag is enabled", )) } - if patch.External.ValidateTopologyExtension == nil && patch.External.GeneratePatchesExtension == nil { + if patch.External.ValidateTopologyExtension == "" && patch.External.GeneratePatchesExtension == "" { allErrs = append(allErrs, field.Invalid( path.Child("external"), @@ -141,17 +141,17 @@ func validatePatchDefinitions(patch clusterv1.ClusterClassPatch, clusterClass *c } // validateSelectors validates if enabledIf is a valid template if it is set. -func validateEnabledIf(enabledIf *string, path *field.Path) field.ErrorList { +func validateEnabledIf(enabledIf string, path *field.Path) field.ErrorList { var allErrs field.ErrorList - if enabledIf != nil { + if enabledIf != "" { // Error if template can not be parsed. 
- _, err := template.New("enabledIf").Funcs(sprig.HermeticTxtFuncMap()).Parse(*enabledIf) + _, err := template.New("enabledIf").Funcs(sprig.HermeticTxtFuncMap()).Parse(enabledIf) if err != nil { allErrs = append(allErrs, field.Invalid( path, - *enabledIf, + enabledIf, fmt.Sprintf("template can not be parsed: %v", err), )) } @@ -395,7 +395,7 @@ func validateJSONPatchValues(jsonPatch clusterv1.JSONPatch, variableSet map[stri )) } } - if jsonPatch.ValueFrom != nil && jsonPatch.ValueFrom.Template == nil && jsonPatch.ValueFrom.Variable == nil { + if jsonPatch.ValueFrom != nil && jsonPatch.ValueFrom.Template == "" && jsonPatch.ValueFrom.Variable == "" { allErrs = append(allErrs, field.Invalid( path.Child("valueFrom"), @@ -403,7 +403,7 @@ func validateJSONPatchValues(jsonPatch clusterv1.JSONPatch, variableSet map[stri "valueFrom must set either template or variable", )) } - if jsonPatch.ValueFrom != nil && jsonPatch.ValueFrom.Template != nil && jsonPatch.ValueFrom.Variable != nil { + if jsonPatch.ValueFrom != nil && jsonPatch.ValueFrom.Template != "" && jsonPatch.ValueFrom.Variable != "" { allErrs = append(allErrs, field.Invalid( path.Child("valueFrom"), @@ -412,28 +412,28 @@ func validateJSONPatchValues(jsonPatch clusterv1.JSONPatch, variableSet map[stri )) } - if jsonPatch.ValueFrom != nil && jsonPatch.ValueFrom.Template != nil { + if jsonPatch.ValueFrom != nil && jsonPatch.ValueFrom.Template != "" { // Error if template can not be parsed. - _, err := template.New("valueFrom.template").Funcs(sprig.HermeticTxtFuncMap()).Parse(*jsonPatch.ValueFrom.Template) + _, err := template.New("valueFrom.template").Funcs(sprig.HermeticTxtFuncMap()).Parse(jsonPatch.ValueFrom.Template) if err != nil { allErrs = append(allErrs, field.Invalid( path.Child("valueFrom", "template"), - *jsonPatch.ValueFrom.Template, + jsonPatch.ValueFrom.Template, fmt.Sprintf("template can not be parsed: %v", err), )) } } // If set validate that the variable is valid. 
- if jsonPatch.ValueFrom != nil && jsonPatch.ValueFrom.Variable != nil { + if jsonPatch.ValueFrom != nil && jsonPatch.ValueFrom.Variable != "" { // If the variable is one of the list of builtin variables it's valid. - if strings.HasPrefix(*jsonPatch.ValueFrom.Variable, "builtin.") { - if _, ok := builtinVariables[*jsonPatch.ValueFrom.Variable]; !ok { + if strings.HasPrefix(jsonPatch.ValueFrom.Variable, "builtin.") { + if _, ok := builtinVariables[jsonPatch.ValueFrom.Variable]; !ok { allErrs = append(allErrs, field.Invalid( path.Child("valueFrom", "variable"), - *jsonPatch.ValueFrom.Variable, + jsonPatch.ValueFrom.Variable, "not a defined builtin variable", )) } @@ -442,13 +442,13 @@ func validateJSONPatchValues(jsonPatch clusterv1.JSONPatch, variableSet map[stri // validating if the whole path is an existing variable. // This could be done by re-using getVariableValue of the json patch // generator but requires a refactoring first. - variableName := getVariableName(*jsonPatch.ValueFrom.Variable) + variableName := getVariableName(jsonPatch.ValueFrom.Variable) if _, ok := variableSet[variableName]; !ok { allErrs = append(allErrs, field.Invalid( path.Child("valueFrom", "variable"), - *jsonPatch.ValueFrom.Variable, - fmt.Sprintf("variable with name %s cannot be found", *jsonPatch.ValueFrom.Variable), + jsonPatch.ValueFrom.Variable, + fmt.Sprintf("variable with name %s cannot be found", jsonPatch.ValueFrom.Variable), )) } } diff --git a/internal/webhooks/patch_validation_test.go b/internal/webhooks/patch_validation_test.go index ee4f891a9446..9209c030f932 100644 --- a/internal/webhooks/patch_validation_test.go +++ b/internal/webhooks/patch_validation_test.go @@ -68,7 +68,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/variableSetting/variableValue1", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName1"), + Variable: "variableName1", }, }, }, @@ -91,7 +91,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", 
Path: "/spec/template/spec/variableSetting/variableValue2", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName2"), + Variable: "variableName2", }, }, }, @@ -155,7 +155,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/cluster-name", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName"), + Variable: "variableName", }, }, }, @@ -208,7 +208,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/cluster-name", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName1"), + Variable: "variableName1", }, }, }, @@ -231,7 +231,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/variableSetting/variableValue", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName2"), + Variable: "variableName2", }, }, }, @@ -280,7 +280,7 @@ func TestValidatePatches(t *testing.T) { Patches: []clusterv1.ClusterClassPatch{ { Name: "patch1", - EnabledIf: ptr.To(`template {{ .variableB }}`), + EnabledIf: `template {{ .variableB }}`, Definitions: []clusterv1.PatchDefinition{}, }, }, @@ -303,7 +303,7 @@ func TestValidatePatches(t *testing.T) { Patches: []clusterv1.ClusterClassPatch{ { Name: "patch1", - EnabledIf: ptr.To(`template {{{{{{{{ .variableB }}`), + EnabledIf: `template {{{{{{{{ .variableB }}`, }, }, }, @@ -423,7 +423,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/0/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName"), + Variable: "variableName", }, }, }, @@ -475,7 +475,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/1/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName"), + Variable: "variableName", }, }, }, @@ -528,7 +528,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: 
"/spec/template/01/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName"), + Variable: "variableName", }, }, }, @@ -581,7 +581,7 @@ func TestValidatePatches(t *testing.T) { Op: "remove", Path: "/spec/template/0/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName"), + Variable: "variableName", }, }, }, @@ -634,7 +634,7 @@ func TestValidatePatches(t *testing.T) { Op: "replace", Path: "/spec/template/0/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName"), + Variable: "variableName", }, }, }, @@ -728,7 +728,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName"), + Variable: "variableName", }, Value: &apiextensionsv1.JSON{Raw: []byte("1")}, }, @@ -989,8 +989,8 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName"), - Template: ptr.To(`template {{ .variableB }}`), + Variable: "variableName", + Template: `template {{ .variableB }}`, }, }, }, @@ -1044,7 +1044,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Template: ptr.To(`template {{ .variableB }}`), + Template: `template {{ .variableB }}`, }, }, }, @@ -1097,7 +1097,7 @@ func TestValidatePatches(t *testing.T) { Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ // Template is invalid - too many leading curly braces. 
- Template: ptr.To(`template {{{{{{{{ .variableB }}`), + Template: `template {{{{{{{{ .variableB }}`, }, }, }, @@ -1151,7 +1151,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("undefinedVariable"), + Variable: "undefinedVariable", }, }, }, @@ -1203,7 +1203,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName"), + Variable: "variableName", }, }, }, @@ -1255,7 +1255,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("variableName.nestedField"), + Variable: "variableName.nestedField", }, }, }, @@ -1312,7 +1312,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.notDefined"), + Variable: "builtin.notDefined", }, }, }, @@ -1354,7 +1354,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: ptr.To("builtin.machineDeployment.version"), + Variable: "builtin.machineDeployment.version", }, }, }, @@ -1385,8 +1385,8 @@ func TestValidatePatches(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - GeneratePatchesExtension: ptr.To("generate-extension"), - ValidateTopologyExtension: ptr.To("generate-extension"), + GeneratePatchesExtension: "generate-extension", + ValidateTopologyExtension: "generate-extension", }, }, }, @@ -1412,8 +1412,8 @@ func TestValidatePatches(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - GeneratePatchesExtension: ptr.To("generate-extension"), - ValidateTopologyExtension: ptr.To("generate-extension"), + GeneratePatchesExtension: "generate-extension", + ValidateTopologyExtension: "generate-extension", }, }, }, @@ -1463,8 
+1463,8 @@ func TestValidatePatches(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - GeneratePatchesExtension: ptr.To("generate-extension"), - ValidateTopologyExtension: ptr.To("generate-extension"), + GeneratePatchesExtension: "generate-extension", + ValidateTopologyExtension: "generate-extension", }, Definitions: []clusterv1.PatchDefinition{}, }, diff --git a/internal/webhooks/runtime/extensionconfig_webhook.go b/internal/webhooks/runtime/extensionconfig_webhook.go index f1c372e917e3..003a522bb3dc 100644 --- a/internal/webhooks/runtime/extensionconfig_webhook.go +++ b/internal/webhooks/runtime/extensionconfig_webhook.go @@ -131,13 +131,13 @@ func validateExtensionConfigSpec(e *runtimev1.ExtensionConfig) field.ErrorList { specPath := field.NewPath("spec") - if e.Spec.ClientConfig.URL == nil && e.Spec.ClientConfig.Service == nil { + if e.Spec.ClientConfig.URL == "" && e.Spec.ClientConfig.Service == nil { allErrs = append(allErrs, field.Required( specPath.Child("clientConfig"), "either url or service must be defined", )) } - if e.Spec.ClientConfig.URL != nil && e.Spec.ClientConfig.Service != nil { + if e.Spec.ClientConfig.URL != "" && e.Spec.ClientConfig.Service != nil { allErrs = append(allErrs, field.Forbidden( specPath.Child("clientConfig"), "only one of url or service can be defined", @@ -145,17 +145,17 @@ func validateExtensionConfigSpec(e *runtimev1.ExtensionConfig) field.ErrorList { } // Validate URL - if e.Spec.ClientConfig.URL != nil { - if uri, err := url.ParseRequestURI(*e.Spec.ClientConfig.URL); err != nil { + if e.Spec.ClientConfig.URL != "" { + if uri, err := url.ParseRequestURI(e.Spec.ClientConfig.URL); err != nil { allErrs = append(allErrs, field.Invalid( specPath.Child("clientConfig", "url"), - *e.Spec.ClientConfig.URL, + e.Spec.ClientConfig.URL, fmt.Sprintf("must be a valid URL, e.g. 
https://example.com: %v", err), )) } else if uri.Scheme != "https" { allErrs = append(allErrs, field.Invalid( specPath.Child("clientConfig", "url"), - *e.Spec.ClientConfig.URL, + e.Spec.ClientConfig.URL, "'https' is the only allowed URL scheme, e.g. https://example.com", )) } @@ -194,8 +194,8 @@ func validateExtensionConfigSpec(e *runtimev1.ExtensionConfig) field.ErrorList { )) } - if e.Spec.ClientConfig.Service.Path != nil { - path := *e.Spec.ClientConfig.Service.Path + if e.Spec.ClientConfig.Service.Path != "" { + path := e.Spec.ClientConfig.Service.Path if _, err := url.ParseRequestURI(path); err != nil { allErrs = append(allErrs, field.Invalid( specPath.Child("clientConfig", "service", "path"), diff --git a/internal/webhooks/runtime/extensionconfig_webhook_test.go b/internal/webhooks/runtime/extensionconfig_webhook_test.go index 6d625eaa2346..66a8ed9362b1 100644 --- a/internal/webhooks/runtime/extensionconfig_webhook_test.go +++ b/internal/webhooks/runtime/extensionconfig_webhook_test.go @@ -48,13 +48,13 @@ func TestExtensionConfigValidationFeatureGated(t *testing.T) { }, Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ - URL: ptr.To("https://extension-address.com"), + URL: "https://extension-address.com", }, NamespaceSelector: &metav1.LabelSelector{}, }, } updatedExtension := extension.DeepCopy() - updatedExtension.Spec.ClientConfig.URL = ptr.To("https://a-new-extension-address.com") + updatedExtension.Spec.ClientConfig.URL = "https://a-new-extension-address.com" tests := []struct { name string new *runtimev1.ExtensionConfig @@ -140,7 +140,7 @@ func TestExtensionConfigValidate(t *testing.T) { }, Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ - URL: ptr.To("https://extension-address.com"), + URL: "https://extension-address.com", }, }, } @@ -152,7 +152,7 @@ func TestExtensionConfigValidate(t *testing.T) { Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ Service: 
&runtimev1.ServiceReference{ - Path: ptr.To("/path/to/handler"), + Path: "/path/to/handler", Port: ptr.To[int32](1), Name: "foo", Namespace: "bar", @@ -168,13 +168,13 @@ func TestExtensionConfigValidate(t *testing.T) { // Valid updated Extension updatedExtension := extensionWithURL.DeepCopy() - updatedExtension.Spec.ClientConfig.URL = ptr.To("https://a-in-extension-address.com") + updatedExtension.Spec.ClientConfig.URL = "https://a-in-extension-address.com" extensionWithoutURLOrService := extensionWithURL.DeepCopy() - extensionWithoutURLOrService.Spec.ClientConfig.URL = nil + extensionWithoutURLOrService.Spec.ClientConfig.URL = "" extensionWithInvalidServicePath := extensionWithService.DeepCopy() - extensionWithInvalidServicePath.Spec.ClientConfig.Service.Path = ptr.To("https://example.com") + extensionWithInvalidServicePath.Spec.ClientConfig.Service.Path = "https://example.com" extensionWithNoServiceName := extensionWithService.DeepCopy() extensionWithNoServiceName.Spec.ClientConfig.Service.Name = "" @@ -189,10 +189,10 @@ func TestExtensionConfigValidate(t *testing.T) { extensionWithBadServiceNamespace.Spec.ClientConfig.Service.Namespace = "INVALID" badURLExtension := extensionWithURL.DeepCopy() - badURLExtension.Spec.ClientConfig.URL = ptr.To("https//extension-address.com") + badURLExtension.Spec.ClientConfig.URL = "https//extension-address.com" badSchemeExtension := extensionWithURL.DeepCopy() - badSchemeExtension.Spec.ClientConfig.URL = ptr.To("unknown://extension-address.com") + badSchemeExtension.Spec.ClientConfig.URL = "unknown://extension-address.com" extensionWithInvalidServicePort := extensionWithService.DeepCopy() extensionWithInvalidServicePort.Spec.ClientConfig.Service.Port = ptr.To[int32](90000) diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go index afa973034d9a..ad18ee28f91a 100644 --- a/test/e2e/cluster_upgrade_runtimesdk.go +++ b/test/e2e/cluster_upgrade_runtimesdk.go @@ -616,8 +616,8 @@ func 
beforeClusterUpgradeAnnotationIsBlocking(ctx context.Context, c client.Clie controlPlaneMachines := framework.GetControlPlaneMachinesByCluster(ctx, framework.GetControlPlaneMachinesByClusterInput{Lister: c, ClusterName: clusterRef.Name, Namespace: clusterRef.Namespace}) for _, machine := range controlPlaneMachines { - if *machine.Spec.Version == toVersion { - return errors.Errorf("Machine's %s version (%s) does match %s", klog.KObj(&machine), *machine.Spec.Version, toVersion) + if machine.Spec.Version == toVersion { + return errors.Errorf("Machine's %s version (%s) does match %s", klog.KObj(&machine), machine.Spec.Version, toVersion) } } @@ -663,7 +663,7 @@ func beforeClusterUpgradeTestHandler(ctx context.Context, c client.Client, clust controlPlaneMachines := framework.GetControlPlaneMachinesByCluster(ctx, framework.GetControlPlaneMachinesByClusterInput{Lister: c, ClusterName: cluster.Name, Namespace: cluster.Namespace}) for _, machine := range controlPlaneMachines { - if *machine.Spec.Version == toVersion { + if machine.Spec.Version == toVersion { blocked = false } } @@ -682,7 +682,7 @@ func afterControlPlaneUpgradeTestHandler(ctx context.Context, c client.Client, c framework.GetMachineDeploymentsByClusterInput{ClusterName: cluster.Name, Namespace: cluster.Namespace, Lister: c}) // If any of the MachineDeployments have the target Kubernetes Version, the hook is unblocked. 
for _, md := range mds { - if *md.Spec.Template.Spec.Version == version { + if md.Spec.Template.Spec.Version == version { blocked = false } } diff --git a/test/e2e/cluster_upgrade_test.go b/test/e2e/cluster_upgrade_test.go index b101ba72e17c..f6628e3f3b95 100644 --- a/test/e2e/cluster_upgrade_test.go +++ b/test/e2e/cluster_upgrade_test.go @@ -116,7 +116,7 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with a HA var upgradedAndHealthy int64 deletingMachines := []clusterv1.Machine{} for _, m := range machines { - if *m.Spec.Version == cluster.Spec.Topology.Version && conditions.IsTrue(&m, clusterv1.MachineNodeHealthyCondition) { + if m.Spec.Version == cluster.Spec.Topology.Version && conditions.IsTrue(&m, clusterv1.MachineNodeHealthyCondition) { upgradedAndHealthy++ } if !m.DeletionTimestamp.IsZero() { diff --git a/test/e2e/clusterclass_changes.go b/test/e2e/clusterclass_changes.go index 62f65ae65667..ceea31cfcb2e 100644 --- a/test/e2e/clusterclass_changes.go +++ b/test/e2e/clusterclass_changes.go @@ -707,7 +707,7 @@ func assertMachineDeploymentTopologyFields(g Gomega, md clusterv1.MachineDeploym g.Expect(md.Spec.Strategy).To(BeComparableTo(mdTopology.Strategy)) } - if mdTopology.FailureDomain != nil { + if mdTopology.FailureDomain != "" { g.Expect(md.Spec.Template.Spec.FailureDomain).To(Equal(mdTopology.FailureDomain)) } } diff --git a/test/e2e/kcp_adoption.go b/test/e2e/kcp_adoption.go index 06c37ec75f2e..ce5c8d61c142 100644 --- a/test/e2e/kcp_adoption.go +++ b/test/e2e/kcp_adoption.go @@ -223,10 +223,10 @@ func KCPAdoptionSpec(ctx context.Context, inputGetter func() KCPAdoptionSpecInpu bootstrapSecrets := map[string]bootstrapv1.KubeadmConfig{} for _, b := range bootstrap.Items { - if b.Status.DataSecretName == nil { + if b.Status.DataSecretName == "" { continue } - bootstrapSecrets[*b.Status.DataSecretName] = b + bootstrapSecrets[b.Status.DataSecretName] = b } for _, s := range secrets.Items { diff --git 
a/test/framework/controlplane_helpers.go b/test/framework/controlplane_helpers.go index 0791e2b5c182..4e35ba998cc6 100644 --- a/test/framework/controlplane_helpers.go +++ b/test/framework/controlplane_helpers.go @@ -226,8 +226,8 @@ func AssertControlPlaneFailureDomains(ctx context.Context, input AssertControlPl }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Couldn't list control-plane machines for the cluster %q", input.Cluster.Name) for _, machine := range machineList.Items { - if machine.Spec.FailureDomain != nil { - machineFD := *machine.Spec.FailureDomain + if machine.Spec.FailureDomain != "" { + machineFD := machine.Spec.FailureDomain if !controlPlaneFailureDomains.Has(machineFD) { Fail(fmt.Sprintf("Machine %s is in the %q failure domain, expecting one of the failure domain defined at cluster level", machine.Name, machineFD)) } diff --git a/test/framework/machine_helpers.go b/test/framework/machine_helpers.go index 5a06f0a51de7..52447caac09a 100644 --- a/test/framework/machine_helpers.go +++ b/test/framework/machine_helpers.go @@ -170,7 +170,7 @@ func WaitForControlPlaneMachinesToBeUpgraded(ctx context.Context, input WaitForC upgraded := 0 for _, machine := range machines { m := machine - if *m.Spec.Version == input.KubernetesUpgradeVersion && conditions.IsTrue(&m, clusterv1.MachineNodeHealthyCondition) { + if m.Spec.Version == input.KubernetesUpgradeVersion && conditions.IsTrue(&m, clusterv1.MachineNodeHealthyCondition) { upgraded++ } } @@ -210,7 +210,7 @@ func WaitForMachineDeploymentMachinesToBeUpgraded(ctx context.Context, input Wai upgraded := 0 for _, machine := range machines { - if *machine.Spec.Version == input.KubernetesUpgradeVersion { + if machine.Spec.Version == input.KubernetesUpgradeVersion { upgraded++ } } diff --git a/test/framework/machinedeployment_helpers.go b/test/framework/machinedeployment_helpers.go index ef9a9b315fde..461625cabd83 100644 --- a/test/framework/machinedeployment_helpers.go +++ 
b/test/framework/machinedeployment_helpers.go @@ -160,7 +160,7 @@ func AssertMachineDeploymentFailureDomains(ctx context.Context, input AssertMach Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling AssertMachineDeploymentFailureDomains") Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. input.MachineDeployment can't be nil when calling AssertMachineDeploymentFailureDomains") - machineDeploymentFD := ptr.Deref(input.MachineDeployment.Spec.Template.Spec.FailureDomain, "") + machineDeploymentFD := input.MachineDeployment.Spec.Template.Spec.FailureDomain Byf("Checking all the machines controlled by %s are in the %q failure domain", input.MachineDeployment.Name, machineDeploymentFD) selectorMap, err := metav1.LabelSelectorAsMap(&input.MachineDeployment.Spec.Selector) @@ -173,7 +173,7 @@ func AssertMachineDeploymentFailureDomains(ctx context.Context, input AssertMach for i := range ms.Items { machineSet := ms.Items[i] - machineSetFD := ptr.Deref(machineSet.Spec.Template.Spec.FailureDomain, "") + machineSetFD := machineSet.Spec.Template.Spec.FailureDomain Expect(machineSetFD).To(Equal(machineDeploymentFD), "MachineSet %s is in the %q failure domain, expecting %q", machineSet.Name, machineSetFD, machineDeploymentFD) selectorMap, err = metav1.LabelSelectorAsMap(&machineSet.Spec.Selector) @@ -185,7 +185,7 @@ func AssertMachineDeploymentFailureDomains(ctx context.Context, input AssertMach }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Machines for Cluster %s", klog.KObj(input.Cluster)) for _, machine := range machines.Items { - machineFD := ptr.Deref(machine.Spec.FailureDomain, "") + machineFD := machine.Spec.FailureDomain Expect(machineFD).To(Equal(machineDeploymentFD), "Machine %s is in the %q failure domain, expecting %q", machine.Name, machineFD, machineDeploymentFD) } } @@ -250,7 +250,7 @@ func UpgradeMachineDeploymentsAndWait(ctx context.Context, input UpgradeMachineD 
Expect(err).ToNot(HaveOccurred()) oldVersion := deployment.Spec.Template.Spec.Version - deployment.Spec.Template.Spec.Version = &input.UpgradeVersion + deployment.Spec.Template.Spec.Version = input.UpgradeVersion if input.UpgradeMachineTemplate != nil { deployment.Spec.Template.Spec.InfrastructureRef.Name = *input.UpgradeMachineTemplate } @@ -259,7 +259,7 @@ func UpgradeMachineDeploymentsAndWait(ctx context.Context, input UpgradeMachineD }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch Kubernetes version on MachineDeployment %s", klog.KObj(deployment)) log.Logf("Waiting for Kubernetes versions of machines in MachineDeployment %s to be upgraded from %s to %s", - klog.KObj(deployment), *oldVersion, input.UpgradeVersion) + klog.KObj(deployment), oldVersion, input.UpgradeVersion) WaitForMachineDeploymentMachinesToBeUpgraded(ctx, WaitForMachineDeploymentMachinesToBeUpgradedInput{ Lister: mgmtClient, Cluster: input.Cluster, diff --git a/test/framework/machinepool_helpers.go b/test/framework/machinepool_helpers.go index 2b4dba97fcd3..b396e94572e2 100644 --- a/test/framework/machinepool_helpers.go +++ b/test/framework/machinepool_helpers.go @@ -152,14 +152,14 @@ func UpgradeMachinePoolAndWait(ctx context.Context, input UpgradeMachinePoolAndW oldVersion := mp.Spec.Template.Spec.Version // Upgrade to new Version. 
- mp.Spec.Template.Spec.Version = &input.UpgradeVersion + mp.Spec.Template.Spec.Version = input.UpgradeVersion Eventually(func() error { return patchHelper.Patch(ctx, mp) }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch the new Kubernetes version to Machine Pool %s", klog.KObj(mp)) log.Logf("Waiting for Kubernetes versions of machines in MachinePool %s to be upgraded from %s to %s", - klog.KObj(mp), *oldVersion, input.UpgradeVersion) + klog.KObj(mp), oldVersion, input.UpgradeVersion) WaitForMachinePoolInstancesToBeUpgraded(ctx, WaitForMachinePoolInstancesToBeUpgradedInput{ Getter: mgmtClient, WorkloadClusterGetter: input.ClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Namespace, input.Cluster.Name).GetClient(), diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go index 8c9fa36b8846..fbd9bb9d1598 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go @@ -92,7 +92,7 @@ func createDockerContainer(ctx context.Context, name string, cluster *clusterv1. // For MachinePools placement is expected to be managed by the underlying infrastructure primitive, but // given that there is no such an thing in CAPD, we are picking a random failure domain. 
randomIndex := rand.Intn(len(machinePool.Spec.FailureDomains)) //nolint:gosec - for k, v := range docker.FailureDomainLabel(&machinePool.Spec.FailureDomains[randomIndex]) { + for k, v := range docker.FailureDomainLabel(machinePool.Spec.FailureDomains[randomIndex]) { labels[k] = v } } @@ -354,7 +354,7 @@ func isMachineMatchingInfrastructureSpec(_ context.Context, machine *docker.Mach // NOTE: With the current implementation we are checking if the machine is using a kindest/node image for the expected version, // but not checking if the machine has the expected extra.mounts or pre.loaded images. - semVer, err := semver.ParseTolerant(*machinePool.Spec.Template.Spec.Version) + semVer, err := semver.ParseTolerant(machinePool.Spec.Template.Spec.Version) if err != nil { // TODO: consider if to return an error panic(errors.Wrap(err, "failed to parse DockerMachine version").Error()) diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go index e043dfcf0190..ed66a59edd7f 100644 --- a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go @@ -82,7 +82,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster } var dataSecretName *string - var version *string + var version string if labels.IsMachinePoolOwned(dockerMachine) { machinePool, err := utilexp.GetMachinePoolByLabels(ctx, r.Client, dockerMachine.GetNamespace(), dockerMachine.Labels) diff --git a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go index cb819c1ff471..4adb63cdbeff 100644 --- a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go +++ 
b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go @@ -955,7 +955,7 @@ func (r *MachineBackendReconciler) reconcileNormalKubeProxy(ctx context.Context, Containers: []corev1.Container{ { Name: "kube-proxy", - Image: fmt.Sprintf("registry.k8s.io/kube-proxy:%s", *machine.Spec.Version), + Image: fmt.Sprintf("registry.k8s.io/kube-proxy:%s", machine.Spec.Version), }, }, }, diff --git a/test/infrastructure/docker/internal/docker/machine.go b/test/infrastructure/docker/internal/docker/machine.go index c18a34d5ba8e..4d261813c3b2 100644 --- a/test/infrastructure/docker/internal/docker/machine.go +++ b/test/infrastructure/docker/internal/docker/machine.go @@ -201,7 +201,7 @@ func (m *Machine) ContainerImage() string { } // Create creates a docker container hosting a Kubernetes node. -func (m *Machine) Create(ctx context.Context, image string, role string, version *string, labels map[string]string, mounts []infrav1.Mount) error { +func (m *Machine) Create(ctx context.Context, image string, role string, version string, labels map[string]string, mounts []infrav1.Mount) error { log := ctrl.LoggerFrom(ctx) // Create if not exists. @@ -211,11 +211,11 @@ func (m *Machine) Create(ctx context.Context, image string, role string, version // Get the KindMapping for the target K8s version. // NOTE: The KindMapping allows to select the most recent kindest/node image available, if any, as well as // provide info about the mode to be used when starting the kindest/node image itself. - if version == nil { + if version == "" { return errors.New("cannot create a DockerMachine for a nil version") } - semVer, err := semver.ParseTolerant(*version) + semVer, err := semver.ParseTolerant(version) if err != nil { return errors.Wrap(err, "failed to parse DockerMachine version") } @@ -329,7 +329,7 @@ func (m *Machine) PreloadLoadImages(ctx context.Context, images []string) error } // ExecBootstrap runs bootstrap on a node, this is generally `kubeadm `. 
-func (m *Machine) ExecBootstrap(ctx context.Context, data string, format bootstrapv1.Format, version *string, image string) error { +func (m *Machine) ExecBootstrap(ctx context.Context, data string, format bootstrapv1.Format, version string, image string) error { log := ctrl.LoggerFrom(ctx) if m.container == nil { @@ -339,11 +339,11 @@ func (m *Machine) ExecBootstrap(ctx context.Context, data string, format bootstr // Get the kindMapping for the target K8s version. // NOTE: The kindMapping allows to select the most recent kindest/node image available, if any, as well as // provide info about the mode to be used when starting the kindest/node image itself. - if version == nil { + if version == "" { return errors.New("cannot create a DockerMachine for a nil version") } - semVer, err := semver.ParseTolerant(*version) + semVer, err := semver.ParseTolerant(version) if err != nil { return errors.Wrap(err, "failed to parse DockerMachine version") } diff --git a/test/infrastructure/docker/internal/docker/util.go b/test/infrastructure/docker/internal/docker/util.go index 7955cd381d31..6fcc6259de26 100644 --- a/test/infrastructure/docker/internal/docker/util.go +++ b/test/infrastructure/docker/internal/docker/util.go @@ -37,9 +37,9 @@ const ( ) // FailureDomainLabel returns a map with the docker label for the given failure domain. 
-func FailureDomainLabel(failureDomain *string) map[string]string { - if failureDomain != nil && *failureDomain != "" { - return map[string]string{failureDomainLabelKey: *failureDomain} +func FailureDomainLabel(failureDomain string) map[string]string { + if failureDomain != "" { + return map[string]string{failureDomainLabelKey: failureDomain} } return nil } diff --git a/util/collections/machine_collection.go b/util/collections/machine_collection.go index a090521cdf70..299c81f6de7c 100644 --- a/util/collections/machine_collection.go +++ b/util/collections/machine_collection.go @@ -47,8 +47,8 @@ type machinesByVersion []*clusterv1.Machine func (v machinesByVersion) Len() int { return len(v) } func (v machinesByVersion) Swap(i, j int) { v[i], v[j] = v[j], v[i] } func (v machinesByVersion) Less(i, j int) bool { - vi, _ := semver.ParseTolerant(*v[i].Spec.Version) - vj, _ := semver.ParseTolerant(*v[j].Spec.Version) + vi, _ := semver.ParseTolerant(v[i].Spec.Version) + vj, _ := semver.ParseTolerant(v[j].Spec.Version) comp := version.Compare(vi, vj, version.WithBuildTags()) if comp == 0 { return v[i].Name < v[j].Name @@ -273,10 +273,10 @@ func (s Machines) sortedByVersion() []*clusterv1.Machine { // LowestVersion returns the lowest version among all the machine with // defined versions. If no machine has a defined version it returns an // empty string. -func (s Machines) LowestVersion() *string { +func (s Machines) LowestVersion() string { machines := s.Filter(WithVersion()) if len(machines) == 0 { - return nil + return "" } m := machines.sortedByVersion()[0] return m.Spec.Version diff --git a/util/collections/machine_collection_test.go b/util/collections/machine_collection_test.go index e6bd0e0770b4..6ea654110b26 100644 --- a/util/collections/machine_collection_test.go +++ b/util/collections/machine_collection_test.go @@ -22,7 +22,6 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/collections" @@ -82,12 +81,12 @@ func TestMachinesLowestVersion(t *testing.T) { tests := []struct { name string machines collections.Machines - expected *string + expected string }{ { name: "return empty for empty machines collection", machines: collections.New(), - expected: nil, + expected: "", }, { name: "return empty if machines dont have version", @@ -96,58 +95,58 @@ func TestMachinesLowestVersion(t *testing.T) { machines.Insert(&clusterv1.Machine{}) return machines }(), - expected: nil, + expected: "", }, { name: "return lowest version from machines", machines: func() collections.Machines { machines := collections.New() machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-1"}, Spec: clusterv1.MachineSpec{ - Version: ptr.To("1.20"), + Version: "1.20", }}) machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-2"}, Spec: clusterv1.MachineSpec{ - Version: ptr.To("1.19.8"), + Version: "1.19.8", }}) machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-3"}, Spec: clusterv1.MachineSpec{ - Version: ptr.To(""), + Version: "", }}) return machines }(), - expected: ptr.To("1.19.8"), + expected: "1.19.8", }, { name: "return lowest version from machines with pre release versions", machines: func() collections.Machines { machines := collections.New() machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-1"}, Spec: clusterv1.MachineSpec{ - Version: ptr.To("1.20.1"), + Version: "1.20.1", }}) machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-2"}, Spec: clusterv1.MachineSpec{ - Version: ptr.To("1.20.1-alpha.1"), + Version: "1.20.1-alpha.1", }}) machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-3"}, Spec: clusterv1.MachineSpec{ - Version: 
ptr.To(""), + Version: "", }}) return machines }(), - expected: ptr.To("1.20.1-alpha.1"), + expected: "1.20.1-alpha.1", }, { name: "return lowest version from machines with build identifier versions", machines: func() collections.Machines { machines := collections.New() machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-1"}, Spec: clusterv1.MachineSpec{ - Version: ptr.To("1.20.1+xyz.2"), + Version: "1.20.1+xyz.2", }}) machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-2"}, Spec: clusterv1.MachineSpec{ - Version: ptr.To("1.20.1+xyz.1"), + Version: "1.20.1+xyz.1", }}) machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-3"}, Spec: clusterv1.MachineSpec{ - Version: ptr.To(""), + Version: "", }}) return machines }(), - expected: ptr.To("1.20.1+xyz.1"), + expected: "1.20.1+xyz.1", }, } diff --git a/util/collections/machine_filters.go b/util/collections/machine_filters.go index e185e25cd3bd..d679f12f21f2 100644 --- a/util/collections/machine_filters.go +++ b/util/collections/machine_filters.go @@ -75,23 +75,23 @@ func HasControllerRef(machine *clusterv1.Machine) bool { // InFailureDomains returns a filter to find all machines // in any of the given failure domains. 
-func InFailureDomains(failureDomains ...*string) Func { +func InFailureDomains(failureDomains ...string) Func { return func(machine *clusterv1.Machine) bool { if machine == nil { return false } for i := range failureDomains { fd := failureDomains[i] - if fd == nil { + if fd == "" { if fd == machine.Spec.FailureDomain { return true } continue } - if machine.Spec.FailureDomain == nil { + if machine.Spec.FailureDomain == "" { continue } - if *fd == *machine.Spec.FailureDomain { + if fd == machine.Spec.FailureDomain { return true } } @@ -276,10 +276,10 @@ func MatchesKubernetesVersion(kubernetesVersion string) Func { if machine == nil { return false } - if machine.Spec.Version == nil { + if machine.Spec.Version == "" { return false } - return *machine.Spec.Version == kubernetesVersion + return machine.Spec.Version == kubernetesVersion } } @@ -289,10 +289,10 @@ func WithVersion() Func { if machine == nil { return false } - if machine.Spec.Version == nil { + if machine.Spec.Version == "" { return false } - if _, err := semver.ParseTolerant(*machine.Spec.Version); err != nil { + if _, err := semver.ParseTolerant(machine.Spec.Version); err != nil { return false } return true diff --git a/util/collections/machine_filters_test.go b/util/collections/machine_filters_test.go index 5a4cfa6536d5..79afcb01af56 100644 --- a/util/collections/machine_filters_test.go +++ b/util/collections/machine_filters_test.go @@ -252,37 +252,37 @@ func TestHashAnnotationKey(t *testing.T) { func TestInFailureDomain(t *testing.T) { t.Run("nil machine returns false", func(t *testing.T) { g := NewWithT(t) - g.Expect(collections.InFailureDomains(ptr.To("test"))(nil)).To(BeFalse()) + g.Expect(collections.InFailureDomains("test")(nil)).To(BeFalse()) }) t.Run("machine with given failure domain returns true", func(t *testing.T) { g := NewWithT(t) - m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: ptr.To("test")}} - g.Expect(collections.InFailureDomains(ptr.To("test"))(m)).To(BeTrue()) + 
m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: "test"}} + g.Expect(collections.InFailureDomains("test")(m)).To(BeTrue()) }) t.Run("machine with a different failure domain returns false", func(t *testing.T) { g := NewWithT(t) - m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: ptr.To("notTest")}} + m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: "notTest"}} g.Expect(collections.InFailureDomains( - ptr.To("test"), - ptr.To("test2"), - ptr.To("test3"), - nil, - ptr.To("foo"))(m)).To(BeFalse()) + "test", + "test2", + "test3", + "", + "foo")(m)).To(BeFalse()) }) t.Run("machine without failure domain returns false", func(t *testing.T) { g := NewWithT(t) m := &clusterv1.Machine{} - g.Expect(collections.InFailureDomains(ptr.To("test"))(m)).To(BeFalse()) + g.Expect(collections.InFailureDomains("test")(m)).To(BeFalse()) }) - t.Run("machine without failure domain returns true, when nil used for failure domain", func(t *testing.T) { + t.Run("machine without failure domain returns true, when \"\" used for failure domain", func(t *testing.T) { g := NewWithT(t) m := &clusterv1.Machine{} - g.Expect(collections.InFailureDomains(nil)(m)).To(BeTrue()) + g.Expect(collections.InFailureDomains("")(m)).To(BeTrue()) }) t.Run("machine with failure domain returns true, when one of multiple failure domains match", func(t *testing.T) { g := NewWithT(t) - m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: ptr.To("test")}} - g.Expect(collections.InFailureDomains(ptr.To("foo"), ptr.To("test"))(m)).To(BeTrue()) + m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: "test"}} + g.Expect(collections.InFailureDomains("foo", "test")(m)).To(BeTrue()) }) } @@ -314,11 +314,11 @@ func TestMatchesKubernetesVersion(t *testing.T) { g.Expect(collections.MatchesKubernetesVersion("some_ver")(nil)).To(BeFalse()) }) - t.Run("nil machine.Spec.Version returns false", func(t *testing.T) { + t.Run("empty machine.Spec.Version returns 
false", func(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: nil, + Version: "", }, } g.Expect(collections.MatchesKubernetesVersion("some_ver")(machine)).To(BeFalse()) @@ -329,7 +329,7 @@ func TestMatchesKubernetesVersion(t *testing.T) { kversion := "some_ver" machine := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: &kversion, + Version: kversion, }, } g.Expect(collections.MatchesKubernetesVersion("some_ver")(machine)).To(BeTrue()) @@ -340,7 +340,7 @@ func TestMatchesKubernetesVersion(t *testing.T) { kversion := "some_ver_2" machine := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: &kversion, + Version: kversion, }, } g.Expect(collections.MatchesKubernetesVersion("some_ver")(machine)).To(BeFalse()) @@ -353,21 +353,11 @@ func TestWithVersion(t *testing.T) { g.Expect(collections.WithVersion()(nil)).To(BeFalse()) }) - t.Run("nil machine.Spec.Version returns false", func(t *testing.T) { - g := NewWithT(t) - machine := &clusterv1.Machine{ - Spec: clusterv1.MachineSpec{ - Version: nil, - }, - } - g.Expect(collections.WithVersion()(machine)).To(BeFalse()) - }) - t.Run("empty machine.Spec.Version returns false", func(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: ptr.To(""), + Version: "", }, } g.Expect(collections.WithVersion()(machine)).To(BeFalse()) @@ -377,7 +367,7 @@ func TestWithVersion(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("1..20"), + Version: "1..20", }, } g.Expect(collections.WithVersion()(machine)).To(BeFalse()) @@ -387,7 +377,7 @@ func TestWithVersion(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: ptr.To("1.20"), + Version: "1.20", }, } g.Expect(collections.WithVersion()(machine)).To(BeTrue()) diff --git a/util/conversion/conversion_test.go b/util/conversion/conversion_test.go index 
364ea47aacfd..51af54d74590 100644 --- a/util/conversion/conversion_test.go +++ b/util/conversion/conversion_test.go @@ -50,8 +50,8 @@ func TestMarshalData(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", - Version: &version, - ProviderID: &providerID, + Version: version, + ProviderID: providerID, }, } diff --git a/util/failuredomains/failure_domains.go b/util/failuredomains/failure_domains.go index 5ca891493156..6d12116871ec 100644 --- a/util/failuredomains/failure_domains.go +++ b/util/failuredomains/failure_domains.go @@ -22,7 +22,6 @@ import ( "fmt" "sort" - "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" @@ -73,16 +72,16 @@ func (f failureDomainAggregations) Swap(i, j int) { } // PickMost returns the failure domain from which we have to delete a control plane machine, which is the failure domain with most machines and at least one eligible machine in it. -func PickMost(ctx context.Context, failureDomains []clusterv1.FailureDomain, allMachines, eligibleMachines collections.Machines) *string { +func PickMost(ctx context.Context, failureDomains []clusterv1.FailureDomain, allMachines, eligibleMachines collections.Machines) string { aggregations := countByFailureDomain(ctx, failureDomains, allMachines, eligibleMachines) if len(aggregations) == 0 { - return nil + return "" } sort.Sort(sort.Reverse(aggregations)) if len(aggregations) > 0 && aggregations[0].countPriority > 0 { - return ptr.To(aggregations[0].id) + return aggregations[0].id } - return nil + return "" } // PickFewest returns the failure domain that will be used for placement of a new control plane machine, which is the failure domain with the fewest @@ -93,13 +92,13 @@ func PickMost(ctx context.Context, failureDomains []clusterv1.FailureDomain, all // // In case of tie (more failure domain with the same number of up-to-date, not deleted machines) the failure domain with the fewest number of // machine overall is 
picked to ensure a better spreading of machines while the rollout is performed. -func PickFewest(ctx context.Context, failureDomains []clusterv1.FailureDomain, allMachines, upToDateMachines collections.Machines) *string { +func PickFewest(ctx context.Context, failureDomains []clusterv1.FailureDomain, allMachines, upToDateMachines collections.Machines) string { aggregations := countByFailureDomain(ctx, failureDomains, allMachines, upToDateMachines) if len(aggregations) == 0 { - return nil + return "" } sort.Sort(aggregations) - return ptr.To(aggregations[0].id) + return aggregations[0].id } // countByFailureDomain returns failure domains with the number of machines in it. @@ -125,10 +124,10 @@ func countByFailureDomain(ctx context.Context, failureDomains []clusterv1.Failur // Count how many machines are in each failure domain. for _, m := range allMachines { - if m.Spec.FailureDomain == nil { + if m.Spec.FailureDomain == "" { continue } - id := *m.Spec.FailureDomain + id := m.Spec.FailureDomain if _, ok := counters[id]; !ok { var knownFailureDomains []string for _, fd := range failureDomains { @@ -143,10 +142,10 @@ func countByFailureDomain(ctx context.Context, failureDomains []clusterv1.Failur } for _, m := range priorityMachines { - if m.Spec.FailureDomain == nil { + if m.Spec.FailureDomain == "" { continue } - id := *m.Spec.FailureDomain + id := m.Spec.FailureDomain if _, ok := counters[id]; !ok { continue } diff --git a/util/failuredomains/failure_domains_test.go b/util/failuredomains/failure_domains_test.go index 5fcebb5215c5..7760342cf6cc 100644 --- a/util/failuredomains/failure_domains_test.go +++ b/util/failuredomains/failure_domains_test.go @@ -34,22 +34,22 @@ var ( ) func TestNewFailureDomainPicker(t *testing.T) { - a := ptr.To("us-west-1a") - b := ptr.To("us-west-1b") + a := "us-west-1a" + b := "us-west-1b" fds := []clusterv1.FailureDomain{ - {Name: *a}, - {Name: *b}, + {Name: a}, + {Name: b}, } machinea := &clusterv1.Machine{Spec: 
clusterv1.MachineSpec{FailureDomain: a}} machineb := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: b}} - machinenil := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: nil}} + machinenil := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: ""}} testcases := []struct { name string fds []clusterv1.FailureDomain machines collections.Machines - expected []*string + expected []string }{ { name: "simple", @@ -59,41 +59,41 @@ func TestNewFailureDomainPicker(t *testing.T) { name: "no machines", fds: []clusterv1.FailureDomain{ { - Name: *a, + Name: a, }, }, - expected: []*string{a}, + expected: []string{a}, }, { name: "one machine in a failure domain", fds: fds, machines: collections.FromMachines(machinea.DeepCopy()), - expected: []*string{b}, + expected: []string{b}, }, { name: "no failure domain specified on machine", fds: []clusterv1.FailureDomain{ { - Name: *a, + Name: a, }, }, machines: collections.FromMachines(machinenil.DeepCopy()), - expected: []*string{a}, + expected: []string{a}, }, { name: "mismatched failure domain on machine", fds: []clusterv1.FailureDomain{ { - Name: *a, + Name: a, }, }, machines: collections.FromMachines(machineb.DeepCopy()), - expected: []*string{a}, + expected: []string{a}, }, { name: "failure domains and no machines should return a valid failure domain", fds: fds, - expected: []*string{a, b}, + expected: []string{a, b}, }, } for _, tc := range testcases { @@ -102,7 +102,7 @@ func TestNewFailureDomainPicker(t *testing.T) { fd := PickFewest(ctx, tc.fds, tc.machines, nil) if tc.expected == nil { - g.Expect(fd).To(BeNil()) + g.Expect(fd).To(BeEmpty()) } else { g.Expect(fd).To(BeElementOf(tc.expected)) } @@ -111,42 +111,42 @@ func TestNewFailureDomainPicker(t *testing.T) { } func TestPickMost(t *testing.T) { - a := ptr.To("us-west-1a") - b := ptr.To("us-west-1b") + a := "us-west-1a" + b := "us-west-1b" fds := []clusterv1.FailureDomain{ { - Name: *a, + Name: a, ControlPlane: ptr.To(true), }, { - Name: *b, 
+ Name: b, ControlPlane: ptr.To(true), }, } machinea := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: a}} machineb := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: b}} - machinenil := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: nil}} + machinenil := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: ""}} testcases := []struct { name string fds []clusterv1.FailureDomain allMachines collections.Machines eligibleMachines collections.Machines - expected *string + expected string }{ { name: "simple", - expected: nil, + expected: "", }, { name: "no machines should return nil", fds: []clusterv1.FailureDomain{ { - Name: *a, + Name: a, }, }, - expected: nil, + expected: "", }, { name: "one machine in a failure domain", @@ -159,40 +159,40 @@ func TestPickMost(t *testing.T) { name: "no failure domain specified on machine", fds: []clusterv1.FailureDomain{ { - Name: *a, + Name: a, ControlPlane: ptr.To(true), }, }, allMachines: collections.FromMachines(machinenil.DeepCopy()), eligibleMachines: collections.FromMachines(machinenil.DeepCopy()), - expected: nil, + expected: "", }, { name: "mismatched failure domain on machine should return nil", fds: []clusterv1.FailureDomain{ { - Name: *a, + Name: a, ControlPlane: ptr.To(true), }, }, allMachines: collections.FromMachines(machineb.DeepCopy()), eligibleMachines: collections.FromMachines(machineb.DeepCopy()), - expected: nil, + expected: "", }, { name: "failure domains and no machines should return nil", fds: fds, - expected: nil, + expected: "", }, { - name: "nil failure domains with machines", + name: "empty failure domains with machines", allMachines: collections.FromMachines(machineb.DeepCopy()), eligibleMachines: collections.FromMachines(machineb.DeepCopy()), - expected: nil, + expected: "", }, { - name: "nil failure domains with no machines", - expected: nil, + name: "empty failure domains with no machines", + expected: "", }, } for _, tc := range testcases { @@ 
-200,11 +200,7 @@ func TestPickMost(t *testing.T) { g := NewWithT(t) fd := PickMost(ctx, tc.fds, tc.allMachines, tc.eligibleMachines) - if tc.expected == nil { - g.Expect(fd).To(BeNil()) - } else { - g.Expect(fd).To(Equal(tc.expected)) - } + g.Expect(fd).To(Equal(tc.expected)) }) } } @@ -227,19 +223,19 @@ func TestPickFewestNew(t *testing.T) { fds0 := []clusterv1.FailureDomain{} - machineA1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineA2 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineB1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(b)}} - machineC1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "c1"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(c)}} - machineA1Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1-old"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineA2Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2-old"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineB1Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1-old"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(b)}} - machineB2Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b2-old"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(b)}} - machineC1Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "c1-old"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(c)}} - machineA1New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1-new"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineA2New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2-new"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineB1New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1-new"}, Spec: 
clusterv1.MachineSpec{FailureDomain: ptr.To(b)}} - machineC1New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "c1-new"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(c)}} + machineA1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machineA2 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machineB1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1"}, Spec: clusterv1.MachineSpec{FailureDomain: b}} + machineC1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "c1"}, Spec: clusterv1.MachineSpec{FailureDomain: c}} + machineA1Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1-old"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machineA2Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2-old"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machineB1Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1-old"}, Spec: clusterv1.MachineSpec{FailureDomain: b}} + machineB2Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b2-old"}, Spec: clusterv1.MachineSpec{FailureDomain: b}} + machineC1Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "c1-old"}, Spec: clusterv1.MachineSpec{FailureDomain: c}} + machineA1New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1-new"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machineA2New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2-new"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machineB1New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1-new"}, Spec: clusterv1.MachineSpec{FailureDomain: b}} + machineC1New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "c1-new"}, Spec: clusterv1.MachineSpec{FailureDomain: c}} testcases := []struct { name string @@ -490,10 +486,10 @@ func TestPickFewestNew(t *testing.T) { fd := PickFewest(ctx, tc.fds, 
tc.allMachines, tc.upToDateMachines) if tc.expected == nil { - g.Expect(fd).To(BeNil()) + g.Expect(fd).To(BeEmpty()) } else { - g.Expect(fd).ToNot(BeNil()) - g.Expect(tc.expected).To(ContainElement(*fd)) + g.Expect(fd).ToNot(BeEmpty()) + g.Expect(tc.expected).To(ContainElement(fd)) } }) } @@ -517,21 +513,21 @@ func TestPickMostNew(t *testing.T) { fds0 := []clusterv1.FailureDomain{} - machineA1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineA2 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineB1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(b)}} - machineB2 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b2"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(b)}} - machineC1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "c1"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(c)}} - machineA1Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1-old"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineA2Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2-old"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineB1Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1-old"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(b)}} - machineB2Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b2-old"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(b)}} - machineC1Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "c1-old"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(c)}} - machineA1New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1-new"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineA2New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2-new"}, Spec: 
clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineB1New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1-new"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(b)}} - machineB2New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b2-new"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(b)}} - machineC1New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "c1-new"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(c)}} + machineA1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machineA2 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machineB1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1"}, Spec: clusterv1.MachineSpec{FailureDomain: b}} + machineB2 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b2"}, Spec: clusterv1.MachineSpec{FailureDomain: b}} + machineC1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "c1"}, Spec: clusterv1.MachineSpec{FailureDomain: c}} + machineA1Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1-old"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machineA2Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2-old"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machineB1Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1-old"}, Spec: clusterv1.MachineSpec{FailureDomain: b}} + machineB2Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b2-old"}, Spec: clusterv1.MachineSpec{FailureDomain: b}} + machineC1Old := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "c1-old"}, Spec: clusterv1.MachineSpec{FailureDomain: c}} + machineA1New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1-new"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machineA2New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2-new"}, Spec: clusterv1.MachineSpec{FailureDomain: 
a}} + machineB1New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1-new"}, Spec: clusterv1.MachineSpec{FailureDomain: b}} + machineB2New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b2-new"}, Spec: clusterv1.MachineSpec{FailureDomain: b}} + machineC1New := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "c1-new"}, Spec: clusterv1.MachineSpec{FailureDomain: c}} testcases := []struct { name string @@ -831,10 +827,10 @@ func TestPickMostNew(t *testing.T) { fd := PickMost(ctx, tc.fds, tc.allMachines, tc.eligibleMachines) if tc.expected == nil { - g.Expect(fd).To(BeNil()) + g.Expect(fd).To(BeEmpty()) } else { - g.Expect(fd).ToNot(BeNil()) - g.Expect(tc.expected).To(ContainElement(*fd)) + g.Expect(fd).ToNot(BeEmpty()) + g.Expect(tc.expected).To(ContainElement(fd)) } }) } @@ -850,10 +846,10 @@ func TestCountByFailureDomain(t *testing.T) { {Name: a, ControlPlane: ptr.To(true)}, {Name: b, ControlPlane: ptr.To(true)}, } - machinea1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machinea2 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(a)}} - machineb1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1"}, Spec: clusterv1.MachineSpec{FailureDomain: ptr.To(b)}} - machinenil := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "nil"}, Spec: clusterv1.MachineSpec{FailureDomain: nil}} + machinea1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a1"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machinea2 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "a2"}, Spec: clusterv1.MachineSpec{FailureDomain: a}} + machineb1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "b1"}, Spec: clusterv1.MachineSpec{FailureDomain: b}} + machinenil := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "nil"}, Spec: clusterv1.MachineSpec{FailureDomain: ""}} allMachines := 
collections.FromMachines(machinea1, machinea2, machineb1, machinenil) priorityMachines := collections.FromMachines(machinea1) diff --git a/util/test/builder/builders.go b/util/test/builder/builders.go index 87a9c65769be..e56bcdc2c7b1 100644 --- a/util/test/builder/builders.go +++ b/util/test/builder/builders.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/internal/contract" @@ -606,8 +607,8 @@ func (m *MachineDeploymentClassBuilder) WithReadinessGates(readinessGates []clus } // WithFailureDomain sets the FailureDomain for the MachineDeploymentClassBuilder. -func (m *MachineDeploymentClassBuilder) WithFailureDomain(f *string) *MachineDeploymentClassBuilder { - m.failureDomain = f +func (m *MachineDeploymentClassBuilder) WithFailureDomain(f string) *MachineDeploymentClassBuilder { + m.failureDomain = &f return m } @@ -671,7 +672,7 @@ func (m *MachineDeploymentClassBuilder) Build() *clusterv1.MachineDeploymentClas obj.ReadinessGates = m.readinessGates } if m.failureDomain != nil { - obj.FailureDomain = m.failureDomain + obj.FailureDomain = *m.failureDomain } if m.nodeDrainTimeout != nil { obj.NodeDrainTimeoutSeconds = m.nodeDrainTimeout @@ -1721,7 +1722,7 @@ func (m *MachinePoolBuilder) Build() *clusterv1.MachinePool { Replicas: m.replicas, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: m.version, + Version: ptr.Deref(m.version, ""), ClusterName: m.clusterName, }, }, @@ -1851,7 +1852,7 @@ func (m *MachineDeploymentBuilder) Build() *clusterv1.MachineDeployment { obj.Generation = *m.generation } if m.version != nil { - obj.Spec.Template.Spec.Version = m.version + obj.Spec.Template.Spec.Version = *m.version } obj.Spec.Replicas = m.replicas if m.bootstrapTemplate != nil { @@ -2028,7 +2029,7 @@ func (m *MachineBuilder) Build() 
*clusterv1.Machine { Labels: m.labels, }, Spec: clusterv1.MachineSpec{ - Version: m.version, + Version: ptr.Deref(m.version, ""), ClusterName: m.clusterName, }, } diff --git a/util/test/builder/zz_generated.deepcopy.go b/util/test/builder/zz_generated.deepcopy.go index 91a42069ea53..20546dca045f 100644 --- a/util/test/builder/zz_generated.deepcopy.go +++ b/util/test/builder/zz_generated.deepcopy.go @@ -160,12 +160,12 @@ func (in *ClusterClassBuilder) DeepCopyInto(out *ClusterClassBuilder) { if in.controlPlaneNamingStrategy != nil { in, out := &in.controlPlaneNamingStrategy, &out.controlPlaneNamingStrategy *out = new(v1beta2.ControlPlaneClassNamingStrategy) - (*in).DeepCopyInto(*out) + **out = **in } if in.infraClusterNamingStrategy != nil { in, out := &in.infraClusterNamingStrategy, &out.infraClusterNamingStrategy *out = new(v1beta2.InfrastructureClassNamingStrategy) - (*in).DeepCopyInto(*out) + **out = **in } if in.machineDeploymentClasses != nil { in, out := &in.machineDeploymentClasses, &out.machineDeploymentClasses @@ -582,7 +582,7 @@ func (in *MachineDeploymentClassBuilder) DeepCopyInto(out *MachineDeploymentClas if in.namingStrategy != nil { in, out := &in.namingStrategy, &out.namingStrategy *out = new(v1beta2.MachineDeploymentClassNamingStrategy) - (*in).DeepCopyInto(*out) + **out = **in } } @@ -778,7 +778,7 @@ func (in *MachinePoolClassBuilder) DeepCopyInto(out *MachinePoolClassBuilder) { if in.namingStrategy != nil { in, out := &in.namingStrategy, &out.namingStrategy *out = new(v1beta2.MachinePoolClassNamingStrategy) - (*in).DeepCopyInto(*out) + **out = **in } }