diff --git a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml index 9ac21142d..ea34c2320 100644 --- a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml +++ b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml @@ -120,6 +120,7 @@ jobs: appframeworksS1, managersecret, managermc, + indingsep, ] runs-on: ubuntu-latest env: diff --git a/.github/workflows/arm-AL2023-int-test-workflow.yml b/.github/workflows/arm-AL2023-int-test-workflow.yml index 8a228fb8c..6a49dfb80 100644 --- a/.github/workflows/arm-AL2023-int-test-workflow.yml +++ b/.github/workflows/arm-AL2023-int-test-workflow.yml @@ -68,6 +68,7 @@ jobs: managercrcrud, licensemanager, managerdeletecr, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image-arm-al2023 diff --git a/.github/workflows/arm-RHEL-build-test-push-workflow.yml b/.github/workflows/arm-RHEL-build-test-push-workflow.yml index dcc08f5ae..eece39362 100644 --- a/.github/workflows/arm-RHEL-build-test-push-workflow.yml +++ b/.github/workflows/arm-RHEL-build-test-push-workflow.yml @@ -68,6 +68,7 @@ jobs: managercrcrud, licensemanager, managerdeletecr, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image-arm-rhel diff --git a/.github/workflows/arm-RHEL-int-test-workflow.yml b/.github/workflows/arm-RHEL-int-test-workflow.yml index dcc08f5ae..eece39362 100644 --- a/.github/workflows/arm-RHEL-int-test-workflow.yml +++ b/.github/workflows/arm-RHEL-int-test-workflow.yml @@ -68,6 +68,7 @@ jobs: managercrcrud, licensemanager, managerdeletecr, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image-arm-rhel diff --git a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml index a16f17354..a2d87d17d 100644 --- a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml +++ b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml @@ -120,6 +120,7 @@ 
jobs: appframeworksS1, managersecret, managermc, + indingsep, ] runs-on: ubuntu-latest env: diff --git a/.github/workflows/arm-Ubuntu-int-test-workflow.yml b/.github/workflows/arm-Ubuntu-int-test-workflow.yml index 1815f177f..32738494e 100644 --- a/.github/workflows/arm-Ubuntu-int-test-workflow.yml +++ b/.github/workflows/arm-Ubuntu-int-test-workflow.yml @@ -68,6 +68,7 @@ jobs: managercrcrud, licensemanager, managerdeletecr, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image-arm-ubuntu diff --git a/.github/workflows/build-test-push-workflow.yml b/.github/workflows/build-test-push-workflow.yml index 5776eb081..3281c19ae 100644 --- a/.github/workflows/build-test-push-workflow.yml +++ b/.github/workflows/build-test-push-workflow.yml @@ -166,6 +166,7 @@ jobs: managerappframeworkm4, managersecret, managermc, + indingsep, ] runs-on: ubuntu-latest env: diff --git a/.github/workflows/distroless-build-test-push-workflow.yml b/.github/workflows/distroless-build-test-push-workflow.yml index 789196b02..9e8cf3340 100644 --- a/.github/workflows/distroless-build-test-push-workflow.yml +++ b/.github/workflows/distroless-build-test-push-workflow.yml @@ -167,6 +167,7 @@ jobs: managerappframeworkm4, managersecret, managermc, + indingsep, ] runs-on: ubuntu-latest env: diff --git a/.github/workflows/distroless-int-test-workflow.yml b/.github/workflows/distroless-int-test-workflow.yml index e234eb3d7..f4e85ebb1 100644 --- a/.github/workflows/distroless-int-test-workflow.yml +++ b/.github/workflows/distroless-int-test-workflow.yml @@ -64,6 +64,7 @@ jobs: managercrcrud, licensemanager, managerdeletecr, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image-distroless diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index 16fa24988..4d8b89770 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -100,8 +100,8 @@ jobs: version: ${{ steps.dotenv.outputs.KUBECTL_VERSION 
}} - name: Install kuttl run: | - sudo curl -LO https://github.com/kudobuilder/kuttl/releases/download/v0.12.0/kuttl_0.12.0_linux_x86_64.tar.gz - sudo tar -xvzf kuttl_0.12.0_linux_x86_64.tar.gz + sudo curl -LO https://github.com/kudobuilder/kuttl/releases/download/v0.22.0/kuttl_0.22.0_linux_x86_64.tar.gz + sudo tar -xvzf kuttl_0.22.0_linux_x86_64.tar.gz sudo chmod +x kubectl-kuttl sudo mv kubectl-kuttl /usr/local/bin/kubectl-kuttl - name: Install Python diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 6d2b9f6cc..45c4de109 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -61,6 +61,7 @@ jobs: managercrcrud, licensemanager, managerdeletecr, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image diff --git a/.github/workflows/manual-int-test-workflow.yml b/.github/workflows/manual-int-test-workflow.yml index 91e818d80..52f978540 100644 --- a/.github/workflows/manual-int-test-workflow.yml +++ b/.github/workflows/manual-int-test-workflow.yml @@ -23,6 +23,7 @@ jobs: managerscaling, managercrcrud, licensemanager, + indingsep, ] runs-on: ubuntu-latest env: diff --git a/.github/workflows/namespace-scope-int-workflow.yml b/.github/workflows/namespace-scope-int-workflow.yml index 646c662c6..7c8725d96 100644 --- a/.github/workflows/namespace-scope-int-workflow.yml +++ b/.github/workflows/namespace-scope-int-workflow.yml @@ -19,6 +19,7 @@ jobs: managerscaling, managercrcrud, licensemanager, + indingsep, ] runs-on: ubuntu-latest env: diff --git a/.github/workflows/nightly-int-test-workflow.yml b/.github/workflows/nightly-int-test-workflow.yml index 811e75904..490536f2b 100644 --- a/.github/workflows/nightly-int-test-workflow.yml +++ b/.github/workflows/nightly-int-test-workflow.yml @@ -59,6 +59,7 @@ jobs: managerscaling, managercrcrud, licensemanager, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image diff --git a/PROJECT b/PROJECT index 
62abf2007..983f3418b 100644 --- a/PROJECT +++ b/PROJECT @@ -1,3 +1,7 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html domain: splunk.com layout: - go.kubebuilder.io/v4 @@ -109,4 +113,22 @@ resources: kind: LicenseManager path: github.com/splunk/splunk-operator/api/v4 version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: IngestorCluster + path: github.com/splunk/splunk-operator/api/v4 + version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: BusConfiguration + path: github.com/splunk/splunk-operator/api/v4 + version: v4 version: "3" diff --git a/api/v4/busconfiguration_types.go b/api/v4/busconfiguration_types.go new file mode 100644 index 000000000..a4b76a00b --- /dev/null +++ b/api/v4/busconfiguration_types.go @@ -0,0 +1,134 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // BusConfigurationPausedAnnotation is the annotation that pauses the reconciliation (triggers + // an immediate requeue) + BusConfigurationPausedAnnotation = "busconfiguration.enterprise.splunk.com/paused" +) + +// BusConfigurationSpec defines the desired state of BusConfiguration +type BusConfigurationSpec struct { + Type string `json:"type"` + + SQS SQSSpec `json:"sqs"` +} + +type SQSSpec struct { + QueueName string `json:"queueName"` + + AuthRegion string `json:"authRegion"` + + Endpoint string `json:"endpoint"` + + LargeMessageStoreEndpoint string `json:"largeMessageStoreEndpoint"` + + LargeMessageStorePath string `json:"largeMessageStorePath"` + + DeadLetterQueueName string `json:"deadLetterQueueName"` +} + +// BusConfigurationStatus defines the observed state of BusConfiguration. +type BusConfigurationStatus struct { + // Phase of the bus configuration + Phase Phase `json:"phase"` + + // Resource revision tracker + ResourceRevMap map[string]string `json:"resourceRevMap"` + + // Auxillary message describing CR status + Message string `json:"message"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BusConfiguration is the Schema for a Splunk Enterprise bus configuration +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +kubebuilder:resource:path=busconfigurations,scope=Namespaced,shortName=bus +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of bus configuration" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of bus configuration resource" +// 
+kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" +// +kubebuilder:storageversion + +// BusConfiguration is the Schema for the busconfigurations API +type BusConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + Spec BusConfigurationSpec `json:"spec"` + Status BusConfigurationStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements runtime.Object +func (in *BusConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// +kubebuilder:object:root=true + +// BusConfigurationList contains a list of BusConfiguration +type BusConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BusConfiguration `json:"items"` +} + +func init() { + SchemeBuilder.Register(&BusConfiguration{}, &BusConfigurationList{}) +} + +// NewEvent creates a new event associated with the object and ready +// to be published to Kubernetes API +func (bc *BusConfiguration) NewEvent(eventType, reason, message string) corev1.Event { + t := metav1.Now() + return corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: reason + "-", + Namespace: bc.ObjectMeta.Namespace, + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "BusConfiguration", + Namespace: bc.Namespace, + Name: bc.Name, + UID: bc.UID, + APIVersion: GroupVersion.String(), + }, + Reason: reason, + Message: message, + Source: corev1.EventSource{ + Component: "splunk-busconfiguration-controller", + }, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventType, + ReportingController: "enterprise.splunk.com/busconfiguration-controller", + } +} diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 9bb7b31a8..493aeb0f3 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -38,6 +38,9 @@ 
const ( type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` + // Bus configuration reference + BusConfigurationRef corev1.ObjectReference `json:"busConfigurationRef,omitempty"` + // Number of search head pods; a search head cluster will be created if > 1 Replicas int32 `json:"replicas"` } @@ -111,6 +114,9 @@ type IndexerClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` + + // Bus configuration + BusConfiguration BusConfigurationSpec `json:"busConfiguration,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go new file mode 100644 index 000000000..364625e97 --- /dev/null +++ b/api/v4/ingestorcluster_types.go @@ -0,0 +1,148 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // IngestorClusterPausedAnnotation is the annotation that pauses the reconciliation (triggers + // an immediate requeue) + IngestorClusterPausedAnnotation = "ingestorcluster.enterprise.splunk.com/paused" +) + +// IngestorClusterSpec defines the spec of Ingestor Cluster +type IngestorClusterSpec struct { + // Common Splunk spec + CommonSplunkSpec `json:",inline"` + + // Number of ingestor pods + Replicas int32 `json:"replicas"` + + // Splunk Enterprise app repository that specifies remote app location and scope for Splunk app management + AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` + + // Bus configuration reference + BusConfigurationRef corev1.ObjectReference `json:"busConfigurationRef"` +} + +// IngestorClusterStatus defines the observed state of Ingestor Cluster +type IngestorClusterStatus struct { + // Phase of the ingestor pods + Phase Phase `json:"phase"` + + // Number of desired ingestor pods + Replicas int32 `json:"replicas"` + + // Number of ready ingestor pods + ReadyReplicas int32 `json:"readyReplicas"` + + // Selector for pods used by HorizontalPodAutoscaler + Selector string `json:"selector"` + + // Resource revision tracker + ResourceRevMap map[string]string `json:"resourceRevMap"` + + // App Framework context + AppContext AppDeploymentContext `json:"appContext"` + + // Telemetry App installation flag + TelAppInstalled bool `json:"telAppInstalled"` + + // Auxillary message describing CR status + Message string `json:"message"` + + // Bus configuration + BusConfiguration BusConfigurationSpec `json:"busConfiguration,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// IngestorCluster is the Schema for a Splunk Enterprise ingestor cluster pods +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// 
+kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +kubebuilder:resource:path=ingestorclusters,scope=Namespaced,shortName=ing +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of ingestor cluster pods" +// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Number of desired ingestor cluster pods" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready ingestor cluster pods" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of ingestor cluster resource" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" +// +kubebuilder:storageversion + +// IngestorCluster is the Schema for the ingestorclusters API +type IngestorCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + Spec IngestorClusterSpec `json:"spec"` + Status IngestorClusterStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements runtime.Object +func (in *IngestorCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// +kubebuilder:object:root=true + +// IngestorClusterList contains a list of IngestorCluster +type IngestorClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IngestorCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&IngestorCluster{}, &IngestorClusterList{}) +} + +// NewEvent creates a new event associated with the object and ready +// to be published to Kubernetes API +func (ic *IngestorCluster) NewEvent(eventType, reason, message string) corev1.Event { + t := metav1.Now() + return corev1.Event{ + ObjectMeta: 
metav1.ObjectMeta{ + GenerateName: reason + "-", + Namespace: ic.ObjectMeta.Namespace, + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "IngestorCluster", + Namespace: ic.Namespace, + Name: ic.Name, + UID: ic.UID, + APIVersion: GroupVersion.String(), + }, + Reason: reason, + Message: message, + Source: corev1.EventSource{ + Component: "splunk-ingestorcluster-controller", + }, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventType, + ReportingController: "enterprise.splunk.com/ingestorcluster-controller", + } +} diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index 93e988463..fa23c996a 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v4 import ( "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -180,6 +180,95 @@ func (in *BundlePushTracker) DeepCopy() *BundlePushTracker { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BusConfiguration) DeepCopyInto(out *BusConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfiguration. +func (in *BusConfiguration) DeepCopy() *BusConfiguration { + if in == nil { + return nil + } + out := new(BusConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BusConfigurationList) DeepCopyInto(out *BusConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BusConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationList. +func (in *BusConfigurationList) DeepCopy() *BusConfigurationList { + if in == nil { + return nil + } + out := new(BusConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BusConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BusConfigurationSpec) DeepCopyInto(out *BusConfigurationSpec) { + *out = *in + out.SQS = in.SQS +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationSpec. +func (in *BusConfigurationSpec) DeepCopy() *BusConfigurationSpec { + if in == nil { + return nil + } + out := new(BusConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BusConfigurationStatus) DeepCopyInto(out *BusConfigurationStatus) { + *out = *in + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationStatus. 
+func (in *BusConfigurationStatus) DeepCopy() *BusConfigurationStatus { + if in == nil { + return nil + } + out := new(BusConfigurationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CacheManagerSpec) DeepCopyInto(out *CacheManagerSpec) { *out = *in @@ -511,6 +600,7 @@ func (in *IndexerClusterMemberStatus) DeepCopy() *IndexerClusterMemberStatus { func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) + out.BusConfigurationRef = in.BusConfigurationRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterSpec. @@ -543,6 +633,7 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = make([]IndexerClusterMemberStatus, len(*in)) copy(*out, *in) } + out.BusConfiguration = in.BusConfiguration } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterStatus. @@ -555,6 +646,99 @@ func (in *IndexerClusterStatus) DeepCopy() *IndexerClusterStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngestorCluster) DeepCopyInto(out *IngestorCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorCluster. +func (in *IngestorCluster) DeepCopy() *IngestorCluster { + if in == nil { + return nil + } + out := new(IngestorCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngestorClusterList) DeepCopyInto(out *IngestorClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IngestorCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterList. +func (in *IngestorClusterList) DeepCopy() *IngestorClusterList { + if in == nil { + return nil + } + out := new(IngestorClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngestorClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngestorClusterSpec) DeepCopyInto(out *IngestorClusterSpec) { + *out = *in + in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) + in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + out.BusConfigurationRef = in.BusConfigurationRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterSpec. +func (in *IngestorClusterSpec) DeepCopy() *IngestorClusterSpec { + if in == nil { + return nil + } + out := new(IngestorClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { + *out = *in + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.AppContext.DeepCopyInto(&out.AppContext) + out.BusConfiguration = in.BusConfiguration +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterStatus. +func (in *IngestorClusterStatus) DeepCopy() *IngestorClusterStatus { + if in == nil { + return nil + } + out := new(IngestorClusterStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LicenseManager) DeepCopyInto(out *LicenseManager) { *out = *in @@ -793,6 +977,21 @@ func (in *Probe) DeepCopy() *Probe { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SQSSpec) DeepCopyInto(out *SQSSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQSSpec. +func (in *SQSSpec) DeepCopy() *SQSSpec { + if in == nil { + return nil + } + out := new(SQSSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SearchHeadCluster) DeepCopyInto(out *SearchHeadCluster) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index 6a152ce16..1984474fa 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -23,10 +23,11 @@ import ( "os" "time" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + intController "github.com/splunk/splunk-operator/internal/controller" "github.com/splunk/splunk-operator/internal/controller/debug" "github.com/splunk/splunk-operator/pkg/config" - "sigs.k8s.io/controller-runtime/pkg/metrics/filters" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. @@ -47,6 +48,7 @@ import ( enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller" //+kubebuilder:scaffold:imports //extapi "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) @@ -107,11 +109,11 @@ func main() { // as certificates issued by a trusted Certificate Authority (CA). The primary risk is potentially allowing // unauthorized access to sensitive metrics data. Consider replacing with CertDir, CertName, and KeyName // to provide certificates, ensuring the server communicates using trusted and secure certificates. - TLSOpts: tlsOpts, + TLSOpts: tlsOpts, FilterProvider: filters.WithAuthenticationAndAuthorization, } - // TODO: enable https for /metrics endpoint by default + // TODO: enable https for /metrics endpoint by default // if secureMetrics { // // FilterProvider is used to protect the metrics endpoint with authn/authz. 
// // These configurations ensure that only authorized users and service accounts @@ -221,6 +223,20 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "Standalone") os.Exit(1) } + if err := (&controller.IngestorClusterReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "IngestorCluster") + os.Exit(1) + } + if err := (&controller.BusConfigurationReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "BusConfiguration") + os.Exit(1) + } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml b/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml new file mode 100644 index 000000000..9f80cdbea --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml @@ -0,0 +1,106 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: busconfigurations.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: BusConfiguration + listKind: BusConfigurationList + plural: busconfigurations + shortNames: + - bus + singular: busconfiguration + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of bus configuration + jsonPath: .status.phase + name: Phase + type: string + - description: Age of bus configuration resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Auxillary message describing CR status + jsonPath: .status.message + name: Message + type: string + name: v4 + schema: + openAPIV3Schema: + description: BusConfiguration is the Schema for the busconfigurations API + 
properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BusConfigurationSpec defines the desired state of BusConfiguration + properties: + sqs: + properties: + authRegion: + type: string + deadLetterQueueName: + type: string + endpoint: + type: string + largeMessageStoreEndpoint: + type: string + largeMessageStorePath: + type: string + queueName: + type: string + type: object + type: + type: string + type: object + status: + description: BusConfigurationStatus defines the observed state of BusConfiguration. 
+ properties: + message: + description: Auxillary message describing CR status + type: string + phase: + description: Phase of the bus configuration + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + resourceRevMap: + additionalProperties: + type: string + description: Resource revision tracker + type: object + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index a068f17c9..d66e057fb 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -5165,6 +5165,49 @@ spec: x-kubernetes-list-type: atomic type: object type: object + busConfigurationRef: + description: Bus configuration reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic clusterManagerRef: description: ClusterManagerRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -8251,6 +8294,27 @@ spec: type: boolean description: Holds secrets whose IDXC password has changed type: object + busConfiguration: + description: Bus configuration + properties: + sqs: + properties: + authRegion: + type: string + deadLetterQueueName: + type: string + endpoint: + type: string + largeMessageStoreEndpoint: + type: string + largeMessageStorePath: + type: string + queueName: + type: string + type: object + type: + type: string + type: object clusterManagerPhase: description: current phase of the cluster manager enum: diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml new file mode 100644 index 000000000..82f1f868a --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -0,0 +1,4611 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: ingestorclusters.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: IngestorCluster + listKind: IngestorClusterList + plural: 
ingestorclusters + shortNames: + - ing + singular: ingestorcluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of ingestor cluster pods + jsonPath: .status.phase + name: Phase + type: string + - description: Number of desired ingestor cluster pods + jsonPath: .status.replicas + name: Desired + type: integer + - description: Current number of ready ingestor cluster pods + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Age of ingestor cluster resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Auxillary message describing CR status + jsonPath: .status.message + name: Message + type: string + name: v4 + schema: + openAPIV3Schema: + description: IngestorCluster is the Schema for the ingestorclusters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IngestorClusterSpec defines the spec of Ingestor Cluster + properties: + Mock: + description: Mock to differentiate between UTs and actual reconcile + type: boolean + affinity: + description: Kubernetes Affinity rules that control how pods are assigned + to particular nodes. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. 
A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + appRepo: + description: Splunk Enterprise app repository that specifies remote + app location and scope for Splunk app management + properties: + appInstallPeriodSeconds: + default: 90 + description: |- + App installation period within a reconcile. Apps will be installed during this period before the next reconcile is attempted. + Note: Do not change this setting unless instructed to do so by Splunk Support + format: int64 + minimum: 30 + type: integer + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + premiumAppsProps: + description: Properties for premium apps, fill in when scope + premiumApps is chosen + properties: + esDefaults: + description: Enterprise Security App defaults + properties: + sslEnablement: + description: "Sets the sslEnablement value for ES + app installation\n strict: Ensure that SSL + is enabled\n in the web.conf configuration + file to use\n this mode. Otherwise, + the installer exits\n\t \t with an error. 
+ This is the DEFAULT mode used\n by + the operator if left empty.\n auto: Enables + SSL in the etc/system/local/web.conf\n configuration + file.\n ignore: Ignores whether SSL is enabled + or disabled." + type: string + type: object + type: + description: 'Type: enterpriseSecurity for now, can + accommodate itsi etc.. later' + type: string + type: object + scope: + description: 'Scope of the App deployment: cluster, clusterWithPreConfig, + local, premiumApps. Scope determines whether the App(s) + is/are installed locally, cluster-wide or its a premium + app' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: |- + Interval in seconds to check the Remote Storage for App changes. + The default value for this config is 1 hour(3600 sec), + minimum value is 1 minute(60sec) and maximum value is 1 day(86400 sec). + We assign the value based on following conditions - + 1. If no value or 0 is specified then it means periodic polling is disabled. + 2. If anything less than min is specified then we set it to 1 min. + 3. If anything more than the max value is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + premiumAppsProps: + description: Properties for premium apps, fill in when scope + premiumApps is chosen + properties: + esDefaults: + description: Enterprise Security App defaults + properties: + sslEnablement: + description: "Sets the sslEnablement value for ES + app installation\n strict: Ensure that SSL is + enabled\n in the web.conf configuration + file to use\n this mode. Otherwise, the + installer exits\n\t \t with an error. This + is the DEFAULT mode used\n by the operator + if left empty.\n auto: Enables SSL in the etc/system/local/web.conf\n + \ configuration file.\n ignore: Ignores + whether SSL is enabled or disabled." 
+ type: string + type: object + type: + description: 'Type: enterpriseSecurity for now, can accommodate + itsi etc.. later' + type: string + type: object + scope: + description: 'Scope of the App deployment: cluster, clusterWithPreConfig, + local, premiumApps. Scope determines whether the App(s) + is/are installed locally, cluster-wide or its a premium + app' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + installMaxRetries: + default: 2 + description: Maximum number of retries to install Apps + format: int32 + minimum: 0 + type: integer + maxConcurrentAppDownloads: + description: Maximum number of apps that can be downloaded at + same time + format: int64 + type: integer + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio, azure, gcp.' + type: string + region: + description: Region of the remote storage volume where apps + reside. Used for aws, if provided. Not used for minio + and azure. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3, + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' + type: string + type: object + type: array + type: object + busConfigurationRef: + description: Bus configuration reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + clusterManagerRef: + description: ClusterManagerRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + clusterMasterRef: + description: ClusterMasterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + defaults: + description: Inline map of default.yml overrides used to initialize + the environment + type: string + defaultsUrl: + description: Full path or URL for one or more default.yml files, separated + by commas + type: string + defaultsUrlApps: + description: |- + Full path or URL for one or more defaults.yml files specific + to App install, separated by commas. The defaults listed here + will be installed on the CM, standalone, search head deployer + or license manager instance. 
+ type: string + etcVolumeStorageConfig: + description: Storage configuration for /opt/splunk/etc volume + properties: + ephemeralStorage: + description: |- + If true, ephemeral (emptyDir) storage will be used + default false + type: boolean + storageCapacity: + description: Storage capacity to request persistent volume claims + (default=”10Gi” for etc and "100Gi" for var) + type: string + storageClassName: + description: Name of StorageClass to use for persistent volume + claims + type: string + type: object + extraEnv: + description: |- + ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers + WARNING: Setting environment variables used by Splunk or Ansible will affect Splunk installation and operation + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE + environment variables) + type: string + imagePullPolicy: + description: 'Sets pull policy for all images (either “Always” or + the default: “IfNotPresent”)' + enum: + - Always + - IfNotPresent + type: string + imagePullSecrets: + description: |- + Sets imagePullSecrets if image is being pulled from a private registry. + See https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + licenseManagerRef: + description: LicenseManagerRef refers to a Splunk Enterprise license + manager managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + licenseMasterRef: + description: LicenseMasterRef refers to a Splunk Enterprise license + manager managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + licenseUrl: + description: Full path or URL for a Splunk Enterprise license file + type: string + livenessInitialDelaySeconds: + description: |- + LivenessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) for the Liveness probe + Note: If needed, Operator overrides with a higher value + format: int32 + minimum: 0 + type: integer + livenessProbe: + description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command + properties: + failureThreshold: + description: Minimum consecutive failures for the probe to be + considered failed after having succeeded. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + format: int32 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + monitoringConsoleRef: + description: MonitoringConsoleRef refers to a Splunk Enterprise monitoring + console managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + readinessInitialDelaySeconds: + description: |- + ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe + Note: If needed, Operator overrides with a higher value + format: int32 + minimum: 0 + type: integer + readinessProbe: + description: ReadinessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes + properties: + failureThreshold: + description: Minimum consecutive failures for the probe to be + considered failed after having succeeded. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + format: int32 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + replicas: + description: Number of ingestor pods + format: int32 + type: integer + resources: + description: resource requirements for the pod containers + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: Name of Scheduler to use for pod placement (defaults + to “default-scheduler”) + type: string + serviceAccount: + description: |- + ServiceAccount is the service account used by the pods deployed by the CRD. 
+ If not specified uses the default serviceAccount for the namespace as per + https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + type: string + serviceTemplate: + description: ServiceTemplate is a template used to create Kubernetes + services + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + type: object + spec: + description: |- + Spec defines the behavior of a service. + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. 
+ type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. 
Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". 
+ type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. 
If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. 
Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. 
+ type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. 
+ Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. 
+ The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). 
+ format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is an alpha field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + status: + description: |- + Most recently observed status of the service. + Populated by the system. + Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + conditions: + description: Current service state + items: + description: Condition contains details for one aspect of + the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + loadBalancer: + description: |- + LoadBalancer contains the current status of the load-balancer, + if one is present. + properties: + ingress: + description: |- + Ingress is a list containing ingress points for the load-balancer. + Traffic intended for the service should be sent to these ingress points. + items: + description: |- + LoadBalancerIngress represents the status of a load-balancer ingress point: + traffic intended for the service should be sent to an ingress point. + properties: + hostname: + description: |- + Hostname is set for load-balancer ingress points that are DNS based + (typically AWS load-balancers) + type: string + ip: + description: |- + IP is set for load-balancer ingress points that are IP based + (typically GCE or OpenStack load-balancers) + type: string + ipMode: + description: |- + IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. + Setting this to "VIP" indicates that traffic is delivered to the node with + the destination set to the load-balancer's IP and port. + Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + the destination set to the node's IP and node port or the pod's IP and port. + Service implementations may use this information to adjust traffic routing. 
+ type: string + ports: + description: |- + Ports is a list of records of service ports + If used, every port defined in the service should have an entry in it + items: + properties: + error: + description: |- + Error is to record the problem with the service port + The format of the error shall comply with the following rules: + - built-in error values shall be specified in this file and those shall use + CamelCase names + - cloud provider specific error values must have names that comply with the + format foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + port: + description: Port is the port number of the + service port of which status is recorded + here + format: int32 + type: integer + protocol: + description: |- + Protocol is the protocol of the service port of which status is recorded here + The supported values are: "TCP", "UDP", "SCTP" + type: string + required: + - error + - port + - protocol + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + type: object + startupProbe: + description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes + properties: + failureThreshold: + description: Minimum consecutive failures for the probe to be + considered failed after having succeeded. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. 
+ format: int32 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + tolerations: + description: Pod's tolerations for Kubernetes node's taint + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraint https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
+ format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. 
Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). 
In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + varVolumeStorageConfig: + description: Storage configuration for /opt/splunk/var volume + properties: + ephemeralStorage: + description: |- + If true, ephemeral (emptyDir) storage will be used + default false + type: boolean + storageCapacity: + description: Storage capacity to request persistent volume claims + (default=”10Gi” for etc and "100Gi" for var) + type: string + storageClassName: + description: Name of StorageClass to use for persistent volume + claims + type: string + type: object + volumes: + description: List of one or more Kubernetes volumes. These will be + mounted in all pod containers as as /mnt/ + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). 
+ format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. 
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. 
+ The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. 
+ properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. 
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. 
This depends on the Flocker control + service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. 
This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. 
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. 
+ More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. 
+ type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. 
+ Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". 
+ type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. 
+ Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: IngestorClusterStatus defines the observed state of Ingestor + Cluster + properties: + appContext: + description: App Framework context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appInstallPeriodSeconds: + default: 90 + description: |- + App installation period within a reconcile. Apps will be installed during this period before the next reconcile is attempted. 
+ Note: Do not change this setting unless instructed to do so by Splunk Support + format: int64 + minimum: 30 + type: integer + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + premiumAppsProps: + description: Properties for premium apps, fill in when + scope premiumApps is chosen + properties: + esDefaults: + description: Enterpreise Security App defaults + properties: + sslEnablement: + description: "Sets the sslEnablement value for + ES app installation\n strict: Ensure that + SSL is enabled\n in the web.conf + configuration file to use\n this + mode. Otherwise, the installer exists\n\t + \ \t with an error. This is the DEFAULT + mode used\n by the operator if + left empty.\n auto: Enables SSL in the + etc/system/local/web.conf\n configuration + file.\n ignore: Ignores whether SSL is + enabled or disabled." + type: string + type: object + type: + description: 'Type: enterpriseSecurity for now, + can accomodate itsi etc.. later' + type: string + type: object + scope: + description: 'Scope of the App deployment: cluster, + clusterWithPreConfig, local, premiumApps. Scope determines + whether the App(s) is/are installed locally, cluster-wide + or its a premium app' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: |- + Interval in seconds to check the Remote Storage for App changes. + The default value for this config is 1 hour(3600 sec), + minimum value is 1 minute(60sec) and maximum value is 1 day(86400 sec). + We assign the value based on following conditions - + 1. 
If no value or 0 is specified then it means periodic polling is disabled. + 2. If anything less than min is specified then we set it to 1 min. + 3. If anything more than the max value is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + premiumAppsProps: + description: Properties for premium apps, fill in when + scope premiumApps is chosen + properties: + esDefaults: + description: Enterpreise Security App defaults + properties: + sslEnablement: + description: "Sets the sslEnablement value for + ES app installation\n strict: Ensure that + SSL is enabled\n in the web.conf + configuration file to use\n this + mode. Otherwise, the installer exists\n\t \t + \ with an error. This is the DEFAULT mode used\n + \ by the operator if left empty.\n + \ auto: Enables SSL in the etc/system/local/web.conf\n + \ configuration file.\n ignore: Ignores + whether SSL is enabled or disabled." + type: string + type: object + type: + description: 'Type: enterpriseSecurity for now, can + accomodate itsi etc.. later' + type: string + type: object + scope: + description: 'Scope of the App deployment: cluster, clusterWithPreConfig, + local, premiumApps. 
Scope determines whether the App(s) + is/are installed locally, cluster-wide or its a premium + app' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + installMaxRetries: + default: 2 + description: Maximum number of retries to install Apps + format: int32 + minimum: 0 + type: integer + maxConcurrentAppDownloads: + description: Maximum number of apps that can be downloaded + at same time + format: int64 + type: integer + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio, azure, gcp.' + type: string + region: + description: Region of the remote storage volume where + apps reside. Used for aws, if provided. Not used for + minio and azure. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' 
+ type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + description: |- + AppName is the name of app archive retrieved from the + remote bucket e.g app1.tgz or app2.spl + type: string + appPackageTopFolder: + description: |- + AppPackageTopFolder is the name of top folder when we untar the + app archive, which is also assumed to be same as the name of the + app after it is installed. + type: string + auxPhaseInfo: + description: |- + Used to track the copy and install status for each replica member. + Each Pod's phase info is mapped to its ordinal value. + Ignored, once the DeployStatus is marked as Complete + items: + description: PhaseInfo defines the status to track + the App framework installation phase + properties: + failCount: + description: represents number of failures + format: int32 + type: integer + phase: + description: Phase type + type: string + status: + description: Status of the phase + format: int32 + type: integer + type: object + type: array + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + isUpdate: + type: boolean + lastModifiedTime: + type: string + objectHash: + type: string + phaseInfo: + description: App phase info to track download, copy + and install + properties: + failCount: + description: represents number of failures + format: int32 + type: integer + phase: + description: Phase type + type: string + status: + description: Status of the phase + format: int32 + type: integer + type: object + repoState: + description: AppRepoState represent the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents 
the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: |- + Interval in seconds to check the Remote Storage for App changes + This is introduced here so that we dont do spec validation in every reconcile just + because the spec and status are different. + format: int64 + type: integer + appsStatusMaxConcurrentAppDownloads: + description: Represents the Status field for maximum number of + apps that can be downloaded at same time + format: int64 + type: integer + bundlePushStatus: + description: Internal to the App framework. Used in case of CM(IDXC) + and deployer(SHC) + properties: + bundlePushStage: + description: Represents the current stage. Internal to the + App framework + type: integer + retryCount: + description: defines the number of retries completed so far + format: int32 + type: integer + type: object + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. 
+ format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object + busConfiguration: + description: Bus configuration + properties: + sqs: + properties: + authRegion: + type: string + deadLetterQueueName: + type: string + endpoint: + type: string + largeMessageStoreEndpoint: + type: string + largeMessageStorePath: + type: string + queueName: + type: string + type: object + type: + type: string + type: object + message: + description: Auxillary message describing CR status + type: string + phase: + description: Phase of the ingestor pods + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: Number of ready ingestor pods + format: int32 + type: integer + replicas: + description: Number of desired ingestor pods + format: int32 + type: integer + resourceRevMap: + additionalProperties: + type: string + description: Resource revision tracker + type: object + selector: + description: Selector for pods used by HorizontalPodAutoscaler + type: string + telAppInstalled: + description: Telemetry App installation flag + type: boolean + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index dd0d870ec..679c1dc72 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -10,6 +10,8 @@ resources: - bases/enterprise.splunk.com_monitoringconsoles.yaml - bases/enterprise.splunk.com_searchheadclusters.yaml - bases/enterprise.splunk.com_standalones.yaml +- bases/enterprise.splunk.com_ingestorclusters.yaml +- bases/enterprise.splunk.com_busconfigurations.yaml #+kubebuilder:scaffold:crdkustomizeresource @@ -25,6 +27,7 @@ patchesStrategicMerge: #- 
patches/webhook_in_monitoringconsoles.yaml #- patches/webhook_in_searchheadclusters.yaml #- patches/webhook_in_standalones.yaml +#- patches/webhook_in_ingestorclusters.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. @@ -37,6 +40,7 @@ patchesStrategicMerge: #- patches/cainjection_in_monitoringconsoles.yaml #- patches/cainjection_in_searchheadclusters.yaml #- patches/cainjection_in_standalones.yaml +#- patches/cainjection_in_ingestorclusters.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. @@ -74,3 +78,9 @@ patchesJson6902: version: v1 group: apiextensions.k8s.io name: standalones.enterprise.splunk.com + - path: patches/additional_supported_versions_patch_ingestorclusters.yaml + target: + kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + name: ingestorclusters.enterprise.splunk.com diff --git a/config/crd/patches/additional_supported_versions_patch_ingestorclusters.yaml b/config/crd/patches/additional_supported_versions_patch_ingestorclusters.yaml new file mode 100644 index 000000000..d32c85a4b --- /dev/null +++ b/config/crd/patches/additional_supported_versions_patch_ingestorclusters.yaml @@ -0,0 +1,26 @@ +- op: add + path: "/spec/versions/-" + value: + name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + properties: + apiVersion: + type: string +- op: add + path: "/spec/versions/-" + value: + name: v2 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + properties: + apiVersion: + type: string diff --git a/config/crd/patches/cainjection_in_ingestorclusters.yaml b/config/crd/patches/cainjection_in_ingestorclusters.yaml new file mode 100644 index 000000000..77bda7398 --- /dev/null +++ 
b/config/crd/patches/cainjection_in_ingestorclusters.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: ingestorclusters.enterprise.splunk.com diff --git a/config/crd/patches/webhook_in_ingestorclusters.yaml b/config/crd/patches/webhook_in_ingestorclusters.yaml new file mode 100644 index 000000000..3c50a081d --- /dev/null +++ b/config/crd/patches/webhook_in_ingestorclusters.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ingestorclusters.enterprise.splunk.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/rbac/busconfiguration_editor_role.yaml b/config/rbac/busconfiguration_editor_role.yaml new file mode 100644 index 000000000..fde8687f7 --- /dev/null +++ b/config/rbac/busconfiguration_editor_role.yaml @@ -0,0 +1,30 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: busconfiguration-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations/status + verbs: + - get diff --git a/config/rbac/busconfiguration_viewer_role.yaml b/config/rbac/busconfiguration_viewer_role.yaml new file mode 100644 index 000000000..6230863a9 --- /dev/null +++ b/config/rbac/busconfiguration_viewer_role.yaml @@ -0,0 +1,26 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: busconfiguration-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations/status + verbs: + - get diff --git a/config/rbac/ingestorcluster_editor_role.yaml b/config/rbac/ingestorcluster_editor_role.yaml new file mode 100644 index 000000000..7faa1e8bb --- /dev/null +++ b/config/rbac/ingestorcluster_editor_role.yaml @@ -0,0 +1,30 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ingestorcluster-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get diff --git a/config/rbac/ingestorcluster_viewer_role.yaml b/config/rbac/ingestorcluster_viewer_role.yaml new file mode 100644 index 000000000..e02ffe8f4 --- /dev/null +++ b/config/rbac/ingestorcluster_viewer_role.yaml @@ -0,0 +1,26 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ingestorcluster-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 2f9c5122c..78231b303 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -47,9 +47,11 @@ rules: - apiGroups: - enterprise.splunk.com resources: + - busconfigurations - clustermanagers - clustermasters - indexerclusters + - ingestorclusters - licensemanagers - licensemasters - monitoringconsoles @@ -66,9 +68,11 @@ rules: - apiGroups: - enterprise.splunk.com resources: + - busconfigurations/finalizers - clustermanagers/finalizers - clustermasters/finalizers - indexerclusters/finalizers + - ingestorclusters/finalizers - licensemanagers/finalizers - licensemasters/finalizers - 
monitoringconsoles/finalizers @@ -79,9 +83,11 @@ rules: - apiGroups: - enterprise.splunk.com resources: + - busconfigurations/status - clustermanagers/status - clustermasters/status - indexerclusters/status + - ingestorclusters/status - licensemanagers/status - licensemasters/status - monitoringconsoles/status diff --git a/config/samples/enterprise_v4_busconfiguration.yaml b/config/samples/enterprise_v4_busconfiguration.yaml new file mode 100644 index 000000000..0cc1aed31 --- /dev/null +++ b/config/samples/enterprise_v4_busconfiguration.yaml @@ -0,0 +1,8 @@ +apiVersion: enterprise.splunk.com/v4 +kind: BusConfiguration +metadata: + name: busconfiguration-sample + finalizers: + - "enterprise.splunk.com/delete-pvc" +spec: {} +# TODO(user): Add fields here diff --git a/config/samples/enterprise_v4_ingestorcluster.yaml b/config/samples/enterprise_v4_ingestorcluster.yaml new file mode 100644 index 000000000..2d022fd99 --- /dev/null +++ b/config/samples/enterprise_v4_ingestorcluster.yaml @@ -0,0 +1,8 @@ +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: ingestorcluster-sample + finalizers: + - "enterprise.splunk.com/delete-pvc" +spec: {} +# TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 73c6d3649..88c71025d 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -13,4 +13,6 @@ resources: - enterprise_v4_searchheadcluster.yaml - enterprise_v4_clustermanager.yaml - enterprise_v4_licensemanager.yaml +- enterprise_v4_ingestorcluster.yaml +- enterprise_v4_busconfiguration.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/docs/AppFramework.md b/docs/AppFramework.md index 6c6da40f9..d9b66f8b4 100644 --- a/docs/AppFramework.md +++ b/docs/AppFramework.md @@ -21,6 +21,7 @@ - [App Framework Fields](#description-of-app-framework-specification-fields) - [App Framework Examples](#examples-of-app-framework-usage) - 
[Standalone](#how-to-use-the-app-framework-on-a-standalone-cr) + - [Ingestor Cluster](#how-to-use-the-app-framework-on-ingestor-cluster) - [Cluster Manager](#how-to-use-the-app-framework-on-indexer-cluster) - [Search Head Cluster](#how-to-use-the-app-framework-on-search-head-cluster) - [Multiple Scopes](#how-to-install-apps-for-both-local-and-cluster-scopes) @@ -812,11 +813,11 @@ Copy your Splunk App or Add-on archive files to the unique folders on the remote ## Description of App Framework Specification fields -The App Framework configuration is supported on the following Custom Resources: Standalone, ClusterManager, SearchHeadCluster, MonitoringConsole and LicenseManager. Configuring the App framework requires: +The App Framework configuration is supported on the following Custom Resources: Standalone, IngestorCluster, ClusterManager, SearchHeadCluster, MonitoringConsole and LicenseManager. Configuring the App framework requires: * Remote Source of Apps: Define the remote storage location, including unique folders, and the path to each folder. * Destination of Apps: Define which Custom Resources need to be configured. -* Scope of Apps: Define if the apps need to be installed and run locally (such as Standalone, Monitoring Console and License Manager,) or cluster-wide (such as Indexer Cluster, and Search Head Cluster.) +* Scope of Apps: Define if the apps need to be installed and run locally (such as Standalone, Monitoring Console, License Manager and Ingestor Cluster) or cluster-wide (such as Indexer Cluster, and Search Head Cluster.) 
Here is a typical App framework configuration in a Custom Resource definition: @@ -931,6 +932,7 @@ NOTE: If an app source name needs to be changed, make sure the name change is pe | Standalone | local | Yes | $SPLUNK_HOME/etc/apps | N/A | | LicenseManager | local | Yes | $SPLUNK_HOME/etc/apps | N/A | | MonitoringConsole | local | Yes | $SPLUNK_HOME/etc/apps | N/A | + | IngestorCluster | local | Yes | $SPLUNK_HOME/etc/apps | N/A | | IndexerCluster | N/A | No | N/A | $SPLUNK_HOME/etc/peer-apps | * `volume` refers to the remote storage volume name configured under the `volumes` stanza (see previous section.) @@ -1008,6 +1010,69 @@ volumes: Apply the Custom Resource specification: `kubectl apply -f Standalone.yaml` +### How to use the App Framework on Ingestor Cluster + +In this example, you'll deploy Ingestor Cluster with a remote storage volume, the location of the app archive, and set the installation location for the Splunk Enterprise Pod instance by using `scope`. + +Example using s3: IngestorCluster.yaml + +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: ic + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + replicas: 1 + appRepo: + appsRepoPollIntervalSeconds: 600 + defaults: + volumeName: volume_app_repo + scope: local + appSources: + - name: networkApps + location: networkAppsLoc/ + - name: authApps + location: authAppsLoc/ + volumes: + - name: volume_app_repo + storageType: s3 + provider: aws + path: bucket-app-framework/IngestorCluster-us/ + endpoint: https://s3-us-west-2.amazonaws.com + region: us-west-2 + secretRef: s3-secret +``` + +Volume variants for other providers (replace only the volumes stanza): + +Azure Blob volumes snippet: + +```yaml +volumes: + - name: volume_app_repo + storageType: blob + provider: azure + path: bucket-app-framework/IngestorCluster-us/ + endpoint: https://mystorageaccount.blob.core.windows.net + secretRef: azureblob-secret +``` + +GCP GCS volumes snippet: + +```yaml +volumes: + - 
name: volume_app_repo + storageType: gcs + provider: gcp + path: bucket-app-framework/IngestorCluster-us/ + endpoint: https://storage.googleapis.com + secretRef: gcs-secret +``` + +Apply the Custom Resource specification: `kubectl apply -f IngestorCluster.yaml` + ### How to use the App Framework on Indexer Cluster This example describes the installation of apps on an Indexer Cluster and Cluster Manager. This is achieved by deploying a ClusterManager CR with a remote storage volume, setting the location of the app archives, and the installation scope to support both local and cluster app path distribution. diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 6c0d2e87a..a5037627d 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -13,6 +13,7 @@ you can use to manage Splunk Enterprise deployments in your Kubernetes cluster. - [SearchHeadCluster Resource Spec Parameters](#searchheadcluster-resource-spec-parameters) - [ClusterManager Resource Spec Parameters](#clustermanager-resource-spec-parameters) - [IndexerCluster Resource Spec Parameters](#indexercluster-resource-spec-parameters) + - [IngestorCluster Resource Spec Parameters](#ingestorcluster-resource-spec-parameters) - [MonitoringConsole Resource Spec Parameters](#monitoringconsole-resource-spec-parameters) - [Examples of Guaranteed and Burstable QoS](#examples-of-guaranteed-and-burstable-qos) - [A Guaranteed QoS Class example:](#a-guaranteed-qos-class-example) @@ -134,7 +135,7 @@ spec: The following additional configuration parameters may be used for all Splunk Enterprise resources, including: `Standalone`, `LicenseManager`, -`SearchHeadCluster`, `ClusterManager` and `IndexerCluster`: +`SearchHeadCluster`, `ClusterManager`, `IndexerCluster` and `IngestorCluster`: | Key | Type | Description | | ------------------ | ------- | ----------------------------------------------------------------------------- | @@ -321,6 +322,27 @@ the `IndexerCluster` resource provides the following 
`Spec` configuration parame | ---------- | ------- | ----------------------------------------------------- | | replicas | integer | The number of indexer cluster members (minimum of 3, which is the default) | +## IngestorCluster Resource Spec Parameters + +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: ic +spec: + replicas: 3 + busConfigurationRef: + name: bus-config +``` +Note: `busConfigurationRef` is a required field in case of the IngestorCluster resource since it will be used to connect the IngestorCluster to the BusConfiguration resource. + +In addition to [Common Spec Parameters for All Resources](#common-spec-parameters-for-all-resources) +and [Common Spec Parameters for All Splunk Enterprise Resources](#common-spec-parameters-for-all-splunk-enterprise-resources), +the `IngestorCluster` resource provides the following `Spec` configuration parameters: + +| Key | Type | Description | +| ---------- | ------- | ----------------------------------------------------- | +| replicas | integer | The number of ingestor peers (minimum of 3, which is the default) | ## MonitoringConsole Resource Spec Parameters @@ -436,6 +458,7 @@ The Splunk Operator controller reconciles every Splunk Enterprise CR. 
However, t | clustermaster.enterprise.splunk.com | "clustermaster.enterprise.splunk.com/paused" | | clustermanager.enterprise.splunk.com | "clustermanager.enterprise.splunk.com/paused" | | indexercluster.enterprise.splunk.com | "indexercluster.enterprise.splunk.com/paused" | +| ingestorcluster.enterprise.splunk.com | "ingestorcluster.enterprise.splunk.com/paused" | | licensemaster.enterprise.splunk.com | "licensemaster.enterprise.splunk.com/paused" | | monitoringconsole.enterprise.splunk.com | "monitoringconsole.enterprise.splunk.com/paused" | | searchheadcluster.enterprise.splunk.com | "searchheadcluster.enterprise.splunk.com/paused" | @@ -505,6 +528,7 @@ Below is a table listing `app.kubernetes.io/name` values mapped to CRDs | clustermanager.enterprise.splunk.com | cluster-manager | | clustermaster.enterprise.splunk.com | cluster-master | | indexercluster.enterprise.splunk.com | indexer-cluster | +| ingestorcluster.enterprise.splunk.com | ingestor-cluster | | licensemanager.enterprise.splunk.com | license-manager | | licensemaster.enterprise.splunk.com | license-master | | monitoringconsole.enterprise.splunk.com | monitoring-console | diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md new file mode 100644 index 000000000..dd53922ff --- /dev/null +++ b/docs/IndexIngestionSeparation.md @@ -0,0 +1,1028 @@ +# Background + +Separation between ingestion and indexing services within Splunk Operator for Kubernetes enables the operator to independently manage the ingestion service while maintaining seamless integration with the indexing service. + +This separation enables: +- Independent scaling: Match resource allocation to ingestion or indexing workload. +- Data durability: Off‑load buffer management and retry logic to a durable message bus. +- Operational clarity: Separate monitoring dashboards for ingestion throughput vs indexing latency. 
+ +# Important Note + +> [!WARNING] +> **As of now, only brand new deployments are supported for Index and Ingestion Separation. No migration path is implemented, described or tested for existing deployments to move from a standard model to Index & Ingestion separation model.** + +# Document Variables + +- SPLUNK_IMAGE_VERSION: Splunk Enterprise Docker Image version + +# BusConfiguration + +BusConfiguration is introduced to store message bus configuration to be shared among IngestorCluster and IndexerCluster. + +## Spec + +BusConfiguration inputs can be found in the table below. As of now, only SQS type of message bus is supported. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| type | string | Type of message bus (Only sqs_smartbus as of now) | +| sqs | SQS | SQS message bus inputs | + +SQS message bus inputs can be found in the table below. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| queueName | string | Name of the SQS queue | +| authRegion | string | Region where the SQS queue is located | +| endpoint | string | AWS SQS endpoint | +| largeMessageStoreEndpoint | string | AWS S3 Large Message Store endpoint | +| largeMessageStorePath | string | S3 path for Large Message Store | +| deadLetterQueueName | string | Name of the SQS dead letter queue | + +Changing any of the bus inputs does not restart Splunk. It just updates the config values with no disruptions.
+ +## Example +``` +apiVersion: enterprise.splunk.com/v4 +kind: BusConfiguration +metadata: + name: bus-config +spec: + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test +``` + +# IngestorCluster + +IngestorCluster is introduced for high‑throughput data ingestion into a durable message bus. Its Splunk pods are configured to receive events (outputs.conf) and publish them to a message bus. + +## Spec + +In addition to common spec inputs, the IngestorCluster resource provides the following Spec configuration parameters. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| replicas | integer | The number of replicas (defaults to 3) | +| busConfigurationRef | corev1.ObjectReference | Message bus configuration reference | + +## Example + +The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Push Bus reference allows the user to specify queue and bucket settings for the ingestion process. + +In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. 
+ +``` +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: ingestor + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + serviceAccount: ingestor-sa + replicas: 3 + image: splunk/splunk:${SPLUNK_IMAGE_VERSION} + busConfigurationRef: + name: bus-config +``` + +# IndexerCluster + +IndexerCluster is enhanced to support index‑only mode enabling independent scaling, loss‑safe buffering, and simplified day‑0/day‑n management via Kubernetes CRDs. Its Splunk pods are configured to pull events from the bus (inputs.conf) and index them. + +## Spec + +In addition to common spec inputs, the IndexerCluster resource provides the following Spec configuration parameters. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| replicas | integer | The number of replicas (defaults to 3) | +| busConfigurationRef | corev1.ObjectReference | Message bus configuration reference | + +## Example + +The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Pull Bus reference allows the user to specify queue and bucket settings for the indexing process. + +In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. 
+ +``` +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + serviceAccount: ingestor-sa + image: splunk/splunk:${SPLUNK_IMAGE_VERSION} +--- +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: indexer + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + clusterManagerRef: + name: cm + serviceAccount: ingestor-sa + replicas: 3 + image: splunk/splunk:${SPLUNK_IMAGE_VERSION} + busConfigurationRef: + name: bus-config +``` + +# Common Spec + +Common spec values for all SOK Custom Resources can be found in [CustomResources doc](CustomResources.md). + +# Helm Charts + +An IngestorCluster template has been added to the splunk/splunk-enterprise Helm chart. The IndexerCluster template has also been enhanced to support new inputs. + +## Example + +Below examples describe how to define values for BusConfiguration, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. 
+ +``` +busConfiguration: + enabled: true + name: bus-config + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test +``` + +``` +ingestorCluster: + enabled: true + name: ingestor + replicaCount: 3 + serviceAccount: ingestor-sa + busConfigurationRef: + name: bus-config +``` + +``` +clusterManager: + enabled: true + name: cm + replicaCount: 1 + serviceAccount: ingestor-sa + +indexerCluster: + enabled: true + name: indexer + replicaCount: 3 + serviceAccount: ingestor-sa + clusterManagerRef: + name: cm + busConfigurationRef: + name: bus-config +``` + +# Service Account + +To be able to configure ingestion and indexing resources correctly in a secure manner, it is required to provide these resources with the service account that is configured with a minimum set of permissions to complete required operations. With this provided, the right credentials are used by Splunk to perform its tasks. + +## Example + +The example presented below configures the ingestor-sa service account by using the eksctl utility. It sets up the service account for the ind-ing-sep-demo cluster in region us-west-2 with AmazonS3FullAccess and AmazonSQSFullAccess access policies.
+ +``` +eksctl create iamserviceaccount \ + --name ingestor-sa \ + --cluster ind-ing-sep-demo \ + --region us-west-2 \ + --attach-policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess \ + --attach-policy-arn arn:aws:iam::aws:policy/AmazonSQSFullAccess \ + --approve \ + --override-existing-serviceaccounts +``` + +``` +$ kubectl describe sa ingestor-sa +Name: ingestor-sa +Namespace: default +Labels: app.kubernetes.io/managed-by=eksctl +Annotations: eks.amazonaws.com/role-arn: arn:aws:iam::111111111111:role/eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123 +Image pull secrets: +Mountable secrets: +Tokens: +Events: +``` + +``` +$ aws iam get-role --role-name eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123 +{ + "Role": { + "Path": "/", + "RoleName": "eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123", + "RoleId": "123456789012345678901", + "Arn": "arn:aws:iam::111111111111:role/eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123", + "CreateDate": "2025-08-07T12:03:31+00:00", + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::111111111111:oidc-provider/oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:aud": "sts.amazonaws.com", + "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:sub": "system:serviceaccount:default:ingestor-sa" + } + } + } + ] + }, + "Description": "", + "MaxSessionDuration": 3600, + "Tags": [ + { + "Key": "alpha.eksctl.io/cluster-name", + "Value": "ind-ing-sep-demo" + }, + { + "Key": "alpha.eksctl.io/iamserviceaccount-name", + "Value": "default/ingestor-sa" + }, + { + "Key": "alpha.eksctl.io/eksctl-version", + "Value": "0.211.0" + }, + { + "Key": "eksctl.cluster.k8s.io/v1alpha1/cluster-name", + "Value": 
"ind-ing-sep-demo" + } + ], + "RoleLastUsed": { + "LastUsedDate": "2025-08-18T08:47:27+00:00", + "Region": "us-west-2" + } + } +} +``` + +``` +$ aws iam list-attached-role-policies --role-name eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123 +{ + "AttachedPolicies": [ + { + "PolicyName": "AmazonSQSFullAccess", + "PolicyArn": "arn:aws:iam::aws:policy/AmazonSQSFullAccess" + }, + { + "PolicyName": "AmazonS3FullAccess", + "PolicyArn": "arn:aws:iam::aws:policy/AmazonS3FullAccess" + } + ] +} +``` + +## Documentation References + +- [IAM Roles for Service Accounts on eksctl Docs](https://eksctl.io/usage/iamserviceaccounts/) + +# Horizontal Pod Autoscaler + +To automatically adjust the number of replicas to serve the ingestion traffic effectively, it is recommended to use Horizontal Pod Autoscaler which scales the workload based on the actual demand. It enables the user to provide the metrics which are used to make decisions on removing unwanted replicas if there is not too much traffic or setting up the new ones if the traffic is too big to be handled by currently running resources. + +## Example + +The example presented below configures HorizontalPodAutoscaler named ingestor-hpa that resides in a default namespace (same namespace as resources it is managing) to scale IngestorCluster custom resource named ingestor. With average utilization set to 50, the HorizontalPodAutoscaler resource will try to keep the average utilization of the pods in the scaling target at 50%. It will be able to scale the replicas starting from the minimum number of 3 with the maximum number of 10 replicas.
+ +``` +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: ingestor-hpa +spec: + scaleTargetRef: + apiVersion: enterprise.splunk.com/v4 + kind: IngestorCluster + name: ingestor + minReplicas: 3 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 +``` + +## Documentation References + +- [Horizontal Pod Autoscaling on Kubernetes Docs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) + +# Grafana + +In order to monitor the resources, Grafana could be installed and configured on the cluster to present the setup on a dashboard in a series of useful diagrams and metrics. + +## Example + +In the following example, the dashboard presents ingestion and indexing data in the form of useful diagrams and metrics such as number of replicas or resource consumption. + +``` +{ + "id": null, + "uid": "splunk-autoscale", + "title": "Splunk Ingestion & Indexer Autoscaling with I/O & PV", + "schemaVersion": 27, + "version": 12, + "refresh": "5s", + "time": { "from": "now-30m", "to": "now" }, + "timezone": "browser", + "style": "dark", + "tags": ["splunk","autoscale","ingestion","indexer","io","pv"], + "graphTooltip": 1, + "panels": [ + { "id": 1, "type": "stat", "title": "Ingestion Replicas", "gridPos": {"x":0,"y":0,"w":4,"h":4}, "targets":[{"expr":"kube_statefulset_replicas{namespace=\"default\",statefulset=\"splunk-ingestor-ingestor\"}"}], "options": {"reduceOptions":{"calcs":["last"]},"orientation":"horizontal","colorMode":"value","graphMode":"none","textMode":"value","thresholds":{"mode":"absolute","steps":[{"value":null,"color":"#73BF69"},{"value":5,"color":"#EAB839"},{"value":8,"color":"#BF1B00"}]}}}, + { "id": 2, "type": "stat", "title": "Indexer Replicas", "gridPos": {"x":4,"y":0,"w":4,"h":4}, "targets":[{"expr":"kube_statefulset_replicas{namespace=\"default\",statefulset=\"splunk-indexer-indexer\"}"}], "options": 
{"reduceOptions":{"calcs":["last"]},"orientation":"horizontal","colorMode":"value","graphMode":"none","textMode":"value","thresholds":{"mode":"absolute","steps":[{"value":null,"color":"#73BF69"},{"value":5,"color":"#EAB839"},{"value":8,"color":"#BF1B00"}]}}}, + { "id": 3, "type": "timeseries","title": "Ingestion CPU (cores)","gridPos": {"x":8,"y":0,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_cpu_usage_seconds_total{namespace=\"default\",pod=~\"splunk-ingestor-ingestor-.*\"}[1m]))","legendFormat":"CPU (cores)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#FFA600"}}}, + { "id": 4, "type": "timeseries","title": "Ingestion Memory (MiB)","gridPos": {"x":16,"y":0,"w":8,"h":4},"targets":[{"expr":"sum(container_memory_usage_bytes{namespace=\"default\",pod=~\"splunk-ingestor-ingestor-.*\"}) / 1024 / 1024","legendFormat":"Memory (MiB)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#00AF91"}}}, + { "id": 5, "type": "timeseries","title": "Ingestion Network In (KB/s)","gridPos": {"x":0,"y":8,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_network_receive_bytes_total{namespace=\"default\",pod=~\"splunk-ingestor-ingestor-.*\"}[1m])) / 1024","legendFormat":"Net In (KB/s)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#59A14F"}}}, + { "id": 6, "type": "timeseries","title": "Ingestion Network Out (KB/s)","gridPos": {"x":8,"y":8,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_network_transmit_bytes_total{namespace=\"default\",pod=~\"splunk-ingestor-ingestor-.*\"}[1m])) / 1024","legendFormat":"Net Out (KB/s)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#E15759"}}}, + { "id": 7, "type": "timeseries","title": "Indexer CPU (cores)","gridPos": 
{"x":16,"y":4,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_cpu_usage_seconds_total{namespace=\"default\",pod=~\"splunk-indexer-indexer-.*\"}[1m]))","legendFormat":"CPU (cores)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#7D4E57"}}}, + { "id":8, "type": "timeseries","title": "Indexer Memory (MiB)","gridPos": {"x":0,"y":12,"w":8,"h":4},"targets":[{"expr":"sum(container_memory_usage_bytes{namespace=\"default\",pod=~\"splunk-indexer-indexer-.*\"}) / 1024 / 1024","legendFormat":"Memory (MiB)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#4E79A7"}}}, + { "id":9, "type": "timeseries","title": "Indexer Network In (KB/s)","gridPos": {"x":8,"y":12,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_network_receive_bytes_total{namespace=\"default\",pod=~\"splunk-indexer-indexer-.*\"}[1m])) / 1024","legendFormat":"Net In (KB/s)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#9467BD"}}}, + { "id":10, "type": "timeseries","title": "Indexer Network Out (KB/s)","gridPos": {"x":16,"y":12,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_network_transmit_bytes_total{namespace=\"default\",pod=~\"splunk-indexer-indexer-.*\"}[1m])) / 1024","legendFormat":"Net Out (KB/s)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#8C564B"}}}, + { "id":11, "type": "timeseries","title": "Ingestion Disk Read (KB/s)","gridPos": {"x":0,"y":16,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_fs_reads_bytes_total{namespace=\"default\",pod=~\"splunk-ingestor-ingestor-.*\"}[1m])) / 1024","legendFormat":"Disk Read (KB/s)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#1F77B4"}}}, + { 
"id":12, "type": "timeseries","title": "Ingestion Disk Write (KB/s)","gridPos": {"x":8,"y":16,"w":8,"h":4},"targets":[{"expr":"sum(rate(container_fs_writes_bytes_total{namespace=\"default\",pod=~\"splunk-ingestor-ingestor-.*\"}[1m])) / 1024","legendFormat":"Disk Write (KB/s)"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"},"color":{"mode":"fixed","fixedColor":"#FF7F0E"}}}, + { "id":13, "type": "timeseries","title": "Indexer PV Usage (GiB)","gridPos": {"x":0,"y":20,"w":8,"h":4},"targets":[{"expr":"kubelet_volume_stats_used_bytes{namespace=\"default\",persistentvolumeclaim=~\".*-indexer-.*\"} / 1024 / 1024 / 1024","legendFormat":"Used GiB"},{"expr":"kubelet_volume_stats_capacity_bytes{namespace=\"default\",persistentvolumeclaim=~\".*-indexer-.*\"} / 1024 / 1024 / 1024","legendFormat":"Capacity GiB"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"}}}, + { "id":14, "type": "timeseries","title": "Ingestion PV Usage (GiB)","gridPos": {"x":8,"y":20,"w":8,"h":4},"targets":[{"expr":"kubelet_volume_stats_used_bytes{namespace=\"default\",persistentvolumeclaim=~\".*-ingestor-.*\"} / 1024 / 1024 / 1024","legendFormat":"Used GiB"},{"expr":"kubelet_volume_stats_capacity_bytes{namespace=\"default\",persistentvolumeclaim=~\".*-ingestor-.*\"} / 1024 / 1024 / 1024","legendFormat":"Capacity GiB"}],"options":{"legend":{"displayMode":"list","placement":"bottom"},"yAxis":{"mode":"auto"}}} + ] +} +``` + +## Documentation References + +- [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) + +# Example + +1. Install CRDs and Splunk Operator for Kubernetes. 
+ +- SOK_IMAGE_VERSION: version of the image for Splunk Operator for Kubernetes + +``` +$ make install +``` + +``` +$ kubectl apply -f ${SOK_IMAGE_VERSION}/splunk-operator-cluster.yaml --server-side +``` + +``` +$ kubectl get po -n splunk-operator +NAME READY STATUS RESTARTS AGE +splunk-operator-controller-manager-785b89d45c-dwfkd 2/2 Running 0 4d3h +``` + +2. Create a service account. + +``` +$ eksctl create iamserviceaccount \ + --name ingestor-sa \ + --cluster ind-ing-sep-demo \ + --region us-west-2 \ + --attach-policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess \ + --attach-policy-arn arn:aws:iam::aws:policy/AmazonSQSFullAccess \ + --approve \ + --override-existing-serviceaccounts +``` + +``` +$ kubectl describe sa ingestor-sa +Name: ingestor-sa +Namespace: default +Labels: app.kubernetes.io/managed-by=eksctl +Annotations: eks.amazonaws.com/role-arn: arn:aws:iam::111111111111:role/eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123 +Image pull secrets: +Mountable secrets: +Tokens: +Events: +``` + +``` +$ aws iam get-role --role-name eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123 +{ + "Role": { + "Path": "/", + "RoleName": "eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123", + "RoleId": "123456789012345678901", + "Arn": "arn:aws:iam::111111111111:role/eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123", + "CreateDate": "2025-08-07T12:03:31+00:00", + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::111111111111:oidc-provider/oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:aud": "sts.amazonaws.com", + "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:sub": "system:serviceaccount:default:ingestor-sa" + } + } + } + ] + }, + 
"Description": "", + "MaxSessionDuration": 3600, + "Tags": [ + { + "Key": "alpha.eksctl.io/cluster-name", + "Value": "ind-ing-sep-demo" + }, + { + "Key": "alpha.eksctl.io/iamserviceaccount-name", + "Value": "default/ingestor-sa" + }, + { + "Key": "alpha.eksctl.io/eksctl-version", + "Value": "0.211.0" + }, + { + "Key": "eksctl.cluster.k8s.io/v1alpha1/cluster-name", + "Value": "ind-ing-sep-demo" + } + ], + "RoleLastUsed": { + "LastUsedDate": "2025-08-18T08:47:27+00:00", + "Region": "us-west-2" + } + } +} +``` + +``` +$ aws iam list-attached-role-policies --role-name eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123 +{ + "AttachedPolicies": [ + { + "PolicyName": "AmazonSQSFullAccess", + "PolicyArn": "arn:aws:iam::aws:policy/AmazonSQSFullAccess" + }, + { + "PolicyName": "AmazonS3FullAccess", + "PolicyArn": "arn:aws:iam::aws:policy/AmazonS3FullAccess" + } + ] +} +``` + +3. Install BusConfiguration resource. + +``` +$ cat bus.yaml +apiVersion: enterprise.splunk.com/v4 +kind: BusConfiguration +metadata: + name: bus + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test +``` + +``` +$ kubectl apply -f bus.yaml +``` + +``` +$ kubectl get busconfiguration +NAME PHASE AGE MESSAGE +bus Ready 20s +``` + +``` +kubectl describe busconfiguration +Name: bus +Namespace: default +Labels: +Annotations: +API Version: enterprise.splunk.com/v4 +Kind: BusConfiguration +Metadata: + Creation Timestamp: 2025-10-27T10:25:53Z + Finalizers: + enterprise.splunk.com/delete-pvc + Generation: 1 + Resource Version: 12345678 + UID: 12345678-1234-5678-1234-012345678911 +Spec: + Sqs: + Auth Region: us-west-2 + Dead Letter Queue Name: sqs-dlq-test + Endpoint: https://sqs.us-west-2.amazonaws.com + Large Message Store 
Endpoint: https://s3.us-west-2.amazonaws.com + Large Message Store Path: s3://ingestion/smartbus-test + Queue Name: sqs-test + Type: sqs_smartbus +Status: + Message: + Phase: Ready + Resource Rev Map: +Events: +``` + +4. Install IngestorCluster resource. + +``` +$ cat ingestor.yaml +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: ingestor + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + serviceAccount: ingestor-sa + replicas: 3 + image: splunk/splunk:${SPLUNK_IMAGE_VERSION} + busConfigurationRef: + name: bus-config +``` + +``` +$ kubectl apply -f ingestor.yaml +``` + +``` +$ kubectl get po +NAME READY STATUS RESTARTS AGE +splunk-ingestor-ingestor-0 1/1 Running 0 2m12s +splunk-ingestor-ingestor-1 1/1 Running 0 2m12s +splunk-ingestor-ingestor-2 1/1 Running 0 2m12s +``` + +``` +$ kubectl describe ingestorcluster ingestor +Name: ingestor +Namespace: default +Labels: +Annotations: +API Version: enterprise.splunk.com/v4 +Kind: IngestorCluster +Metadata: + Creation Timestamp: 2025-08-18T09:49:45Z + Generation: 1 + Resource Version: 12345678 + UID: 12345678-1234-1234-1234-1234567890123 +Spec: + Bus Configuration Ref: + Name: bus-config + Namespace: default + Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} + Replicas: 3 + Service Account: ingestor-sa +Status: + App Context: + App Repo: + App Install Period Seconds: 90 + Defaults: + Premium Apps Props: + Es Defaults: + Install Max Retries: 2 + Bundle Push Status: + Is Deployment In Progress: false + Last App Info Check Time: 0 + Version: 0 + Bus Configuration: + Sqs: + Auth Region: us-west-2 + Dead Letter Queue Name: sqs-dlq-test + Endpoint: https://sqs.us-west-2.amazonaws.com + Large Message Store Endpoint: https://s3.us-west-2.amazonaws.com + Large Message Store Path: s3://ingestion/smartbus-test + Queue Name: sqs-test + Type: sqs_smartbus + Message: + Phase: Ready + Ready Replicas: 3 + Replicas: 3 + Resource Rev Map: + Selector: app.kubernetes.io/instance=splunk-ingestor-ingestor + 
Tel App Installed: true +Events: +``` + +``` +$ kubectl exec -it splunk-ingestor-ingestor-0 -- sh +$ kubectl exec -it splunk-ingestor-ingestor-1 -- sh +$ kubectl exec -it splunk-ingestor-ingestor-2 -- sh +sh-4.4$ env | grep AWS +AWS_DEFAULT_REGION=us-west-2 +AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token +AWS_REGION=us-west-2 +AWS_ROLE_ARN=arn:aws:iam::111111111111:role/eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123 +AWS_STS_REGIONAL_ENDPOINTS=regional +sh-4.4$ cat /opt/splunk/etc/system/local/default-mode.conf +[pipeline:remotequeueruleset] +disabled = false + +[pipeline:ruleset] +disabled = true + +[pipeline:remotequeuetyping] +disabled = false + +[pipeline:remotequeueoutput] +disabled = false + +[pipeline:typing] +disabled = true + +[pipeline:indexerPipe] +disabled = true + +sh-4.4$ cat /opt/splunk/etc/system/local/outputs.conf +[remote_queue:sqs-test] +remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4 +remote_queue.sqs_smartbus.auth_region = us-west-2 +remote_queue.sqs_smartbus.dead_letter_queue.name = sqs-dlq-test +remote_queue.sqs_smartbus.encoding_format = s2s +remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com +remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com +remote_queue.sqs_smartbus.large_message_store.path = s3://ingestion/smartbus-test +remote_queue.sqs_smartbus.retry_policy = max_count +remote_queue.sqs_smartbus.send_interval = 5s +remote_queue.type = sqs_smartbus +``` + +5. Install IndexerCluster resource. 
+ +``` +$ cat idxc.yaml +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + image: splunk/splunk:${SPLUNK_IMAGE_VERSION} + serviceAccount: ingestor-sa +--- +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: indexer + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + image: splunk/splunk:${SPLUNK_IMAGE_VERSION} + replicas: 3 + clusterManagerRef: + name: cm + serviceAccount: ingestor-sa + busConfigurationRef: + name: bus-config +``` + +``` +$ kubectl apply -f idxc.yaml +``` + +``` +$ kubectl get po +NAME READY STATUS RESTARTS AGE +splunk-cm-cluster-manager-0 1/1 Running 0 15m +splunk-indexer-indexer-0 1/1 Running 0 12m +splunk-indexer-indexer-1 1/1 Running 0 12m +splunk-indexer-indexer-2 1/1 Running 0 12m +splunk-ingestor-ingestor-0 1/1 Running 0 27m +splunk-ingestor-ingestor-1 1/1 Running 0 29m +splunk-ingestor-ingestor-2 1/1 Running 0 31m +``` + +``` +$ kubectl exec -it splunk-indexer-indexer-0 -- sh +$ kubectl exec -it splunk-indexer-indexer-1 -- sh +$ kubectl exec -it splunk-indexer-indexer-2 -- sh +sh-4.4$ env | grep AWS +AWS_DEFAULT_REGION=us-west-2 +AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token +AWS_REGION=us-west-2 +AWS_ROLE_ARN=arn:aws:iam::111111111111:role/eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1-123456789123 +AWS_STS_REGIONAL_ENDPOINTS=regional +sh-4.4$ cat /opt/splunk/etc/system/local/inputs.conf + +[splunktcp://9997] +disabled = 0 + +[remote_queue:sqs-test] +remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4 +remote_queue.sqs_smartbus.auth_region = us-west-2 +remote_queue.sqs_smartbus.dead_letter_queue.name = sqs-dlq-test +remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com +remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com +remote_queue.sqs_smartbus.large_message_store.path = s3://ingestion/smartbus-test 
+remote_queue.sqs_smartbus.retry_policy = max_count +remote_queue.type = sqs_smartbus +sh-4.4$ cat /opt/splunk/etc/system/local/outputs.conf +[remote_queue:sqs-test] +remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4 +remote_queue.sqs_smartbus.auth_region = us-west-2 +remote_queue.sqs_smartbus.dead_letter_queue.name = sqs-dlq-test +remote_queue.sqs_smartbus.encoding_format = s2s +remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com +remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com +remote_queue.sqs_smartbus.large_message_store.path = s3://ingestion/smartbus-test +remote_queue.sqs_smartbus.retry_policy = max_count +remote_queue.sqs_smartbus.send_interval = 5s +remote_queue.type = sqs_smartbus +sh-4.4$ cat /opt/splunk/etc/system/local/default-mode.conf +[pipeline:remotequeueruleset] +disabled = false + +[pipeline:ruleset] +disabled = true + +[pipeline:remotequeuetyping] +disabled = false + +[pipeline:remotequeueoutput] +disabled = false + +[pipeline:typing] +disabled = true +``` + +6. Install Horizontal Pod Autoscaler for IngestorCluster. 
+ +``` +$ cat hpa-ing.yaml +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: ing-hpa +spec: + scaleTargetRef: + apiVersion: enterprise.splunk.com/v4 + kind: IngestorCluster + name: ingestor + minReplicas: 3 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 +``` + +``` +$ kubectl apply -f hpa-ing.yaml +``` + +``` +$ kubectl get hpa +NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE +ing-hpa IngestorCluster/ingestor cpu: /50% 3 10 0 10s +``` + +``` +kubectl top pod +NAME CPU(cores) MEMORY(bytes) +hec-locust-load-29270124-f86gj 790m 221Mi +splunk-cm-cluster-manager-0 154m 1696Mi +splunk-indexer-indexer-0 107m 1339Mi +splunk-indexer-indexer-1 187m 1052Mi +splunk-indexer-indexer-2 203m 1703Mi +splunk-ingestor-ingestor-0 97m 517Mi +splunk-ingestor-ingestor-1 64m 585Mi +splunk-ingestor-ingestor-2 57m 565Mi +``` + +``` +$ kubectl get po +NAME READY STATUS RESTARTS AGE +hec-locust-load-29270126-szgv2 1/1 Running 0 30s +splunk-cm-cluster-manager-0 1/1 Running 0 41m +splunk-indexer-indexer-0 1/1 Running 0 38m +splunk-indexer-indexer-1 1/1 Running 0 38m +splunk-indexer-indexer-2 1/1 Running 0 38m +splunk-ingestor-ingestor-0 1/1 Running 0 53m +splunk-ingestor-ingestor-1 1/1 Running 0 55m +splunk-ingestor-ingestor-2 1/1 Running 0 57m +splunk-ingestor-ingestor-3 0/1 Running 0 116s +splunk-ingestor-ingestor-4 0/1 Running 0 116s +``` + +``` +kubectl top pod +NAME CPU(cores) MEMORY(bytes) +hec-locust-load-29270126-szgv2 532m 72Mi +splunk-cm-cluster-manager-0 91m 1260Mi +splunk-indexer-indexer-0 112m 865Mi +splunk-indexer-indexer-1 115m 855Mi +splunk-indexer-indexer-2 152m 1696Mi +splunk-ingestor-ingestor-0 115m 482Mi +splunk-ingestor-ingestor-1 76m 496Mi +splunk-ingestor-ingestor-2 156m 553Mi +splunk-ingestor-ingestor-3 355m 846Mi +splunk-ingestor-ingestor-4 1036m 979Mi +``` + +``` +kubectl get hpa +NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE +ing-hpa 
IngestorCluster/ingestor cpu: 115%/50% 3 10 10 8m54s +``` + +7. Generate fake load. + +- HEC_TOKEN: HEC token for making fake calls + +``` +$ kubectl get secret splunk-default-secret -o yaml +apiVersion: v1 +data: + hec_token: HEC_TOKEN + idxc_secret: YWJjZGVmMTIzNDU2Cg== + pass4SymmKey: YWJjZGVmMTIzNDU2Cg== + password: YWJjZGVmMTIzNDU2Cg== + shc_secret: YWJjZGVmMTIzNDU2Cg== +kind: Secret +metadata: + creationTimestamp: "2025-08-26T10:15:11Z" + name: splunk-default-secret + namespace: default + ownerReferences: + - apiVersion: enterprise.splunk.com/v4 + controller: false + kind: IngestorCluster + name: ingestor + uid: 12345678-1234-1234-1234-1234567890123 + - apiVersion: enterprise.splunk.com/v4 + controller: false + kind: ClusterManager + name: cm + uid: 12345678-1234-1234-1234-1234567890125 + - apiVersion: enterprise.splunk.com/v4 + controller: false + kind: IndexerCluster + name: indexer + uid: 12345678-1234-1234-1234-1234567890124 + resourceVersion: "123456" + uid: 12345678-1234-1234-1234-1234567890126 +type: Opaque +``` + +``` +$ echo HEC_TOKEN | base64 -d +HEC_TOKEN +``` + +``` +cat loadgen.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hec-locust-config +data: + requirements.txt: | + locust + requests + urllib3 + + locustfile.py: | + import urllib3 + from locust import HttpUser, task, between + + # disable insecure‐ssl warnings + urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + + class HECUser(HttpUser): + wait_time = between(1, 2) + # use HTTPS and explicit port + host = "https://splunk-ingestor-ingestor-service:8088" + + def on_start(self): + # turn off SSL cert verification + self.client.verify = False + + @task + def send_event(self): + token = "HEC_TOKEN" + headers = { + "Authorization": f"Splunk {token}", + "Content-Type": "application/json" + } + payload = {"event": {"message": "load test", "value": 123}} + # this will POST to https://…:8088/services/collector/event + self.client.post( + "/services/collector/event", + 
json=payload, + headers=headers, + name="HEC POST" + ) +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: hec-locust-load +spec: + schedule: "*/2 * * * *" + concurrencyPolicy: Replace + startingDeadlineSeconds: 60 + jobTemplate: + spec: + backoffLimit: 1 + template: + spec: + containers: + - name: locust + image: python:3.9-slim + command: + - sh + - -c + - | + pip install --no-cache-dir -r /app/requirements.txt \ + && exec locust \ + -f /app/locustfile.py \ + --headless \ + -u 200 \ + -r 50 \ + --run-time 1m50s + volumeMounts: + - name: app + mountPath: /app + restartPolicy: OnFailure + volumes: + - name: app + configMap: + name: hec-locust-config + defaultMode: 0755 +``` + +``` +kubectl apply -f loadgen.yaml +``` + +``` +$ kubectl get cm +NAME DATA AGE +hec-locust-config 2 10s +kube-root-ca.crt 1 5d2h +splunk-cluster-manager-cm-configmap 1 28m +splunk-default-probe-configmap 3 58m +splunk-indexer-indexer-configmap 1 28m +splunk-ingestor-ingestor-configmap 1 48m +``` + +``` +$ kubectl get cj +NAME SCHEDULE TIMEZONE SUSPEND ACTIVE LAST SCHEDULE AGE +hec-locust-load */2 * * * * False 1 2s 26s +``` + +``` +$ kubectl get po +NAME READY STATUS RESTARTS AGE +hec-locust-load-29270114-zq7zz 1/1 Running 0 15s +splunk-cm-cluster-manager-0 1/1 Running 0 29m +splunk-indexer-indexer-0 1/1 Running 0 26m +splunk-indexer-indexer-1 1/1 Running 0 26m +splunk-indexer-indexer-2 1/1 Running 0 26m +splunk-ingestor-ingestor-0 1/1 Running 0 41m +splunk-ingestor-ingestor-1 1/1 Running 0 43m +splunk-ingestor-ingestor-2 1/1 Running 0 45m +``` + +``` +$ aws s3 ls s3://ingestion/smartbus-test/ + PRE 29DDC1B4-D43E-47D1-AC04-C87AC7298201/ + PRE 43E16731-7146-4397-8553-D68B5C2C8634/ + PRE C8A4D060-DE0D-4DCB-9690-01D8902825DC/ +``` \ No newline at end of file diff --git a/go.mod b/go.mod index 8f24791da..6efe24ecb 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,7 @@ require ( github.com/google/uuid v1.6.0 github.com/joho/godotenv v1.5.1 github.com/minio/minio-go/v7 v7.0.16 + 
github.com/onsi/ginkgo v1.16.5 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.38.0 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 107db504c..4083e6e70 100644 --- a/go.sum +++ b/go.sum @@ -110,6 +110,8 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -129,6 +131,7 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -193,6 +196,7 @@ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -245,8 +249,17 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.38.0 
h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY= github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -352,6 +365,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -359,6 +373,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -370,8 +385,6 @@ golang.org/x/net v0.41.0 
h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -384,13 +397,18 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -422,6 +440,7 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= @@ -479,13 +498,17 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= 
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml new file mode 100644 index 000000000..2a746968e --- /dev/null +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml @@ -0,0 +1,40 @@ +{{- if .Values.busConfiguration }} +{{- if .Values.busConfiguration.enabled }} +apiVersion: enterprise.splunk.com/v4 +kind: BusConfiguration +metadata: + name: {{ .Values.busConfiguration.name }} + namespace: {{ default .Release.Namespace .Values.busConfiguration.namespaceOverride }} + {{- with .Values.busConfiguration.additionalLabels }} + labels: +{{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.busConfiguration.additionalAnnotations }} + annotations: +{{ toYaml . 
| nindent 4 }} + {{- end }} +spec: + type: {{ .Values.busConfiguration.type | quote }} + {{- with .Values.busConfiguration.sqs }} + sqs: + {{- if .queueName }} + queueName: {{ .queueName | quote }} + {{- end }} + {{- if .authRegion }} + authRegion: {{ .authRegion | quote }} + {{- end }} + {{- if .endpoint }} + endpoint: {{ .endpoint | quote }} + {{- end }} + {{- if .largeMessageStoreEndpoint }} + largeMessageStoreEndpoint: {{ .largeMessageStoreEndpoint | quote }} + {{- end }} + {{- if .largeMessageStorePath }} + largeMessageStorePath: {{ .largeMessageStorePath | quote }} + {{- end }} + {{- if .deadLetterQueueName }} + deadLetterQueueName: {{ .deadLetterQueueName | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 09e90481e..77c24d500 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: List items: -{{- range default (default (until 1) .Values.sva.c3.indexerClusters) .Values.sva.m4.indexerClusters }} +{{- range default (default (list (dict "name" .Values.indexerCluster.name)) .Values.sva.c3.indexerClusters) .Values.sva.m4.indexerClusters }} - apiVersion: enterprise.splunk.com/v4 kind: IndexerCluster metadata: @@ -163,5 +163,12 @@ items: {{ toYaml . 
| indent 6 }} {{- end }} {{- end }} + {{- with $.Values.indexerCluster.busConfigurationRef }} + busConfigurationRef: + name: {{ .name }} + {{- if .namespace }} + namespace: {{ .namespace }} + {{- end }} + {{- end }} {{- end }} {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml new file mode 100644 index 000000000..fd72da310 --- /dev/null +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml @@ -0,0 +1,130 @@ +{{- if .Values.ingestorCluster.enabled }} +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: {{ .Values.ingestorCluster.name }} + namespace: {{ default .Release.Namespace .Values.ingestorCluster.namespaceOverride }} + {{- with .Values.ingestorCluster.additionalLabels }} + labels: + {{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingestorCluster.additionalAnnotations }} + annotations: + {{ toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ default 3 .Values.ingestorCluster.replicaCount }} + {{- if .Values.image.repository }} + image: {{ .Values.image.repository }} + {{- end }} + {{- if .Values.image.imagePullPolicy }} + imagePullPolicy: {{ .Values.image.imagePullPolicy }} + {{- end }} + {{- with .Values.image.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 4 }} + {{- end }} + {{- if .Values.ingestorCluster.serviceAccount }} + serviceAccount: {{ .Values.ingestorCluster.serviceAccount }} + {{- end }} + {{- if .Values.existingLicenseManager.name }} + licenseManagerRef: + name: {{ .Values.existingLicenseManager.name }} + {{- if .Values.existingLicenseManager.namespace }} + namespace: {{ .Values.existingLicenseManager.namespace }} + {{- end }} + {{- else if and .Values.licenseManager.enabled .Values.licenseManager.name }} + licenseManagerRef: + name: {{ .Values.licenseManager.name }} + {{- if .Values.licenseManager.namespaceOverride }} + namespace: {{ .Values.licenseManager.namespaceOverride }} + {{- end }} + {{- end }} + {{- if .Values.existingMonitoringConsole.name }} + monitoringConsoleRef: + name: {{ .Values.existingMonitoringConsole.name }} + {{- if .Values.existingMonitoringConsole.namespace }} + namespace: {{ .Values.existingMonitoringConsole.namespace }} + {{- end }} + {{- else if and .Values.monitoringConsole.enabled .Values.monitoringConsole.name }} + monitoringConsoleRef: + name: {{ .Values.monitoringConsole.name }} + {{- if .Values.monitoringConsole.namespaceOverride }} + namespace: {{ .Values.monitoringConsole.namespaceOverride }} + {{- end }} + {{- end }} + livenessInitialDelaySeconds: {{ default 300 .Values.ingestorCluster.livenessInitialDelaySeconds }} + readinessInitialDelaySeconds: {{ default 10 .Values.ingestorCluster.readinessInitialDelaySeconds }} + {{- with .Values.ingestorCluster.startupProbe }} + startupProbe: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingestorCluster.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingestorCluster.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingestorCluster.etcVolumeStorageConfig }} + etcVolumeStorageConfig: + {{- toYaml . 
| nindent 4 }} + {{- end }} + {{- with .Values.ingestorCluster.varVolumeStorageConfig }} + varVolumeStorageConfig: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingestorCluster.resources }} + resources: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingestorCluster.serviceTemplate }} + serviceTemplate: +{{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingestorCluster.tolerations }} + tolerations: +{{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingestorCluster.affinity }} + affinity: +{{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingestorCluster.topologySpreadConstraints }} + topologySpreadConstraints: +{{- toYaml . | nindent 4 }} + {{- end }} + {{- with $.Values.ingestorCluster.busConfigurationRef }} + busConfigurationRef: + name: {{ $.Values.ingestorCluster.busConfigurationRef.name }} + {{- if $.Values.ingestorCluster.busConfigurationRef.namespace }} + namespace: {{ $.Values.ingestorCluster.busConfigurationRef.namespace }} + {{- end }} + {{- end }} + {{- with .Values.ingestorCluster.extraEnv }} + extraEnv: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingestorCluster.appRepo }} + appRepo: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingestorCluster.volumes }} + volumes: +{{ toYaml . 
| indent 4 }} + {{- end }} + {{- if .Values.ingestorCluster.licenseUrl }} + licenseUrl: {{ .Values.ingestorCluster.licenseUrl }} + {{- end }} + {{- if .Values.ingestorCluster.defaultsUrl }} + defaultsUrl: {{ .Values.ingestorCluster.defaultsUrl }} + {{- end }} + {{- if .Values.ingestorCluster.defaults }} + defaults: |- + {{ toYaml .Values.ingestorCluster.defaults | indent 4 }} + {{- end }} + {{- if .Values.ingestorCluster.defaultsUrlApps }} + defaultsUrlApps: {{ .Values.ingestorCluster.defaultsUrlApps }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/values.yaml b/helm-chart/splunk-enterprise/values.yaml index da6308b1f..e49073398 100644 --- a/helm-chart/splunk-enterprise/values.yaml +++ b/helm-chart/splunk-enterprise/values.yaml @@ -350,6 +350,8 @@ indexerCluster: # nodeAffinityPolicy: [Honor|Ignore] # optional; beta since v1.26 # nodeTaintsPolicy: [Honor|Ignore] # optional; beta since v1.26 + busConfigurationRef: {} + searchHeadCluster: enabled: false @@ -808,3 +810,93 @@ extraManifests: [] # spec: # securityPolicy: # name: "gcp-cloud-armor-policy-test" + +ingestorCluster: + + enabled: false + + name: "ingestor" + + namespaceOverride: "" + + additionalLabels: {} + + additionalAnnotations: {} + + replicaCount: 3 + + appRepo: {} + # appsRepoPollIntervalSeconds: + # defaults: + # volumeName: + # scope: + # appSources: + # - name: + # location: + # volumes: + # - name: + # storageType: + # provider: + # path: + # endpoint: + # region: + # secretRef: + + volumes: [] + + extraEnv: [] + # - name: + # value: + + livenessInitialDelaySeconds: 300 + + readinessInitialDelaySeconds: 10 + + # Set Probes for Splunk instance pod containers + # reference: https://github.com/splunk/splunk-operator/blob/main/docs/HealthCheck.md + startupProbe: {} + # initialDelaySeconds: 40 + # timeoutSeconds: 30 + # periodSeconds: 30 + # failureThreshold: 12 + livenessProbe: {} + # initialDelaySeconds: 30 + # timeoutSeconds: 30 + # periodSeconds: 30 + 
# failureThreshold: 3 + readinessProbe: {} + # initialDelaySeconds: 10 + # timeoutSeconds: 5 + # periodSeconds: 5 + # failureThreshold: 3 + + etcVolumeStorageConfig: + ephemeralStorage: false + storageCapacity: 10Gi + # storageClassName: gp2 + + varVolumeStorageConfig: + ephemeralStorage: false + storageCapacity: 100Gi + # storageClassName: gp2 + + resources: {} + # requests: + # memory: "2Gi" + # cpu: "4" + # limits: + # memory: "12Gi" + # cpu: "24" + + serviceAccount: "" + + # ServiceTemplate is a template used to create Kubernetes services + serviceTemplate: {} + + topologySpreadConstraints: [] + + tolerations: [] + + affinity: {} + + busConfigurationRef: {} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml new file mode 100644 index 000000000..1475add32 --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml @@ -0,0 +1,55 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . 
}}-busconfiguration-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml new file mode 100644 index 000000000..500b1d100 --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml @@ -0,0 +1,47 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . 
}}-busconfiguration-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/ingestorcluster_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/ingestorcluster_editor_role.yaml new file mode 100644 index 000000000..b161aea9c --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/ingestorcluster_editor_role.yaml @@ -0,0 +1,55 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-ingestorcluster-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . 
}}-ingestorcluster-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/ingestorcluster_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/ingestorcluster_viewer_role.yaml new file mode 100644 index 000000000..47287423f --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/ingestorcluster_viewer_role.yaml @@ -0,0 +1,47 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-ingestorcluster-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . 
}}-ingestorcluster-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/role.yaml b/helm-chart/splunk-operator/templates/rbac/role.yaml index 2a2869654..4eab5275e 100644 --- a/helm-chart/splunk-operator/templates/rbac/role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/role.yaml @@ -222,6 +222,58 @@ rules: - get - patch - update +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get + - patch + - update +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations/status + verbs: + - get + - patch + - update - apiGroups: - enterprise.splunk.com resources: diff --git a/internal/controller/busconfiguration_controller.go b/internal/controller/busconfiguration_controller.go new file mode 100644 index 000000000..c8519c017 --- /dev/null +++ b/internal/controller/busconfiguration_controller.go @@ -0,0 +1,120 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "time" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pkg/errors" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller/common" + metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics" + enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" +) + +// BusConfigurationReconciler reconciles a BusConfiguration object +type BusConfigurationReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// TODO(user): Modify the Reconcile function to compare the state specified by +// the BusConfiguration object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile +func (r *BusConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "BusConfiguration")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "BusConfiguration") + + reqLogger := log.FromContext(ctx) + reqLogger = reqLogger.WithValues("busconfiguration", req.NamespacedName) + + // Fetch the BusConfiguration + instance := &enterpriseApi.BusConfiguration{} + err := r.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8serrors.IsNotFound(err) { + // Request object not found, could have been deleted after + // reconcile request. Owned objects are automatically + // garbage collected. For additional cleanup logic use + // finalizers. Return and don't requeue + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. 
+ return ctrl.Result{}, errors.Wrap(err, "could not load bus configuration data") + } + + // If the reconciliation is paused, requeue + annotations := instance.GetAnnotations() + if annotations != nil { + if _, ok := annotations[enterpriseApi.BusConfigurationPausedAnnotation]; ok { + return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil + } + } + + reqLogger.Info("start", "CR version", instance.GetResourceVersion()) + + result, err := ApplyBusConfiguration(ctx, r.Client, instance) + if result.Requeue && result.RequeueAfter != 0 { + reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) + } + + return result, err +} + +var ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + return enterprise.ApplyBusConfiguration(ctx, client, instance) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *BusConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&enterpriseApi.BusConfiguration{}). + WithEventFilter(predicate.Or( + common.GenerationChangedPredicate(), + common.AnnotationChangedPredicate(), + common.LabelChangedPredicate(), + common.SecretChangedPredicate(), + common.ConfigMapChangedPredicate(), + common.StatefulsetChangedPredicate(), + common.PodChangedPredicate(), + common.CrdChangedPredicate(), + )). + WithOptions(controller.Options{ + MaxConcurrentReconciles: enterpriseApi.TotalWorker, + }). + Complete(r) +} diff --git a/internal/controller/busconfiguration_controller_test.go b/internal/controller/busconfiguration_controller_test.go new file mode 100644 index 000000000..e08154211 --- /dev/null +++ b/internal/controller/busconfiguration_controller_test.go @@ -0,0 +1,242 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller/testutils" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("BusConfiguration Controller", func() { + BeforeEach(func() { + time.Sleep(2 * time.Second) + }) + + AfterEach(func() { + + }) + + Context("BusConfiguration Management", func() { + + It("Get BusConfiguration custom resource should fail", func() { + namespace := "ns-splunk-bus-1" + ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + _, err := GetBusConfiguration("test", nsSpecs.Name) + Expect(err.Error()).Should(Equal("busconfigurations.enterprise.splunk.com \"test\" not found")) + + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Create BusConfiguration custom resource with annotations should pause", func() { + namespace := "ns-splunk-bus-2" + annotations := make(map[string]string) + 
annotations[enterpriseApi.BusConfigurationPausedAnnotation] = "" + ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + CreateBusConfiguration("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + icSpec, _ := GetBusConfiguration("test", nsSpecs.Name) + annotations = map[string]string{} + icSpec.Annotations = annotations + icSpec.Status.Phase = "Ready" + UpdateBusConfiguration(icSpec, enterpriseApi.PhaseReady) + DeleteBusConfiguration("test", nsSpecs.Name) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Create BusConfiguration custom resource should succeeded", func() { + namespace := "ns-splunk-bus-3" + ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + annotations := make(map[string]string) + CreateBusConfiguration("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + DeleteBusConfiguration("test", nsSpecs.Name) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Cover Unused methods", func() { + namespace := "ns-splunk-bus-4" + ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + ctx := context.TODO() + builder := 
fake.NewClientBuilder() + c := builder.Build() + instance := BusConfigurationReconciler{ + Client: c, + Scheme: scheme.Scheme, + } + request := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + Namespace: namespace, + }, + } + _, err := instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + bcSpec := testutils.NewBusConfiguration("test", namespace, "image") + Expect(c.Create(ctx, bcSpec)).Should(Succeed()) + + annotations := make(map[string]string) + annotations[enterpriseApi.BusConfigurationPausedAnnotation] = "" + bcSpec.Annotations = annotations + Expect(c.Update(ctx, bcSpec)).Should(Succeed()) + + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + annotations = map[string]string{} + bcSpec.Annotations = annotations + Expect(c.Update(ctx, bcSpec)).Should(Succeed()) + + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + bcSpec.DeletionTimestamp = &metav1.Time{} + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + }) + + }) +}) + +func GetBusConfiguration(name string, namespace string) (*enterpriseApi.BusConfiguration, error) { + By("Expecting BusConfiguration custom resource to be retrieved successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + bc := &enterpriseApi.BusConfiguration{} + + err := k8sClient.Get(context.Background(), key, bc) + if err != nil { + return nil, err + } + + return bc, err +} + +func CreateBusConfiguration(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase) *enterpriseApi.BusConfiguration { + By("Expecting BusConfiguration custom resource to be created successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + ingSpec := &enterpriseApi.BusConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + } + + 
Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed()) + time.Sleep(2 * time.Second) + + bc := &enterpriseApi.BusConfiguration{} + Eventually(func() bool { + _ = k8sClient.Get(context.Background(), key, bc) + if status != "" { + fmt.Printf("status is set to %v", status) + bc.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), bc)).Should(Succeed()) + time.Sleep(2 * time.Second) + } + return true + }, timeout, interval).Should(BeTrue()) + + return bc +} + +func UpdateBusConfiguration(instance *enterpriseApi.BusConfiguration, status enterpriseApi.Phase) *enterpriseApi.BusConfiguration { + By("Expecting BusConfiguration custom resource to be updated successfully") + + key := types.NamespacedName{ + Name: instance.Name, + Namespace: instance.Namespace, + } + + bcSpec := testutils.NewBusConfiguration(instance.Name, instance.Namespace, "image") + bcSpec.ResourceVersion = instance.ResourceVersion + Expect(k8sClient.Update(context.Background(), bcSpec)).Should(Succeed()) + time.Sleep(2 * time.Second) + + bc := &enterpriseApi.BusConfiguration{} + Eventually(func() bool { + _ = k8sClient.Get(context.Background(), key, bc) + if status != "" { + fmt.Printf("status is set to %v", status) + bc.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), bc)).Should(Succeed()) + time.Sleep(2 * time.Second) + } + return true + }, timeout, interval).Should(BeTrue()) + + return bc +} + +func DeleteBusConfiguration(name string, namespace string) { + By("Expecting BusConfiguration custom resource to be deleted successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + + Eventually(func() error { + bc := &enterpriseApi.BusConfiguration{} + _ = k8sClient.Get(context.Background(), key, bc) + err := k8sClient.Delete(context.Background(), bc) + return err + }, timeout, interval).Should(Succeed()) +} diff --git a/internal/controller/indexercluster_controller.go 
b/internal/controller/indexercluster_controller.go index bc9a6c9f5..3cc840baa 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -31,6 +31,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -171,6 +172,34 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IndexerCluster{}, )). + Watches(&enterpriseApi.BusConfiguration{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + bc, ok := obj.(*enterpriseApi.BusConfiguration) + if !ok { + return nil + } + var list enterpriseApi.IndexerClusterList + if err := r.Client.List(ctx, &list); err != nil { + return nil + } + var reqs []reconcile.Request + for _, ic := range list.Items { + ns := ic.Spec.BusConfigurationRef.Namespace + if ns == "" { + ns = ic.Namespace + } + if ic.Spec.BusConfigurationRef.Name == bc.Name && ns == bc.Namespace { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + } + } + return reqs + }), + ). WithOptions(controller.Options{ MaxConcurrentReconciles: enterpriseApi.TotalWorker, }). diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go new file mode 100644 index 000000000..a2c5846df --- /dev/null +++ b/internal/controller/ingestorcluster_controller.go @@ -0,0 +1,176 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pkg/errors" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller/common" + metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics" + enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" +) + +// IngestorClusterReconciler reconciles a IngestorCluster object +type IngestorClusterReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// TODO(user): Modify the Reconcile function to compare the state specified by +// the IngestorCluster object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/reconcile +func (r *IngestorClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "IngestorCluster")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "IngestorCluster") + + reqLogger := log.FromContext(ctx) + reqLogger = reqLogger.WithValues("ingestorcluster", req.NamespacedName) + + // Fetch the IngestorCluster + instance := &enterpriseApi.IngestorCluster{} + err := r.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8serrors.IsNotFound(err) { + // Request object not found, could have been deleted after + // reconcile request. Owned objects are automatically + // garbage collected. For additional cleanup logic use + // finalizers. Return and don't requeue + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. 
+ return ctrl.Result{}, errors.Wrap(err, "could not load ingestor cluster data") + } + + // If the reconciliation is paused, requeue + annotations := instance.GetAnnotations() + if annotations != nil { + if _, ok := annotations[enterpriseApi.IngestorClusterPausedAnnotation]; ok { + return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil + } + } + + reqLogger.Info("start", "CR version", instance.GetResourceVersion()) + + result, err := ApplyIngestorCluster(ctx, r.Client, instance) + if result.Requeue && result.RequeueAfter != 0 { + reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) + } + + return result, err +} + +var ApplyIngestorCluster = func(ctx context.Context, client client.Client, instance *enterpriseApi.IngestorCluster) (reconcile.Result, error) { + return enterprise.ApplyIngestorCluster(ctx, client, instance) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&enterpriseApi.IngestorCluster{}). + WithEventFilter(predicate.Or( + common.GenerationChangedPredicate(), + common.AnnotationChangedPredicate(), + common.LabelChangedPredicate(), + common.SecretChangedPredicate(), + common.ConfigMapChangedPredicate(), + common.StatefulsetChangedPredicate(), + common.PodChangedPredicate(), + common.CrdChangedPredicate(), + )). + Watches(&appsv1.StatefulSet{}, + handler.EnqueueRequestForOwner( + mgr.GetScheme(), + mgr.GetRESTMapper(), + &enterpriseApi.IngestorCluster{}, + )). + Watches(&corev1.Secret{}, + handler.EnqueueRequestForOwner( + mgr.GetScheme(), + mgr.GetRESTMapper(), + &enterpriseApi.IngestorCluster{}, + )). + Watches(&corev1.Pod{}, + handler.EnqueueRequestForOwner( + mgr.GetScheme(), + mgr.GetRESTMapper(), + &enterpriseApi.IngestorCluster{}, + )). 
+ Watches(&corev1.ConfigMap{}, + handler.EnqueueRequestForOwner( + mgr.GetScheme(), + mgr.GetRESTMapper(), + &enterpriseApi.IngestorCluster{}, + )). + Watches(&enterpriseApi.BusConfiguration{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + bc, ok := obj.(*enterpriseApi.BusConfiguration) + if !ok { + return nil + } + var list enterpriseApi.IngestorClusterList + if err := r.Client.List(ctx, &list); err != nil { + return nil + } + var reqs []reconcile.Request + for _, ic := range list.Items { + ns := ic.Spec.BusConfigurationRef.Namespace + if ns == "" { + ns = ic.Namespace + } + if ic.Spec.BusConfigurationRef.Name == bc.Name && ns == bc.Namespace { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + } + } + return reqs + }), + ). + WithOptions(controller.Options{ + MaxConcurrentReconciles: enterpriseApi.TotalWorker, + }). + Complete(r) +} diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go new file mode 100644 index 000000000..5e7ae4b73 --- /dev/null +++ b/internal/controller/ingestorcluster_controller_test.go @@ -0,0 +1,253 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller/testutils" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("IngestorCluster Controller", func() { + BeforeEach(func() { + time.Sleep(2 * time.Second) + }) + + AfterEach(func() { + + }) + + Context("IngestorCluster Management", func() { + + It("Get IngestorCluster custom resource should fail", func() { + namespace := "ns-splunk-ing-1" + ApplyIngestorCluster = func(ctx context.Context, client client.Client, instance *enterpriseApi.IngestorCluster) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + _, err := GetIngestorCluster("test", nsSpecs.Name) + Expect(err.Error()).Should(Equal("ingestorclusters.enterprise.splunk.com \"test\" not found")) + + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Create IngestorCluster custom resource with annotations should pause", func() { + namespace := "ns-splunk-ing-2" + annotations := make(map[string]string) + annotations[enterpriseApi.IngestorClusterPausedAnnotation] = "" + ApplyIngestorCluster = func(ctx context.Context, client client.Client, instance *enterpriseApi.IngestorCluster) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + icSpec, _ := 
GetIngestorCluster("test", nsSpecs.Name) + annotations = map[string]string{} + icSpec.Annotations = annotations + icSpec.Status.Phase = "Ready" + UpdateIngestorCluster(icSpec, enterpriseApi.PhaseReady) + DeleteIngestorCluster("test", nsSpecs.Name) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Create IngestorCluster custom resource should succeeded", func() { + namespace := "ns-splunk-ing-3" + ApplyIngestorCluster = func(ctx context.Context, client client.Client, instance *enterpriseApi.IngestorCluster) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + annotations := make(map[string]string) + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + DeleteIngestorCluster("test", nsSpecs.Name) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Cover Unused methods", func() { + namespace := "ns-splunk-ing-4" + ApplyIngestorCluster = func(ctx context.Context, client client.Client, instance *enterpriseApi.IngestorCluster) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + ctx := context.TODO() + builder := fake.NewClientBuilder() + c := builder.Build() + instance := IngestorClusterReconciler{ + Client: c, + Scheme: scheme.Scheme, + } + request := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + Namespace: namespace, + }, + } + _, err := instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + icSpec := testutils.NewIngestorCluster("test", namespace, "image") + Expect(c.Create(ctx, icSpec)).Should(Succeed()) + + annotations := make(map[string]string) + 
annotations[enterpriseApi.IngestorClusterPausedAnnotation] = "" + icSpec.Annotations = annotations + Expect(c.Update(ctx, icSpec)).Should(Succeed()) + + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + annotations = map[string]string{} + icSpec.Annotations = annotations + Expect(c.Update(ctx, icSpec)).Should(Succeed()) + + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + icSpec.DeletionTimestamp = &metav1.Time{} + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + }) + + }) +}) + +func GetIngestorCluster(name string, namespace string) (*enterpriseApi.IngestorCluster, error) { + By("Expecting IngestorCluster custom resource to be retrieved successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + ic := &enterpriseApi.IngestorCluster{} + + err := k8sClient.Get(context.Background(), key, ic) + if err != nil { + return nil, err + } + + return ic, err +} + +func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase) *enterpriseApi.IngestorCluster { + By("Expecting IngestorCluster custom resource to be created successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + ingSpec := &enterpriseApi.IngestorCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: enterpriseApi.IngestorClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + }, + Replicas: 3, + BusConfigurationRef: corev1.ObjectReference{ + Name: "busConfig", + }, + }, + } + + Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed()) + time.Sleep(2 * time.Second) + + ic := &enterpriseApi.IngestorCluster{} + Eventually(func() bool { + _ = k8sClient.Get(context.Background(), key, ic) + if status != "" { + fmt.Printf("status is set to %v", 
status) + ic.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), ic)).Should(Succeed()) + time.Sleep(2 * time.Second) + } + return true + }, timeout, interval).Should(BeTrue()) + + return ic +} + +func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enterpriseApi.Phase) *enterpriseApi.IngestorCluster { + By("Expecting IngestorCluster custom resource to be updated successfully") + + key := types.NamespacedName{ + Name: instance.Name, + Namespace: instance.Namespace, + } + + icSpec := testutils.NewIngestorCluster(instance.Name, instance.Namespace, "image") + icSpec.ResourceVersion = instance.ResourceVersion + Expect(k8sClient.Update(context.Background(), icSpec)).Should(Succeed()) + time.Sleep(2 * time.Second) + + ic := &enterpriseApi.IngestorCluster{} + Eventually(func() bool { + _ = k8sClient.Get(context.Background(), key, ic) + if status != "" { + fmt.Printf("status is set to %v", status) + ic.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), ic)).Should(Succeed()) + time.Sleep(2 * time.Second) + } + return true + }, timeout, interval).Should(BeTrue()) + + return ic +} + +func DeleteIngestorCluster(name string, namespace string) { + By("Expecting IngestorCluster custom resource to be deleted successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + + Eventually(func() error { + ic := &enterpriseApi.IngestorCluster{} + _ = k8sClient.Get(context.Background(), key, ic) + err := k8sClient.Delete(context.Background(), ic) + return err + }, timeout, interval).Should(Succeed()) +} diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index be2c1a50f..52c4c1a1d 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -147,6 +147,12 @@ var _ = BeforeSuite(func(ctx context.Context) { }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } + if err := 
(&IngestorClusterReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) + } go func() { err = k8sManager.Start(ctrl.SetupSignalHandler()) diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go index 50ec481cb..9ca78593c 100644 --- a/internal/controller/testutils/new.go +++ b/internal/controller/testutils/new.go @@ -45,6 +45,40 @@ func NewStandalone(name, ns, image string) *enterpriseApi.Standalone { return ad } +// NewIngestorCluster returns new IngestorCluster instance with its config hash +func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { + return &enterpriseApi.IngestorCluster{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + Spec: enterpriseApi.IngestorClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ImagePullPolicy: string(pullPolicy)}, + }, + Replicas: 3, + BusConfigurationRef: corev1.ObjectReference{ + Name: "busConfig", + }, + }, + } +} + +// NewBusConfiguration returns new BusConfiguration instance with its config hash +func NewBusConfiguration(name, ns, image string) *enterpriseApi.BusConfiguration { + return &enterpriseApi.BusConfiguration{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } +} + // NewSearchHeadCluster returns new serach head cluster instance with its config hash func NewSearchHeadCluster(name, ns, image string) *enterpriseApi.SearchHeadCluster { @@ -279,6 +313,9 @@ func NewIndexerCluster(name, ns, image string) 
*enterpriseApi.IndexerCluster { ad.Spec = enterpriseApi.IndexerClusterSpec{ CommonSplunkSpec: *cs, + BusConfigurationRef: corev1.ObjectReference{ + Name: "busConfig", + }, } return ad } diff --git a/kuttl/tests/helm/index-and-ingest-separation/00-install-operator.yaml b/kuttl/tests/helm/index-and-ingest-separation/00-install-operator.yaml new file mode 100644 index 000000000..602ebe0c1 --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/00-install-operator.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: ../script/installoperator.sh + background: false \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml new file mode 100644 index 000000000..5ac9b4a7a --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -0,0 +1,118 @@ +--- +# assert for bus configurtion custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: BusConfiguration +metadata: + name: bus-config +spec: + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test +status: + phase: Ready + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: indexer 
+spec: + replicas: 3 + busConfigurationRef: + name: bus-config +status: + phase: Ready + busConfiguration: + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-indexer-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-indexer-indexer-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: ingestor +spec: + replicas: 3 + busConfigurationRef: + name: bus-config +status: + phase: Ready + busConfiguration: + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-ingestor-ingestor +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-ingestor-ingestor-secret-v1 \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml new file mode 100644 index 000000000..0e9f5d58e --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm install splunk-index-ingest-sep $HELM_REPO_PATH/splunk-enterprise -f 
splunk_index_ingest_sep.yaml + namespaced: true \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml new file mode 100644 index 000000000..daa1ab4ab --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -0,0 +1,30 @@ +--- +# assert for ingestor cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: ingestor +spec: + replicas: 4 + busConfigurationRef: + name: bus-config +status: + phase: Ready + busConfiguration: + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test + +--- +# check for stateful sets and replicas updated +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-ingestor-ingestor +status: + replicas: 4 diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml new file mode 100644 index 000000000..731faf145 --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm upgrade splunk-index-ingest-sep $HELM_REPO_PATH/splunk-enterprise --reuse-values --set ingestorCluster.replicaCount=4 + namespaced: true diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml new file mode 100644 index 000000000..85bf05dfe --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm uninstall splunk-index-ingest-sep + 
namespaced: true diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml new file mode 100644 index 000000000..6e87733cc --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -0,0 +1,39 @@ +splunk-operator: + enabled: false + splunkOperator: + clusterWideAccess: false + persistentVolumeClaim: + storageClassName: gp2 + +busConfiguration: + enabled: true + name: bus-config + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test + +ingestorCluster: + enabled: true + name: ingestor + replicaCount: 3 + busConfigurationRef: + name: bus-config + +clusterManager: + enabled: true + name: cm + replicaCount: 1 + +indexerCluster: + enabled: true + name: indexer + replicaCount: 3 + clusterManagerRef: + name: cm + busConfigurationRef: + name: bus-config diff --git a/pkg/splunk/client/enterprise.go b/pkg/splunk/client/enterprise.go index 8bc36b08a..6eb4d2f87 100644 --- a/pkg/splunk/client/enterprise.go +++ b/pkg/splunk/client/enterprise.go @@ -26,6 +26,7 @@ import ( "strings" "time" + "github.com/go-logr/logr" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" ) @@ -966,3 +967,70 @@ func (c *SplunkClient) RestartSplunk() error { expectedStatus := []int{200} return c.Do(request, expectedStatus, nil) } + +// Updates conf files and their properties +// See https://help.splunk.com/en/splunk-enterprise/leverage-rest-apis/rest-api-reference/10.0/configuration-endpoints/configuration-endpoint-descriptions +func (c *SplunkClient) UpdateConfFile(scopedLog logr.Logger, fileName, property string, propertyKVList [][]string) error { + // Creates an object in a conf file if it doesn't exist + endpoint := 
fmt.Sprintf("%s/servicesNS/nobody/system/configs/conf-%s", c.ManagementURI, fileName) + body := fmt.Sprintf("name=%s", property) + + scopedLog.Info("Creating conf file object if it does not exist", "fileName", fileName, "property", property) + request, err := http.NewRequest("POST", endpoint, strings.NewReader(body)) + if err != nil { + scopedLog.Error(err, "Failed to create conf file object if it does not exist", "fileName", fileName, "property", property) + return err + } + + scopedLog.Info("Validating conf file object creation", "fileName", fileName, "property", property) + expectedStatus := []int{200, 201, 409} + err = c.Do(request, expectedStatus, nil) + if err != nil { + scopedLog.Error(err, fmt.Sprintf("Status not in %v for conf file object creation", expectedStatus), "fileName", fileName, "property", property) + return err + } + + // Updates a property of an object in a conf file + endpoint = fmt.Sprintf("%s/servicesNS/nobody/system/configs/conf-%s/%s", c.ManagementURI, fileName, property) + body = "" + for _, kv := range propertyKVList { + body += fmt.Sprintf("%s=%s&", kv[0], kv[1]) + } + if len(body) > 0 && body[len(body)-1] == '&' { + body = body[:len(body)-1] + } + + scopedLog.Info("Updating conf file object", "fileName", fileName, "property", property, "body", body) + request, err = http.NewRequest("POST", endpoint, strings.NewReader(body)) + if err != nil { + scopedLog.Error(err, "Failed to update conf file object", "fileName", fileName, "property", property, "body", body) + return err + } + + scopedLog.Info("Validating conf file object update", "fileName", fileName, "property", property) + expectedStatus = []int{200, 201} + err = c.Do(request, expectedStatus, nil) + if err != nil { + scopedLog.Error(err, fmt.Sprintf("Status not in %v for conf file object update", expectedStatus), "fileName", fileName, "property", property, "body", body) + } + return err +} + +// Deletes conf files properties +func (c *SplunkClient) DeleteConfFileProperty(scopedLog 
logr.Logger, fileName, property string) error { + endpoint := fmt.Sprintf("%s/servicesNS/nobody/system/configs/conf-%s/%s", c.ManagementURI, fileName, property) + + scopedLog.Info("Deleting conf file object", "fileName", fileName, "property", property) + request, err := http.NewRequest("DELETE", endpoint, nil) + if err != nil { + scopedLog.Error(err, "Failed to delete conf file object", "fileName", fileName, "property", property) + return err + } + + expectedStatus := []int{200, 201, 404} + err = c.Do(request, expectedStatus, nil) + if err != nil { + scopedLog.Error(err, fmt.Sprintf("Status not in %v for conf file object deletion", expectedStatus), "fileName", fileName, "property", property) + } + return err +} diff --git a/pkg/splunk/client/enterprise_test.go b/pkg/splunk/client/enterprise_test.go index 9850b17c5..6b97c24d7 100644 --- a/pkg/splunk/client/enterprise_test.go +++ b/pkg/splunk/client/enterprise_test.go @@ -16,6 +16,7 @@ package client import ( + "context" "fmt" "net/http" "net/url" @@ -23,6 +24,7 @@ import ( "testing" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + "sigs.k8s.io/controller-runtime/pkg/log" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" ) @@ -652,3 +654,86 @@ func TestRestartSplunk(t *testing.T) { // Test invalid http request splunkClientErrorTester(t, test) } + +func TestUpdateConfFile(t *testing.T) { + // Test successful creation and update of conf property + property := "myproperty" + key := "mykey" + value := "myvalue" + fileName := "outputs" + + reqLogger := log.FromContext(context.TODO()) + scopedLog := reqLogger.WithName("TestUpdateConfFile") + + // First request: create the property (object) if it doesn't exist + createBody := strings.NewReader(fmt.Sprintf("name=%s", property)) + wantCreateRequest, _ := http.NewRequest("POST", "https://localhost:8089/servicesNS/nobody/system/configs/conf-outputs", createBody) + + // Second request: update the key/value for the property + updateBody := 
strings.NewReader(fmt.Sprintf("%s=%s", key, value)) + wantUpdateRequest, _ := http.NewRequest("POST", fmt.Sprintf("https://localhost:8089/servicesNS/nobody/system/configs/conf-outputs/%s", property), updateBody) + + mockSplunkClient := &spltest.MockHTTPClient{} + mockSplunkClient.AddHandler(wantCreateRequest, 201, "", nil) + mockSplunkClient.AddHandler(wantUpdateRequest, 200, "", nil) + + c := NewSplunkClient("https://localhost:8089", "admin", "p@ssw0rd") + c.Client = mockSplunkClient + + err := c.UpdateConfFile(scopedLog, fileName, property, [][]string{{key, value}}) + if err != nil { + t.Errorf("UpdateConfFile err = %v", err) + } + mockSplunkClient.CheckRequests(t, "TestUpdateConfFile") + + // Negative test: error on create + mockSplunkClient = &spltest.MockHTTPClient{} + mockSplunkClient.AddHandler(wantCreateRequest, 500, "", nil) + c.Client = mockSplunkClient + err = c.UpdateConfFile(scopedLog, fileName, property, [][]string{{key, value}}) + if err == nil { + t.Errorf("UpdateConfFile expected error on create, got nil") + } + + // Negative test: error on update + mockSplunkClient = &spltest.MockHTTPClient{} + mockSplunkClient.AddHandler(wantCreateRequest, 201, "", nil) + mockSplunkClient.AddHandler(wantUpdateRequest, 500, "", nil) + c.Client = mockSplunkClient + err = c.UpdateConfFile(scopedLog, fileName, property, [][]string{{key, value}}) + if err == nil { + t.Errorf("UpdateConfFile expected error on update, got nil") + } +} + +func TestDeleteConfFileProperty(t *testing.T) { + // Test successful deletion of conf property + property := "myproperty" + fileName := "outputs" + + reqLogger := log.FromContext(context.TODO()) + scopedLog := reqLogger.WithName("TestDeleteConfFileProperty") + + wantDeleteRequest, _ := http.NewRequest("DELETE", fmt.Sprintf("https://localhost:8089/servicesNS/nobody/system/configs/conf-outputs/%s", property), nil) + + mockSplunkClient := &spltest.MockHTTPClient{} + mockSplunkClient.AddHandler(wantDeleteRequest, 200, "", nil) + + c := 
NewSplunkClient("https://localhost:8089", "admin", "p@ssw0rd") + c.Client = mockSplunkClient + + err := c.DeleteConfFileProperty(scopedLog, fileName, property) + if err != nil { + t.Errorf("DeleteConfFileProperty err = %v", err) + } + mockSplunkClient.CheckRequests(t, "TestDeleteConfFileProperty") + + // Negative test: error on delete + mockSplunkClient = &spltest.MockHTTPClient{} + mockSplunkClient.AddHandler(wantDeleteRequest, 500, "", nil) + c.Client = mockSplunkClient + err = c.DeleteConfFileProperty(scopedLog, fileName, property) + if err == nil { + t.Errorf("DeleteConfFileProperty expected error on delete, got nil") + } +} diff --git a/pkg/splunk/enterprise/afwscheduler.go b/pkg/splunk/enterprise/afwscheduler.go index cc99eca0d..481e58139 100644 --- a/pkg/splunk/enterprise/afwscheduler.go +++ b/pkg/splunk/enterprise/afwscheduler.go @@ -55,7 +55,7 @@ var appPhaseInfoStatuses = map[enterpriseApi.AppPhaseStatusType]bool{ // isFanOutApplicableToCR confirms if a given CR needs fanOut support func isFanOutApplicableToCR(cr splcommon.MetaObject) bool { switch cr.GetObjectKind().GroupVersionKind().Kind { - case "Standalone": + case "Standalone", "IngestorCluster": return true default: return false @@ -106,6 +106,8 @@ func getApplicablePodNameForAppFramework(cr splcommon.MetaObject, ordinalIdx int podType = "cluster-manager" case "MonitoringConsole": podType = "monitoring-console" + case "IngestorCluster": + podType = "ingestor" } return fmt.Sprintf("splunk-%s-%s-%d", cr.GetName(), podType, ordinalIdx) @@ -153,6 +155,8 @@ func getTelAppNameExtension(crKind string) (string, error) { return "cmaster", nil case "ClusterManager": return "cmanager", nil + case "IngestorCluster": + return "ingestor", nil default: return "", errors.New("Invalid CR kind for telemetry app") } @@ -1510,6 +1514,8 @@ func afwGetReleventStatefulsetByKind(ctx context.Context, cr splcommon.MetaObjec instanceID = SplunkClusterManager case "MonitoringConsole": instanceID = SplunkMonitoringConsole + 
case "IngestorCluster": + instanceID = SplunkIngestor default: return nil } @@ -2171,6 +2177,7 @@ func afwSchedulerEntry(ctx context.Context, client splcommon.ControllerClient, c podExecClient := splutil.GetPodExecClient(client, cr, podName) appsPathOnPod := filepath.Join(appBktMnt, appSrcName) + // create the dir on Splunk pod/s where app/s will be copied from operator pod err = createDirOnSplunkPods(ctx, cr, *sts.Spec.Replicas, appsPathOnPod, podExecClient) if err != nil { diff --git a/pkg/splunk/enterprise/afwscheduler_test.go b/pkg/splunk/enterprise/afwscheduler_test.go index 87c5f2ba8..9fd566d30 100644 --- a/pkg/splunk/enterprise/afwscheduler_test.go +++ b/pkg/splunk/enterprise/afwscheduler_test.go @@ -377,6 +377,13 @@ func TestGetApplicablePodNameForAppFramework(t *testing.T) { if expectedPodName != returnedPodName { t.Errorf("Unable to fetch correct pod name. Expected %s, returned %s", expectedPodName, returnedPodName) } + + cr.TypeMeta.Kind = "IngestorCluster" + expectedPodName = "splunk-stack1-ingestor-0" + returnedPodName = getApplicablePodNameForAppFramework(&cr, podID) + if expectedPodName != returnedPodName { + t.Errorf("Unable to fetch correct pod name. 
Expected %s, returned %s", expectedPodName, returnedPodName) + } } func TestInitAppInstallPipeline(t *testing.T) { @@ -1346,7 +1353,7 @@ func TestAfwGetReleventStatefulsetByKind(t *testing.T) { _, _ = splctrl.ApplyStatefulSet(ctx, c, ¤t) if afwGetReleventStatefulsetByKind(ctx, &cr, c) == nil { - t.Errorf("Unable to get the sts for SHC deployer") + t.Errorf("Unable to get the sts for LicenseManager") } // Test if STS works for Standalone @@ -1360,7 +1367,21 @@ func TestAfwGetReleventStatefulsetByKind(t *testing.T) { _, _ = splctrl.ApplyStatefulSet(ctx, c, ¤t) if afwGetReleventStatefulsetByKind(ctx, &cr, c) == nil { - t.Errorf("Unable to get the sts for SHC deployer") + t.Errorf("Unable to get the sts for Standalone") + } + + // Test if STS works for IngestorCluster + cr.TypeMeta.Kind = "IngestorCluster" + current = appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-stack1-ingestor", + Namespace: "test", + }, + } + + _, _ = splctrl.ApplyStatefulSet(ctx, c, ¤t) + if afwGetReleventStatefulsetByKind(ctx, &cr, c) == nil { + t.Errorf("Unable to get the sts for IngestorCluster") } // Negative testing @@ -4211,6 +4232,7 @@ func TestGetTelAppNameExtension(t *testing.T) { "SearchHeadCluster": "shc", "ClusterMaster": "cmaster", "ClusterManager": "cmanager", + "IngestorCluster": "ingestor", } // Test all CR kinds diff --git a/pkg/splunk/enterprise/busconfiguration.go b/pkg/splunk/enterprise/busconfiguration.go new file mode 100644 index 000000000..43fd35f68 --- /dev/null +++ b/pkg/splunk/enterprise/busconfiguration.go @@ -0,0 +1,140 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enterprise + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ApplyBusConfiguration reconciles the state of an IngestorCluster custom resource +func ApplyBusConfiguration(ctx context.Context, client client.Client, cr *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + var err error + + // Unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("ApplyBusConfiguration") + + if cr.Status.ResourceRevMap == nil { + cr.Status.ResourceRevMap = make(map[string]string) + } + + eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) + + cr.Kind = "BusConfiguration" + + // Initialize phase + cr.Status.Phase = enterpriseApi.PhaseError + + // Update the CR Status + defer updateCRStatus(ctx, client, cr, &err) + + // Validate and updates defaults for CR + err = validateBusConfigurationSpec(ctx, client, cr) + if err != nil { + eventPublisher.Warning(ctx, "validateBusConfigurationSpec", fmt.Sprintf("validate bus configuration spec failed %s", err.Error())) + 
scopedLog.Error(err, "Failed to validate bus configuration spec") + return result, err + } + + // Check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + terminating, err := splctrl.CheckForDeletion(ctx, cr, client) + if terminating && err != nil { + cr.Status.Phase = enterpriseApi.PhaseTerminating + } else { + result.Requeue = false + } + return result, err + } + + cr.Status.Phase = enterpriseApi.PhaseReady + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. + if !result.Requeue { + result.RequeueAfter = 0 + } + + return result, nil +} + +// validateBusConfigurationSpec checks validity and makes default updates to a BusConfigurationSpec and returns error if something is wrong +func validateBusConfigurationSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.BusConfiguration) error { + return validateBusConfigurationInputs(cr) +} + +func validateBusConfigurationInputs(cr *enterpriseApi.BusConfiguration) error { + // sqs_smartbus type is supported for now + if cr.Spec.Type != "sqs_smartbus" { + return errors.New("only sqs_smartbus type is supported in bus configuration") + } + + // Cannot be empty fields check + cannotBeEmptyFields := []string{} + if cr.Spec.SQS.QueueName == "" { + cannotBeEmptyFields = append(cannotBeEmptyFields, "queueName") + } + + if cr.Spec.SQS.AuthRegion == "" { + cannotBeEmptyFields = append(cannotBeEmptyFields, "authRegion") + } + + if cr.Spec.SQS.DeadLetterQueueName == "" { + cannotBeEmptyFields = append(cannotBeEmptyFields, "deadLetterQueueName") + } + + if len(cannotBeEmptyFields) > 0 { + return errors.New("bus configuration sqs " + strings.Join(cannotBeEmptyFields, ", ") + " cannot be empty") + } + + // Have to start with https:// or s3:// checks + haveToStartWithHttps := []string{} + if 
!strings.HasPrefix(cr.Spec.SQS.Endpoint, "https://") { + haveToStartWithHttps = append(haveToStartWithHttps, "endpoint") + } + + if !strings.HasPrefix(cr.Spec.SQS.LargeMessageStoreEndpoint, "https://") { + haveToStartWithHttps = append(haveToStartWithHttps, "largeMessageStoreEndpoint") + } + + if len(haveToStartWithHttps) > 0 { + return errors.New("bus configuration sqs " + strings.Join(haveToStartWithHttps, ", ") + " must start with https://") + } + + if !strings.HasPrefix(cr.Spec.SQS.LargeMessageStorePath, "s3://") { + return errors.New("bus configuration sqs largeMessageStorePath must start with s3://") + } + + return nil +} diff --git a/pkg/splunk/enterprise/busconfiguration_test.go b/pkg/splunk/enterprise/busconfiguration_test.go new file mode 100644 index 000000000..45d19bb40 --- /dev/null +++ b/pkg/splunk/enterprise/busconfiguration_test.go @@ -0,0 +1,151 @@ +/* +Copyright 2025. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enterprise + +import ( + "context" + "os" + "path/filepath" + "testing" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func init() { + GetReadinessScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + readinessScriptLocation) + return fileLocation + } + GetLivenessScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + livenessScriptLocation) + return fileLocation + } + GetStartupScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + startupScriptLocation) + return fileLocation + } +} + +func TestApplyBusConfiguration(t *testing.T) { + os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + ctx := context.TODO() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Object definitions + busConfig := &enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + Namespace: "test", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + c.Create(ctx, busConfig) + + // ApplyBusConfiguration + result, err := ApplyBusConfiguration(ctx, c, busConfig) + assert.NoError(t, err) + assert.True(t, result.Requeue) + 
assert.NotEqual(t, enterpriseApi.PhaseError, busConfig.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, busConfig.Status.Phase) +} + +func TestValidateBusConfigurationInputs(t *testing.T) { + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "othertype", + SQS: enterpriseApi.SQSSpec{}, + }, + } + + err := validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "only sqs_smartbus type is supported in bus configuration", err.Error()) + + busConfig.Spec.Type = "sqs_smartbus" + + err = validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "bus configuration sqs queueName, authRegion, deadLetterQueueName cannot be empty", err.Error()) + + busConfig.Spec.SQS.AuthRegion = "us-west-2" + + err = validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "bus configuration sqs queueName, deadLetterQueueName cannot be empty", err.Error()) + + busConfig.Spec.SQS.QueueName = "test-queue" + busConfig.Spec.SQS.DeadLetterQueueName = "dlq-test" + busConfig.Spec.SQS.AuthRegion = "" + + err = validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "bus configuration sqs authRegion cannot be empty", err.Error()) + + busConfig.Spec.SQS.AuthRegion = "us-west-2" + + err = validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "bus configuration sqs endpoint, largeMessageStoreEndpoint must start with https://", err.Error()) + + busConfig.Spec.SQS.Endpoint = "https://sqs.us-west-2.amazonaws.com" + busConfig.Spec.SQS.LargeMessageStoreEndpoint = "https://s3.us-west-2.amazonaws.com" + + err = validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "bus configuration sqs largeMessageStorePath must start with 
s3://", err.Error()) + + busConfig.Spec.SQS.LargeMessageStorePath = "ingestion/smartbus-test" + + err = validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "bus configuration sqs largeMessageStorePath must start with s3://", err.Error()) + + busConfig.Spec.SQS.LargeMessageStorePath = "s3://ingestion/smartbus-test" + + err = validateBusConfigurationInputs(&busConfig) + assert.Nil(t, err) +} diff --git a/pkg/splunk/enterprise/configuration.go b/pkg/splunk/enterprise/configuration.go index a0d90b354..2ce0af325 100644 --- a/pkg/splunk/enterprise/configuration.go +++ b/pkg/splunk/enterprise/configuration.go @@ -467,6 +467,9 @@ func getSplunkPorts(instanceType InstanceType) map[string]int { case SplunkIndexer: result[GetPortName(hecPort, protoHTTP)] = 8088 result[GetPortName(s2sPort, protoTCP)] = 9997 + case SplunkIngestor: + result[GetPortName(hecPort, protoHTTP)] = 8088 + result[GetPortName(s2sPort, protoTCP)] = 9997 } return result diff --git a/pkg/splunk/enterprise/configuration_test.go b/pkg/splunk/enterprise/configuration_test.go index 4deb1be98..8e5461413 100644 --- a/pkg/splunk/enterprise/configuration_test.go +++ b/pkg/splunk/enterprise/configuration_test.go @@ -1816,3 +1816,18 @@ func TestValidateLivenessProbe(t *testing.T) { t.Errorf("Unexpected error when less than deault values passed for livenessProbe InitialDelaySeconds %d, TimeoutSeconds %d, PeriodSeconds %d. 
Error %s", livenessProbe.InitialDelaySeconds, livenessProbe.TimeoutSeconds, livenessProbe.PeriodSeconds, err) } } + +func TestGetSplunkPorts(t *testing.T) { + test := func(instanceType InstanceType) { + ports := getSplunkPorts(instanceType) + require.Equal(t, 8000, ports["http-splunkweb"]) + require.Equal(t, 8089, ports["https-splunkd"]) + require.Equal(t, 8088, ports["http-hec"]) + require.Equal(t, 9997, ports["tcp-s2s"]) + } + + test(SplunkStandalone) + test(SplunkIndexer) + test(SplunkIngestor) + test(SplunkMonitoringConsole) +} diff --git a/pkg/splunk/enterprise/finalizers.go b/pkg/splunk/enterprise/finalizers.go index 574ccf093..9ecbd0136 100644 --- a/pkg/splunk/enterprise/finalizers.go +++ b/pkg/splunk/enterprise/finalizers.go @@ -56,6 +56,8 @@ func DeleteSplunkPvc(ctx context.Context, cr splcommon.MetaObject, c splcommon.C components = append(components, splcommon.ClusterManager) case "MonitoringConsole": components = append(components, "monitoring-console") + case "IngestorCluster": + components = append(components, "ingestor") default: scopedLog.Info("Skipping PVC removal") return nil diff --git a/pkg/splunk/enterprise/finalizers_test.go b/pkg/splunk/enterprise/finalizers_test.go index 92c46f1e0..369271200 100644 --- a/pkg/splunk/enterprise/finalizers_test.go +++ b/pkg/splunk/enterprise/finalizers_test.go @@ -54,6 +54,8 @@ func splunkDeletionTester(t *testing.T, cr splcommon.MetaObject, delete func(spl component = "cluster-master" case "MonitoringConsole": component = "monitoring-console" + case "IngestorCluster": + component = "ingestor" } labelsB := map[string]string{ @@ -306,6 +308,19 @@ func splunkDeletionTester(t *testing.T, cr splcommon.MetaObject, delete func(spl {MetaName: "*v4.IndexerCluster-test-stack1"}, {MetaName: "*v4.IndexerCluster-test-stack1"}, } + case "IngestorCluster": + mockCalls["Create"] = []spltest.MockFuncCall{ + {MetaName: "*v1.Secret-test-splunk-test-secret"}, + {MetaName: "*v1.ConfigMap-test-splunk-ingestor-stack1-configmap"}, + 
} + mockCalls["Get"] = []spltest.MockFuncCall{ + {MetaName: "*v1.Secret-test-splunk-test-secret"}, + {MetaName: "*v1.Secret-test-splunk-test-secret"}, + {MetaName: "*v1.Secret-test-splunk-test-secret"}, + {MetaName: "*v1.ConfigMap-test-splunk-ingestor-stack1-configmap"}, + {MetaName: "*v4.IngestorCluster-test-stack1"}, + {MetaName: "*v4.IngestorCluster-test-stack1"}, + } } } } @@ -340,6 +355,8 @@ func splunkPVCDeletionTester(t *testing.T, cr splcommon.MetaObject, delete func( component = "cluster-manager" case "MonitoringConsole": component = "monitoring-console" + case "IngestorCluster": + component = "ingestor" } labels := map[string]string{ @@ -544,4 +561,15 @@ func TestDeleteSplunkPvcError(t *testing.T) { if err == nil { t.Errorf("Expected error") } + + // IngestorCluster + icCr := &enterpriseApi.IngestorCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "IngestorCluster", + }, + } + err = DeleteSplunkPvc(ctx, icCr, c) + if err == nil { + t.Errorf("Expected error") + } } diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 2d135d84f..74b1b0a91 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -19,6 +19,7 @@ import ( "context" "errors" "fmt" + "reflect" "regexp" "sort" "strconv" @@ -76,6 +77,9 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError + if cr.Status.Replicas < cr.Spec.Replicas { + cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { @@ -241,6 +245,38 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { + // Bus config + busConfig := 
enterpriseApi.BusConfiguration{} + if cr.Spec.BusConfigurationRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.BusConfigurationRef.Namespace != "" { + ns = cr.Spec.BusConfigurationRef.Namespace + } + err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.BusConfigurationRef.Name, + Namespace: ns, + }, &busConfig) + if err != nil { + return result, err + } + } + + // If bus config is updated + if cr.Spec.BusConfigurationRef.Name != "" { + if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + + err = mgr.handlePullBusChange(ctx, cr, busConfig, client) + if err != nil { + eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + return result, err + } + + cr.Status.BusConfiguration = busConfig.Spec + } + } + //update MC //Retrieve monitoring console ref from CM Spec cmMonitoringConsoleConfigRef, err := RetrieveCMSpec(ctx, client, cr) @@ -329,6 +365,9 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // updates status after function completes cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError + if cr.Status.Replicas < cr.Spec.Replicas { + cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { @@ -497,6 +536,38 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { + // Bus config + busConfig := enterpriseApi.BusConfiguration{} + if 
cr.Spec.BusConfigurationRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.BusConfigurationRef.Namespace != "" { + ns = cr.Spec.BusConfigurationRef.Namespace + } + err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.BusConfigurationRef.Name, + Namespace: ns, + }, &busConfig) + if err != nil { + return result, err + } + } + + // If bus config is updated + if cr.Spec.BusConfigurationRef.Name != "" { + if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + + err = mgr.handlePullBusChange(ctx, cr, busConfig, client) + if err != nil { + eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + return result, err + } + + cr.Status.BusConfiguration = busConfig.Spec + } + } + //update MC //Retrieve monitoring console ref from CM Spec cmMonitoringConsoleConfigRef, err := RetrieveCMSpec(ctx, client, cr) @@ -1078,6 +1149,7 @@ func validateIndexerClusterSpec(ctx context.Context, c splcommon.ControllerClien len(cr.Spec.ClusterMasterRef.Namespace) > 0 && cr.Spec.ClusterMasterRef.Namespace != cr.GetNamespace() { return fmt.Errorf("multisite cluster does not support cluster manager to be located in a different namespace") } + return validateCommonSplunkSpec(ctx, c, &cr.Spec.CommonSplunkSpec, cr) } @@ -1159,6 +1231,79 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri return extractedValue } +var newSplunkClientForBusPipeline = splclient.NewSplunkClient + +// Checks if only PullBus or Pipeline config changed, and updates the conf file if so +func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, busConfig 
enterpriseApi.BusConfiguration, k8s client.Client) error { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("handlePullBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) + + // Only update config for pods that exist + readyReplicas := newCR.Status.ReadyReplicas + + // List all pods for this IndexerCluster StatefulSet + var updateErr error + for n := 0; n < int(readyReplicas); n++ { + memberName := GetSplunkStatefulsetPodName(SplunkIndexer, newCR.GetName(), int32(n)) + fqdnName := splcommon.GetServiceFQDN(newCR.GetNamespace(), fmt.Sprintf("%s.%s", memberName, GetSplunkServiceName(SplunkIndexer, newCR.GetName(), true))) + adminPwd, err := splutil.GetSpecificSecretTokenFromPod(ctx, k8s, memberName, newCR.GetNamespace(), "password") + if err != nil { + return err + } + splunkClient := newSplunkClientForBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) + + afterDelete := false + if (busConfig.Spec.SQS.QueueName != "" && newCR.Status.BusConfiguration.SQS.QueueName != "" && busConfig.Spec.SQS.QueueName != newCR.Status.BusConfiguration.SQS.QueueName) || + (busConfig.Spec.Type != "" && newCR.Status.BusConfiguration.Type != "" && busConfig.Spec.Type != newCR.Status.BusConfiguration.Type) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { + updateErr = err + } + if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { + updateErr = err + } + afterDelete = true + } + + busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&busConfig, newCR, afterDelete) + + for _, pbVal := range busChangedFieldsOutputs { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), 
[][]string{pbVal}); err != nil { + updateErr = err + } + } + + for _, pbVal := range busChangedFieldsInputs { + if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { + updateErr = err + } + } + + for _, field := range pipelineChangedFields { + if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", field[0], [][]string{{field[1], field[2]}}); err != nil { + updateErr = err + } + } + } + + // Do NOT restart Splunk + return updateErr +} + +// getChangedBusFieldsForIndexer returns a list of changed bus and pipeline fields for indexer pods +func getChangedBusFieldsForIndexer(busConfig *enterpriseApi.BusConfiguration, busConfigIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { + // Compare bus fields + oldPB := busConfigIndexerStatus.Status.BusConfiguration + newPB := busConfig.Spec + + // Push all bus fields + busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(&oldPB, &newPB, afterDelete) + + // Always set all pipeline fields, not just changed ones + pipelineChangedFields = pipelineConfig(true) + + return +} + // Tells if there is an image migration from 8.x.x to 9.x.x func imageUpdatedTo9(previousImage string, currentImage string) bool { // If there is no colon, version can't be detected @@ -1169,3 +1314,36 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { currentVersion := strings.Split(currentImage, ":")[1] return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } + +func pullBusChanged(oldBus, newBus *enterpriseApi.BusConfigurationSpec, afterDelete bool) (inputs, outputs [][]string) { + if oldBus.Type != newBus.Type || afterDelete { + inputs = append(inputs, []string{"remote_queue.type", newBus.Type}) + } + if oldBus.SQS.AuthRegion != newBus.SQS.AuthRegion || afterDelete { + inputs = 
append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", newBus.Type), newBus.SQS.AuthRegion}) + } + if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", newBus.Type), newBus.SQS.Endpoint}) + } + if oldBus.SQS.LargeMessageStoreEndpoint != newBus.SQS.LargeMessageStoreEndpoint || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newBus.Type), newBus.SQS.LargeMessageStoreEndpoint}) + } + if oldBus.SQS.LargeMessageStorePath != newBus.SQS.LargeMessageStorePath || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newBus.Type), newBus.SQS.LargeMessageStorePath}) + } + if oldBus.SQS.DeadLetterQueueName != newBus.SQS.DeadLetterQueueName || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newBus.Type), newBus.SQS.DeadLetterQueueName}) + } + inputs = append(inputs, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", newBus.Type), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", newBus.Type), "max_count"}, + ) + + outputs = inputs + outputs = append(outputs, + []string{fmt.Sprintf("remote_queue.%s.send_interval", newBus.Type), "5s"}, + []string{fmt.Sprintf("remote_queue.%s.encoding_format", newBus.Type), "s2s"}, + ) + + return inputs, outputs +} diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 1f98f767d..446a23dcc 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -30,10 +30,12 @@ import ( "github.com/pkg/errors" enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
+ "k8s.io/apimachinery/pkg/runtime" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -1341,11 +1343,38 @@ func TestInvalidIndexerClusterSpec(t *testing.T) { func TestGetIndexerStatefulSet(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + cr := enterpriseApi.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", }, + Spec: enterpriseApi.IndexerClusterSpec{ + BusConfigurationRef: corev1.ObjectReference{ + Name: busConfig.Name, + }, + }, } ctx := context.TODO() @@ -2017,3 +2046,508 @@ func TestImageUpdatedTo9(t *testing.T) { t.Errorf("Should not have detected an upgrade from 8 to 9, there is no version") } } + +func TestGetChangedBusFieldsForIndexer(t *testing.T) { + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + + newCR := &enterpriseApi.IndexerCluster{ + 
Spec: enterpriseApi.IndexerClusterSpec{ + BusConfigurationRef: corev1.ObjectReference{ + Name: busConfig.Name, + }, + }, + } + + busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&busConfig, newCR, false) + assert.Equal(t, 8, len(busChangedFieldsInputs)) + assert.Equal(t, [][]string{ + {"remote_queue.type", busConfig.Spec.Type}, + {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + }, busChangedFieldsInputs) + + assert.Equal(t, 10, len(busChangedFieldsOutputs)) + assert.Equal(t, [][]string{ + {"remote_queue.type", busConfig.Spec.Type}, + {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, + 
{fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, + }, busChangedFieldsOutputs) + + assert.Equal(t, 5, len(pipelineChangedFields)) + assert.Equal(t, [][]string{ + {"pipeline:remotequeueruleset", "disabled", "false"}, + {"pipeline:ruleset", "disabled", "true"}, + {"pipeline:remotequeuetyping", "disabled", "false"}, + {"pipeline:remotequeueoutput", "disabled", "false"}, + {"pipeline:typing", "disabled", "true"}, + }, pipelineChangedFields) +} + +func TestHandlePullBusChange(t *testing.T) { + // Object definitions + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + Namespace: "test", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + + newCR := &enterpriseApi.IndexerCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "IndexerCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.IndexerClusterSpec{ + BusConfigurationRef: corev1.ObjectReference{ + Name: busConfig.Name, + }, + }, + Status: enterpriseApi.IndexerClusterStatus{ + ReadyReplicas: 3, + }, + } + + pod0 := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-indexer-0", + Namespace: "test", + Labels: map[string]string{ + "app.kubernetes.io/instance": "splunk-test-indexer", + }, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "dummy-volume", + VolumeSource: 
corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "mnt-splunk-secrets", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "test-secrets", + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + {Ready: true}, + }, + }, + } + + pod1 := pod0.DeepCopy() + pod1.ObjectMeta.Name = "splunk-test-indexer-1" + + pod2 := pod0.DeepCopy() + pod2.ObjectMeta.Name = "splunk-test-indexer-2" + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secrets", + Namespace: "test", + }, + Data: map[string][]byte{ + "password": []byte("dummy"), + }, + } + + // Mock pods + c := spltest.NewMockClient() + ctx := context.TODO() + c.Create(ctx, &busConfig) + c.Create(ctx, newCR) + c.Create(ctx, pod0) + c.Create(ctx, pod1) + c.Create(ctx, pod2) + + // Negative test case: secret not found + mgr := &indexerClusterPodManager{} + err := mgr.handlePullBusChange(ctx, newCR, busConfig, c) + assert.NotNil(t, err) + + // Mock secret + c.Create(ctx, secret) + + mockHTTPClient := &spltest.MockHTTPClient{} + + // Negative test case: failure in creating remote queue stanza + mgr = newTestPullBusPipelineManager(mockHTTPClient) + + err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + assert.NotNil(t, err) + + // outputs.conf + propertyKVList := [][]string{ + {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, + 
{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + } + propertyKVListOutputs := propertyKVList + + propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}) + propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}) + + body := buildFormBody(propertyKVListOutputs) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, busConfig, newCR.Status.ReadyReplicas, "conf-outputs", body) + + // Negative test case: failure in creating remote queue stanza + mgr = newTestPullBusPipelineManager(mockHTTPClient) + + err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + assert.NotNil(t, err) + + // inputs.conf + body = buildFormBody(propertyKVList) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, busConfig, newCR.Status.ReadyReplicas, "conf-inputs", body) + + // Negative test case: failure in updating remote queue stanza + mgr = newTestPullBusPipelineManager(mockHTTPClient) + + err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + assert.NotNil(t, err) + + // default-mode.conf + propertyKVList = [][]string{ + {"pipeline:remotequeueruleset", "disabled", "false"}, + {"pipeline:ruleset", "disabled", "true"}, + {"pipeline:remotequeuetyping", "disabled", "false"}, + {"pipeline:remotequeueoutput", "disabled", "false"}, + {"pipeline:typing", "disabled", "true"}, + } + + for i := 0; i < int(newCR.Status.ReadyReplicas); i++ { + podName := fmt.Sprintf("splunk-test-indexer-%d", i) + baseURL := fmt.Sprintf("https://%s.splunk-test-indexer-headless.test.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName) + + for _, field := range propertyKVList { + req, _ := http.NewRequest("POST", baseURL, strings.NewReader(fmt.Sprintf("name=%s", field[0]))) + 
mockHTTPClient.AddHandler(req, 200, "", nil) + + updateURL := fmt.Sprintf("%s/%s", baseURL, field[0]) + req, _ = http.NewRequest("POST", updateURL, strings.NewReader(fmt.Sprintf("%s=%s", field[1], field[2]))) + mockHTTPClient.AddHandler(req, 200, "", nil) + } + } + + mgr = newTestPullBusPipelineManager(mockHTTPClient) + + err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + assert.Nil(t, err) +} + +func buildFormBody(pairs [][]string) string { + var b strings.Builder + for i, kv := range pairs { + if len(kv) < 2 { + continue + } + fmt.Fprintf(&b, "%s=%s", kv[0], kv[1]) + if i < len(pairs)-1 { + b.WriteByte('&') + } + } + return b.String() +} + +func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, busConfig enterpriseApi.BusConfiguration, replicas int32, confName, body string) { + for i := 0; i < int(replicas); i++ { + podName := fmt.Sprintf("splunk-%s-indexer-%d", cr.GetName(), i) + baseURL := fmt.Sprintf( + "https://%s.splunk-%s-indexer-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/%s", + podName, cr.GetName(), cr.GetNamespace(), confName, + ) + + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) + mockHTTPClient.AddHandler(reqCreate, 200, "", nil) + + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) + mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) + } +} + +func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { + newSplunkClientForBusPipeline = func(uri, user, pass string) *splclient.SplunkClient { + return &splclient.SplunkClient{ + ManagementURI: uri, + Username: user, + Password: pass, + Client: mockHTTPClient, + } + } + return &indexerClusterPodManager{ + 
newSplunkClient: newSplunkClientForBusPipeline, + } +} + +func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { + os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + ctx := context.TODO() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Object definitions + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + Namespace: "test", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + c.Create(ctx, &busConfig) + + cm := &enterpriseApi.ClusterManager{ + TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "cm", + Namespace: "test", + }, + Status: enterpriseApi.ClusterManagerStatus{ + Phase: enterpriseApi.PhaseReady, + }, + } + c.Create(ctx, cm) + + cr := &enterpriseApi.IndexerCluster{ + TypeMeta: metav1.TypeMeta{Kind: "IndexerCluster"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.IndexerClusterSpec{ + Replicas: 1, + BusConfigurationRef: corev1.ObjectReference{ + Name: busConfig.Name, + Namespace: busConfig.Namespace, + }, + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + ClusterManagerRef: corev1.ObjectReference{ + Name: "cm", + }, + Mock: true, + }, + }, + Status: enterpriseApi.IndexerClusterStatus{ + Phase: enterpriseApi.PhaseReady, + }, + } + c.Create(ctx, cr) + + secret := &corev1.Secret{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "test-secrets", + Namespace: "test", + }, + Data: map[string][]byte{ + "password": []byte("dummy"), + }, + } + c.Create(ctx, secret) + + cmPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-cm-cluster-manager-0", + Namespace: "test", + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "mnt-splunk-secrets", + VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{ + SecretName: "test-secrets", + }}, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + {Ready: true}, + }, + }, + } + c.Create(ctx, cmPod) + + pod0 := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-indexer-0", + Namespace: "test", + Labels: map[string]string{ + "app.kubernetes.io/instance": "splunk-test-indexer", + }, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "dummy-volume", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "mnt-splunk-secrets", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "test-secrets", + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + {Ready: true}, + }, + }, + } + c.Create(ctx, pod0) + + replicas := int32(1) + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-indexer", + Namespace: "test", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: 1, + ReadyReplicas: 1, + UpdatedReplicas: 1, + }, + } + c.Create(ctx, sts) + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-indexer-headless", + Namespace: "test", + }, + } + c.Create(ctx, svc) + + // outputs.conf + mockHTTPClient := &spltest.MockHTTPClient{} + + base := 
"https://splunk-test-indexer-0.splunk-test-indexer-headless.test.svc.cluster.local:8089/servicesNS/nobody/system/configs" + queue := "remote_queue:test-queue" + + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs", base), "name="+queue), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs/%s", base, queue), ""), 200, "", nil) + + // inputs.conf + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs", base), "name="+queue), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs/%s", base, queue), ""), 200, "", nil) + + // default-mode.conf + pipelineFields := []string{ + "pipeline:remotequeueruleset", + "pipeline:ruleset", + "pipeline:remotequeuetyping", + "pipeline:remotequeueoutput", + "pipeline:typing", + } + for range pipelineFields { + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-default-mode", base), "name="), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-default-mode/", base), ""), 200, "", nil) + } + + res, err := ApplyIndexerCluster(ctx, c, cr) + assert.NotNil(t, res) + assert.Nil(t, err) +} + +func mustReq(method, url, body string) *http.Request { + var r *http.Request + var err error + if body != "" { + r, err = http.NewRequest(method, url, strings.NewReader(body)) + } else { + r, err = http.NewRequest(method, url, nil) + } + if err != nil { + panic(err) + } + return r +} diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go new file mode 100644 index 000000000..4f96f05bc --- /dev/null +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -0,0 +1,432 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enterprise + +import ( + "context" + "fmt" + "reflect" + "time" + + "github.com/go-logr/logr" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splclient "github.com/splunk/splunk-operator/pkg/splunk/client" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" + splutil "github.com/splunk/splunk-operator/pkg/splunk/util" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ApplyIngestorCluster reconciles the state of an IngestorCluster custom resource +func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpriseApi.IngestorCluster) (reconcile.Result, error) { + var err error + + // Unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("ApplyIngestorCluster") + + if cr.Status.ResourceRevMap == nil { + cr.Status.ResourceRevMap = make(map[string]string) + } + + eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) + + cr.Kind = "IngestorCluster" + + // Validate and updates defaults for CR + err = validateIngestorClusterSpec(ctx, client, cr) + if err != nil { + eventPublisher.Warning(ctx, 
"validateIngestorClusterSpec", fmt.Sprintf("validate ingestor cluster spec failed %s", err.Error())) + scopedLog.Error(err, "Failed to validate ingestor cluster spec") + return result, err + } + + // Initialize phase + cr.Status.Phase = enterpriseApi.PhaseError + + // Update the CR Status + defer updateCRStatus(ctx, client, cr, &err) + + if cr.Status.Replicas < cr.Spec.Replicas { + cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + } + cr.Status.Replicas = cr.Spec.Replicas + + // If needed, migrate the app framework status + err = checkAndMigrateAppDeployStatus(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig, true) + if err != nil { + return result, err + } + + // If app framework is configured, then do following things + // Initialize the S3 clients based on providers + // Check the status of apps on remote storage + if len(cr.Spec.AppFrameworkConfig.AppSources) != 0 { + err = initAndCheckAppInfoStatus(ctx, client, cr, &cr.Spec.AppFrameworkConfig, &cr.Status.AppContext) + if err != nil { + eventPublisher.Warning(ctx, "initAndCheckAppInfoStatus", fmt.Sprintf("init and check app info status failed %s", err.Error())) + cr.Status.AppContext.IsDeploymentInProgress = false + return result, err + } + } + + cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-ingestor", cr.GetName()) + + // Create or update general config resources + namespaceScopedSecret, err := ApplySplunkConfig(ctx, client, cr, cr.Spec.CommonSplunkSpec, SplunkIngestor) + if err != nil { + scopedLog.Error(err, "create or update general config failed", "error", err.Error()) + eventPublisher.Warning(ctx, "ApplySplunkConfig", fmt.Sprintf("create or update general config failed with error %s", err.Error())) + return result, err + } + + // Check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + if cr.Spec.MonitoringConsoleRef.Name != "" { + _, err = ApplyMonitoringConsoleEnvConfigMap(ctx, client, cr.GetNamespace(), 
cr.GetName(), cr.Spec.MonitoringConsoleRef.Name, make([]corev1.EnvVar, 0), false) + if err != nil { + eventPublisher.Warning(ctx, "ApplyMonitoringConsoleEnvConfigMap", fmt.Sprintf("create/update monitoring console config map failed %s", err.Error())) + return result, err + } + } + + // If this is the last of its kind getting deleted, + // remove the entry for this CR type from configMap or else + // just decrement the refCount for this CR type + if len(cr.Spec.AppFrameworkConfig.AppSources) != 0 { + err = UpdateOrRemoveEntryFromConfigMapLocked(ctx, client, cr, SplunkIngestor) + if err != nil { + return result, err + } + } + + DeleteOwnerReferencesForResources(ctx, client, cr, SplunkIngestor) + + terminating, err := splctrl.CheckForDeletion(ctx, cr, client) + if terminating && err != nil { + cr.Status.Phase = enterpriseApi.PhaseTerminating + } else { + result.Requeue = false + } + return result, err + } + + // Create or update a headless service for ingestor cluster + err = splctrl.ApplyService(ctx, client, getSplunkService(ctx, cr, &cr.Spec.CommonSplunkSpec, SplunkIngestor, true)) + if err != nil { + eventPublisher.Warning(ctx, "ApplyService", fmt.Sprintf("create/update headless service for ingestor cluster failed %s", err.Error())) + return result, err + } + + // Create or update a regular service for ingestor cluster + err = splctrl.ApplyService(ctx, client, getSplunkService(ctx, cr, &cr.Spec.CommonSplunkSpec, SplunkIngestor, false)) + if err != nil { + eventPublisher.Warning(ctx, "ApplyService", fmt.Sprintf("create/update service for ingestor cluster failed %s", err.Error())) + return result, err + } + + // If we are using App Framework and are scaling up, we should re-populate the + // config map with all the appSource entries + // This is done so that the new pods + // that come up now will have the complete list of all the apps and then can + // download and install all the apps + // If we are scaling down, just update the auxPhaseInfo list + if 
len(cr.Spec.AppFrameworkConfig.AppSources) != 0 && cr.Status.ReadyReplicas > 0 { + statefulsetName := GetSplunkStatefulsetName(SplunkIngestor, cr.GetName()) + + isStatefulSetScaling, err := splctrl.IsStatefulSetScalingUpOrDown(ctx, client, cr, statefulsetName, cr.Spec.Replicas) + if err != nil { + return result, err + } + + appStatusContext := cr.Status.AppContext + + switch isStatefulSetScaling { + case enterpriseApi.StatefulSetScalingUp: + // If we are indeed scaling up, then mark the deploy status to Pending + // for all the app sources so that we add all the app sources in config map + cr.Status.AppContext.IsDeploymentInProgress = true + + for appSrc := range appStatusContext.AppsSrcDeployStatus { + changeAppSrcDeployInfoStatus(ctx, appSrc, appStatusContext.AppsSrcDeployStatus, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusComplete, enterpriseApi.DeployStatusPending) + changePhaseInfo(ctx, cr.Spec.Replicas, appSrc, appStatusContext.AppsSrcDeployStatus) + } + + // If we are scaling down, just delete the state auxPhaseInfo entries + case enterpriseApi.StatefulSetScalingDown: + for appSrc := range appStatusContext.AppsSrcDeployStatus { + removeStaleEntriesFromAuxPhaseInfo(ctx, cr.Spec.Replicas, appSrc, appStatusContext.AppsSrcDeployStatus) + } + } + } + + // Create or update statefulset for the ingestors + statefulSet, err := getIngestorStatefulSet(ctx, client, cr) + if err != nil { + eventPublisher.Warning(ctx, "getIngestorStatefulSet", fmt.Sprintf("get ingestor stateful set failed %s", err.Error())) + return result, err + } + + // Make changes to respective mc configmap when changing/removing mcRef from spec + err = validateMonitoringConsoleRef(ctx, client, statefulSet, make([]corev1.EnvVar, 0)) + if err != nil { + eventPublisher.Warning(ctx, "validateMonitoringConsoleRef", fmt.Sprintf("validate monitoring console reference failed %s", err.Error())) + return result, err + } + + mgr := splctrl.DefaultStatefulSetPodManager{} + phase, err := 
mgr.Update(ctx, client, statefulSet, cr.Spec.Replicas) + cr.Status.ReadyReplicas = statefulSet.Status.ReadyReplicas + if err != nil { + eventPublisher.Warning(ctx, "update", fmt.Sprintf("update stateful set failed %s", err.Error())) + return result, err + } + cr.Status.Phase = phase + + // No need to requeue if everything is ready + if cr.Status.Phase == enterpriseApi.PhaseReady { + // Bus config + busConfig := enterpriseApi.BusConfiguration{} + if cr.Spec.BusConfigurationRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.BusConfigurationRef.Namespace != "" { + ns = cr.Spec.BusConfigurationRef.Namespace + } + err = client.Get(ctx, types.NamespacedName{ + Name: cr.Spec.BusConfigurationRef.Name, + Namespace: ns, + }, &busConfig) + if err != nil { + return result, err + } + } + + // If bus config is updated + if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + + err = mgr.handlePushBusChange(ctx, cr, busConfig, client) + if err != nil { + eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + return result, err + } + + cr.Status.BusConfiguration = busConfig.Spec + } + + // Upgrade fron automated MC to MC CRD + namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(SplunkMonitoringConsole, cr.GetNamespace())} + err = splctrl.DeleteReferencesToAutomatedMCIfExists(ctx, client, cr, namespacedName) + if err != nil { + eventPublisher.Warning(ctx, "DeleteReferencesToAutomatedMCIfExists", fmt.Sprintf("delete reference to automated MC if exists failed %s", err.Error())) + scopedLog.Error(err, "Error in deleting automated monitoring console resource") + } + if cr.Spec.MonitoringConsoleRef.Name != "" { 
+ _, err = ApplyMonitoringConsoleEnvConfigMap(ctx, client, cr.GetNamespace(), cr.GetName(), cr.Spec.MonitoringConsoleRef.Name, make([]corev1.EnvVar, 0), true) + if err != nil { + eventPublisher.Warning(ctx, "ApplyMonitoringConsoleEnvConfigMap", fmt.Sprintf("apply monitoring console environment config map failed %s", err.Error())) + return result, err + } + } + + finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig) + result = *finalResult + + // Add a splunk operator telemetry app + if cr.Spec.EtcVolumeStorageConfig.EphemeralStorage || !cr.Status.TelAppInstalled { + podExecClient := splutil.GetPodExecClient(client, cr, "") + err = addTelApp(ctx, podExecClient, cr.Spec.Replicas, cr) + if err != nil { + return result, err + } + + // Mark telemetry app as installed + cr.Status.TelAppInstalled = true + } + } + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. 
	if !result.Requeue {
		result.RequeueAfter = 0
	}

	return result, nil
}

// validateIngestorClusterSpec checks validity and makes default updates to a IngestorClusterSpec and returns error if something is wrong
func validateIngestorClusterSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.IngestorCluster) error {
	// We cannot have 0 replicas in IngestorCluster spec since this refers to number of ingestion pods in an ingestor cluster.
	// Any requested value below 3 is silently raised to the enforced minimum of 3.
	if cr.Spec.Replicas < 3 {
		cr.Spec.Replicas = 3
	}

	// Only re-validate the app framework spec when it differs from the last
	// validated copy recorded in status.
	if !reflect.DeepEqual(cr.Status.AppContext.AppFrameworkConfig, cr.Spec.AppFrameworkConfig) {
		err := ValidateAppFrameworkSpec(ctx, &cr.Spec.AppFrameworkConfig, &cr.Status.AppContext, true, cr.GetObjectKind().GroupVersionKind().Kind)
		if err != nil {
			return err
		}
	}

	return validateCommonSplunkSpec(ctx, c, &cr.Spec.CommonSplunkSpec, cr)
}

// getIngestorStatefulSet returns a Kubernetes StatefulSet object for Splunk Enterprise ingestors
func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IngestorCluster) (*appsv1.StatefulSet, error) {
	ss, err := getSplunkStatefulSet(ctx, client, cr, &cr.Spec.CommonSplunkSpec, SplunkIngestor, cr.Spec.Replicas, []corev1.EnvVar{})
	if err != nil {
		return nil, err
	}

	// Setup App framework staging volume for apps
	setupAppsStagingVolume(ctx, client, cr, &ss.Spec.Template, &cr.Spec.AppFrameworkConfig)

	return ss, nil
}

// Checks if only Bus or Pipeline config changed, and updates the conf file if so.
// It POSTs the new outputs.conf / default-mode.conf settings to each ingestor
// pod's splunkd management port (8089) and never restarts Splunk.
//
// NOTE(review): the loop bound uses newCR.Status.Replicas although the comment
// below says "pods that exist" — confirm whether Status.ReadyReplicas was intended.
// NOTE(review): updateErr is overwritten on each conf-update failure, so only
// the LAST error across pods/fields is returned; secret-fetch errors, by
// contrast, abort the loop immediately.
func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, busConfig enterpriseApi.BusConfiguration, k8s client.Client) error {
	reqLogger := log.FromContext(ctx)
	scopedLog := reqLogger.WithName("handlePushBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace())

	// Only update config for pods that exist
	readyReplicas := newCR.Status.Replicas

	// List all pods for this IngestorCluster StatefulSet
	var updateErr error
	for n := 0; n < int(readyReplicas); n++ {
		memberName := GetSplunkStatefulsetPodName(SplunkIngestor, newCR.GetName(), int32(n))
		fqdnName := splcommon.GetServiceFQDN(newCR.GetNamespace(), fmt.Sprintf("%s.%s", memberName, GetSplunkServiceName(SplunkIngestor, newCR.GetName(), true)))
		// Admin password is read from the pod's mounted secret; a failure here
		// aborts the whole update.
		adminPwd, err := splutil.GetSpecificSecretTokenFromPod(ctx, k8s, memberName, newCR.GetNamespace(), "password")
		if err != nil {
			return err
		}
		splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd))

		// If the queue name or bus type changed (old and new both non-empty),
		// the old remote_queue stanza is deleted so a fresh one can be written.
		afterDelete := false
		if (busConfig.Spec.SQS.QueueName != "" && newCR.Status.BusConfiguration.SQS.QueueName != "" && busConfig.Spec.SQS.QueueName != newCR.Status.BusConfiguration.SQS.QueueName) ||
			(busConfig.Spec.Type != "" && newCR.Status.BusConfiguration.Type != "" && busConfig.Spec.Type != newCR.Status.BusConfiguration.Type) {
			if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil {
				updateErr = err
			}
			afterDelete = true
		}

		busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&busConfig, newCR, afterDelete)

		// Write each changed bus field into the remote_queue stanza of outputs.conf.
		for _, pbVal := range busChangedFields {
			if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil {
				updateErr = err
			}
		}

		// Write the pipeline toggles into default-mode.conf (one stanza per field).
		for _, field := range pipelineChangedFields {
			if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", field[0], [][]string{{field[1], field[2]}}); err != nil {
				updateErr = err
			}
		}
	}

	// Do NOT restart Splunk
	return updateErr
}

// getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods.
// Bus fields are diffed against the status copy by pushBusChanged; the pipeline
// fields are returned unconditionally (pipelineConfig does no diffing).
func getChangedBusFieldsForIngestor(busConfig *enterpriseApi.BusConfiguration, busConfigIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) {
	oldPB := &busConfigIngestorStatus.Status.BusConfiguration
	newPB := &busConfig.Spec

	// Push changed bus fields
	busChangedFields = pushBusChanged(oldPB, newPB, afterDelete)

	// Always changed pipeline fields
	pipelineChangedFields = pipelineConfig(false)

	return
}

// ingestorClusterPodManager holds the per-CR state needed to build splunkd
// REST clients for each ingestor pod.
type ingestorClusterPodManager struct {
	log             logr.Logger
	cr              *enterpriseApi.IngestorCluster
	secrets         *corev1.Secret
	newSplunkClient func(managementURI, username, password string) *splclient.SplunkClient
}

// newIngestorClusterPodManager function to create pod manager this is added to write unit test case
var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) ingestorClusterPodManager {
	return ingestorClusterPodManager{
		log:             log,
		cr:              cr,
		secrets:         secret,
		newSplunkClient: newSplunkClient,
	}
}

// pipelineConfig returns the default-mode.conf pipeline toggles
// ({stanza, key, value} triples) for smartbus ingestion. For non-indexer roles
// (isIndexer == false) it additionally disables the indexerPipe pipeline.
func pipelineConfig(isIndexer bool) (output [][]string) {
	output = append(output,
		[]string{"pipeline:remotequeueruleset", "disabled", "false"},
		[]string{"pipeline:ruleset", "disabled", "true"},
		[]string{"pipeline:remotequeuetyping", "disabled", "false"},
		[]string{"pipeline:remotequeueoutput", "disabled", "false"},
		[]string{"pipeline:typing", "disabled", "true"},
	)
	if !isIndexer {
		output = append(output, []string{"pipeline:indexerPipe", "disabled", "true"})
	}
	return output
}

// pushBusChanged returns the outputs.conf remote_queue {key, value} pairs that
// differ between oldBus and newBus. When afterDelete is true (the stanza was
// just deleted and is being recreated) every field is emitted regardless of
// whether it changed. The trailing four settings (encoding_format,
// max_retries_per_part, retry_policy, send_interval) are always emitted with
// fixed values.
func pushBusChanged(oldBus, newBus *enterpriseApi.BusConfigurationSpec, afterDelete bool) (output [][]string) {
	if oldBus.Type != newBus.Type || afterDelete {
		output = append(output, []string{"remote_queue.type", newBus.Type})
	}
	if oldBus.SQS.AuthRegion != newBus.SQS.AuthRegion || afterDelete {
		output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", newBus.Type), newBus.SQS.AuthRegion})
	}
	if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete {
		output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", newBus.Type), newBus.SQS.Endpoint})
	}
	if oldBus.SQS.LargeMessageStoreEndpoint != newBus.SQS.LargeMessageStoreEndpoint || afterDelete {
		output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newBus.Type), newBus.SQS.LargeMessageStoreEndpoint})
	}
	if oldBus.SQS.LargeMessageStorePath != newBus.SQS.LargeMessageStorePath || afterDelete {
		output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newBus.Type), newBus.SQS.LargeMessageStorePath})
	}
	if oldBus.SQS.DeadLetterQueueName != newBus.SQS.DeadLetterQueueName || afterDelete {
		output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newBus.Type), newBus.SQS.DeadLetterQueueName})
	}

	// Fixed settings, always written.
	output = append(output,
		[]string{fmt.Sprintf("remote_queue.%s.encoding_format", newBus.Type), "s2s"},
		[]string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", newBus.Type), "4"},
		[]string{fmt.Sprintf("remote_queue.%s.retry_policy", newBus.Type), "max_count"},
		[]string{fmt.Sprintf("remote_queue.%s.send_interval", newBus.Type), "5s"})

	return output
}
diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go
new file mode 100644
index 000000000..bee3df4d6
--- /dev/null
+++ b/pkg/splunk/enterprise/ingestorcluster_test.go
@@ -0,0 +1,654 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package enterprise

import (
	"context"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/go-logr/logr"
	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
	splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
	spltest "github.com/splunk/splunk-operator/pkg/splunk/test"
	splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
	"github.com/stretchr/testify/assert"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// init points the probe-script location hooks at the repository root so the
// enterprise package can find the scripts when tests run from this directory.
func init() {
	GetReadinessScriptLocation = func() string {
		fileLocation, _ := filepath.Abs("../../../" + readinessScriptLocation)
		return fileLocation
	}
	GetLivenessScriptLocation = func() string {
		fileLocation, _ := filepath.Abs("../../../" + livenessScriptLocation)
		return fileLocation
	}
	GetStartupScriptLocation = func() string {
		fileLocation, _ := filepath.Abs("../../../" + startupScriptLocation)
		return fileLocation
	}
}

// TestApplyIngestorCluster drives ApplyIngestorCluster twice against a
// controller-runtime fake client: the first reconcile with freshly created
// objects must requeue without error; after the StatefulSet/pod status is
// forced to ready and the splunkd REST calls are mocked, the second reconcile
// must reach PhaseReady.
func TestApplyIngestorCluster(t *testing.T) {
	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")

	ctx := context.TODO()

	scheme := runtime.NewScheme()
	_ = enterpriseApi.AddToScheme(scheme)
	_ = corev1.AddToScheme(scheme)
	_ = appsv1.AddToScheme(scheme)
	c := fake.NewClientBuilder().WithScheme(scheme).Build()

	// Object definitions
	// (Create errors are intentionally ignored throughout — fake client.)
	busConfig := &enterpriseApi.BusConfiguration{
		TypeMeta: metav1.TypeMeta{
			Kind:       "BusConfiguration",
			APIVersion: "enterprise.splunk.com/v4",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "busConfig",
			Namespace: "test",
		},
		Spec: enterpriseApi.BusConfigurationSpec{
			Type: "sqs_smartbus",
			SQS: enterpriseApi.SQSSpec{
				QueueName:                 "test-queue",
				AuthRegion:                "us-west-2",
				Endpoint:                  "https://sqs.us-west-2.amazonaws.com",
				LargeMessageStorePath:     "s3://ingestion/smartbus-test",
				LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com",
				DeadLetterQueueName:       "sqs-dlq-test",
			},
		},
	}
	c.Create(ctx, busConfig)

	cr := &enterpriseApi.IngestorCluster{
		TypeMeta: metav1.TypeMeta{
			Kind:       "IngestorCluster",
			APIVersion: "enterprise.splunk.com/v4",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test",
			Namespace: "test",
		},
		Spec: enterpriseApi.IngestorClusterSpec{
			Replicas: 3,
			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
				Mock: true,
			},
			BusConfigurationRef: corev1.ObjectReference{
				Name:      busConfig.Name,
				Namespace: busConfig.Namespace,
			},
		},
	}
	c.Create(ctx, cr)

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-secrets",
			Namespace: "test",
		},
		Data: map[string][]byte{"password": []byte("dummy")},
	}
	c.Create(ctx, secret)

	probeConfigMap := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "splunk-test-probe-configmap",
			Namespace: "test",
		},
	}
	c.Create(ctx, probeConfigMap)

	// StatefulSet pre-created with status already reporting 3/3 ready at
	// revision v1.
	replicas := int32(3)
	sts := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "splunk-test-ingestor",
			Namespace: "test",
		},
		Spec: appsv1.StatefulSetSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app.kubernetes.io/instance": "splunk-test-ingestor",
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app.kubernetes.io/instance": "splunk-test-ingestor",
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:  "splunk-test-ingestor",
							Image: "splunk/splunk:latest",
							Ports: []corev1.ContainerPort{
								{
									Name:          "http",
									ContainerPort: 8080,
								},
							},
						},
					},
				},
			},
		},
		Status: appsv1.StatefulSetStatus{
			Replicas:        replicas,
			ReadyReplicas:   replicas,
			UpdatedReplicas: replicas,
			CurrentRevision: "v1",
			UpdateRevision:  "v1",
		},
	}
	c.Create(ctx, sts)

	// Three running pods labeled with the v1 revision; pod 0 mounts the
	// test-secrets secret, pods 1 and 2 are deep copies with new names.
	pod0 := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "splunk-test-ingestor-0",
			Namespace: "test",
			Labels: map[string]string{
				"app.kubernetes.io/instance": "splunk-test-ingestor",
				"controller-revision-hash":   "v1",
			},
		},
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{
				{
					Name: "dummy-volume",
					VolumeSource: corev1.VolumeSource{
						EmptyDir: &corev1.EmptyDirVolumeSource{},
					},
				},
				{
					Name: "mnt-splunk-secrets",
					VolumeSource: corev1.VolumeSource{
						Secret: &corev1.SecretVolumeSource{
							SecretName: "test-secrets",
						},
					},
				},
			},
		},
		Status: corev1.PodStatus{
			Phase: corev1.PodRunning,
			ContainerStatuses: []corev1.ContainerStatus{
				{Ready: true},
			},
		},
	}

	pod1 := pod0.DeepCopy()
	pod1.ObjectMeta.Name = "splunk-test-ingestor-1"

	pod2 := pod0.DeepCopy()
	pod2.ObjectMeta.Name = "splunk-test-ingestor-2"

	c.Create(ctx, pod0)
	c.Create(ctx, pod1)
	c.Create(ctx, pod2)

	// ApplyIngestorCluster
	cr.Spec.Replicas = replicas
	cr.Status.ReadyReplicas = cr.Spec.Replicas

	// First reconcile: expected to requeue (not yet Ready) without error.
	result, err := ApplyIngestorCluster(ctx, c, cr)
	assert.NoError(t, err)
	assert.True(t, result.Requeue)
	assert.NotEqual(t, enterpriseApi.PhaseError, cr.Status.Phase)

	// Ensure stored StatefulSet status reflects readiness after any reconcile modifications
	fetched := &appsv1.StatefulSet{}
	_ = c.Get(ctx, types.NamespacedName{Name: "splunk-test-ingestor", Namespace: "test"}, fetched)
	fetched.Status.Replicas = replicas
	fetched.Status.ReadyReplicas = replicas
	fetched.Status.UpdatedReplicas = replicas
	if fetched.Status.UpdateRevision == "" {
		fetched.Status.UpdateRevision = "v1"
	}
	c.Update(ctx, fetched)

	// Guarantee all pods have matching revision label
	for _, pn := range []string{"splunk-test-ingestor-0", "splunk-test-ingestor-1", "splunk-test-ingestor-2"} {
		p := &corev1.Pod{}
		if err := c.Get(ctx, types.NamespacedName{Name: pn, Namespace: "test"}, p); err == nil {
			if p.Labels == nil {
				p.Labels = map[string]string{}
			}
			p.Labels["controller-revision-hash"] = fetched.Status.UpdateRevision
			c.Update(ctx, p)
		}
	}

	// outputs.conf
	// Swap the pod-manager factory for one whose SplunkClient uses the mock
	// HTTP client; restored via defer.
	origNew := newIngestorClusterPodManager
	mockHTTPClient := &spltest.MockHTTPClient{}
	newIngestorClusterPodManager = func(l logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, _ NewSplunkClientFunc) ingestorClusterPodManager {
		return ingestorClusterPodManager{
			log: l, cr: cr, secrets: secret,
			newSplunkClient: func(uri, user, pass string) *splclient.SplunkClient {
				return &splclient.SplunkClient{ManagementURI: uri, Username: user, Password: pass, Client: mockHTTPClient}
			},
		}
	}
	defer func() { newIngestorClusterPodManager = origNew }()

	// Expected remote_queue key/value pairs written to outputs.conf.
	propertyKVList := [][]string{
		{fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"},
		{fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion},
		{fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint},
		{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint},
		{fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath},
		{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName},
		{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"},
		{fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"},
		{fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"},
	}

	body := buildFormBody(propertyKVList)
	addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, busConfig, cr.Status.ReadyReplicas, "conf-outputs", body)

	// default-mode.conf
	// Expected {stanza, key, value} triples; register a create and an update
	// handler per pod per stanza.
	propertyKVList = [][]string{
		{"pipeline:remotequeueruleset", "disabled", "false"},
		{"pipeline:ruleset", "disabled", "true"},
		{"pipeline:remotequeuetyping", "disabled", "false"},
		{"pipeline:remotequeueoutput", "disabled", "false"},
		{"pipeline:typing", "disabled", "true"},
		{"pipeline:indexerPipe", "disabled", "true"},
	}

	for i := 0; i < int(cr.Status.ReadyReplicas); i++ {
		podName := fmt.Sprintf("splunk-test-ingestor-%d", i)
		baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName, cr.GetName(), cr.GetNamespace())

		for _, field := range propertyKVList {
			req, _ := http.NewRequest("POST", baseURL, strings.NewReader(fmt.Sprintf("name=%s", field[0])))
			mockHTTPClient.AddHandler(req, 200, "", nil)

			updateURL := fmt.Sprintf("%s/%s", baseURL, field[0])
			req, _ = http.NewRequest("POST", updateURL, strings.NewReader(fmt.Sprintf("%s=%s", field[1], field[2])))
			mockHTTPClient.AddHandler(req, 200, "", nil)
		}
	}

	// Second reconcile should now yield Ready
	cr.Status.TelAppInstalled = true
	result, err = ApplyIngestorCluster(ctx, c, cr)
	assert.NoError(t, err)
	assert.Equal(t, enterpriseApi.PhaseReady, cr.Status.Phase)
}

// TestGetIngestorStatefulSet validates the StatefulSet rendered by
// getIngestorStatefulSet (after validateIngestorClusterSpec defaults) against
// golden JSON, across service-port / service-account / extraEnv variations.
func TestGetIngestorStatefulSet(t *testing.T) {
	// Object definitions
	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")

	busConfig := enterpriseApi.BusConfiguration{
		TypeMeta: metav1.TypeMeta{
			Kind:       "BusConfiguration",
			APIVersion: "enterprise.splunk.com/v4",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "busConfig",
		},
		Spec: enterpriseApi.BusConfigurationSpec{
			Type: "sqs_smartbus",
			SQS: enterpriseApi.SQSSpec{
				QueueName:                 "test-queue",
				AuthRegion:                "us-west-2",
				Endpoint:                  "https://sqs.us-west-2.amazonaws.com",
				LargeMessageStorePath:     "s3://ingestion/smartbus-test",
				LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com",
				DeadLetterQueueName:       "sqs-dlq-test",
			},
		},
	}

	cr := enterpriseApi.IngestorCluster{
		TypeMeta: metav1.TypeMeta{
			Kind: "IngestorCluster",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test",
			Namespace: "test",
		},
		Spec:
enterpriseApi.IngestorClusterSpec{ + Replicas: 2, + BusConfigurationRef: corev1.ObjectReference{ + Name: busConfig.Name, + }, + }, + } + + ctx := context.TODO() + + c := spltest.NewMockClient() + _, err := splutil.ApplyNamespaceScopedSecretObject(ctx, c, "test") + if err != nil { + t.Errorf("Failed to create namespace scoped object") + } + + test := func(want string) { + f := func() (interface{}, error) { + if err := validateIngestorClusterSpec(ctx, c, &cr); err != nil { + t.Errorf("validateIngestorClusterSpec() returned error: %v", err) + } + return getIngestorStatefulSet(ctx, c, &cr) + } + configTester(t, "getIngestorStatefulSet()", f, want) + } + + // Define additional service port in CR and verify the statefulset has the new port + cr.Spec.ServiceTemplate.Spec.Ports = []corev1.ServicePort{{Name: "user-defined", Port: 32000, Protocol: "UDP"}} + test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-test-ingestor","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"},"ownerReferences":[{"apiVersion":"","kind":"IngestorCluster","name":"test","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.s
idecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-test-ingestor-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"},{"name":"user-defined","containerPort":32000,"protocol":"UDP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":4
0,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-test-ingestor"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-test-ingestor-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) + + // Create a service account + current := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "defaults", + Namespace: "test", + }, + } + _ = splutil.CreateResource(ctx, c, ¤t) + cr.Spec.ServiceAccount = "defaults" + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-test-ingestor","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"},"ownerReferences":[{"apiVersion":"","kind":"IngestorCluster","name":"test","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-test-ingestor-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"},{"name":"user-defined","containerPort":32000,"protocol":"UDP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNE
RSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-test-ingestor"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creatio
nTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-test-ingestor-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) + + // Add extraEnv + cr.Spec.CommonSplunkSpec.ExtraEnv = []corev1.EnvVar{ + { + Name: "TEST_ENV_VAR", + Value: "test_value", + }, + } + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-test-ingestor","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"},"ownerReferences":[{"apiVersion":"","kind":"IngestorCluster","name":"test","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-test-ingestor-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"},{"name":"user-defined","containerPort":32000,"protocol":"UDP"}],"env":[{"name":"TEST_ENV_VAR","value":"test_value"},{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-se
crets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-test-ingestor"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":
{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-test-ingestor-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) + + // Add additional label to cr metadata to transfer to the statefulset + cr.ObjectMeta.Labels = make(map[string]string) + cr.ObjectMeta.Labels["app.kubernetes.io/test-extra-label"] = "test-extra-label-value" + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-test-ingestor","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"ownerReferences":[{"apiVersion":"","kind":"IngestorCluster","name":"test","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-test-ingestor-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"},{"name":"user-defined","containerPort":32000,"protocol":"UDP"}],"env":[{"name":"TEST_ENV_VAR","value":"test_value"},{"name":"SPLUNK_HOME","value":
"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-test-ingestor
"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-test-ingestor-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) +} + +func TestGetChangedBusFieldsForIngestor(t *testing.T) { + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + + newCR := &enterpriseApi.IngestorCluster{ + Spec: enterpriseApi.IngestorClusterSpec{ + BusConfigurationRef: 
corev1.ObjectReference{ + Name: busConfig.Name, + }, + }, + Status: enterpriseApi.IngestorClusterStatus{}, + } + + busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&busConfig, newCR, false) + + assert.Equal(t, 10, len(busChangedFields)) + assert.Equal(t, [][]string{ + {"remote_queue.type", busConfig.Spec.Type}, + {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, + {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + }, busChangedFields) + + assert.Equal(t, 6, len(pipelineChangedFields)) + assert.Equal(t, [][]string{ + {"pipeline:remotequeueruleset", "disabled", "false"}, + {"pipeline:ruleset", "disabled", "true"}, + {"pipeline:remotequeuetyping", "disabled", "false"}, + {"pipeline:remotequeueoutput", "disabled", "false"}, + {"pipeline:typing", "disabled", "true"}, + {"pipeline:indexerPipe", "disabled", "true"}, + }, pipelineChangedFields) +} + +func TestHandlePushBusChange(t *testing.T) { + // Object definitions + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + }, + Spec: 
enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + + newCR := &enterpriseApi.IngestorCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "IngestorCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.IngestorClusterSpec{ + BusConfigurationRef: corev1.ObjectReference{ + Name: busConfig.Name, + }, + }, + Status: enterpriseApi.IngestorClusterStatus{ + Replicas: 3, + ReadyReplicas: 3, + }, + } + + pod0 := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-ingestor-0", + Namespace: "test", + Labels: map[string]string{ + "app.kubernetes.io/instance": "splunk-test-ingestor", + }, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "dummy-volume", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "mnt-splunk-secrets", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "test-secrets", + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + {Ready: true}, + }, + }, + } + + pod1 := pod0.DeepCopy() + pod1.ObjectMeta.Name = "splunk-test-ingestor-1" + + pod2 := pod0.DeepCopy() + pod2.ObjectMeta.Name = "splunk-test-ingestor-2" + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secrets", + Namespace: "test", + }, + Data: map[string][]byte{ + "password": []byte("dummy"), + }, + } + + // Mock pods + c := spltest.NewMockClient() + ctx := context.TODO() + c.Create(ctx, pod0) + c.Create(ctx, pod1) + c.Create(ctx, pod2) + + // Negative test case: secret not found + mgr := 
&ingestorClusterPodManager{} + + err := mgr.handlePushBusChange(ctx, newCR, busConfig, c) + assert.NotNil(t, err) + + // Mock secret + c.Create(ctx, secret) + + mockHTTPClient := &spltest.MockHTTPClient{} + + // Negative test case: failure in creating remote queue stanza + mgr = newTestPushBusPipelineManager(mockHTTPClient) + + err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + assert.NotNil(t, err) + + // outputs.conf + propertyKVList := [][]string{ + {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, + {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, + {fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", busConfig.Spec.Type), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + } + + body := buildFormBody(propertyKVList) + addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &busConfig, newCR.Status.ReadyReplicas, "conf-outputs", body) + + // Negative test case: failure in creating remote queue stanza + mgr = newTestPushBusPipelineManager(mockHTTPClient) + + err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + assert.NotNil(t, err) + + // default-mode.conf + propertyKVList = [][]string{ + {"pipeline:remotequeueruleset", "disabled", "false"}, + {"pipeline:ruleset", "disabled", "true"}, + {"pipeline:remotequeuetyping", "disabled", "false"}, + {"pipeline:remotequeueoutput", "disabled", 
"false"}, + {"pipeline:typing", "disabled", "true"}, + {"pipeline:indexerPipe", "disabled", "true"}, + } + + for i := 0; i < int(newCR.Status.ReadyReplicas); i++ { + podName := fmt.Sprintf("splunk-test-ingestor-%d", i) + baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName, newCR.GetName(), newCR.GetNamespace()) + + for _, field := range propertyKVList { + req, _ := http.NewRequest("POST", baseURL, strings.NewReader(fmt.Sprintf("name=%s", field[0]))) + mockHTTPClient.AddHandler(req, 200, "", nil) + + updateURL := fmt.Sprintf("%s/%s", baseURL, field[0]) + req, _ = http.NewRequest("POST", updateURL, strings.NewReader(fmt.Sprintf("%s=%s", field[1], field[2]))) + mockHTTPClient.AddHandler(req, 200, "", nil) + } + } + + mgr = newTestPushBusPipelineManager(mockHTTPClient) + + err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + assert.Nil(t, err) +} + +func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, busConfig *enterpriseApi.BusConfiguration, replicas int32, confName, body string) { + for i := 0; i < int(replicas); i++ { + podName := fmt.Sprintf("splunk-%s-ingestor-%d", cr.GetName(), i) + baseURL := fmt.Sprintf( + "https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/%s", + podName, cr.GetName(), cr.GetNamespace(), confName, + ) + + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) + mockHTTPClient.AddHandler(reqCreate, 200, "", nil) + + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) + mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) + } +} + +func newTestPushBusPipelineManager(mockHTTPClient 
*spltest.MockHTTPClient) *ingestorClusterPodManager { + newSplunkClientForPushBusPipeline := func(uri, user, pass string) *splclient.SplunkClient { + return &splclient.SplunkClient{ + ManagementURI: uri, + Username: user, + Password: pass, + Client: mockHTTPClient, + } + } + return &ingestorClusterPodManager{ + newSplunkClient: newSplunkClientForPushBusPipeline, + } +} diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index 7b34c5eeb..6ebd3df34 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -60,6 +60,12 @@ const ( // SplunkIndexer may be a standalone or clustered indexer peer SplunkIndexer InstanceType = "indexer" + // SplunkIngestor may be a standalone or clustered ingestion peer + SplunkIngestor InstanceType = "ingestor" + + // SplunkBusConfiguration is the bus configuration instance + SplunkBusConfiguration InstanceType = "busconfiguration" + // SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members SplunkDeployer InstanceType = "deployer" @@ -244,6 +250,8 @@ func (instanceType InstanceType) ToRole() string { role = splcommon.LicenseManagerRole case SplunkMonitoringConsole: role = "splunk_monitor" + case SplunkIngestor: + role = "splunk_standalone" // TODO: change this to a new role when we have one (splunk_ingestor) } return role } @@ -270,6 +278,8 @@ func (instanceType InstanceType) ToKind() string { kind = "license-manager" case SplunkMonitoringConsole: kind = "monitoring-console" + case SplunkIngestor: + kind = "ingestor" } return kind } @@ -282,6 +292,10 @@ func KindToInstanceString(kind string) string { return SplunkClusterMaster.ToString() case "IndexerCluster": return SplunkIndexer.ToString() + case "IngestorCluster": + return SplunkIngestor.ToString() + case "BusConfiguration": + return SplunkBusConfiguration.ToString() case "LicenseManager": return SplunkLicenseManager.ToString() case "LicenseMaster": diff --git 
a/pkg/splunk/enterprise/types_test.go b/pkg/splunk/enterprise/types_test.go index edde72ca8..1f30bd500 100644 --- a/pkg/splunk/enterprise/types_test.go +++ b/pkg/splunk/enterprise/types_test.go @@ -39,6 +39,7 @@ func TestInstanceType(t *testing.T) { SplunkLicenseMaster: splcommon.LicenseManagerRole, SplunkLicenseManager: splcommon.LicenseManagerRole, SplunkMonitoringConsole: "splunk_monitor", + SplunkIngestor: "splunk_standalone", // TODO: change this to a new role when we have one (splunk_ingestor) } for key, val := range instMap { if key.ToRole() != val { @@ -57,6 +58,7 @@ func TestInstanceType(t *testing.T) { SplunkLicenseMaster: splcommon.LicenseManager, SplunkLicenseManager: "license-manager", SplunkMonitoringConsole: "monitoring-console", + SplunkIngestor: "ingestor", } for key, val := range instMap { if key.ToKind() != val { @@ -65,3 +67,29 @@ func TestInstanceType(t *testing.T) { } } + +func TestKindToInstanceString(t *testing.T) { + tests := []struct { + kind string + expected string + }{ + {"ClusterManager", "cluster-manager"}, + {"ClusterMaster", "cluster-master"}, + {"IndexerCluster", "indexer"}, + {"IngestorCluster", "ingestor"}, + {"LicenseManager", "license-manager"}, + {"LicenseMaster", "license-master"}, + {"MonitoringConsole", "monitoring-console"}, + {"SearchHeadCluster", "search-head"}, + {"SearchHead", "search-head"}, + {"Standalone", "standalone"}, + {"UnknownKind", ""}, + } + + for _, tt := range tests { + got := KindToInstanceString(tt.kind) + if got != tt.expected { + t.Errorf("KindToInstanceString(%q) = %q; want %q", tt.kind, got, tt.expected) + } + } +} diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 701c4beda..3df285264 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2274,6 +2274,34 @@ func fetchCurrentCRWithStatusUpdate(ctx context.Context, client splcommon.Contro origCR.(*enterpriseApi.Standalone).Status.DeepCopyInto(&latestStdlnCR.Status) return latestStdlnCR, nil + 
case "IngestorCluster": + latestIngCR := &enterpriseApi.IngestorCluster{} + err = client.Get(ctx, namespacedName, latestIngCR) + if err != nil { + return nil, err + } + + origCR.(*enterpriseApi.IngestorCluster).Status.Message = "" + if (crError != nil) && ((*crError) != nil) { + origCR.(*enterpriseApi.IngestorCluster).Status.Message = (*crError).Error() + } + origCR.(*enterpriseApi.IngestorCluster).Status.DeepCopyInto(&latestIngCR.Status) + return latestIngCR, nil + + case "BusConfiguration": + latestBusCR := &enterpriseApi.BusConfiguration{} + err = client.Get(ctx, namespacedName, latestBusCR) + if err != nil { + return nil, err + } + + origCR.(*enterpriseApi.BusConfiguration).Status.Message = "" + if (crError != nil) && ((*crError) != nil) { + origCR.(*enterpriseApi.BusConfiguration).Status.Message = (*crError).Error() + } + origCR.(*enterpriseApi.BusConfiguration).Status.DeepCopyInto(&latestBusCR.Status) + return latestBusCR, nil + case "LicenseMaster": latestLmCR := &enterpriseApiV3.LicenseMaster{} err = client.Get(ctx, namespacedName, latestLmCR) @@ -2449,6 +2477,8 @@ func getApplicablePodNameForK8Probes(cr splcommon.MetaObject, ordinalIdx int32) podType = "cluster-manager" case "MonitoringConsole": podType = "monitoring-console" + case "IngestorCluster": + podType = "ingestor" } return fmt.Sprintf("splunk-%s-%s-%d", cr.GetName(), podType, ordinalIdx) } diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index 5e69bfdb7..dedb3545a 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -2687,7 +2687,8 @@ func TestFetchCurrentCRWithStatusUpdate(t *testing.T) { WithStatusSubresource(&enterpriseApi.IndexerCluster{}). WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}). WithStatusSubresource(&enterpriseApiV3.LicenseMaster{}). - WithStatusSubresource(&enterpriseApiV3.ClusterMaster{}) + WithStatusSubresource(&enterpriseApiV3.ClusterMaster{}). 
+ WithStatusSubresource(&enterpriseApi.IngestorCluster{}) c := builder.Build() ctx := context.TODO() @@ -2923,6 +2924,43 @@ func TestFetchCurrentCRWithStatusUpdate(t *testing.T) { } else if receivedCR.(*enterpriseApi.SearchHeadCluster).Status.Message != "testerror" { t.Errorf("Failed to update error message") } + + // IngestorCluster: should return a valid CR + ic := enterpriseApi.IngestorCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "IngestorCluster", + APIVersion: "enterprise.splunk.com/v4", + }, + + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: enterpriseApi.IngestorClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + }, + Status: enterpriseApi.IngestorClusterStatus{ + ReadyReplicas: 3, + }, + } + + // When the CR is available, should be able to fetch it. + err = c.Create(ctx, &ic) + if err != nil { + t.Errorf("ingestor CR creation failed.") + } + + receivedCR, err = fetchCurrentCRWithStatusUpdate(ctx, c, &ic, nil) + if err != nil { + t.Errorf("Expected a valid CR without error, but got the error %v", err) + } else if receivedCR == nil || receivedCR.GroupVersionKind().Kind != "IngestorCluster" { + t.Errorf("Failed to fetch the CR") + } } // func getApplicablePodNameForK8Probes(t *testing.T) { @@ -2984,6 +3022,13 @@ func TestGetApplicablePodNameForK8Probes(t *testing.T) { if expectedPodName != returnedPodName { t.Errorf("Unable to fetch correct pod name. Expected %s, returned %s", expectedPodName, returnedPodName) } + + cr.TypeMeta.Kind = "IngestorCluster" + expectedPodName = "splunk-stack1-ingestor-0" + returnedPodName = getApplicablePodNameForK8Probes(&cr, podID) + if expectedPodName != returnedPodName { + t.Errorf("Unable to fetch correct pod name. 
Expected %s, returned %s", expectedPodName, returnedPodName) + } } func TestCheckCmRemainingReferences(t *testing.T) { @@ -3258,7 +3303,8 @@ func TestGetCurrentImage(t *testing.T) { WithStatusSubresource(&enterpriseApi.Standalone{}). WithStatusSubresource(&enterpriseApi.MonitoringConsole{}). WithStatusSubresource(&enterpriseApi.IndexerCluster{}). - WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}) + WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}). + WithStatusSubresource(&enterpriseApi.IngestorCluster{}) client := builder.Build() client.Create(ctx, ¤t) _, err := ApplyClusterManager(ctx, client, ¤t) diff --git a/pkg/splunk/test/controller.go b/pkg/splunk/test/controller.go index aa0dfb4b5..b1adff420 100644 --- a/pkg/splunk/test/controller.go +++ b/pkg/splunk/test/controller.go @@ -368,13 +368,54 @@ func (c MockClient) List(ctx context.Context, obj client.ObjectList, opts ...cli ListOpts: opts, ObjList: obj, }) - listObj := c.ListObj - if listObj != nil { - srcObj := listObj - copyMockObjectList(&obj, &srcObj) - return nil + + // Only handle PodList for this test + podList, ok := obj.(*corev1.PodList) + if !ok { + // fallback to old logic + listObj := c.ListObj + if listObj != nil { + srcObj := listObj + copyMockObjectList(&obj, &srcObj) + return nil + } + return c.NotFoundError + } + + // Gather label selector and namespace from opts + var ns string + var matchLabels map[string]string + for _, opt := range opts { + switch v := opt.(type) { + case client.InNamespace: + ns = string(v) + case client.MatchingLabels: + matchLabels = v + } } - return c.NotFoundError + + // Filter pods in State + for _, v := range c.State { + pod, ok := v.(*corev1.Pod) + if !ok { + continue + } + if ns != "" && pod.Namespace != ns { + continue + } + matches := true + for k, val := range matchLabels { + if pod.Labels[k] != val { + matches = false + break + } + } + if matches { + podList.Items = append(podList.Items, *pod) + } + } + + return nil } // Create returns mock 
client's Err field diff --git a/test/appframework_aws/c3/appframework_aws_test.go b/test/appframework_aws/c3/appframework_aws_test.go index cd241e2eb..ba0162ffa 100644 --- a/test/appframework_aws/c3/appframework_aws_test.go +++ b/test/appframework_aws/c3/appframework_aws_test.go @@ -3182,7 +3182,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_aws/c3/manager_appframework_test.go b/test/appframework_aws/c3/manager_appframework_test.go index e00da4428..afc7abae6 100644 --- a/test/appframework_aws/c3/manager_appframework_test.go +++ b/test/appframework_aws/c3/manager_appframework_test.go @@ -355,7 +355,7 @@ var _ = Describe("c3appfw test", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName) - idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "") + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, "") // Wait for License Manager to be in READY phase testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) @@ -3324,7 +3324,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 
indexerReplicas, deployment.GetName(), "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_az/c3/appframework_azure_test.go b/test/appframework_az/c3/appframework_azure_test.go index a79d4941a..0622700a4 100644 --- a/test/appframework_az/c3/appframework_azure_test.go +++ b/test/appframework_az/c3/appframework_azure_test.go @@ -993,7 +993,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_az/c3/manager_appframework_azure_test.go b/test/appframework_az/c3/manager_appframework_azure_test.go index 2422d3e85..2a0af0b3b 100644 --- a/test/appframework_az/c3/manager_appframework_azure_test.go +++ b/test/appframework_az/c3/manager_appframework_azure_test.go @@ -991,7 +991,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster 
diff --git a/test/appframework_gcp/c3/manager_appframework_test.go b/test/appframework_gcp/c3/manager_appframework_test.go index 02b7c81be..02ad17cfb 100644 --- a/test/appframework_gcp/c3/manager_appframework_test.go +++ b/test/appframework_gcp/c3/manager_appframework_test.go @@ -361,7 +361,7 @@ var _ = Describe("c3appfw test", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName) - idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "") + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, "") // Wait for License Manager to be in READY phase testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) @@ -3327,7 +3327,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go new file mode 100644 index 000000000..c040802f8 --- /dev/null +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -0,0 +1,160 @@ +// Copyright (c) 2018-2025 Splunk Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package indingsep + +import ( + "os" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/test/testenv" +) + +const ( + // PollInterval specifies the polling interval + PollInterval = 5 * time.Second + + // ConsistentPollInterval is the interval to use to consistently check a state is stable + ConsistentPollInterval = 200 * time.Millisecond + ConsistentDuration = 2000 * time.Millisecond +) + +var ( + testenvInstance *testenv.TestEnv + testSuiteName = "indingsep-" + testenv.RandomDNSName(3) + + bus = enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://test-bucket/smartbus-test", + DeadLetterQueueName: "test-dead-letter-queue", + }, + } + serviceAccountName = "index-ingest-sa" + + inputs = []string{ + "[remote_queue:test-queue]", + "remote_queue.type = sqs_smartbus", + "remote_queue.sqs_smartbus.auth_region = us-west-2", + "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue", + "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", + "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", + "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket/smartbus-test", + 
"remote_queue.sqs_smartbus.retry_policy = max_count", + "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"} + outputs = append(inputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 5s") + defaultsAll = []string{ + "[pipeline:remotequeueruleset]\ndisabled = false", + "[pipeline:ruleset]\ndisabled = true", + "[pipeline:remotequeuetyping]\ndisabled = false", + "[pipeline:remotequeueoutput]\ndisabled = false", + "[pipeline:typing]\ndisabled = true", + } + defaultsIngest = append(defaultsAll, "[pipeline:indexerPipe]\ndisabled = true") + + awsEnvVars = []string{ + "AWS_REGION=us-west-2", + "AWS_DEFAULT_REGION=us-west-2", + "AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token", + "AWS_ROLE_ARN=arn:aws:iam::", + "AWS_STS_REGIONAL_ENDPOINTS=regional", + } + + updateBus = enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue-updated", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://test-bucket-updated/smartbus-test", + DeadLetterQueueName: "test-dead-letter-queue-updated", + }, + } + + updatedInputs = []string{ + "[remote_queue:test-queue-updated]", + "remote_queue.type = sqs_smartbus", + "remote_queue.sqs_smartbus.auth_region = us-west-2", + "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue-updated", + "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", + "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", + "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket-updated/smartbus-test", + "remote_queue.sqs_smartbus.retry_policy = max", + "remote_queue.max.sqs_smartbus.max_retries_per_part = 5"} + updatedOutputs = append(updatedInputs, "remote_queue.sqs_smartbus.encoding_format = s2s", 
"remote_queue.sqs_smartbus.send_interval = 4s") + updatedDefaultsAll = []string{ + "[pipeline:remotequeueruleset]\ndisabled = false", + "[pipeline:ruleset]\ndisabled = false", + "[pipeline:remotequeuetyping]\ndisabled = false", + "[pipeline:remotequeueoutput]\ndisabled = false", + "[pipeline:typing]\ndisabled = true", + } + updatedDefaultsIngest = append(updatedDefaultsAll, "[pipeline:indexerPipe]\ndisabled = true") + + inputsShouldNotContain = []string{ + "[remote_queue:test-queue]", + "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue", + "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket/smartbus-test", + "remote_queue.sqs_smartbus.retry_policy = max_count", + "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"} + outputsShouldNotContain = append(inputs, "remote_queue.sqs_smartbus.send_interval = 5s") + + testDataS3Bucket = os.Getenv("TEST_BUCKET") + testS3Bucket = os.Getenv("TEST_INDEXES_S3_BUCKET") + currDir, _ = os.Getwd() + downloadDirV1 = filepath.Join(currDir, "icappfwV1-"+testenv.RandomDNSName(4)) + appSourceVolumeName = "appframework-test-volume-" + testenv.RandomDNSName(3) + s3TestDir = "icappfw-" + testenv.RandomDNSName(4) + appListV1 = testenv.BasicApps + s3AppDirV1 = testenv.AppLocationV1 +) + +// TestBasic is the main entry point +func TestBasic(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Running "+testSuiteName) +} + +var _ = BeforeSuite(func() { + var err error + testenvInstance, err = testenv.NewDefaultTestEnv(testSuiteName) + Expect(err).ToNot(HaveOccurred()) + + appListV1 = testenv.BasicApps + appFileList := testenv.GetAppFileList(appListV1) + + // Download V1 Apps from S3 + err = testenv.DownloadFilesFromS3(testDataS3Bucket, s3AppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download V1 app files") +}) + +var _ = AfterSuite(func() { + if testenvInstance != nil { + Expect(testenvInstance.Teardown()).ToNot(HaveOccurred()) + } + + err := 
os.RemoveAll(downloadDirV1) + Expect(err).To(Succeed(), "Unable to delete locally downloaded V1 app files") +}) diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go new file mode 100644 index 000000000..8bccddb47 --- /dev/null +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -0,0 +1,504 @@ +// Copyright (c) 2018-2025 Splunk Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package indingsep + +import ( + "context" + "fmt" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/onsi/ginkgo/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + + "github.com/splunk/splunk-operator/test/testenv" +) + +var _ = Describe("indingsep test", func() { + + var testcaseEnvInst *testenv.TestCaseEnv + var deployment *testenv.Deployment + + var cmSpec enterpriseApi.ClusterManagerSpec + + ctx := context.TODO() + + BeforeEach(func() { + var err error + + name := fmt.Sprintf("%s-%s", testenvInstance.GetName(), testenv.RandomDNSName(3)) + testcaseEnvInst, err = testenv.NewDefaultTestCaseEnv(testenvInstance.GetKubeClient(), name) + Expect(err).To(Succeed(), "Unable to create testcaseenv") + + deployment, err = testcaseEnvInst.NewDeployment(testenv.RandomDNSName(3)) + Expect(err).To(Succeed(), "Unable to create deployment") + + cmSpec = enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: testcaseEnvInst.GetSplunkImage(), + }, + }, + } + }) + + AfterEach(func() { + if types.SpecState(CurrentSpecReport().State) == types.SpecStateFailed { + testcaseEnvInst.SkipTeardown = true + } + if deployment != nil { + deployment.Teardown() + } + + if testcaseEnvInst != nil { + Expect(testcaseEnvInst.Teardown()).ToNot(HaveOccurred()) + } + }) + + Context("Ingestor and Indexer deployment", func() { + It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers", func() { + // Create Service Account + testcaseEnvInst.Log.Info("Create Service Account") + testcaseEnvInst.CreateServiceAccount(serviceAccountName) + + // Deploy Bus Configuration + testcaseEnvInst.Log.Info("Deploy Bus Configuration") + bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + + // Deploy Ingestor Cluster + testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") + _, err = deployment.DeployIngestorCluster(ctx, 
deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") + + // Deploy Cluster Manager + testcaseEnvInst.Log.Info("Deploy Cluster Manager") + _, err = deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec) + Expect(err).To(Succeed(), "Unable to deploy Cluster Manager") + + // Deploy Indexer Cluster + testcaseEnvInst.Log.Info("Deploy Indexer Cluster") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") + + // Ensure that Ingestor Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") + testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + + // Ensure that Cluster Manager is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Cluster Manager is in Ready phase") + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure that Indexer Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Delete the Indexer Cluster + idxc := &enterpriseApi.IndexerCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-idxc", idxc) + Expect(err).To(Succeed(), "Unable to get Indexer Cluster instance", "Indexer Cluster Name", idxc) + err = deployment.DeleteCR(ctx, idxc) + Expect(err).To(Succeed(), "Unable to delete Indexer Cluster instance", "Indexer Cluster Name", idxc) + + // Delete the Ingestor Cluster + ingest := &enterpriseApi.IngestorCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-ingest", ingest) + Expect(err).To(Succeed(), "Unable to get Ingestor Cluster instance", "Ingestor Cluster Name", ingest) + err = deployment.DeleteCR(ctx, ingest) + 
Expect(err).To(Succeed(), "Unable to delete Ingestor Cluster instance", "Ingestor Cluster Name", ingest) + + // Delete the Bus Configuration + busConfiguration := &enterpriseApi.BusConfiguration{} + err = deployment.GetInstance(ctx, "bus-config", busConfiguration) + Expect(err).To(Succeed(), "Unable to get Bus Configuration instance", "Bus Configuration Name", busConfiguration) + err = deployment.DeleteCR(ctx, busConfiguration) + Expect(err).To(Succeed(), "Unable to delete Bus Configuration", "Bus Configuration Name", busConfiguration) + }) + }) + + Context("Ingestor and Indexer deployment", func() { + It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers with additional configurations", func() { + // Create Service Account + testcaseEnvInst.Log.Info("Create Service Account") + testcaseEnvInst.CreateServiceAccount(serviceAccountName) + + // Deploy Bus Configuration + testcaseEnvInst.Log.Info("Deploy Bus Configuration") + bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + + // Upload apps to S3 + testcaseEnvInst.Log.Info("Upload apps to S3") + appFileList := testenv.GetAppFileList(appListV1) + _, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload V1 apps to S3 test directory for IngestorCluster") + + // Deploy Ingestor Cluster with additional configurations (similar to standalone app framework test) + appSourceName := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, s3TestDir, 60) + appFrameworkSpec.MaxConcurrentAppDownloads = uint64(5) + ic := &enterpriseApi.IngestorCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: deployment.GetName() + "-ingest", + Namespace: testcaseEnvInst.GetName(), + }, + Spec: 
enterpriseApi.IngestorClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + ServiceAccount: serviceAccountName, + LivenessInitialDelaySeconds: 600, + ReadinessInitialDelaySeconds: 50, + StartupProbe: &enterpriseApi.Probe{ + InitialDelaySeconds: 40, + TimeoutSeconds: 30, + PeriodSeconds: 30, + FailureThreshold: 12, + }, + LivenessProbe: &enterpriseApi.Probe{ + InitialDelaySeconds: 400, + TimeoutSeconds: 30, + PeriodSeconds: 30, + FailureThreshold: 12, + }, + ReadinessProbe: &enterpriseApi.Probe{ + InitialDelaySeconds: 20, + TimeoutSeconds: 30, + PeriodSeconds: 30, + FailureThreshold: 12, + }, + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: testcaseEnvInst.GetSplunkImage(), + }, + }, + BusConfigurationRef: v1.ObjectReference{Name: bc.Name}, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpec, + }, + } + + testcaseEnvInst.Log.Info("Deploy Ingestor Cluster with additional configurations") + _, err = deployment.DeployIngestorClusterWithAdditionalConfiguration(ctx, ic) + Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") + + // Ensure that Ingestor Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") + testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + + // Verify Ingestor Cluster Pods have apps installed + testcaseEnvInst.Log.Info("Verify Ingestor Cluster Pods have apps installed") + ingestorPod := []string{fmt.Sprintf(testenv.IngestorPod, deployment.GetName()+"-ingest", 0)} + ingestorAppSourceInfo := testenv.AppSourceInfo{ + CrKind: ic.Kind, + CrName: ic.Name, + CrAppSourceName: appSourceName, + CrPod: ingestorPod, + CrAppVersion: "V1", + CrAppScope: enterpriseApi.ScopeLocal, + CrAppList: testenv.BasicApps, + CrAppFileList: testenv.GetAppFileList(testenv.BasicApps), + CrReplicas: 3, + } + allAppSourceInfo := []testenv.AppSourceInfo{ingestorAppSourceInfo} + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + testenv.AppFrameWorkVerifications(ctx, 
deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify probe configuration + testcaseEnvInst.Log.Info("Get config map for probes") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for probes", "ConfigMap", ConfigMapName) + testcaseEnvInst.Log.Info("Verify probe configurations on Ingestor pods") + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + }) + }) + + Context("Ingestor and Indexer deployment", func() { + It("indingsep, integration, indingsep: Splunk Operator can deploy Ingestors and Indexers with correct setup", func() { + // Create Service Account + testcaseEnvInst.Log.Info("Create Service Account") + testcaseEnvInst.CreateServiceAccount(serviceAccountName) + + // Deploy Bus Configuration + testcaseEnvInst.Log.Info("Deploy Bus Configuration") + bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + + // Deploy Ingestor Cluster + testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") + + // Deploy Cluster Manager + testcaseEnvInst.Log.Info("Deploy Cluster Manager") + _, err = deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec) + Expect(err).To(Succeed(), "Unable to deploy Cluster Manager") + + // Deploy Indexer Cluster + 
testcaseEnvInst.Log.Info("Deploy Indexer Cluster") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") + + // Ensure that Ingestor Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") + testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + + // Ensure that Cluster Manager is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Cluster Manager is in Ready phase") + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure that Indexer Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Get instance of current Ingestor Cluster CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config") + ingest := &enterpriseApi.IngestorCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-ingest", ingest) + Expect(err).To(Succeed(), "Failed to get instance of Ingestor Cluster") + + // Verify Ingestor Cluster Status + testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") + Expect(ingest.Status.BusConfiguration).To(Equal(bus), "Ingestor bus configuration status is not the same as provided as input") + + // Get instance of current Indexer Cluster CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") + index := &enterpriseApi.IndexerCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-idxc", index) + Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") + + // Verify Indexer Cluster Status + testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") + Expect(index.Status.BusConfiguration).To(Equal(bus), "Indexer bus configuration status is not the same 
as provided as input") + + // Verify conf files + testcaseEnvInst.Log.Info("Verify conf files") + pods := testenv.DumpGetPods(deployment.GetName()) + for _, pod := range pods { + defaultsConf := "" + + if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") { + // Verify outputs.conf + testcaseEnvInst.Log.Info("Verify outputs.conf") + outputsPath := "opt/splunk/etc/system/local/outputs.conf" + outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") + testenv.ValidateContent(outputsConf, outputs, true) + + // Verify default-mode.conf + testcaseEnvInst.Log.Info("Verify default-mode.conf") + defaultsPath := "opt/splunk/etc/system/local/default-mode.conf" + defaultsConf, err := testenv.GetConfFile(pod, defaultsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod") + testenv.ValidateContent(defaultsConf, defaultsAll, true) + + // Verify AWS env variables + testcaseEnvInst.Log.Info("Verify AWS env variables") + envVars, err := testenv.GetAWSEnv(pod, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod") + testenv.ValidateContent(envVars, awsEnvVars, true) + } + + if strings.Contains(pod, "ingest") { + // Verify default-mode.conf + testcaseEnvInst.Log.Info("Verify default-mode.conf") + testenv.ValidateContent(defaultsConf, defaultsIngest, true) + } else if strings.Contains(pod, "idxc") { + // Verify inputs.conf + testcaseEnvInst.Log.Info("Verify inputs.conf") + inputsPath := "opt/splunk/etc/system/local/inputs.conf" + inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod") + testenv.ValidateContent(inputsConf, inputs, true) + } + } + }) + }) + + Context("Ingestor and Indexer deployment", func() { + It("indingsep, integration, indingsep: 
Splunk Operator can update Ingestors and Indexers with correct setup", func() { + // Create Service Account + testcaseEnvInst.Log.Info("Create Service Account") + testcaseEnvInst.CreateServiceAccount(serviceAccountName) + + // Deploy Bus Configuration + testcaseEnvInst.Log.Info("Deploy Bus Configuration") + bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + + // Deploy Ingestor Cluster + testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") + + // Deploy Cluster Manager + testcaseEnvInst.Log.Info("Deploy Cluster Manager") + _, err = deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec) + Expect(err).To(Succeed(), "Unable to deploy Cluster Manager") + + // Deploy Indexer Cluster + testcaseEnvInst.Log.Info("Deploy Indexer Cluster") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") + + // Ensure that Ingestor Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") + testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + + // Ensure that Cluster Manager is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Cluster Manager is in Ready phase") + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure that Indexer Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Get instance of current Bus Configuration CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Bus 
Configuration CR with latest config") + bus := &enterpriseApi.BusConfiguration{} + err = deployment.GetInstance(ctx, bc.Name, bus) + Expect(err).To(Succeed(), "Failed to get instance of Bus Configuration") + + // Update instance of BusConfiguration CR with new bus configuration + testcaseEnvInst.Log.Info("Update instance of BusConfiguration CR with new bus configuration") + bus.Spec = updateBus + err = deployment.UpdateCR(ctx, bus) + Expect(err).To(Succeed(), "Unable to deploy Bus Configuration with updated CR") + + // Ensure that Ingestor Cluster has not been restarted + testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster has not been restarted") + testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + + // Ensure that Indexer Cluster has not been restarted + testcaseEnvInst.Log.Info("Ensure that Indexer Cluster has not been restarted") + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Get instance of current Ingestor Cluster CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config") + ingest := &enterpriseApi.IngestorCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-ingest", ingest) + Expect(err).To(Succeed(), "Failed to get instance of Ingestor Cluster") + + // Verify Ingestor Cluster Status + testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") + Expect(ingest.Status.BusConfiguration).To(Equal(updateBus), "Ingestor bus configuration status is not the same as provided as input") + + // Get instance of current Indexer Cluster CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") + index := &enterpriseApi.IndexerCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-idxc", index) + Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") + + // Verify Indexer Cluster Status + testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") + 
Expect(index.Status.BusConfiguration).To(Equal(updateBus), "Indexer bus configuration status is not the same as provided as input") + + // Verify conf files + testcaseEnvInst.Log.Info("Verify conf files") + pods := testenv.DumpGetPods(deployment.GetName()) + for _, pod := range pods { + defaultsConf := "" + + if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") { + // Verify outputs.conf + testcaseEnvInst.Log.Info("Verify outputs.conf") + outputsPath := "opt/splunk/etc/system/local/outputs.conf" + outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") + testenv.ValidateContent(outputsConf, updatedOutputs, true) + testenv.ValidateContent(outputsConf, outputsShouldNotContain, false) + + // Verify default-mode.conf + testcaseEnvInst.Log.Info("Verify default-mode.conf") + defaultsPath := "opt/splunk/etc/system/local/default-mode.conf" + defaultsConf, err := testenv.GetConfFile(pod, defaultsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod") + testenv.ValidateContent(defaultsConf, defaultsAll, true) + + // Verify AWS env variables + testcaseEnvInst.Log.Info("Verify AWS env variables") + envVars, err := testenv.GetAWSEnv(pod, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod") + testenv.ValidateContent(envVars, awsEnvVars, true) + } + + if strings.Contains(pod, "ingest") { + // Verify default-mode.conf + testcaseEnvInst.Log.Info("Verify default-mode.conf") + testenv.ValidateContent(defaultsConf, defaultsIngest, true) + } else if strings.Contains(pod, "idxc") { + // Verify inputs.conf + testcaseEnvInst.Log.Info("Verify inputs.conf") + inputsPath := "opt/splunk/etc/system/local/inputs.conf" + inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get inputs.conf from 
Indexer Cluster pod") + testenv.ValidateContent(inputsConf, updatedInputs, true) + testenv.ValidateContent(inputsConf, inputsShouldNotContain, false) + } + } + + // Verify conf files + testcaseEnvInst.Log.Info("Verify conf files") + pods = testenv.DumpGetPods(deployment.GetName()) + for _, pod := range pods { + defaultsConf := "" + + if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") { + // Verify outputs.conf + testcaseEnvInst.Log.Info("Verify outputs.conf") + outputsPath := "opt/splunk/etc/system/local/outputs.conf" + outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") + testenv.ValidateContent(outputsConf, updatedOutputs, true) + testenv.ValidateContent(outputsConf, outputsShouldNotContain, false) + + // Verify default-mode.conf + testcaseEnvInst.Log.Info("Verify default-mode.conf") + defaultsPath := "opt/splunk/etc/system/local/default-mode.conf" + defaultsConf, err := testenv.GetConfFile(pod, defaultsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod") + testenv.ValidateContent(defaultsConf, updatedDefaultsAll, true) + + // Verify AWS env variables + testcaseEnvInst.Log.Info("Verify AWS env variables") + envVars, err := testenv.GetAWSEnv(pod, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod") + testenv.ValidateContent(envVars, awsEnvVars, true) + } + + if strings.Contains(pod, "ingest") { + // Verify default-mode.conf + testcaseEnvInst.Log.Info("Verify default-mode.conf") + testenv.ValidateContent(defaultsConf, updatedDefaultsIngest, true) + } else if strings.Contains(pod, "idxc") { + // Verify inputs.conf + testcaseEnvInst.Log.Info("Verify inputs.conf") + inputsPath := "opt/splunk/etc/system/local/inputs.conf" + inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) + 
Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod") + testenv.ValidateContent(inputsConf, updatedInputs, true) + testenv.ValidateContent(inputsConf, inputsShouldNotContain, false) + } + } + }) + }) +}) diff --git a/test/secret/manager_secret_m4_test.go b/test/secret/manager_secret_m4_test.go index 526af6d31..fdf2d2a31 100644 --- a/test/secret/manager_secret_m4_test.go +++ b/test/secret/manager_secret_m4_test.go @@ -17,8 +17,8 @@ import ( "context" "fmt" - "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/gomega" enterpriseApi "github.com/splunk/splunk-operator/api/v4" diff --git a/test/secret/manager_secret_s1_test.go b/test/secret/manager_secret_s1_test.go index d51e004fd..123538317 100644 --- a/test/secret/manager_secret_s1_test.go +++ b/test/secret/manager_secret_s1_test.go @@ -19,8 +19,8 @@ import ( enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" diff --git a/test/secret/secret_c3_test.go b/test/secret/secret_c3_test.go index 90bb9fe9c..698c84786 100644 --- a/test/secret/secret_c3_test.go +++ b/test/secret/secret_c3_test.go @@ -19,8 +19,8 @@ import ( enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/gomega" "github.com/splunk/splunk-operator/test/testenv" diff --git a/test/secret/secret_m4_test.go b/test/secret/secret_m4_test.go index f257e70ce..e40d94cfd 100644 --- a/test/secret/secret_m4_test.go +++ b/test/secret/secret_m4_test.go @@ -19,8 +19,8 @@ import ( enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" . 
"github.com/onsi/gomega" "github.com/splunk/splunk-operator/test/testenv" diff --git a/test/secret/secret_s1_test.go b/test/secret/secret_s1_test.go index 11c621815..fc7a0e47d 100644 --- a/test/secret/secret_s1_test.go +++ b/test/secret/secret_s1_test.go @@ -19,8 +19,8 @@ import ( enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" diff --git a/test/smartstore/manager_smartstore_test.go b/test/smartstore/manager_smartstore_test.go index 45db78875..b90a68337 100644 --- a/test/smartstore/manager_smartstore_test.go +++ b/test/smartstore/manager_smartstore_test.go @@ -7,8 +7,8 @@ import ( enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/gomega" "github.com/splunk/splunk-operator/test/testenv" diff --git a/test/smartstore/smartstore_test.go b/test/smartstore/smartstore_test.go index f1c330a66..c2d550411 100644 --- a/test/smartstore/smartstore_test.go +++ b/test/smartstore/smartstore_test.go @@ -5,8 +5,8 @@ import ( "fmt" "time" - "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/gomega" enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" diff --git a/test/smoke/smoke_test.go b/test/smoke/smoke_test.go index 9c0a609e6..de4d26e88 100644 --- a/test/smoke/smoke_test.go +++ b/test/smoke/smoke_test.go @@ -17,8 +17,8 @@ import ( "context" "fmt" - "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" . 
"github.com/onsi/gomega" "github.com/splunk/splunk-operator/test/testenv" diff --git a/test/testenv/appframework_utils.go b/test/testenv/appframework_utils.go index d1f2f938c..e9879679b 100644 --- a/test/testenv/appframework_utils.go +++ b/test/testenv/appframework_utils.go @@ -250,6 +250,28 @@ func GetAppDeploymentInfoStandalone(ctx context.Context, deployment *Deployment, return appDeploymentInfo, err } +// GetAppDeploymentInfoIngestorCluster returns AppDeploymentInfo for given IngestorCluster, appSourceName and appName +func GetAppDeploymentInfoIngestorCluster(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, name string, appSourceName string, appName string) (enterpriseApi.AppDeploymentInfo, error) { + ingestor := &enterpriseApi.IngestorCluster{} + appDeploymentInfo := enterpriseApi.AppDeploymentInfo{} + err := deployment.GetInstance(ctx, name, ingestor) + if err != nil { + testenvInstance.Log.Error(err, "Failed to get CR ", "CR Name", name) + return appDeploymentInfo, err + } + appInfoList := ingestor.Status.AppContext.AppsSrcDeployStatus[appSourceName].AppDeploymentInfoList + for _, appInfo := range appInfoList { + testenvInstance.Log.Info("Checking Ingestor AppInfo Struct", "App Name", appName, "App Source", appSourceName, "Ingestor Name", name, "AppDeploymentInfo", appInfo) + if strings.Contains(appName, appInfo.AppName) { + testenvInstance.Log.Info("App Deployment Info found.", "App Name", appName, "App Source", appSourceName, "Ingestor Name", name, "AppDeploymentInfo", appInfo) + appDeploymentInfo = appInfo + return appDeploymentInfo, nil + } + } + testenvInstance.Log.Info("App Info not found in App Info List", "App Name", appName, "App Source", appSourceName, "Ingestor Name", name, "App Info List", appInfoList) + return appDeploymentInfo, err +} + // GetAppDeploymentInfoMonitoringConsole returns AppDeploymentInfo for given Monitoring Console, appSourceName and appName func GetAppDeploymentInfoMonitoringConsole(ctx 
context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, name string, appSourceName string, appName string) (enterpriseApi.AppDeploymentInfo, error) { mc := &enterpriseApi.MonitoringConsole{} @@ -345,6 +367,8 @@ func GetAppDeploymentInfo(ctx context.Context, deployment *Deployment, testenvIn switch crKind { case "Standalone": appDeploymentInfo, err = GetAppDeploymentInfoStandalone(ctx, deployment, testenvInstance, name, appSourceName, appName) + case "IngestorCluster": + appDeploymentInfo, err = GetAppDeploymentInfoIngestorCluster(ctx, deployment, testenvInstance, name, appSourceName, appName) case "MonitoringConsole": appDeploymentInfo, err = GetAppDeploymentInfoMonitoringConsole(ctx, deployment, testenvInstance, name, appSourceName, appName) case "SearchHeadCluster": diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 85e753a84..2e312c652 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -431,9 +431,9 @@ func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(ctx context.Contex } // DeployIndexerCluster deploys the indexer cluster -func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string) (*enterpriseApi.IndexerCluster, error) { +func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, busConfig corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { d.testenv.Log.Info("Deploying indexer cluster", "name", name, "CM", clusterManagerRef) - indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage) + indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, busConfig, serviceAccountName) pdata, _ := 
json.Marshal(indexer) d.testenv.Log.Info("indexer cluster spec", "cr", string(pdata)) deployed, err := d.deployCR(ctx, name, indexer) @@ -444,6 +444,53 @@ func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseMana return deployed.(*enterpriseApi.IndexerCluster), err } +// DeployIngestorCluster deploys the ingestor cluster +func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, busConfig corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { + d.testenv.Log.Info("Deploying ingestor cluster", "name", name) + + ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, busConfig, serviceAccountName) + pdata, _ := json.Marshal(ingestor) + + d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, ingestor) + if err != nil { + return nil, err + } + + return deployed.(*enterpriseApi.IngestorCluster), err +} + +// DeployBusConfiguration deploys the bus configuration +func (d *Deployment) DeployBusConfiguration(ctx context.Context, name string, busConfig enterpriseApi.BusConfigurationSpec) (*enterpriseApi.BusConfiguration, error) { + d.testenv.Log.Info("Deploying bus configuration", "name", name) + + busCfg := newBusConfiguration(name, d.testenv.namespace, busConfig) + pdata, _ := json.Marshal(busCfg) + + d.testenv.Log.Info("bus configuration spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, busCfg) + if err != nil { + return nil, err + } + + return deployed.(*enterpriseApi.BusConfiguration), err +} + +// DeployIngestorClusterWithAdditionalConfiguration deploys the ingestor cluster with additional configuration +func (d *Deployment) DeployIngestorClusterWithAdditionalConfiguration(ctx context.Context, ic *enterpriseApi.IngestorCluster) (*enterpriseApi.IngestorCluster, error) { + d.testenv.Log.Info("Deploying ingestor cluster with additional configuration", "name", ic.Name) + + pdata, _ 
:= json.Marshal(ic) + + d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, ic.Name, ic) + if err != nil { + return nil, err + } + + return deployed.(*enterpriseApi.IngestorCluster), err +} + // DeploySearchHeadCluster deploys a search head cluster func (d *Deployment) DeploySearchHeadCluster(ctx context.Context, name, ClusterManagerRef, LicenseManagerName string, ansibleConfig string, mcRef string) (*enterpriseApi.SearchHeadCluster, error) { d.testenv.Log.Info("Deploying search head cluster", "name", name) @@ -576,6 +623,24 @@ func (d *Deployment) UpdateCR(ctx context.Context, cr client.Object) error { ucr := cr.(*enterpriseApi.IndexerCluster) current.Spec = ucr.Spec cobject = current + case "IngestorCluster": + current := &enterpriseApi.IngestorCluster{} + err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) + if err != nil { + return err + } + ucr := cr.(*enterpriseApi.IngestorCluster) + current.Spec = ucr.Spec + cobject = current + case "BusConfiguration": + current := &enterpriseApi.BusConfiguration{} + err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) + if err != nil { + return err + } + ucr := cr.(*enterpriseApi.BusConfiguration) + current.Spec = ucr.Spec + cobject = current case "ClusterMaster": current := &enterpriseApiV3.ClusterMaster{} err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) @@ -675,7 +740,7 @@ func (d *Deployment) DeploySingleSiteCluster(ctx context.Context, name string, i } // Deploy the indexer cluster - _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "") + _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") if err != nil { return err } @@ -733,7 +798,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHead(ctx context.Cont multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := 
d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults) + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } @@ -805,7 +870,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHead(ctx context.Context, n multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults) + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } @@ -866,7 +931,7 @@ func (d *Deployment) DeployMultisiteCluster(ctx context.Context, name string, in multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults) + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1002,7 +1067,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndIndexes(ctx context. 
multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults) + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1057,7 +1122,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndIndexes(ctx co multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults) + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1162,7 +1227,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx contex } // Deploy the indexer cluster - idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "") + idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1240,7 +1305,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenAppFrameworkSpec(ctx } // Deploy the indexer cluster - idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "") + idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1340,7 +1405,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx con multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults) + idxc, err := d.DeployIndexerCluster(ctx, 
name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1444,7 +1509,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(c multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults) + idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1525,7 +1590,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenMonitoringConsole(ctx conte } // Deploy the indexer cluster - _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "") + _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") if err != nil { return err } @@ -1597,7 +1662,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenMonitoringConsole(ctx } // Deploy the indexer cluster - _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "") + _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, "") if err != nil { return err } @@ -1691,7 +1756,7 @@ func (d *Deployment) DeployMultisiteClusterWithMonitoringConsole(ctx context.Con multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults) + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1791,7 +1856,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithMonitoringConsole(ctx conte multisite_master: 
splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults) + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } diff --git a/test/testenv/testenv.go b/test/testenv/testenv.go index 7e4579ee2..f82310015 100644 --- a/test/testenv/testenv.go +++ b/test/testenv/testenv.go @@ -20,9 +20,10 @@ import ( "fmt" "net" "os" - "sigs.k8s.io/controller-runtime/pkg/metrics/server" "time" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" + enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -77,6 +78,9 @@ const ( // LicenseMasterPod Template String for standalone pod LicenseMasterPod = "splunk-%s-" + splcommon.LicenseManager + "-%d" + // IngestorPod Template String for ingestor pod + IngestorPod = "splunk-%s-ingestor-%d" + // IndexerPod Template String for indexer pod IndexerPod = "splunk-%s-idxc-indexer-%d" diff --git a/test/testenv/util.go b/test/testenv/util.go index fce1b58b1..b779ab3c3 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -30,6 +30,8 @@ import ( enterpriseApi "github.com/splunk/splunk-operator/api/v4" + . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo/v2" enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" @@ -357,7 +359,7 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseManagerName, ansibleConfi } // newIndexerCluster creates and initialize the CR for IndexerCluster Kind -func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string) *enterpriseApi.IndexerCluster { +func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, busConfig corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName) clusterMasterRef, clusterManagerRef := swapClusterManager(name, clusterManagerRef) @@ -374,7 +376,8 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste Spec: enterpriseApi.IndexerClusterSpec{ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Volumes: []corev1.Volume{}, + ServiceAccount: serviceAccountName, + Volumes: []corev1.Volume{}, Spec: enterpriseApi.Spec{ ImagePullPolicy: "Always", Image: splunkImage, @@ -393,13 +396,56 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste }, Defaults: ansibleConfig, }, - Replicas: int32(replicas), + Replicas: int32(replicas), + BusConfigurationRef: busConfig, }, } return &new } +// newIngestorCluster creates and initialize the CR for IngestorCluster Kind +func newIngestorCluster(name, ns string, replicas int, splunkImage string, busConfig corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { + return &enterpriseApi.IngestorCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "IngestorCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, + }, + + 
Spec: enterpriseApi.IngestorClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + ServiceAccount: serviceAccountName, + Volumes: []corev1.Volume{}, + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: splunkImage, + }, + }, + Replicas: int32(replicas), + BusConfigurationRef: busConfig, + }, + } +} + +// newBusConfiguration creates and initializes the CR for BusConfiguration Kind +func newBusConfiguration(name, ns string, busConfig enterpriseApi.BusConfigurationSpec) *enterpriseApi.BusConfiguration { + return &enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: busConfig, + + } +} + func newSearchHeadCluster(name, ns, clusterManagerRef, licenseManagerName, ansibleConfig, splunkImage string) *enterpriseApi.SearchHeadCluster { licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName) @@ -1188,3 +1234,47 @@ func DeleteConfigMap(ns string, ConfigMapName string) error { } return nil } + +// GetConfFile gets config file from pod +func GetConfFile(podName, filePath, ns string) (string, error) { + var config string + var err error + + output, err := exec.Command("kubectl", "exec", "-n", ns, podName, "--", "cat", filePath).Output() + if err != nil { + cmd := fmt.Sprintf("kubectl exec -n %s %s -- cat %s", ns, podName, filePath) + logf.Log.Error(err, "Failed to execute command", "command", cmd) + return config, err + } + + return string(output), err +} + +// GetAWSEnv gets AWS environment variables from pod +func GetAWSEnv(podName, ns string) (string, error) { + var config string + var err error + + output, err := exec.Command("kubectl", "exec", "-n", ns, podName, "--", "sh", "-c", "env | grep -i aws").Output() + if err != nil { + cmd := fmt.Sprintf("kubectl exec -n %s %s -- env | grep -i aws", ns, podName) + logf.Log.Error(err, "Failed to execute command", "command", cmd) + return config, err + } + + return 
string(output), err +} + +func ValidateContent(confFileContent string, listOfStringsForValidation []string, shouldContain bool) { + for _, str := range listOfStringsForValidation { + if shouldContain { + if !strings.Contains(confFileContent, str) { + Expect(confFileContent).To(ContainSubstring(str), "Failed to find string "+str+" in conf file") + } + } else { + if strings.Contains(confFileContent, str) { + Expect(confFileContent).ToNot(ContainSubstring(str), "Found string "+str+" in conf file, but it should not be there") + } + } + } +} diff --git a/test/testenv/verificationutils.go b/test/testenv/verificationutils.go index e5c734405..6ec2cc310 100644 --- a/test/testenv/verificationutils.go +++ b/test/testenv/verificationutils.go @@ -185,6 +185,34 @@ func SingleSiteIndexersReady(ctx context.Context, deployment *Deployment, testen }, ConsistentDuration, ConsistentPollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) } +// IngestorReady verifies the ingestor cluster instance reaches the Ready phase +func IngestorReady(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv) { + ingest := &enterpriseApi.IngestorCluster{} + instanceName := fmt.Sprintf("%s-ingest", deployment.GetName()) + + gomega.Eventually(func() enterpriseApi.Phase { + err := deployment.GetInstance(ctx, instanceName, ingest) + if err != nil { + return enterpriseApi.PhaseError + } + + testenvInstance.Log.Info("Waiting for ingestor instance's phase to be ready", "instance", instanceName, "phase", ingest.Status.Phase) + DumpGetPods(testenvInstance.GetName()) + + return ingest.Status.Phase + }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) + + // In a steady state, we should stay in Ready and not flip-flop around + gomega.Consistently(func() enterpriseApi.Phase { + _ = deployment.GetInstance(ctx, instanceName, ingest) + + testenvInstance.Log.Info("Check for Consistency ingestor instance's phase to be ready", "instance", instanceName, "phase", ingest.Status.Phase) + 
DumpGetSplunkVersion(ctx, testenvInstance.GetName(), deployment, "-ingest-") + + return ingest.Status.Phase + }, ConsistentDuration, ConsistentPollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) +} + // ClusterManagerReady verify Cluster Manager Instance is in ready status func ClusterManagerReady(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv) { // Ensure that the cluster-manager goes to Ready phase