diff --git a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml index aec3def7b..d354dfd5e 100644 --- a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml +++ b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml @@ -120,7 +120,7 @@ jobs: appframeworksS1, managersecret, managermc, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest env: diff --git a/.github/workflows/arm-AL2023-int-test-workflow.yml b/.github/workflows/arm-AL2023-int-test-workflow.yml index 37d8812e8..8862b6dc3 100644 --- a/.github/workflows/arm-AL2023-int-test-workflow.yml +++ b/.github/workflows/arm-AL2023-int-test-workflow.yml @@ -68,7 +68,7 @@ jobs: managercrcrud, licensemanager, managerdeletecr, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image-arm-al2023 diff --git a/.github/workflows/arm-RHEL-build-test-push-workflow.yml b/.github/workflows/arm-RHEL-build-test-push-workflow.yml index 1b79b531a..eb2580800 100644 --- a/.github/workflows/arm-RHEL-build-test-push-workflow.yml +++ b/.github/workflows/arm-RHEL-build-test-push-workflow.yml @@ -68,7 +68,7 @@ jobs: managercrcrud, licensemanager, managerdeletecr, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image-arm-rhel diff --git a/.github/workflows/arm-RHEL-int-test-workflow.yml b/.github/workflows/arm-RHEL-int-test-workflow.yml index 1b79b531a..eb2580800 100644 --- a/.github/workflows/arm-RHEL-int-test-workflow.yml +++ b/.github/workflows/arm-RHEL-int-test-workflow.yml @@ -68,7 +68,7 @@ jobs: managercrcrud, licensemanager, managerdeletecr, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image-arm-rhel diff --git a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml index 1449178ec..8606c1da6 100644 --- a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml +++ b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml @@ -120,7 +120,7 @@ jobs: appframeworksS1, managersecret, managermc, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest env: diff --git a/.github/workflows/arm-Ubuntu-int-test-workflow.yml b/.github/workflows/arm-Ubuntu-int-test-workflow.yml index 3806a9654..3084d9307 100644 --- a/.github/workflows/arm-Ubuntu-int-test-workflow.yml +++ b/.github/workflows/arm-Ubuntu-int-test-workflow.yml @@ -68,7 +68,7 @@ jobs: managercrcrud, licensemanager, managerdeletecr, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image-arm-ubuntu diff --git a/.github/workflows/build-test-push-workflow.yml b/.github/workflows/build-test-push-workflow.yml index 568823343..bc5e28998 100644 --- a/.github/workflows/build-test-push-workflow.yml +++ b/.github/workflows/build-test-push-workflow.yml @@ -166,7 +166,7 @@ jobs: managerappframeworkm4, managersecret, managermc, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest env: diff --git a/.github/workflows/distroless-build-test-push-workflow.yml b/.github/workflows/distroless-build-test-push-workflow.yml index 49a38bcf7..63cc09533 100644 --- a/.github/workflows/distroless-build-test-push-workflow.yml +++ b/.github/workflows/distroless-build-test-push-workflow.yml @@ -167,7 +167,7 @@ jobs: managerappframeworkm4, managersecret, managermc, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest env: diff --git a/.github/workflows/distroless-int-test-workflow.yml 
b/.github/workflows/distroless-int-test-workflow.yml index 320a2692a..8250b379c 100644 --- a/.github/workflows/distroless-int-test-workflow.yml +++ b/.github/workflows/distroless-int-test-workflow.yml @@ -64,7 +64,7 @@ jobs: managercrcrud, licensemanager, managerdeletecr, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image-distroless diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index 65a188c77..9ef6e4658 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -100,8 +100,8 @@ jobs: version: ${{ steps.dotenv.outputs.KUBECTL_VERSION }} - name: Install kuttl run: | - sudo curl -LO https://github.com/kudobuilder/kuttl/releases/download/v0.12.0/kuttl_0.12.0_linux_x86_64.tar.gz - sudo tar -xvzf kuttl_0.12.0_linux_x86_64.tar.gz + sudo curl -LO https://github.com/kudobuilder/kuttl/releases/download/v0.22.0/kuttl_0.22.0_linux_x86_64.tar.gz + sudo tar -xvzf kuttl_0.22.0_linux_x86_64.tar.gz sudo chmod +x kubectl-kuttl sudo mv kubectl-kuttl /usr/local/bin/kubectl-kuttl - name: Install Python diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 4af8fee67..b89bbd28e 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -61,7 +61,7 @@ jobs: managercrcrud, licensemanager, managerdeletecr, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image diff --git a/.github/workflows/manual-int-test-workflow.yml b/.github/workflows/manual-int-test-workflow.yml index d83a56759..ca5299cb7 100644 --- a/.github/workflows/manual-int-test-workflow.yml +++ b/.github/workflows/manual-int-test-workflow.yml @@ -23,7 +23,7 @@ jobs: managerscaling, managercrcrud, licensemanager, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest env: diff --git a/.github/workflows/namespace-scope-int-workflow.yml b/.github/workflows/namespace-scope-int-workflow.yml index 6fc6138eb..8a1365f1e 100644 --- a/.github/workflows/namespace-scope-int-workflow.yml +++ b/.github/workflows/namespace-scope-int-workflow.yml @@ -19,7 +19,7 @@ jobs: managerscaling, managercrcrud, licensemanager, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest env: diff --git a/.github/workflows/nightly-int-test-workflow.yml b/.github/workflows/nightly-int-test-workflow.yml index 03187afe8..10fde82be 100644 --- a/.github/workflows/nightly-int-test-workflow.yml +++ b/.github/workflows/nightly-int-test-workflow.yml @@ -59,7 +59,7 @@ jobs: managerscaling, managercrcrud, licensemanager, - indexingestseparation, + indingsep, ] runs-on: ubuntu-latest needs: build-operator-image diff --git a/PROJECT b/PROJECT index 5bd530bda..983f3418b 100644 --- a/PROJECT +++ b/PROJECT @@ -122,4 +122,13 @@ resources: kind: IngestorCluster path: github.com/splunk/splunk-operator/api/v4 version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: BusConfiguration + path: github.com/splunk/splunk-operator/api/v4 + version: v4 version: "3" diff --git a/api/v4/busconfiguration_types.go b/api/v4/busconfiguration_types.go new file mode 100644 index 000000000..f6b2fc065 --- /dev/null +++ b/api/v4/busconfiguration_types.go @@ -0,0 +1,137 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +const ( + // BusConfigurationPausedAnnotation is the annotation that pauses the reconciliation (triggers + // an immediate requeue) + BusConfigurationPausedAnnotation = "busconfiguration.enterprise.splunk.com/paused" +) + +// BusConfigurationSpec defines the desired state of BusConfiguration +type BusConfigurationSpec struct { + Type string `json:"type"` + + SQS SQSSpec `json:"sqs"` +} + +type SQSSpec struct { + QueueName string `json:"queueName"` + + AuthRegion string `json:"authRegion"` + + Endpoint string `json:"endpoint"` + + LargeMessageStoreEndpoint string `json:"largeMessageStoreEndpoint"` + + LargeMessageStorePath string `json:"largeMessageStorePath"` + + DeadLetterQueueName string `json:"deadLetterQueueName"` +} + +// BusConfigurationStatus defines the observed state of BusConfiguration. +type BusConfigurationStatus struct { + // Phase of the bus configuration + Phase Phase `json:"phase"` + + // Resource revision tracker + ResourceRevMap map[string]string `json:"resourceRevMap"` + + // Auxillary message describing CR status + Message string `json:"message"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// BusConfiguration is the Schema for a Splunk Enterprise bus configuration +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +kubebuilder:resource:path=busconfigurations,scope=Namespaced,shortName=bus +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of bus configuration" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of bus configuration resource" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" +// +kubebuilder:storageversion + +// BusConfiguration is the Schema for the busconfigurations API +type BusConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + Spec BusConfigurationSpec `json:"spec"` + Status BusConfigurationStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements runtime.Object +func (in *BusConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// +kubebuilder:object:root=true + +// BusConfigurationList contains a list of BusConfiguration +type BusConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BusConfiguration `json:"items"` +} + +func init() { + SchemeBuilder.Register(&BusConfiguration{}, &BusConfigurationList{}) +} + +// NewEvent creates a new event associated with the object and ready 
+// to be published to Kubernetes API +func (bc *BusConfiguration) NewEvent(eventType, reason, message string) corev1.Event { + t := metav1.Now() + return corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: reason + "-", + Namespace: bc.ObjectMeta.Namespace, + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "BusConfiguration", + Namespace: bc.Namespace, + Name: bc.Name, + UID: bc.UID, + APIVersion: GroupVersion.String(), + }, + Reason: reason, + Message: message, + Source: corev1.EventSource{ + Component: "splunk-busconfiguration-controller", + }, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventType, + ReportingController: "enterprise.splunk.com/busconfiguration-controller", + } +} diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 84cd680b9..493aeb0f3 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -38,9 +38,8 @@ const ( type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` - PipelineConfig PipelineConfigSpec `json:"pipelineConfig,omitempty"` - - PullBus PushBusSpec `json:"pullBus,omitempty"` + // Bus configuration reference + BusConfigurationRef corev1.ObjectReference `json:"busConfigurationRef,omitempty"` // Number of search head pods; a search head cluster will be created if > 1 Replicas int32 `json:"replicas"` @@ -113,14 +112,11 @@ type IndexerClusterStatus struct { // status of each indexer cluster peer Peers []IndexerClusterMemberStatus `json:"peers"` - // Pipeline configuration status - PipelineConfig PipelineConfigSpec `json:"pipelineConfig,omitempty"` - - // Pull Bus status - PullBus PushBusSpec `json:"pullBus,omitempty"` - // Auxillary message describing CR status Message string `json:"message"` + + // Bus configuration + BusConfiguration BusConfigurationSpec `json:"busConfiguration,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 6e8b50951..1ebc660d0 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -42,71 +42,8 @@ type IngestorClusterSpec struct { // Splunk Enterprise app repository that specifies remote app location and scope for Splunk app management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` - // Push Bus spec - PushBus PushBusSpec `json:"pushBus"` - - // Pipeline configuration - PipelineConfig PipelineConfigSpec `json:"pipelineConfig"` -} - -// Helper types -// Only SQS as of now -type PushBusSpec struct { - // +kubebuilder:validation:Enum=sqs_smartbus - // +kubebuilder:default=sqs_smartbus - Type string `json:"type"` - - SQS SQSSpec `json:"sqs"` -} - -type SQSSpec struct { - QueueName string `json:"queueName"` - - AuthRegion string `json:"authRegion"` - - // +kubebuilder:validation:Pattern=`^https://` - Endpoint string `json:"endpoint"` - - // +kubebuilder:validation:Pattern=`^https://` - LargeMessageStoreEndpoint string `json:"largeMessageStoreEndpoint"` - - // +kubebuilder:validation:Pattern=`^s3://` - LargeMessageStorePath string `json:"largeMessageStorePath"` - - DeadLetterQueueName string `json:"deadLetterQueueName"` - - // +kubebuilder:validation:Minimum=0 - // +kubebuilder:default=3 - MaxRetriesPerPart int `json:"maxRetriesPerPart"` - - // +kubebuilder:validation:Enum=max_count - // +kubebuilder:default=max_count - RetryPolicy string `json:"retryPolicy"` - - // +kubebuilder:validation:Pattern=`^[0-9]+s$` - // +kubebuilder:default="5s" - SendInterval string `json:"sendInterval"` 
- - EncodingFormat string `json:"encodingFormat"` -} - -type PipelineConfigSpec struct { - // +kubebuilder:default=false - RemoteQueueRuleset bool `json:"remoteQueueRuleset"` - - // +kubebuilder:default=true - RuleSet bool `json:"ruleSet"` - - // +kubebuilder:default=false - RemoteQueueTyping bool `json:"remoteQueueTyping"` - - // +kubebuilder:default=false - RemoteQueueOutput bool `json:"remoteQueueOutput"` - - // +kubebuilder:default=true - Typing bool `json:"typing"` - - IndexerPipe bool `json:"indexerPipe"` + // Bus configuration reference + BusConfigurationRef corev1.ObjectReference `json:"busConfigurationRef"` } // IngestorClusterStatus defines the observed state of Ingestor Cluster @@ -135,11 +72,8 @@ type IngestorClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Pipeline configuration status - PipelineConfig PipelineConfigSpec `json:"pipelineConfig"` - - // Push Bus status - PushBus PushBusSpec `json:"pushBus"` + // Bus configuration + BusConfiguration BusConfigurationSpec `json:"busConfiguration,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index aabda82b3..fa23c996a 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -180,6 +180,95 @@ func (in *BundlePushTracker) DeepCopy() *BundlePushTracker { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BusConfiguration) DeepCopyInto(out *BusConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfiguration. +func (in *BusConfiguration) DeepCopy() *BusConfiguration { + if in == nil { + return nil + } + out := new(BusConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BusConfigurationList) DeepCopyInto(out *BusConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BusConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationList. +func (in *BusConfigurationList) DeepCopy() *BusConfigurationList { + if in == nil { + return nil + } + out := new(BusConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BusConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BusConfigurationSpec) DeepCopyInto(out *BusConfigurationSpec) { + *out = *in + out.SQS = in.SQS +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationSpec. 
+func (in *BusConfigurationSpec) DeepCopy() *BusConfigurationSpec { + if in == nil { + return nil + } + out := new(BusConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BusConfigurationStatus) DeepCopyInto(out *BusConfigurationStatus) { + *out = *in + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationStatus. +func (in *BusConfigurationStatus) DeepCopy() *BusConfigurationStatus { + if in == nil { + return nil + } + out := new(BusConfigurationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CacheManagerSpec) DeepCopyInto(out *CacheManagerSpec) { *out = *in @@ -511,8 +600,7 @@ func (in *IndexerClusterMemberStatus) DeepCopy() *IndexerClusterMemberStatus { func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - out.PipelineConfig = in.PipelineConfig - out.PullBus = in.PullBus + out.BusConfigurationRef = in.BusConfigurationRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterSpec. @@ -545,8 +633,7 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = make([]IndexerClusterMemberStatus, len(*in)) copy(*out, *in) } - out.PipelineConfig = in.PipelineConfig - out.PullBus = in.PullBus + out.BusConfiguration = in.BusConfiguration } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterStatus. @@ -615,8 +702,7 @@ func (in *IngestorClusterSpec) DeepCopyInto(out *IngestorClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) - out.PushBus = in.PushBus - out.PipelineConfig = in.PipelineConfig + out.BusConfigurationRef = in.BusConfigurationRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterSpec. @@ -640,8 +726,7 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) - out.PipelineConfig = in.PipelineConfig - out.PushBus = in.PushBus + out.BusConfiguration = in.BusConfiguration } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterStatus. @@ -861,21 +946,6 @@ func (in *PhaseInfo) DeepCopy() *PhaseInfo { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineConfigSpec) DeepCopyInto(out *PipelineConfigSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineConfigSpec. -func (in *PipelineConfigSpec) DeepCopy() *PipelineConfigSpec { - if in == nil { - return nil - } - out := new(PipelineConfigSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PremiumAppsProps) DeepCopyInto(out *PremiumAppsProps) { *out = *in @@ -907,22 +977,6 @@ func (in *Probe) DeepCopy() *Probe { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PushBusSpec) DeepCopyInto(out *PushBusSpec) { - *out = *in - out.SQS = in.SQS -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushBusSpec. -func (in *PushBusSpec) DeepCopy() *PushBusSpec { - if in == nil { - return nil - } - out := new(PushBusSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SQSSpec) DeepCopyInto(out *SQSSpec) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index 517a1c48e..4f22d0d83 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -23,10 +23,11 @@ import ( "os" "time" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + intController "github.com/splunk/splunk-operator/internal/controller" "github.com/splunk/splunk-operator/internal/controller/debug" "github.com/splunk/splunk-operator/pkg/config" - "sigs.k8s.io/controller-runtime/pkg/metrics/filters" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. @@ -110,11 +111,11 @@ func main() { // as certificates issued by a trusted Certificate Authority (CA). The primary risk is potentially allowing // unauthorized access to sensitive metrics data. Consider replacing with CertDir, CertName, and KeyName // to provide certificates, ensuring the server communicates using trusted and secure certificates. - TLSOpts: tlsOpts, + TLSOpts: tlsOpts, FilterProvider: filters.WithAuthenticationAndAuthorization, } - // TODO: enable https for /metrics endpoint by default + // TODO: enable https for /metrics endpoint by default // if secureMetrics { // // FilterProvider is used to protect the metrics endpoint with authn/authz. 
// // These configurations ensure that only authorized users and service accounts @@ -231,6 +232,13 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "IngestorCluster") os.Exit(1) } + if err := (&controller.BusConfigurationReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "BusConfiguration") + os.Exit(1) + } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml b/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml new file mode 100644 index 000000000..9f80cdbea --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml @@ -0,0 +1,106 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: busconfigurations.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: BusConfiguration + listKind: BusConfigurationList + plural: busconfigurations + shortNames: + - bus + singular: busconfiguration + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of bus configuration + jsonPath: .status.phase + name: Phase + type: string + - description: Age of bus configuration resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Auxillary message describing CR status + jsonPath: .status.message + name: Message + type: string + name: v4 + schema: + openAPIV3Schema: + description: BusConfiguration is the Schema for the busconfigurations API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BusConfigurationSpec defines the desired state of BusConfiguration + properties: + sqs: + properties: + authRegion: + type: string + deadLetterQueueName: + type: string + endpoint: + type: string + largeMessageStoreEndpoint: + type: string + largeMessageStorePath: + type: string + queueName: + type: string + type: object + type: + type: string + type: object + status: + description: BusConfigurationStatus defines the observed state of BusConfiguration. 
+ properties: + message: + description: Auxillary message describing CR status + type: string + phase: + description: Phase of the bus configuration + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + resourceRevMap: + additionalProperties: + type: string + description: Resource revision tracker + type: object + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 8d8d4b61c..d66e057fb 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -5165,6 +5165,49 @@ spec: x-kubernetes-list-type: atomic type: object type: object + busConfigurationRef: + description: Bus configuration reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic clusterManagerRef: description: ClusterManagerRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -5604,71 +5647,6 @@ spec: type: string type: object x-kubernetes-map-type: atomic - pipelineConfig: - properties: - indexerPipe: - default: true - type: boolean - remoteQueueOutput: - default: false - type: boolean - remoteQueueRuleset: - default: false - type: boolean - remoteQueueTyping: - default: false - type: boolean - ruleSet: - default: true - type: boolean - typing: - default: true - type: boolean - type: object - pullBus: - description: |- - Helper types - Only SQS as of now - properties: - sqs: - properties: - authRegion: - type: string - deadLetterQueueName: - type: string - encodingFormat: - type: string - endpoint: - pattern: ^https:// - type: string - largeMessageStoreEndpoint: - pattern: ^https:// - type: string - largeMessageStorePath: - pattern: ^s3:// - type: string - maxRetriesPerPart: - default: 3 - minimum: 0 - type: integer - queueName: - type: string - retryPolicy: - default: max_count - enum: - - max_count - type: string - sendInterval: - default: 5s - pattern: ^[0-9]+s$ - type: string - type: object - type: - default: sqs_smartbus - enum: - - sqs_smartbus - type: string - type: object readinessInitialDelaySeconds: description: |- ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe @@ -8316,6 +8294,27 @@ spec: type: boolean description: Holds secrets whose IDXC password has changed type: object + busConfiguration: + description: Bus configuration + properties: + sqs: + properties: + authRegion: + type: string + deadLetterQueueName: + type: string + endpoint: + type: string + largeMessageStoreEndpoint: + type: string + largeMessageStorePath: + type: string + queueName: + type: string + type: object + type: + type: string + type: object clusterManagerPhase: description: current phase of the cluster manager enum: @@ -8400,51 +8399,6 @@ spec: - Terminating - Error type: string - pipelineConfig: - description: Pipeline configuration status - properties: - indexerPipe: - type: boolean - remoteQueueOutput: - type: boolean - remoteQueueRuleset: - type: boolean - remoteQueueTyping: - type: boolean - ruleSet: - type: boolean - typing: - type: boolean - type: object - pullBus: - description: Pull Bus status - properties: - sqs: - properties: - authRegion: - type: string - deadLetterQueueName: - type: string - encodingFormat: - type: string - endpoint: - type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: - type: string - maxRetriesPerPart: - type: integer - queueName: - type: string - retryPolicy: - type: string - sendInterval: - type: string - type: object - type: - type: string - type: object readyReplicas: description: current number of ready indexer peers format: int32 diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index f01444587..82f1f868a 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -1141,6 +1141,49 @@ spec: type: object type: array type: object + busConfigurationRef: + description: Bus configuration reference + 
properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic clusterManagerRef: description: ClusterManagerRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -1580,70 +1623,6 @@ spec: type: string type: object x-kubernetes-map-type: atomic - pipelineConfig: - description: Pipeline configuration - properties: - indexerPipe: - default: true - type: boolean - remoteQueueOutput: - default: false - type: boolean - remoteQueueRuleset: - default: false - type: boolean - remoteQueueTyping: - default: false - type: boolean - ruleSet: - default: true - type: boolean - typing: - default: true - type: boolean - type: object - pushBus: - description: Push Bus spec - properties: - sqs: - properties: - authRegion: - type: string - deadLetterQueueName: - type: string - encodingFormat: - type: string - endpoint: - pattern: ^https:// - type: string - largeMessageStoreEndpoint: - pattern: ^https:// - type: string - largeMessageStorePath: - pattern: ^s3:// - type: string - maxRetriesPerPart: - default: 3 - minimum: 0 - type: integer - queueName: - type: string - retryPolicy: - default: max_count - enum: - - max_count - type: string - sendInterval: - default: 5s - pattern: ^[0-9]+s$ - type: string - type: object - type: - default: sqs_smartbus - enum: - - sqs_smartbus - type: string - type: object readinessInitialDelaySeconds: description: |- ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe @@ -4566,38 +4545,8 @@ spec: description: App Framework version info for future use type: integer type: object - message: - description: Auxillary message describing CR status - type: string - phase: - description: Phase of the ingestor pods - enum: - - Pending - - Ready - - Updating - - ScalingUp - - ScalingDown - - Terminating - - Error - type: 
string - pipelineConfig: - description: Pipeline configuration status - properties: - indexerPipe: - type: boolean - remoteQueueOutput: - type: boolean - remoteQueueRuleset: - type: boolean - remoteQueueTyping: - type: boolean - ruleSet: - type: boolean - typing: - type: boolean - type: object - pushBus: - description: Push Bus status + busConfiguration: + description: Bus configuration properties: sqs: properties: @@ -4605,26 +4554,32 @@ spec: type: string deadLetterQueueName: type: string - encodingFormat: - type: string endpoint: type: string largeMessageStoreEndpoint: type: string largeMessageStorePath: type: string - maxRetriesPerPart: - type: integer queueName: type: string - retryPolicy: - type: string - sendInterval: - type: string type: object type: type: string type: object + message: + description: Auxillary message describing CR status + type: string + phase: + description: Phase of the ingestor pods + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string readyReplicas: description: Number of ready ingestor pods format: int32 diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 2ec9c6d4f..679c1dc72 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -11,6 +11,7 @@ resources: - bases/enterprise.splunk.com_searchheadclusters.yaml - bases/enterprise.splunk.com_standalones.yaml - bases/enterprise.splunk.com_ingestorclusters.yaml +- bases/enterprise.splunk.com_busconfigurations.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git a/config/rbac/busconfiguration_editor_role.yaml b/config/rbac/busconfiguration_editor_role.yaml new file mode 100644 index 000000000..fde8687f7 --- /dev/null +++ b/config/rbac/busconfiguration_editor_role.yaml @@ -0,0 +1,30 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: busconfiguration-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations/status + verbs: + - get diff --git a/config/rbac/busconfiguration_viewer_role.yaml b/config/rbac/busconfiguration_viewer_role.yaml new file mode 100644 index 000000000..6230863a9 --- /dev/null +++ b/config/rbac/busconfiguration_viewer_role.yaml @@ -0,0 +1,26 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: busconfiguration-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - busconfigurations/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index fc8513023..78231b303 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -47,6 +47,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: + - busconfigurations - clustermanagers - clustermasters - indexerclusters @@ -67,6 +68,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: + - busconfigurations/finalizers - clustermanagers/finalizers - clustermasters/finalizers - indexerclusters/finalizers @@ -81,6 +83,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: + - busconfigurations/status - clustermanagers/status - clustermasters/status - indexerclusters/status diff --git a/config/samples/enterprise_v4_busconfiguration.yaml b/config/samples/enterprise_v4_busconfiguration.yaml new file mode 100644 index 000000000..0cc1aed31 --- /dev/null +++ b/config/samples/enterprise_v4_busconfiguration.yaml @@ -0,0 +1,8 @@ +apiVersion: enterprise.splunk.com/v4 +kind: BusConfiguration +metadata: + name: busconfiguration-sample + finalizers: + - "enterprise.splunk.com/delete-pvc" +spec: {} +# TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 9a86043e0..88c71025d 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -14,4 +14,5 @@ resources: - enterprise_v4_clustermanager.yaml - enterprise_v4_licensemanager.yaml - enterprise_v4_ingestorcluster.yaml +- enterprise_v4_busconfiguration.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index af6298152..ae77f17f9 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -12,22 +12,13 @@ This separation enables: > [!WARNING] > **As of now, only brand new deployments are supported for Index and Ingestion Separation. No migration path is implemented, described or tested for existing deployments to move from a standard model to Index & Ingestion separation model.** +# BusConfiguration -# IngestorCluster - -IngestorCluster is introduced for high‑throughput data ingestion into a durable message bus. Its Splunk pods are configured to receive events (outputs.conf) and publish them to a message bus. +BusConfiguration is introduced to store message bus configuration to be shared among IngestorCluster and IndexerCluster. ## Spec -In addition to common spec inputs, the IngestorCluster resource provides the following Spec configuration parameters. - -| Key | Type | Description | -| ---------- | ------- | ------------------------------------------------- | -| replicas | integer | The number of replicas (defaults to 3) | -| pushBus | PushBus | Message bus configuration for publishing messages (required) | -| pipelineConfig | PipelineConfig | Configuration for pipeline (required) | - -PushBus inputs can be found in the table below. As of now, only SQS type of message bus is supported. +BusConfiguration inputs can be found in the table below. As of now, only SQS type of message bus is supported. 
| Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | @@ -40,33 +31,48 @@ SQS message bus inputs can be found in the table below. | ---------- | ------- | ------------------------------------------------- | | queueName | string | Name of the SQS queue | | authRegion | string | Region where the SQS queue is located | -| endpoint | string | AWS SQS endpoint (e.g. https://sqs.us-west-2.amazonaws.com) | -| largeMessageStoreEndpoint | string | AWS S3 Large Message Store endpoint (e.g. https://s3.us-west-2.amazonaws.com) | -| largeMessageStorePath | string | S3 path for Large Message Store (e.g. s3://bucket-name/directory) | +| endpoint | string | AWS SQS endpoint +| largeMessageStoreEndpoint | string | AWS S3 Large Message Store endpoint | +| largeMessageStorePath | string | S3 path for Large Message Store | | deadLetterQueueName | string | Name of the SQS dead letter queue | -| maxRetriesPerPart | integer | Max retries per part for max_count retry policy | -| retryPolicy | string | Retry policy (e.g. max_count) | -| sendInterval | string | Send interval (e.g. 5s) | -| encodingFormat | string | Encoding format (e.g. s2s) | -PipelineConfig inputs can be found in the table below. +Change of any of the bus inputs does not restart Splunk. It just updates the config values with no disruptions. + +## Example +``` +apiVersion: enterprise.splunk.com/v4 +kind: BusConfiguration +metadata: + name: bus-config +spec: + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test +``` + +# IngestorCluster + +IngestorCluster is introduced for high‑throughput data ingestion into a durable message bus. Its Splunk pods are configured to receive events (outputs.conf) and publish them to a message bus. + +## Spec + +In addition to common spec inputs, the IngestorCluster resource provides the following Spec configuration parameters. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| remoteQueueRuleset | bool | Disable remote queue ruleset | -| ruleSet | bool | Disable rule set | -| remoteQueueTyping | bool | Disable remote queue typing | -| remoteQueueOutput | bool | Disable remote queue output | -| typing | bool | Disable typing | -| indexerPipe | bool | Disable indexer pipe | +| replicas | integer | The number of replicas (defaults to 3) | +| busConfigurationRef | corev1.ObjectReference | Message bus configuration reference | ## Example -The example presented below configures IngestorCluster named ingestor with Splunk 9.4.4 image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestion-role-sa allowing it to perform SQS and S3 operations. Push Bus and Pipeline Config inputs allow the user to specify queue and bucket settings for the ingestion process. +The example presented below configures IngestorCluster named ingestor with Splunk 10.0.0 image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. 
Push Bus reference allows the user to specify queue and bucket settings for the ingestion process. -In this case, it is the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Retry policy is set to max count with max retries per part equal to 4 and send interval set to 5 seconds. Pipeline config either enables (false) or disables (true) settings such as remote queue ruleset, ruleset, remote quee typing, typing, remote queue output and indexer pipe. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. - -Change of any of the pushBus or pipelineConfig inputs does not restart Splunk. It just updates the config values with no disruptions. +In this case, the setup the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. ``` apiVersion: enterprise.splunk.com/v4 @@ -76,29 +82,11 @@ metadata: finalizers: - enterprise.splunk.com/delete-pvc spec: - serviceAccount: ingestion-sa + serviceAccount: ingestor-sa replicas: 3 - image: splunk/splunk:9.4.4 - pushBus: - type: sqs_smartbus - sqs: - queueName: sqs-test - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test - maxRetriesPerPart: 4 - retryPolicy: max_count - sendInterval: 5s - encodingFormat: s2s - pipelineConfig: - remoteQueueRuleset: false - ruleSet: true - remoteQueueTyping: false - remoteQueueOutput: false - typing: true - indexerPipe: true + image: splunk/splunk:10.0.0 + busConfigurationRef: + name: bus-config ``` # IndexerCluster @@ -112,49 +100,13 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| pullBus | PushBus | Message bus configuration for pulling messages (required) | -| pipelineConfig | PipelineConfig | Configuration for pipeline (required) | - -PullBus inputs can be found in the table below. As of now, only SQS type of message bus is supported. - -| Key | Type | Description | -| ---------- | ------- | ------------------------------------------------- | -| type | string | Type of message bus (Only sqs_smartbus as of now) | -| sqs | SQS | SQS message bus inputs | - -SQS message bus inputs can be found in the table below. - -| Key | Type | Description | -| ---------- | ------- | ------------------------------------------------- | -| queueName | string | Name of SQS queue | -| authRegion | string | Region where the SQS is located | -| endpoint | string | AWS SQS endpoint (e.g. https://sqs.us-west-2.amazonaws.com) | -| largeMessageStoreEndpoint | string | AWS S3 Large Message Store endpoint (e.g. https://s3.us-west-2.amazonaws.com) | -| largeMessageStorePath | string | S3 path for Large Message Store (e.g. 
s3://bucket-name/directory) | -| deadLetterQueueName | string | Name of SQS dead letter queue | -| maxRetriesPerPart | integer | Max retries per part for max_count retry policy | -| retryPolicy | string | Retry policy (e.g. max_count) | -| sendInterval | string | Send interval (e.g. 5s) | -| encodingFormat | string | Encoding format (e.g. s2s) | - -PipelineConfig inputs can be found in the table below. - -| Key | Type | Description | -| ---------- | ------- | ------------------------------------------------- | -| remoteQueueRuleset | bool | Disable remote queue ruleset | -| ruleSet | bool | Disable rule set | -| remoteQueueTyping | bool | Disable remote queue typing | -| remoteQueueOutput | bool | Disable remote queue output | -| typing | bool | Disable typing | -| indexerPipe | bool | Disable indexer pipe | +| busConfigurationRef | corev1.ObjectReference | Message bus configuration reference | ## Example -The example presented below configures IndexerCluster named indexer with Splunk 9.4.4 image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestion-role-sa allowing it to perform SQS and S3 operations. Pull Bus and Pipeline Config inputs allow the user to specify queue and bucket settings for the indexing process. - -In this case, it is the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Retry policy is set to max count with max retries per part equal to 4 and send interval set to 5 seconds. Pipeline config either enables (false) or disables (true) settings such as remote queue ruleset, ruleset, remote quee typing, typing and remote queue output. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. +The example presented below configures IndexerCluster named indexer with Splunk 10.0.0 image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Pull Bus reference allows the user to specify queue and bucket settings for the indexing process. -Change of any of the pullBus or pipelineConfig inputs does not restart Splunk. It just updates the config values with no disruptions. +In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. 
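For reference, the manifests below attach the bus settings to the IndexerCluster purely by name through `busConfigurationRef`. A minimal controller-runtime sketch of how such a reference could be resolved and mirrored into the CR status is shown here; the helper name `resolveBusConfiguration` is hypothetical and only assumes the `BusConfigurationRef` and `Status.BusConfiguration` fields introduced in this change, not the operator's actual reconciler code.

```go
package example

import (
	"context"
	"fmt"

	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// resolveBusConfiguration fetches the BusConfiguration named by the
// IndexerCluster's busConfigurationRef (defaulting to the CR's own
// namespace) and copies its spec into the IndexerCluster status, which is
// where the SQS settings surface in the custom resource's status output.
func resolveBusConfiguration(ctx context.Context, c client.Client, cr *enterpriseApi.IndexerCluster) error {
	ref := cr.Spec.BusConfigurationRef
	if ref.Name == "" {
		// No bus configuration referenced; nothing to resolve.
		return nil
	}
	ns := ref.Namespace
	if ns == "" {
		ns = cr.Namespace
	}
	var bus enterpriseApi.BusConfiguration
	if err := c.Get(ctx, types.NamespacedName{Name: ref.Name, Namespace: ns}, &bus); err != nil {
		return fmt.Errorf("unable to fetch BusConfiguration %s/%s: %w", ns, ref.Name, err)
	}
	cr.Status.BusConfiguration = bus.Spec
	return nil
}
```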
``` apiVersion: enterprise.splunk.com/v4 @@ -164,8 +116,8 @@ metadata: finalizers: - enterprise.splunk.com/delete-pvc spec: - serviceAccount: ingestion-sa - image: splunk/splunk:9.4.4 + serviceAccount: ingestor-sa + image: splunk/splunk:10.0.0 --- apiVersion: enterprise.splunk.com/v4 kind: IndexerCluster @@ -176,34 +128,17 @@ metadata: spec: clusterManagerRef: name: cm - serviceAccount: ingestion-role-sa + serviceAccount: ingestor-sa replicas: 3 - image: splunk/splunk:9.4.4 - pullBus: - type: sqs_smartbus - sqs: - queueName: sqs-test - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test - maxRetriesPerPart: 4 - retryPolicy: max_count - sendInterval: 5s - encodingFormat: s2s - pipelineConfig: - remoteQueueRuleset: false - ruleSet: true - remoteQueueTyping: false - remoteQueueOutput: false - typing: true + image: splunk/splunk:10.0.0 + busConfigurationRef: + name: bus-config ``` # Common Spec -The spec section is used to define the desired state for a resource. All custom resources provided by the Splunk Operator include the following -configuration parameters. +The spec section is used to define the desired state for a resource. All custom resources provided by the Splunk Operator (with an exception for BusConfiguration) include the following +configuration parameters. | Key | Type | Description | | --------------------- | ---------- | ---------------------------------------------------------------------------------------------------------- | @@ -243,34 +178,30 @@ An IngestorCluster template has been added to the splunk/splunk-enterprise Helm ## Example -Below examples describe how to define values for IngestorCluster and IndexerCluster similarly to the above yaml files specifications. +Below examples describe how to define values for BusConfiguration, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. 
+ +``` +busConfiguration:: + enabled: true + name: bus-config + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test +``` ``` ingestorCluster: enabled: true name: ingestor replicaCount: 3 - serviceAccount: ingestion-role-sa - pipelineConfig: - remoteQueueRuleset: false - ruleSet: true - remoteQueueTyping: false - remoteQueueOutput: false - typing: true - indexerPipe: true - pushBus: - type: sqs_smartbus - sqs: - queueName: ing-ind-separation-q - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ing-ind-separation/smartbus-test - deadLetterQueueName: ing-ind-separation-dlq - maxRetriesPerPart: 4 - retryPolicy: max_count - sendInterval: 5s - encodingFormat: s2s + serviceAccount: ingestor-sa + busConfigurationRef: + name: bus-config ``` ``` @@ -278,35 +209,17 @@ clusterManager: enabled: true name: cm replicaCount: 1 - serviceAccount: ingestion-role-sa + serviceAccount: ingestor-sa indexerCluster: enabled: true name: indexer replicaCount: 3 - serviceAccount: ingestion-role-sa + serviceAccount: ingestor-sa clusterManagerRef: name: cm - pipelineConfig: - remoteQueueRuleset: false - ruleSet: true - remoteQueueTyping: false - remoteQueueOutput: false - typing: true - indexerPipe: true - pullBus: - type: sqs_smartbus - sqs: - queueName: ing-ind-separation-q - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ing-ind-separation/smartbus-test - deadLetterQueueName: ing-ind-separation-dlq - maxRetriesPerPart: 4 - retryPolicy: max_count - sendInterval: 5s - encodingFormat: s2s + busConfigurationRef: + name: bus-config ``` # Service Account @@ -315,7 +228,7 @@ To be able to configure ingestion and indexing resources correctly in a secure m ## Example -The example presented below configures the ingestion-sa service account by using esctl utility. It sets up the service account for cluster-name cluster in region us-west-2 with AmazonS3FullAccess and AmazonSQSFullAccess access policies. +The example presented below configures the ingestor-sa service account by using esctl utility. It sets up the service account for cluster-name cluster in region us-west-2 with AmazonS3FullAccess and AmazonSQSFullAccess access policies. 
``` eksctl create iamserviceaccount \ @@ -361,7 +274,7 @@ $ aws iam get-role --role-name eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1- "Condition": { "StringEquals": { "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:aud": "sts.amazonaws.com", - "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:sub": "system:serviceaccount:default:ingestion-sa" + "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:sub": "system:serviceaccount:default:ingestor-sa" } } } @@ -376,7 +289,7 @@ $ aws iam get-role --role-name eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1- }, { "Key": "alpha.eksctl.io/iamserviceaccount-name", - "Value": "default/ingestion-sa" + "Value": "default/ingestor-sa" }, { "Key": "alpha.eksctl.io/eksctl-version", @@ -503,7 +416,7 @@ $ make install ``` ``` -$ kubectl apply -f SOK_IMAGE_VERSION/splunk-operator-cluster.yaml --server-side +$ kubectl apply -f ${SOK_IMAGE_VERSION}/splunk-operator-cluster.yaml --server-side ``` ``` @@ -558,7 +471,7 @@ $ aws iam get-role --role-name eksctl-ind-ing-sep-demo-addon-iamserviceac-Role1- "Condition": { "StringEquals": { "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:aud": "sts.amazonaws.com", - "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:sub": "system:serviceaccount:default:ingestion-sa" + "oidc.eks.us-west-2.amazonaws.com/id/1234567890123456789012345678901:sub": "system:serviceaccount:default:ingestor-sa" } } } @@ -608,7 +521,69 @@ $ aws iam list-attached-role-policies --role-name eksctl-ind-ing-sep-demo-addon- } ``` -3. Install IngestorCluster resource. +3. Install BusConfiguration resource. + +``` +$ cat bus.yaml +apiVersion: enterprise.splunk.com/v4 +kind: BusConfiguration +metadata: + name: bus + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test +``` + +``` +$ kubectl apply -f bus.yaml +``` + +``` +$ kubectl get busconfiguration +NAME PHASE AGE MESSAGE +bus Ready 20s +``` + +``` +kubectl describe busconfiguration +Name: bus +Namespace: default +Labels: +Annotations: +API Version: enterprise.splunk.com/v4 +Kind: BusConfiguration +Metadata: + Creation Timestamp: 2025-10-27T10:25:53Z + Finalizers: + enterprise.splunk.com/delete-pvc + Generation: 1 + Resource Version: 12345678 + UID: 12345678-1234-5678-1234-012345678911 +Spec: + Sqs: + Auth Region: us-west-2 + Dead Letter Queue Name: sqs-dlq-test + Endpoint: https://sqs.us-west-2.amazonaws.com + Large Message Store Endpoint: https://s3.us-west-2.amazonaws.com + Large Message Store Path: s3://ingestion/smartbus-test + Queue Name: sqs-test + Type: sqs_smartbus +Status: + Message: + Phase: Ready + Resource Rev Map: +Events: +``` + +4. Install IngestorCluster resource. 
``` $ cat ingestor.yaml @@ -621,27 +596,9 @@ metadata: spec: serviceAccount: ingestor-sa replicas: 3 - image: splunk/splunk:9.4.4 - pushBus: - type: sqs_smartbus - sqs: - queueName: ing-ind-separation-q - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ing-ind-separation/smartbus-test - deadLetterQueueName: ing-ind-separation-dlq - maxRetriesPerPart: 4 - retryPolicy: max_count - sendInterval: 5s - encodingFormat: s2s - pipelineConfig: - remoteQueueRuleset: false - ruleSet: true - remoteQueueTyping: false - remoteQueueOutput: false - typing: true - indexerPipe: true + image: splunk/splunk:10.0.0 + busConfigurationRef: + name: bus-config ``` ``` @@ -670,26 +627,10 @@ Metadata: Resource Version: 12345678 UID: 12345678-1234-1234-1234-1234567890123 Spec: - Image: splunk/splunk:9.4.4 - Pipeline Config: - Indexer Pipe: true - Remote Queue Output: false - Remote Queue Ruleset: false - Remote Queue Typing: false - Rule Set: true - Typing: true - Push Bus: - Sqs: - Auth Region: us-west-2 - Dead Letter Queue Name: ing-ind-separation-dlq - Endpoint: https://sqs.us-west-2.amazonaws.com - Large Message Store Endpoint: https://s3.us-west-2.amazonaws.com - Large Message Store Path: s3://ing-ind-separation/smartbus-test - Max Retries Per Part: 4 - Queue Name: ing-ind-separation-q - Retry Policy: max_count - Send Interval: 3s - Type: sqs_smartbus + Bus Configuration Ref: + Name: bus-config + Namespace: default + Image: splunk/splunk:10.0.0 Replicas: 3 Service Account: ingestor-sa Status: @@ -704,6 +645,15 @@ Status: Is Deployment In Progress: false Last App Info Check Time: 0 Version: 0 + Bus Configuration: + Sqs: + Auth Region: us-west-2 + Dead Letter Queue Name: sqs-dlq-test + Endpoint: https://sqs.us-west-2.amazonaws.com + Large Message Store Endpoint: https://s3.us-west-2.amazonaws.com + Large Message Store Path: s3://ingestion/smartbus-test + Queue Name: sqs-test + Type: sqs_smartbus Message: Phase: Ready Ready Replicas: 3 @@ -744,20 +694,20 @@ disabled = true disabled = true sh-4.4$ cat /opt/splunk/etc/system/local/outputs.conf -[remote_queue:ing-ind-separation-q] -remote_queue.max_count.sqs_smartbus.max_retries_per_part = 4 +[remote_queue:sqs-test] +remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4 remote_queue.sqs_smartbus.auth_region = us-west-2 -remote_queue.sqs_smartbus.dead_letter_queue.name = ing-ind-separation-dlq +remote_queue.sqs_smartbus.dead_letter_queue.name = sqs-dlq-test remote_queue.sqs_smartbus.encoding_format = s2s remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com -remote_queue.sqs_smartbus.large_message_store.path = s3://ing-ind-separation/smartbus-test +remote_queue.sqs_smartbus.large_message_store.path = s3://ingestion/smartbus-test remote_queue.sqs_smartbus.retry_policy = max_count remote_queue.sqs_smartbus.send_interval = 5s remote_queue.type = sqs_smartbus ``` -4. Install IndexerCluster resource. +5. Install IndexerCluster resource. 
``` $ cat idxc.yaml @@ -768,7 +718,7 @@ metadata: finalizers: - enterprise.splunk.com/delete-pvc spec: - image: splunk/splunk:9.4.4 + image: splunk/splunk:10.0.0 serviceAccount: ingestor-sa --- apiVersion: enterprise.splunk.com/v4 @@ -778,30 +728,13 @@ metadata: finalizers: - enterprise.splunk.com/delete-pvc spec: - image: splunk/splunk:9.4.4 + image: splunk/splunk:10.0.0 replicas: 3 clusterManagerRef: name: cm serviceAccount: ingestor-sa - pullBus: - type: sqs_smartbus - sqs: - queueName: ing-ind-separation-q - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ing-ind-separation/smartbus-test - deadLetterQueueName: ing-ind-separation-dlq - maxRetriesPerPart: 4 - retryPolicy: max_count - sendInterval: 5s - encodingFormat: s2s - pipelineConfig: - remoteQueueRuleset: false - ruleSet: true - remoteQueueTyping: false - remoteQueueOutput: false - typing: true + busConfigurationRef: + name: bus-config ``` ``` @@ -835,24 +768,24 @@ sh-4.4$ cat /opt/splunk/etc/system/local/inputs.conf [splunktcp://9997] disabled = 0 -[remote_queue:ing-ind-separation-q] -remote_queue.max_count.sqs_smartbus.max_retries_per_part = 4 +[remote_queue:sqs-test] +remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4 remote_queue.sqs_smartbus.auth_region = us-west-2 -remote_queue.sqs_smartbus.dead_letter_queue.name = ing-ind-separation-dlq +remote_queue.sqs_smartbus.dead_letter_queue.name = sqs-dlq-test remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com -remote_queue.sqs_smartbus.large_message_store.path = s3://ing-ind-separation/smartbus-test +remote_queue.sqs_smartbus.large_message_store.path = s3://ingestion/smartbus-test remote_queue.sqs_smartbus.retry_policy = max_count remote_queue.type = sqs_smartbus sh-4.4$ cat /opt/splunk/etc/system/local/outputs.conf -[remote_queue:ing-ind-separation-q] -remote_queue.max_count.sqs_smartbus.max_retries_per_part = 4 +[remote_queue:sqs-test] +remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4 remote_queue.sqs_smartbus.auth_region = us-west-2 -remote_queue.sqs_smartbus.dead_letter_queue.name = ing-ind-separation-dlq +remote_queue.sqs_smartbus.dead_letter_queue.name = sqs-dlq-test remote_queue.sqs_smartbus.encoding_format = s2s remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com -remote_queue.sqs_smartbus.large_message_store.path = s3://ing-ind-separation/smartbus-test +remote_queue.sqs_smartbus.large_message_store.path = s3://ingestion/smartbus-test remote_queue.sqs_smartbus.retry_policy = max_count remote_queue.sqs_smartbus.send_interval = 5s remote_queue.type = sqs_smartbus @@ -873,7 +806,7 @@ disabled = false disabled = true ``` -5. Install Horizontal Pod Autoscaler for IngestorCluster. +6. Install Horizontal Pod Autoscaler for IngestorCluster. ``` $ cat hpa-ing.yaml @@ -956,7 +889,7 @@ NAME REFERENCE TARGETS MINPODS MAXPODS REPLICA ing-hpa IngestorCluster/ingestor cpu: 115%/50% 3 10 10 8m54s ``` -6. Generate fake load. +7. Generate fake load. 
- HEC_TOKEN: HEC token for making fake calls @@ -1117,7 +1050,7 @@ splunk-ingestor-ingestor-2 1/1 Running 0 45m ``` ``` -$ aws s3 ls s3://ing-ind-separation/smartbus-test/ +$ aws s3 ls s3://ingestion/smartbus-test/ PRE 29DDC1B4-D43E-47D1-AC04-C87AC7298201/ PRE 43E16731-7146-4397-8553-D68B5C2C8634/ PRE C8A4D060-DE0D-4DCB-9690-01D8902825DC/ diff --git a/go.mod b/go.mod index e499de5bf..6efe24ecb 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,7 @@ require ( github.com/google/uuid v1.6.0 github.com/joho/godotenv v1.5.1 github.com/minio/minio-go/v7 v7.0.16 + github.com/onsi/ginkgo v1.16.5 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.38.0 github.com/pkg/errors v0.9.1 @@ -103,7 +104,6 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/onsi/ginkgo v1.16.5 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect diff --git a/go.sum b/go.sum index f91f845f7..4083e6e70 100644 --- a/go.sum +++ b/go.sum @@ -250,6 +250,7 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= @@ -384,8 +385,6 @@ golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -505,6 +504,7 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git 
a/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml new file mode 100644 index 000000000..2a746968e --- /dev/null +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml @@ -0,0 +1,40 @@ +{{- if .Values.busConfiguration }} +{{- if .Values.busConfiguration.enabled }} +apiVersion: enterprise.splunk.com/v4 +kind: BusConfiguration +metadata: + name: {{ .Values.busConfiguration.name }} + namespace: {{ default .Release.Namespace .Values.busConfiguration.namespaceOverride }} + {{- with .Values.busConfiguration.additionalLabels }} + labels: +{{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.busConfiguration.additionalAnnotations }} + annotations: +{{ toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.busConfiguration.type | quote }} + {{- with .Values.busConfiguration.sqs }} + sqs: + {{- if .queueName }} + queueName: {{ .queueName | quote }} + {{- end }} + {{- if .authRegion }} + authRegion: {{ .authRegion | quote }} + {{- end }} + {{- if .endpoint }} + endpoint: {{ .endpoint | quote }} + {{- end }} + {{- if .largeMessageStoreEndpoint }} + largeMessageStoreEndpoint: {{ .largeMessageStoreEndpoint | quote }} + {{- end }} + {{- if .largeMessageStorePath }} + largeMessageStorePath: {{ .largeMessageStorePath | quote }} + {{- end }} + {{- if .deadLetterQueueName }} + deadLetterQueueName: {{ .deadLetterQueueName | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index e10a25d34..c201d186d 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: List items: -{{- range default (default (until 1) .Values.sva.c3.indexerClusters) .Values.sva.m4.indexerClusters }} +{{- range default (default (list (dict "name" .Values.indexerCluster.name)) .Values.sva.c3.indexerClusters) .Values.sva.m4.indexerClusters }} - apiVersion: enterprise.splunk.com/v4 kind: IndexerCluster metadata: @@ -147,7 +147,7 @@ items: {{ toYaml . | indent 6 }} {{- end }} {{- end }} - {{- if and ($.Values.sva.m4.enabled) (.zone) }} + {{- if and ($.Values.sva.m4.enabled) (.name) }} affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -156,70 +156,19 @@ items: - key: topology.kubernetes.io/zone operator: In values: - - {{ .zone }} + - {{ .name }} {{- else }} {{- with $.Values.indexerCluster.affinity }} affinity: {{ toYaml . 
| indent 6 }} {{- end }} {{- end }} - {{- with $.Values.indexerCluster.pipelineConfig }} - pipelineConfig: - {{- if .remoteQueueRuleset }} - remoteQueueRuleset: {{ .remoteQueueRuleset }} - {{- end }} - {{- if .ruleSet }} - ruleSet: {{ .ruleSet }} - {{- end }} - {{- if .remoteQueueTyping }} - remoteQueueTyping: {{ .remoteQueueTyping }} - {{- end }} - {{- if .remoteQueueOutput }} - remoteQueueOutput: {{ .remoteQueueOutput }} - {{- end }} - {{- if .typing }} - typing: {{ .typing }} - {{- end }} - {{- if .indexerPipe }} - indexerPipe: {{ .indexerPipe }} - {{- end }} - {{- end }} - {{- with $.Values.indexerCluster.pullBus }} - pullBus: - type: {{ .type | quote }} - {{- with .sqs }} - sqs: - {{- if .queueName }} - queueName: {{ .queueName | quote }} - {{- end }} - {{- if .authRegion }} - authRegion: {{ .authRegion | quote }} - {{- end }} - {{- if .endpoint }} - endpoint: {{ .endpoint | quote }} - {{- end }} - {{- if .largeMessageStoreEndpoint }} - largeMessageStoreEndpoint: {{ .largeMessageStoreEndpoint | quote }} - {{- end }} - {{- if .largeMessageStorePath }} - largeMessageStorePath: {{ .largeMessageStorePath | quote }} - {{- end }} - {{- if .deadLetterQueueName }} - deadLetterQueueName: {{ .deadLetterQueueName | quote }} - {{- end }} - {{- if not (eq .maxRetriesPerPart nil) }} - maxRetriesPerPart: {{ .maxRetriesPerPart }} - {{- end }} - {{- if .retryPolicy }} - retryPolicy: {{ .retryPolicy | quote }} - {{- end }} - {{- if .sendInterval }} - sendInterval: {{ .sendInterval | quote }} - {{- end }} - {{- if .encodingFormat }} - encodingFormat: {{ .encodingFormat | quote }} - {{- end }} - {{- end }} + {{- with $.Values.indexerCluster.busConfigurationRef }} + busConfigurationRef: + name: {{ .name }} + {{- if .namespace }} + namespace: {{ .namespace }} {{- end }} + {{- end }} {{- end }} {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml index 34ef17d2c..fd72da310 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml @@ -95,62 +95,11 @@ spec: topologySpreadConstraints: {{- toYaml . | nindent 4 }} {{- end }} - {{- with .Values.ingestorCluster.pipelineConfig }} - pipelineConfig: - {{- if hasKey . "remoteQueueRuleset" }} - remoteQueueRuleset: {{ .remoteQueueRuleset }} - {{- end }} - {{- if hasKey . "ruleSet" }} - ruleSet: {{ .ruleSet }} - {{- end }} - {{- if hasKey . "remoteQueueTyping" }} - remoteQueueTyping: {{ .remoteQueueTyping }} - {{- end }} - {{- if hasKey . "remoteQueueOutput" }} - remoteQueueOutput: {{ .remoteQueueOutput }} - {{- end }} - {{- if hasKey . "typing" }} - typing: {{ .typing }} - {{- end }} - {{- if hasKey . 
"indexerPipe" }} - indexerPipe: {{ .indexerPipe }} - {{- end }} - {{- end }} - {{- with .Values.ingestorCluster.pushBus }} - pushBus: - type: {{ .type | quote }} - {{- with .sqs }} - sqs: - {{- if .queueName }} - queueName: {{ .queueName | quote }} - {{- end }} - {{- if .authRegion }} - authRegion: {{ .authRegion | quote }} - {{- end }} - {{- if .endpoint }} - endpoint: {{ .endpoint | quote }} - {{- end }} - {{- if .largeMessageStoreEndpoint }} - largeMessageStoreEndpoint: {{ .largeMessageStoreEndpoint | quote }} - {{- end }} - {{- if .largeMessageStorePath }} - largeMessageStorePath: {{ .largeMessageStorePath | quote }} - {{- end }} - {{- if .deadLetterQueueName }} - deadLetterQueueName: {{ .deadLetterQueueName | quote }} - {{- end }} - {{- if not (eq .maxRetriesPerPart nil) }} - maxRetriesPerPart: {{ .maxRetriesPerPart }} - {{- end }} - {{- if .retryPolicy }} - retryPolicy: {{ .retryPolicy | quote }} - {{- end }} - {{- if .sendInterval }} - sendInterval: {{ .sendInterval | quote }} - {{- end }} - {{- if .encodingFormat }} - encodingFormat: {{ .encodingFormat | quote }} - {{- end }} + {{- with $.Values.ingestorCluster.busConfigurationRef }} + busConfigurationRef: + name: {{ $.Values.ingestorCluster.busConfigurationRef.name }} + {{- if $.Values.ingestorCluster.busConfigurationRef.namespace }} + namespace: {{ $.Values.ingestorCluster.busConfigurationRef.namespace }} {{- end }} {{- end }} {{- with .Values.ingestorCluster.extraEnv }} diff --git a/helm-chart/splunk-enterprise/values.yaml b/helm-chart/splunk-enterprise/values.yaml index 539fb0152..e49073398 100644 --- a/helm-chart/splunk-enterprise/values.yaml +++ b/helm-chart/splunk-enterprise/values.yaml @@ -350,9 +350,7 @@ indexerCluster: # nodeAffinityPolicy: [Honor|Ignore] # optional; beta since v1.26 # nodeTaintsPolicy: [Honor|Ignore] # optional; beta since v1.26 - pipelineConfig: {} - - pullBus: {} + busConfigurationRef: {} searchHeadCluster: @@ -901,6 +899,4 @@ ingestorCluster: affinity: {} - pipelineConfig: {} - - pushBus: {} \ No newline at end of file + busConfigurationRef: {} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/ingestorcluster_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/ingestorcluster_editor_role.yaml new file mode 100644 index 000000000..b161aea9c --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/ingestorcluster_editor_role.yaml @@ -0,0 +1,55 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-ingestorcluster-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . 
}}-ingestorcluster-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/ingestorcluster_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/ingestorcluster_viewer_role.yaml new file mode 100644 index 000000000..47287423f --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/ingestorcluster_viewer_role.yaml @@ -0,0 +1,47 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-ingestorcluster-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-ingestorcluster-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/role.yaml b/helm-chart/splunk-operator/templates/rbac/role.yaml index 2a2869654..e9de8cf44 100644 --- a/helm-chart/splunk-operator/templates/rbac/role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/role.yaml @@ -222,6 +222,32 @@ rules: - get - patch - update +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get + - patch + - update - apiGroups: - enterprise.splunk.com resources: diff --git a/internal/controller/busconfiguration_controller.go b/internal/controller/busconfiguration_controller.go new file mode 100644 index 000000000..c8519c017 --- /dev/null +++ b/internal/controller/busconfiguration_controller.go @@ -0,0 +1,120 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "time" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pkg/errors" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller/common" + metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics" + enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" +) + +// BusConfigurationReconciler reconciles a BusConfiguration object +type BusConfigurationReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the BusConfiguration object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile +func (r *BusConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "BusConfiguration")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "BusConfiguration") + + reqLogger := log.FromContext(ctx) + reqLogger = reqLogger.WithValues("busconfiguration", req.NamespacedName) + + // Fetch the BusConfiguration + instance := &enterpriseApi.BusConfiguration{} + err := r.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8serrors.IsNotFound(err) { + // Request object not found, could have been deleted after + // reconcile request. Owned objects are automatically + // garbage collected. For additional cleanup logic use + // finalizers. Return and don't requeue + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. 
+ return ctrl.Result{}, errors.Wrap(err, "could not load bus configuration data") + } + + // If the reconciliation is paused, requeue + annotations := instance.GetAnnotations() + if annotations != nil { + if _, ok := annotations[enterpriseApi.BusConfigurationPausedAnnotation]; ok { + return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil + } + } + + reqLogger.Info("start", "CR version", instance.GetResourceVersion()) + + result, err := ApplyBusConfiguration(ctx, r.Client, instance) + if result.Requeue && result.RequeueAfter != 0 { + reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) + } + + return result, err +} + +var ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + return enterprise.ApplyBusConfiguration(ctx, client, instance) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *BusConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&enterpriseApi.BusConfiguration{}). + WithEventFilter(predicate.Or( + common.GenerationChangedPredicate(), + common.AnnotationChangedPredicate(), + common.LabelChangedPredicate(), + common.SecretChangedPredicate(), + common.ConfigMapChangedPredicate(), + common.StatefulsetChangedPredicate(), + common.PodChangedPredicate(), + common.CrdChangedPredicate(), + )). + WithOptions(controller.Options{ + MaxConcurrentReconciles: enterpriseApi.TotalWorker, + }). + Complete(r) +} diff --git a/internal/controller/busconfiguration_controller_test.go b/internal/controller/busconfiguration_controller_test.go new file mode 100644 index 000000000..e08154211 --- /dev/null +++ b/internal/controller/busconfiguration_controller_test.go @@ -0,0 +1,242 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller/testutils" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("BusConfiguration Controller", func() { + BeforeEach(func() { + time.Sleep(2 * time.Second) + }) + + AfterEach(func() { + + }) + + Context("BusConfiguration Management", func() { + + It("Get BusConfiguration custom resource should fail", func() { + namespace := "ns-splunk-bus-1" + ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + _, err := GetBusConfiguration("test", nsSpecs.Name) + Expect(err.Error()).Should(Equal("busconfigurations.enterprise.splunk.com \"test\" not found")) + + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Create BusConfiguration custom resource with annotations should pause", func() { + namespace := "ns-splunk-bus-2" + annotations := make(map[string]string) + annotations[enterpriseApi.BusConfigurationPausedAnnotation] = "" + ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + CreateBusConfiguration("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + icSpec, _ := GetBusConfiguration("test", nsSpecs.Name) + annotations = map[string]string{} + icSpec.Annotations = annotations + icSpec.Status.Phase = "Ready" + UpdateBusConfiguration(icSpec, enterpriseApi.PhaseReady) + DeleteBusConfiguration("test", nsSpecs.Name) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Create BusConfiguration custom resource should succeeded", func() { + namespace := "ns-splunk-bus-3" + ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + annotations := make(map[string]string) + CreateBusConfiguration("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + DeleteBusConfiguration("test", nsSpecs.Name) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Cover Unused methods", func() { + namespace := "ns-splunk-bus-4" + ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + ctx := context.TODO() + builder := fake.NewClientBuilder() + c := builder.Build() + instance := 
BusConfigurationReconciler{ + Client: c, + Scheme: scheme.Scheme, + } + request := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + Namespace: namespace, + }, + } + _, err := instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + bcSpec := testutils.NewBusConfiguration("test", namespace, "image") + Expect(c.Create(ctx, bcSpec)).Should(Succeed()) + + annotations := make(map[string]string) + annotations[enterpriseApi.BusConfigurationPausedAnnotation] = "" + bcSpec.Annotations = annotations + Expect(c.Update(ctx, bcSpec)).Should(Succeed()) + + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + annotations = map[string]string{} + bcSpec.Annotations = annotations + Expect(c.Update(ctx, bcSpec)).Should(Succeed()) + + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + bcSpec.DeletionTimestamp = &metav1.Time{} + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + }) + + }) +}) + +func GetBusConfiguration(name string, namespace string) (*enterpriseApi.BusConfiguration, error) { + By("Expecting BusConfiguration custom resource to be retrieved successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + bc := &enterpriseApi.BusConfiguration{} + + err := k8sClient.Get(context.Background(), key, bc) + if err != nil { + return nil, err + } + + return bc, err +} + +func CreateBusConfiguration(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase) *enterpriseApi.BusConfiguration { + By("Expecting BusConfiguration custom resource to be created successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + ingSpec := &enterpriseApi.BusConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + } + + Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed()) + time.Sleep(2 * time.Second) + + bc := &enterpriseApi.BusConfiguration{} + Eventually(func() bool { + _ = k8sClient.Get(context.Background(), key, bc) + if status != "" { + fmt.Printf("status is set to %v", status) + bc.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), bc)).Should(Succeed()) + time.Sleep(2 * time.Second) + } + return true + }, timeout, interval).Should(BeTrue()) + + return bc +} + +func UpdateBusConfiguration(instance *enterpriseApi.BusConfiguration, status enterpriseApi.Phase) *enterpriseApi.BusConfiguration { + By("Expecting BusConfiguration custom resource to be updated successfully") + + key := types.NamespacedName{ + Name: instance.Name, + Namespace: instance.Namespace, + } + + bcSpec := testutils.NewBusConfiguration(instance.Name, instance.Namespace, "image") + bcSpec.ResourceVersion = instance.ResourceVersion + Expect(k8sClient.Update(context.Background(), bcSpec)).Should(Succeed()) + time.Sleep(2 * time.Second) + + bc := &enterpriseApi.BusConfiguration{} + Eventually(func() bool { + _ = k8sClient.Get(context.Background(), key, bc) + if status != "" { + fmt.Printf("status is set to %v", status) + bc.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), bc)).Should(Succeed()) + time.Sleep(2 * time.Second) + } + return true + }, timeout, interval).Should(BeTrue()) + + return bc +} + +func DeleteBusConfiguration(name string, namespace string) { + By("Expecting BusConfiguration custom resource to be deleted successfully") + + key := types.NamespacedName{ + Name: name, + 
Namespace: namespace, + } + + Eventually(func() error { + bc := &enterpriseApi.BusConfiguration{} + _ = k8sClient.Get(context.Background(), key, bc) + err := k8sClient.Delete(context.Background(), bc) + return err + }, timeout, interval).Should(Succeed()) +} diff --git a/internal/controller/indexercluster_controller.go b/internal/controller/indexercluster_controller.go index bc9a6c9f5..3cc840baa 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -31,6 +31,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -171,6 +172,34 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IndexerCluster{}, )). + Watches(&enterpriseApi.BusConfiguration{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + bc, ok := obj.(*enterpriseApi.BusConfiguration) + if !ok { + return nil + } + var list enterpriseApi.IndexerClusterList + if err := r.Client.List(ctx, &list); err != nil { + return nil + } + var reqs []reconcile.Request + for _, ic := range list.Items { + ns := ic.Spec.BusConfigurationRef.Namespace + if ns == "" { + ns = ic.Namespace + } + if ic.Spec.BusConfigurationRef.Name == bc.Name && ns == bc.Namespace { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + } + } + return reqs + }), + ). WithOptions(controller.Options{ MaxConcurrentReconciles: enterpriseApi.TotalWorker, }). diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go index 37413847b..a2c5846df 100644 --- a/internal/controller/ingestorcluster_controller.go +++ b/internal/controller/ingestorcluster_controller.go @@ -24,6 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -140,6 +141,34 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IngestorCluster{}, )). + Watches(&enterpriseApi.BusConfiguration{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + bc, ok := obj.(*enterpriseApi.BusConfiguration) + if !ok { + return nil + } + var list enterpriseApi.IngestorClusterList + if err := r.Client.List(ctx, &list); err != nil { + return nil + } + var reqs []reconcile.Request + for _, ic := range list.Items { + ns := ic.Spec.BusConfigurationRef.Namespace + if ns == "" { + ns = ic.Namespace + } + if ic.Spec.BusConfigurationRef.Name == bc.Name && ns == bc.Namespace { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + } + } + return reqs + }), + ). WithOptions(controller.Options{ MaxConcurrentReconciles: enterpriseApi.TotalWorker, }). 
diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 5cae33ac1..5e7ae4b73 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -184,28 +184,8 @@ func CreateIngestorCluster(name string, namespace string, annotations map[string }, }, Replicas: 3, - PipelineConfig: enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: false, - RuleSet: true, - RemoteQueueTyping: false, - RemoteQueueOutput: false, - Typing: true, - IndexerPipe: true, - }, - PushBus: enterpriseApi.PushBusSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - MaxRetriesPerPart: 4, - RetryPolicy: "max_count", - SendInterval: "5s", - EncodingFormat: "s2s", - }, + BusConfigurationRef: corev1.ObjectReference{ + Name: "busConfig", }, }, } diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go index e963adcbd..9ca78593c 100644 --- a/internal/controller/testutils/new.go +++ b/internal/controller/testutils/new.go @@ -54,28 +54,26 @@ func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { Spec: enterpriseApi.Spec{ImagePullPolicy: string(pullPolicy)}, }, Replicas: 3, - PushBus: enterpriseApi.PushBusSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - MaxRetriesPerPart: 4, - RetryPolicy: "max_count", - SendInterval: "5s", - EncodingFormat: "s2s", - }, + BusConfigurationRef: corev1.ObjectReference{ + Name: "busConfig", }, - PipelineConfig: enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: false, - RuleSet: true, - RemoteQueueTyping: false, - RemoteQueueOutput: false, - Typing: true, - IndexerPipe: true, + }, + } +} + +// NewBusConfiguration returns new BusConfiguration instance with its config hash +func NewBusConfiguration(name, ns, image string) *enterpriseApi.BusConfiguration { + return &enterpriseApi.BusConfiguration{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", }, }, } @@ -315,28 +313,8 @@ func NewIndexerCluster(name, ns, image string) *enterpriseApi.IndexerCluster { ad.Spec = enterpriseApi.IndexerClusterSpec{ CommonSplunkSpec: *cs, - PipelineConfig: enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: false, - RuleSet: true, - RemoteQueueTyping: false, - RemoteQueueOutput: false, - Typing: true, - IndexerPipe: true, - }, - PullBus: enterpriseApi.PushBusSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - 
LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - MaxRetriesPerPart: 4, - RetryPolicy: "max_count", - SendInterval: "5s", - EncodingFormat: "s2s", - }, + BusConfigurationRef: corev1.ObjectReference{ + Name: "busConfig", }, } return ad diff --git a/kuttl/tests/helm/index-and-ingest-separation/00-install-operator.yaml b/kuttl/tests/helm/index-and-ingest-separation/00-install-operator.yaml new file mode 100644 index 000000000..602ebe0c1 --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/00-install-operator.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: ../script/installoperator.sh + background: false \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml new file mode 100644 index 000000000..5ac9b4a7a --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -0,0 +1,118 @@ +--- +# assert for bus configurtion custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: BusConfiguration +metadata: + name: bus-config +spec: + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test +status: + phase: Ready + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: indexer +spec: + replicas: 3 + busConfigurationRef: + name: bus-config +status: + phase: Ready + busConfiguration: + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-indexer-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-indexer-indexer-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: ingestor +spec: + replicas: 3 + busConfigurationRef: + name: bus-config +status: + phase: Ready + busConfiguration: + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-ingestor-ingestor +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: 
Secret +metadata: + name: splunk-ingestor-ingestor-secret-v1 \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml new file mode 100644 index 000000000..0e9f5d58e --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm install splunk-index-ingest-sep $HELM_REPO_PATH/splunk-enterprise -f splunk_index_ingest_sep.yaml + namespaced: true \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml new file mode 100644 index 000000000..daa1ab4ab --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -0,0 +1,30 @@ +--- +# assert for ingestor cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: ingestor +spec: + replicas: 4 + busConfigurationRef: + name: bus-config +status: + phase: Ready + busConfiguration: + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test + +--- +# check for stateful sets and replicas updated +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-ingestor-ingestor +status: + replicas: 4 diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml new file mode 100644 index 000000000..731faf145 --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm upgrade splunk-index-ingest-sep $HELM_REPO_PATH/splunk-enterprise --reuse-values --set ingestorCluster.replicaCount=4 + namespaced: true diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml new file mode 100644 index 000000000..85bf05dfe --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm uninstall splunk-index-ingest-sep + namespaced: true diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml new file mode 100644 index 000000000..6e87733cc --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -0,0 +1,39 @@ +splunk-operator: + enabled: false + splunkOperator: + clusterWideAccess: false + persistentVolumeClaim: + storageClassName: gp2 + +busConfiguration: + enabled: true + name: bus-config + type: sqs_smartbus + sqs: + queueName: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com + largeMessageStorePath: s3://ingestion/smartbus-test + deadLetterQueueName: sqs-dlq-test + +ingestorCluster: + enabled: true + name: ingestor + replicaCount: 3 + busConfigurationRef: + name: bus-config + +clusterManager: + enabled: true + name: cm + replicaCount: 1 + +indexerCluster: + enabled: true + name: 
indexer + replicaCount: 3 + clusterManagerRef: + name: cm + busConfigurationRef: + name: bus-config diff --git a/pkg/splunk/enterprise/afwscheduler_test.go b/pkg/splunk/enterprise/afwscheduler_test.go index 85f1669c0..9fd566d30 100644 --- a/pkg/splunk/enterprise/afwscheduler_test.go +++ b/pkg/splunk/enterprise/afwscheduler_test.go @@ -4232,7 +4232,7 @@ func TestGetTelAppNameExtension(t *testing.T) { "SearchHeadCluster": "shc", "ClusterMaster": "cmaster", "ClusterManager": "cmanager", - "IngestorCluster": "ingestor", + "IngestorCluster": "ingestor", } // Test all CR kinds diff --git a/pkg/splunk/enterprise/busconfiguration.go b/pkg/splunk/enterprise/busconfiguration.go new file mode 100644 index 000000000..43fd35f68 --- /dev/null +++ b/pkg/splunk/enterprise/busconfiguration.go @@ -0,0 +1,140 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enterprise + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ApplyBusConfiguration reconciles the state of an IngestorCluster custom resource +func ApplyBusConfiguration(ctx context.Context, client client.Client, cr *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + var err error + + // Unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("ApplyBusConfiguration") + + if cr.Status.ResourceRevMap == nil { + cr.Status.ResourceRevMap = make(map[string]string) + } + + eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) + + cr.Kind = "BusConfiguration" + + // Initialize phase + cr.Status.Phase = enterpriseApi.PhaseError + + // Update the CR Status + defer updateCRStatus(ctx, client, cr, &err) + + // Validate and updates defaults for CR + err = validateBusConfigurationSpec(ctx, client, cr) + if err != nil { + eventPublisher.Warning(ctx, "validateBusConfigurationSpec", fmt.Sprintf("validate bus configuration spec failed %s", err.Error())) + scopedLog.Error(err, "Failed to validate bus configuration spec") + return result, err + } + + // Check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + terminating, err := splctrl.CheckForDeletion(ctx, cr, client) + if terminating && err != nil { + cr.Status.Phase = enterpriseApi.PhaseTerminating + } else { + result.Requeue = false + } + return result, err + } + + cr.Status.Phase = enterpriseApi.PhaseReady + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. 
+ // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. + if !result.Requeue { + result.RequeueAfter = 0 + } + + return result, nil +} + +// validateBusConfigurationSpec checks validity and makes default updates to a BusConfigurationSpec and returns error if something is wrong +func validateBusConfigurationSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.BusConfiguration) error { + return validateBusConfigurationInputs(cr) +} + +func validateBusConfigurationInputs(cr *enterpriseApi.BusConfiguration) error { + // sqs_smartbus type is supported for now + if cr.Spec.Type != "sqs_smartbus" { + return errors.New("only sqs_smartbus type is supported in bus configuration") + } + + // Cannot be empty fields check + cannotBeEmptyFields := []string{} + if cr.Spec.SQS.QueueName == "" { + cannotBeEmptyFields = append(cannotBeEmptyFields, "queueName") + } + + if cr.Spec.SQS.AuthRegion == "" { + cannotBeEmptyFields = append(cannotBeEmptyFields, "authRegion") + } + + if cr.Spec.SQS.DeadLetterQueueName == "" { + cannotBeEmptyFields = append(cannotBeEmptyFields, "deadLetterQueueName") + } + + if len(cannotBeEmptyFields) > 0 { + return errors.New("bus configuration sqs " + strings.Join(cannotBeEmptyFields, ", ") + " cannot be empty") + } + + // Have to start with https:// or s3:// checks + haveToStartWithHttps := []string{} + if !strings.HasPrefix(cr.Spec.SQS.Endpoint, "https://") { + haveToStartWithHttps = append(haveToStartWithHttps, "endpoint") + } + + if !strings.HasPrefix(cr.Spec.SQS.LargeMessageStoreEndpoint, "https://") { + haveToStartWithHttps = append(haveToStartWithHttps, "largeMessageStoreEndpoint") + } + + if len(haveToStartWithHttps) > 0 { + return errors.New("bus configuration sqs " + strings.Join(haveToStartWithHttps, ", ") + " must start with https://") + } + + if !strings.HasPrefix(cr.Spec.SQS.LargeMessageStorePath, "s3://") { + return errors.New("bus configuration sqs largeMessageStorePath must start with s3://") + } + + return nil +} diff --git a/pkg/splunk/enterprise/busconfiguration_test.go b/pkg/splunk/enterprise/busconfiguration_test.go new file mode 100644 index 000000000..45d19bb40 --- /dev/null +++ b/pkg/splunk/enterprise/busconfiguration_test.go @@ -0,0 +1,151 @@ +/* +Copyright 2025. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enterprise + +import ( + "context" + "os" + "path/filepath" + "testing" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func init() { + GetReadinessScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + readinessScriptLocation) + return fileLocation + } + GetLivenessScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + livenessScriptLocation) + return fileLocation + } + GetStartupScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + startupScriptLocation) + return fileLocation + } +} + +func TestApplyBusConfiguration(t *testing.T) { + os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + ctx := context.TODO() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Object definitions + busConfig := &enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + Namespace: "test", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + c.Create(ctx, busConfig) + + // ApplyBusConfiguration + result, err := ApplyBusConfiguration(ctx, c, busConfig) + assert.NoError(t, err) + assert.True(t, result.Requeue) + assert.NotEqual(t, enterpriseApi.PhaseError, busConfig.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, busConfig.Status.Phase) +} + +func TestValidateBusConfigurationInputs(t *testing.T) { + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "othertype", + SQS: enterpriseApi.SQSSpec{}, + }, + } + + err := validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "only sqs_smartbus type is supported in bus configuration", err.Error()) + + busConfig.Spec.Type = "sqs_smartbus" + + err = validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "bus configuration sqs queueName, authRegion, deadLetterQueueName cannot be empty", err.Error()) + + busConfig.Spec.SQS.AuthRegion = "us-west-2" + + err = validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "bus configuration sqs queueName, deadLetterQueueName cannot be empty", err.Error()) + + busConfig.Spec.SQS.QueueName = "test-queue" + busConfig.Spec.SQS.DeadLetterQueueName = "dlq-test" + busConfig.Spec.SQS.AuthRegion = "" + + err = validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "bus configuration sqs authRegion cannot be empty", err.Error()) + + busConfig.Spec.SQS.AuthRegion = "us-west-2" + + err = validateBusConfigurationInputs(&busConfig) + 
assert.NotNil(t, err) + assert.Equal(t, "bus configuration sqs endpoint, largeMessageStoreEndpoint must start with https://", err.Error()) + + busConfig.Spec.SQS.Endpoint = "https://sqs.us-west-2.amazonaws.com" + busConfig.Spec.SQS.LargeMessageStoreEndpoint = "https://s3.us-west-2.amazonaws.com" + + err = validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "bus configuration sqs largeMessageStorePath must start with s3://", err.Error()) + + busConfig.Spec.SQS.LargeMessageStorePath = "ingestion/smartbus-test" + + err = validateBusConfigurationInputs(&busConfig) + assert.NotNil(t, err) + assert.Equal(t, "bus configuration sqs largeMessageStorePath must start with s3://", err.Error()) + + busConfig.Spec.SQS.LargeMessageStorePath = "s3://ingestion/smartbus-test" + + err = validateBusConfigurationInputs(&busConfig) + assert.Nil(t, err) +} diff --git a/pkg/splunk/enterprise/configuration_test.go b/pkg/splunk/enterprise/configuration_test.go index 98add6bd2..8e5461413 100644 --- a/pkg/splunk/enterprise/configuration_test.go +++ b/pkg/splunk/enterprise/configuration_test.go @@ -1830,4 +1830,4 @@ func TestGetSplunkPorts(t *testing.T) { test(SplunkIndexer) test(SplunkIngestor) test(SplunkMonitoringConsole) -} \ No newline at end of file +} diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index cd2ffea5a..a3faa2446 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -19,6 +19,7 @@ import ( "context" "errors" "fmt" + "reflect" "regexp" "sort" "strconv" @@ -76,6 +77,9 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError + if cr.Status.Replicas < cr.Spec.Replicas { + cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { @@ -241,16 +245,36 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - if cr.Spec.PullBus.Type != "" { - err = mgr.handlePullBusOrPipelineConfigChange(ctx, cr, client) + // Bus config + busConfig := enterpriseApi.BusConfiguration{} + if cr.Spec.BusConfigurationRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.BusConfigurationRef.Namespace != "" { + ns = cr.Spec.BusConfigurationRef.Namespace + } + err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.BusConfigurationRef.Name, + Namespace: ns, + }, &busConfig) if err != nil { - scopedLog.Error(err, "Failed to update conf file for PullBus/Pipeline config change after pod creation") return result, err } } - cr.Status.PullBus = cr.Spec.PullBus - cr.Status.PipelineConfig = cr.Spec.PipelineConfig + // If bus config is updated + if cr.Spec.BusConfigurationRef.Name != "" { + if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + + err = mgr.handlePullBusChange(ctx, cr, busConfig, client) + if err != nil { + scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + return result, err + } + + cr.Status.BusConfiguration = busConfig.Spec + } + } //update MC //Retrieve monitoring console ref from CM Spec @@ 
-340,6 +364,9 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // updates status after function completes cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError + if cr.Status.Replicas < cr.Spec.Replicas { + cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { @@ -508,16 +535,36 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - if cr.Spec.PullBus.Type != "" { - err = mgr.handlePullBusOrPipelineConfigChange(ctx, cr, client) + // Bus config + busConfig := enterpriseApi.BusConfiguration{} + if cr.Spec.BusConfigurationRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.BusConfigurationRef.Namespace != "" { + ns = cr.Spec.BusConfigurationRef.Namespace + } + err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.BusConfigurationRef.Name, + Namespace: ns, + }, &busConfig) if err != nil { - scopedLog.Error(err, "Failed to update conf file for PullBus/Pipeline config change after pod creation") return result, err } } - cr.Status.PullBus = cr.Spec.PullBus - cr.Status.PipelineConfig = cr.Spec.PipelineConfig + // If bus config is updated + if cr.Spec.BusConfigurationRef.Name != "" { + if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + + err = mgr.handlePullBusChange(ctx, cr, busConfig, client) + if err != nil { + scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + return result, err + } + + cr.Status.BusConfiguration = busConfig.Spec + } + } //update MC //Retrieve monitoring console ref from CM Spec @@ -1101,86 +1148,9 @@ func validateIndexerClusterSpec(ctx context.Context, c splcommon.ControllerClien return fmt.Errorf("multisite cluster does not support cluster manager to be located in a different namespace") } - err := validateIndexerSpecificInputs(cr) - if err != nil { - return err - } - return validateCommonSplunkSpec(ctx, c, &cr.Spec.CommonSplunkSpec, cr) } -func validateIndexerSpecificInputs(cr *enterpriseApi.IndexerCluster) error { - // Otherwise, it means that no Ingestion & Index separation is applied - if cr.Spec.PullBus != (enterpriseApi.PushBusSpec{}) { - if cr.Spec.PullBus.Type != "sqs_smartbus" { - return errors.New("only sqs_smartbus type is supported in pullBus type") - } - - if cr.Spec.PullBus.SQS == (enterpriseApi.SQSSpec{}) { - return errors.New("pullBus sqs cannot be empty") - } - - // Cannot be empty fields check - cannotBeEmptyFields := []string{} - if cr.Spec.PullBus.SQS.QueueName == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "queueName") - } - - if cr.Spec.PullBus.SQS.AuthRegion == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "authRegion") - } - - if cr.Spec.PullBus.SQS.DeadLetterQueueName == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "deadLetterQueueName") - } - - if len(cannotBeEmptyFields) > 0 { - return errors.New("pullBus sqs " + strings.Join(cannotBeEmptyFields, ", ") + " cannot be empty") - } - - // Have to start with https:// or s3:// checks - haveToStartWithHttps := []string{} - if !strings.HasPrefix(cr.Spec.PullBus.SQS.Endpoint, "https://") { - 
haveToStartWithHttps = append(haveToStartWithHttps, "endpoint") - } - - if !strings.HasPrefix(cr.Spec.PullBus.SQS.LargeMessageStoreEndpoint, "https://") { - haveToStartWithHttps = append(haveToStartWithHttps, "largeMessageStoreEndpoint") - } - - if len(haveToStartWithHttps) > 0 { - return errors.New("pullBus sqs " + strings.Join(haveToStartWithHttps, ", ") + " must start with https://") - } - - if !strings.HasPrefix(cr.Spec.PullBus.SQS.LargeMessageStorePath, "s3://") { - return errors.New("pullBus sqs largeMessageStorePath must start with s3://") - } - - // Assign default values if not provided - if cr.Spec.PullBus.SQS.MaxRetriesPerPart < 0 { - cr.Spec.PullBus.SQS.MaxRetriesPerPart = 4 - } - - if cr.Spec.PullBus.SQS.RetryPolicy == "" { - cr.Spec.PullBus.SQS.RetryPolicy = "max_count" - } - - if cr.Spec.PullBus.SQS.SendInterval == "" { - cr.Spec.PullBus.SQS.SendInterval = "5s" - } - - if cr.Spec.PullBus.SQS.EncodingFormat == "" { - cr.Spec.PullBus.SQS.EncodingFormat = "s2s" - } - - if cr.Spec.PipelineConfig == (enterpriseApi.PipelineConfigSpec{}) { - return errors.New("pipelineConfig spec cannot be empty") - } - } - - return nil -} - // helper function to get the list of IndexerCluster types in the current namespace func getIndexerClusterList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (enterpriseApi.IndexerClusterList, error) { reqLogger := log.FromContext(ctx) @@ -1259,14 +1229,14 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri return extractedValue } -var newSplunkClientForPullBusPipeline = splclient.NewSplunkClient +var newSplunkClientForBusPipeline = splclient.NewSplunkClient // Checks if only PullBus or Pipeline config changed, and updates the conf file if so -func (mgr *indexerClusterPodManager) handlePullBusOrPipelineConfigChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, k8s client.Client) error { +func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, busConfig enterpriseApi.BusConfiguration, k8s client.Client) error { // Only update config for pods that exist readyReplicas := newCR.Status.ReadyReplicas - // List all pods for this IngestorCluster StatefulSet + // List all pods for this IndexerCluster StatefulSet var updateErr error for n := 0; n < int(readyReplicas); n++ { memberName := GetSplunkStatefulsetPodName(SplunkIndexer, newCR.GetName(), int32(n)) @@ -1275,31 +1245,30 @@ func (mgr *indexerClusterPodManager) handlePullBusOrPipelineConfigChange(ctx con if err != nil { return err } - splunkClient := newSplunkClientForPullBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) + splunkClient := newSplunkClientForBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (newCR.Spec.PullBus.SQS.QueueName != "" && newCR.Status.PullBus.SQS.QueueName != "" && newCR.Spec.PullBus.SQS.QueueName != newCR.Status.PullBus.SQS.QueueName) || - (newCR.Spec.PullBus.Type != "" && newCR.Status.PullBus.Type != "" && newCR.Spec.PullBus.Type != newCR.Status.PullBus.Type) || - (newCR.Spec.PullBus.SQS.RetryPolicy != "" && newCR.Status.PullBus.SQS.RetryPolicy != "" && newCR.Spec.PullBus.SQS.RetryPolicy != newCR.Status.PullBus.SQS.RetryPolicy) { - if err := splunkClient.DeleteConfFileProperty("outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.PullBus.SQS.QueueName)); err != nil { + if (busConfig.Spec.SQS.QueueName != "" && 
newCR.Status.BusConfiguration.SQS.QueueName != "" && busConfig.Spec.SQS.QueueName != newCR.Status.BusConfiguration.SQS.QueueName) || + (busConfig.Spec.Type != "" && newCR.Status.BusConfiguration.Type != "" && busConfig.Spec.Type != newCR.Status.BusConfiguration.Type) { + if err := splunkClient.DeleteConfFileProperty("outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { updateErr = err } - if err := splunkClient.DeleteConfFileProperty("inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.PullBus.SQS.QueueName)); err != nil { + if err := splunkClient.DeleteConfFileProperty("inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { updateErr = err } afterDelete = true } - pullBusChangedFieldsInputs, pullBusChangedFieldsOutputs, pipelineChangedFields := getChangedPullBusAndPipelineFieldsIndexer(&newCR.Status, newCR, afterDelete) + busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&busConfig, newCR, afterDelete) - for _, pbVal := range pullBusChangedFieldsOutputs { - if err := splunkClient.UpdateConfFile("outputs", fmt.Sprintf("remote_queue:%s", newCR.Spec.PullBus.SQS.QueueName), [][]string{pbVal}); err != nil { + for _, pbVal := range busChangedFieldsOutputs { + if err := splunkClient.UpdateConfFile("outputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { updateErr = err } } - for _, pbVal := range pullBusChangedFieldsInputs { - if err := splunkClient.UpdateConfFile("inputs", fmt.Sprintf("remote_queue:%s", newCR.Spec.PullBus.SQS.QueueName), [][]string{pbVal}); err != nil { + for _, pbVal := range busChangedFieldsInputs { + if err := splunkClient.UpdateConfFile("inputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { updateErr = err } } @@ -1315,18 +1284,16 @@ func (mgr *indexerClusterPodManager) handlePullBusOrPipelineConfigChange(ctx con return updateErr } -func getChangedPullBusAndPipelineFieldsIndexer(oldCrStatus *enterpriseApi.IndexerClusterStatus, newCR *enterpriseApi.IndexerCluster, afterDelete bool) (pullBusChangedFieldsInputs, pullBusChangedFieldsOutputs, pipelineChangedFields [][]string) { - // Compare PullBus fields - oldPB := oldCrStatus.PullBus - newPB := newCR.Spec.PullBus - oldPC := oldCrStatus.PipelineConfig - newPC := newCR.Spec.PipelineConfig +func getChangedBusFieldsForIndexer(busConfig *enterpriseApi.BusConfiguration, busConfigIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { + // Compare bus fields + oldPB := busConfigIndexerStatus.Status.BusConfiguration + newPB := busConfig.Spec - // Push all PullBus fields - pullBusChangedFieldsInputs, pullBusChangedFieldsOutputs = pullBusChanged(oldPB, newPB, afterDelete) + // Push all bus fields + busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(&oldPB, &newPB, afterDelete) // Always set all pipeline fields, not just changed ones - pipelineChangedFields = pipelineConfigChanged(oldPC, newPC, oldCrStatus.PullBus.SQS.QueueName != "", true) + pipelineChangedFields = pipelineConfig(true) return } @@ -1342,39 +1309,35 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } -func pullBusChanged(oldPullBus, newPullBus enterpriseApi.PushBusSpec, afterDelete bool) (inputs, outputs 
[][]string) { - if oldPullBus.Type != newPullBus.Type || afterDelete { - inputs = append(inputs, []string{"remote_queue.type", newPullBus.Type}) - } - if oldPullBus.SQS.AuthRegion != newPullBus.SQS.AuthRegion || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", newPullBus.Type), newPullBus.SQS.AuthRegion}) +func pullBusChanged(oldBus, newBus *enterpriseApi.BusConfigurationSpec, afterDelete bool) (inputs, outputs [][]string) { + if oldBus.Type != newBus.Type || afterDelete { + inputs = append(inputs, []string{"remote_queue.type", newBus.Type}) } - if oldPullBus.SQS.Endpoint != newPullBus.SQS.Endpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", newPullBus.Type), newPullBus.SQS.Endpoint}) + if oldBus.SQS.AuthRegion != newBus.SQS.AuthRegion || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", newBus.Type), newBus.SQS.AuthRegion}) } - if oldPullBus.SQS.LargeMessageStoreEndpoint != newPullBus.SQS.LargeMessageStoreEndpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newPullBus.Type), newPullBus.SQS.LargeMessageStoreEndpoint}) + if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", newBus.Type), newBus.SQS.Endpoint}) } - if oldPullBus.SQS.LargeMessageStorePath != newPullBus.SQS.LargeMessageStorePath || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newPullBus.Type), newPullBus.SQS.LargeMessageStorePath}) + if oldBus.SQS.LargeMessageStoreEndpoint != newBus.SQS.LargeMessageStoreEndpoint || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newBus.Type), newBus.SQS.LargeMessageStoreEndpoint}) } - if oldPullBus.SQS.DeadLetterQueueName != newPullBus.SQS.DeadLetterQueueName || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newPullBus.Type), newPullBus.SQS.DeadLetterQueueName}) + if oldBus.SQS.LargeMessageStorePath != newBus.SQS.LargeMessageStorePath || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newBus.Type), newBus.SQS.LargeMessageStorePath}) } - if oldPullBus.SQS.MaxRetriesPerPart != newPullBus.SQS.MaxRetriesPerPart || oldPullBus.SQS.RetryPolicy != newPullBus.SQS.RetryPolicy || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.%s.max_retries_per_part", newPullBus.SQS.RetryPolicy, newPullBus.Type), fmt.Sprintf("%d", newPullBus.SQS.MaxRetriesPerPart)}) - } - if oldPullBus.SQS.RetryPolicy != newPullBus.SQS.RetryPolicy || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.retry_policy", newPullBus.Type), newPullBus.SQS.RetryPolicy}) + if oldBus.SQS.DeadLetterQueueName != newBus.SQS.DeadLetterQueueName || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newBus.Type), newBus.SQS.DeadLetterQueueName}) } + inputs = append(inputs, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", newBus.Type), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", newBus.Type), "max_count"}, + ) outputs = inputs - if oldPullBus.SQS.SendInterval != newPullBus.SQS.SendInterval || afterDelete { - outputs = append(outputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", 
newPullBus.Type), newPullBus.SQS.SendInterval}) - } - if oldPullBus.SQS.EncodingFormat != newPullBus.SQS.EncodingFormat || afterDelete { - outputs = append(outputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", newPullBus.Type), newPullBus.SQS.EncodingFormat}) - } + outputs = append(outputs, + []string{fmt.Sprintf("remote_queue.%s.send_interval", newBus.Type), "5s"}, + []string{fmt.Sprintf("remote_queue.%s.encoding_format", newBus.Type), "s2s"}, + ) return inputs, outputs } diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 6592a7a5c..446a23dcc 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -35,6 +35,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" pkgruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -1342,11 +1343,38 @@ func TestInvalidIndexerClusterSpec(t *testing.T) { func TestGetIndexerStatefulSet(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + cr := enterpriseApi.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", }, + Spec: enterpriseApi.IndexerClusterSpec{ + BusConfigurationRef: corev1.ObjectReference{ + Name: busConfig.Name, + }, + }, } ctx := context.TODO() @@ -2019,73 +2047,97 @@ func TestImageUpdatedTo9(t *testing.T) { } } -func TestGetChangedPullBusAndPipelineFieldsIndexer(t *testing.T) { +func TestGetChangedBusFieldsForIndexer(t *testing.T) { + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + newCR := &enterpriseApi.IndexerCluster{ Spec: enterpriseApi.IndexerClusterSpec{ - PipelineConfig: enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: false, - RuleSet: true, - RemoteQueueTyping: false, - RemoteQueueOutput: false, - Typing: true, - }, - PullBus: enterpriseApi.PushBusSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - MaxRetriesPerPart: 4, - RetryPolicy: "max_count", - SendInterval: "5s", - EncodingFormat: "s2s", - }, + BusConfigurationRef: 
corev1.ObjectReference{ + Name: busConfig.Name, }, }, } - pullBusChangedFieldsInputs, pullBusChangedFieldsOutputs, pipelineChangedFields := getChangedPullBusAndPipelineFieldsIndexer(&newCR.Status, newCR, false) - assert.Equal(t, 8, len(pullBusChangedFieldsInputs)) + busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&busConfig, newCR, false) + assert.Equal(t, 8, len(busChangedFieldsInputs)) assert.Equal(t, [][]string{ - {"remote_queue.type", newCR.Spec.PullBus.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.%s.max_retries_per_part", newCR.Spec.PullBus.SQS.RetryPolicy, newCR.Spec.PullBus.Type), fmt.Sprintf("%d", newCR.Spec.PullBus.SQS.MaxRetriesPerPart)}, - {fmt.Sprintf("remote_queue.%s.retry_policy", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.RetryPolicy}, - }, pullBusChangedFieldsInputs) - - assert.Equal(t, 10, len(pullBusChangedFieldsOutputs)) + {"remote_queue.type", busConfig.Spec.Type}, + {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + }, busChangedFieldsInputs) + + assert.Equal(t, 10, len(busChangedFieldsOutputs)) assert.Equal(t, [][]string{ - {"remote_queue.type", newCR.Spec.PullBus.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.%s.max_retries_per_part", newCR.Spec.PullBus.SQS.RetryPolicy, newCR.Spec.PullBus.Type), fmt.Sprintf("%d", newCR.Spec.PullBus.SQS.MaxRetriesPerPart)}, - {fmt.Sprintf("remote_queue.%s.retry_policy", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.RetryPolicy}, - {fmt.Sprintf("remote_queue.%s.send_interval", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.SendInterval}, - {fmt.Sprintf("remote_queue.%s.encoding_format", 
newCR.Spec.PullBus.Type), "s2s"}, - }, pullBusChangedFieldsOutputs) + {"remote_queue.type", busConfig.Spec.Type}, + {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, + }, busChangedFieldsOutputs) assert.Equal(t, 5, len(pipelineChangedFields)) assert.Equal(t, [][]string{ - {"pipeline:remotequeueruleset", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RemoteQueueRuleset)}, - {"pipeline:ruleset", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RuleSet)}, - {"pipeline:remotequeuetyping", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RemoteQueueTyping)}, - {"pipeline:remotequeueoutput", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RemoteQueueOutput)}, - {"pipeline:typing", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.Typing)}, + {"pipeline:remotequeueruleset", "disabled", "false"}, + {"pipeline:ruleset", "disabled", "true"}, + {"pipeline:remotequeuetyping", "disabled", "false"}, + {"pipeline:remotequeueoutput", "disabled", "false"}, + {"pipeline:typing", "disabled", "true"}, }, pipelineChangedFields) } -func TestHandlePullBusOrPipelineConfigChange(t *testing.T) { +func TestHandlePullBusChange(t *testing.T) { // Object definitions + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + Namespace: "test", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + newCR := &enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IndexerCluster", @@ -2095,26 +2147,8 @@ func TestHandlePullBusOrPipelineConfigChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - PipelineConfig: enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: false, - RuleSet: true, - RemoteQueueTyping: false, - RemoteQueueOutput: false, - Typing: true, - }, - PullBus: enterpriseApi.PushBusSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - MaxRetriesPerPart: 4, - RetryPolicy: "max_count", - SendInterval: "5s", - }, + BusConfigurationRef: corev1.ObjectReference{ 
+ Name: busConfig.Name, }, }, Status: enterpriseApi.IndexerClusterStatus{ @@ -2175,13 +2209,15 @@ func TestHandlePullBusOrPipelineConfigChange(t *testing.T) { // Mock pods c := spltest.NewMockClient() ctx := context.TODO() + c.Create(ctx, &busConfig) + c.Create(ctx, newCR) c.Create(ctx, pod0) c.Create(ctx, pod1) c.Create(ctx, pod2) // Negative test case: secret not found mgr := &indexerClusterPodManager{} - err := mgr.handlePullBusOrPipelineConfigChange(ctx, newCR, c) + err := mgr.handlePullBusChange(ctx, newCR, busConfig, c) assert.NotNil(t, err) // Mock secret @@ -2192,50 +2228,50 @@ func TestHandlePullBusOrPipelineConfigChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestPullBusPipelineManager(mockHTTPClient) - err = mgr.handlePullBusOrPipelineConfigChange(ctx, newCR, c) + err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.auth_region", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.%s.max_retries_per_part", newCR.Spec.PullBus.SQS.RetryPolicy, newCR.Spec.PullBus.Type), fmt.Sprintf("%d", newCR.Spec.PullBus.SQS.MaxRetriesPerPart)}, - {fmt.Sprintf("remote_queue.%s.retry_policy", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.RetryPolicy}, + {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, } propertyKVListOutputs := propertyKVList - propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", newCR.Spec.PullBus.Type), "s2s"}) - propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", newCR.Spec.PullBus.Type), newCR.Spec.PullBus.SQS.SendInterval}) + propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}) + propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}) body := buildFormBody(propertyKVListOutputs) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, busConfig, newCR.Status.ReadyReplicas, "conf-outputs", body) // Negative test case: failure 
in creating remote queue stanza mgr = newTestPullBusPipelineManager(mockHTTPClient) - err = mgr.handlePullBusOrPipelineConfigChange(ctx, newCR, c) + err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) assert.NotNil(t, err) // inputs.conf body = buildFormBody(propertyKVList) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, newCR.Status.ReadyReplicas, "conf-inputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, busConfig, newCR.Status.ReadyReplicas, "conf-inputs", body) // Negative test case: failure in updating remote queue stanza mgr = newTestPullBusPipelineManager(mockHTTPClient) - err = mgr.handlePullBusOrPipelineConfigChange(ctx, newCR, c) + err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) assert.NotNil(t, err) // default-mode.conf propertyKVList = [][]string{ - {"pipeline:remotequeueruleset", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RemoteQueueRuleset)}, - {"pipeline:ruleset", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RuleSet)}, - {"pipeline:remotequeuetyping", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RemoteQueueTyping)}, - {"pipeline:remotequeueoutput", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RemoteQueueOutput)}, - {"pipeline:typing", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.Typing)}, + {"pipeline:remotequeueruleset", "disabled", "false"}, + {"pipeline:ruleset", "disabled", "true"}, + {"pipeline:remotequeuetyping", "disabled", "false"}, + {"pipeline:remotequeueoutput", "disabled", "false"}, + {"pipeline:typing", "disabled", "true"}, } for i := 0; i < int(newCR.Status.ReadyReplicas); i++ { @@ -2254,7 +2290,7 @@ func TestHandlePullBusOrPipelineConfigChange(t *testing.T) { mgr = newTestPullBusPipelineManager(mockHTTPClient) - err = mgr.handlePullBusOrPipelineConfigChange(ctx, newCR, c) + err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) assert.Nil(t, err) } @@ -2272,7 +2308,7 @@ func buildFormBody(pairs [][]string) string { return b.String() } -func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, replicas int32, confName, body string) { +func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, busConfig enterpriseApi.BusConfiguration, replicas int32, confName, body string) { for i := 0; i < int(replicas); i++ { podName := fmt.Sprintf("splunk-%s-indexer-%d", cr.GetName(), i) baseURL := fmt.Sprintf( @@ -2280,18 +2316,18 @@ func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", cr.Spec.PullBus.SQS.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", cr.Spec.PullBus.SQS.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { - newSplunkClientForPullBusPipeline = func(uri, user, pass string) *splclient.SplunkClient { + newSplunkClientForBusPipeline = 
func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, Username: user, @@ -2300,14 +2336,45 @@ func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *inde } } return &indexerClusterPodManager{ - newSplunkClient: newSplunkClientForPullBusPipeline, + newSplunkClient: newSplunkClientForBusPipeline, } } -func TestApplyIndexerClusterManager_PullBusConfig_Success(t *testing.T) { +func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + ctx := context.TODO() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + // Object definitions + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + Namespace: "test", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + c.Create(ctx, &busConfig) + cm := &enterpriseApi.ClusterManager{ TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"}, ObjectMeta: metav1.ObjectMeta{ @@ -2318,6 +2385,7 @@ func TestApplyIndexerClusterManager_PullBusConfig_Success(t *testing.T) { Phase: enterpriseApi.PhaseReady, }, } + c.Create(ctx, cm) cr := &enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{Kind: "IndexerCluster"}, @@ -2327,26 +2395,9 @@ func TestApplyIndexerClusterManager_PullBusConfig_Success(t *testing.T) { }, Spec: enterpriseApi.IndexerClusterSpec{ Replicas: 1, - PipelineConfig: enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: false, - RuleSet: true, - RemoteQueueTyping: false, - RemoteQueueOutput: false, - Typing: true, - }, - PullBus: enterpriseApi.PushBusSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - MaxRetriesPerPart: 4, - RetryPolicy: "max_count", - SendInterval: "5s", - }, + BusConfigurationRef: corev1.ObjectReference{ + Name: busConfig.Name, + Namespace: busConfig.Namespace, }, CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterManagerRef: corev1.ObjectReference{ @@ -2359,6 +2410,7 @@ func TestApplyIndexerClusterManager_PullBusConfig_Success(t *testing.T) { Phase: enterpriseApi.PhaseReady, }, } + c.Create(ctx, cr) secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -2369,6 +2421,7 @@ func TestApplyIndexerClusterManager_PullBusConfig_Success(t *testing.T) { "password": []byte("dummy"), }, } + c.Create(ctx, secret) cmPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -2392,6 +2445,7 @@ func TestApplyIndexerClusterManager_PullBusConfig_Success(t *testing.T) { }, }, } + c.Create(ctx, cmPod) pod0 := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -2426,6 +2480,7 @@ func TestApplyIndexerClusterManager_PullBusConfig_Success(t *testing.T) { }, }, } + c.Create(ctx, pod0) replicas := int32(1) 
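+ // Pre-creating the StatefulSet and Service on the fake client lets the reconcile reach PhaseReady, the point at which the referenced BusConfiguration is applied to the indexer pods.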
sts := &appsv1.StatefulSet{ @@ -2442,6 +2497,7 @@ func TestApplyIndexerClusterManager_PullBusConfig_Success(t *testing.T) { UpdatedReplicas: 1, }, } + c.Create(ctx, sts) svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -2449,17 +2505,7 @@ func TestApplyIndexerClusterManager_PullBusConfig_Success(t *testing.T) { Namespace: "test", }, } - - // Mock objects - c := spltest.NewMockClient() - ctx := context.TODO() - c.Create(ctx, secret) - c.Create(ctx, cmPod) - c.Create(ctx, pod0) - c.Create(ctx, sts) c.Create(ctx, svc) - c.Create(ctx, cm) - c.Create(ctx, cr) // outputs.conf mockHTTPClient := &spltest.MockHTTPClient{} @@ -2505,78 +2551,3 @@ func mustReq(method, url, body string) *http.Request { } return r } - -func TestValidateIndexerSpecificInputs(t *testing.T) { - cr := &enterpriseApi.IndexerCluster{ - Spec: enterpriseApi.IndexerClusterSpec{ - PullBus: enterpriseApi.PushBusSpec{ - Type: "othertype", - }, - }, - } - - err := validateIndexerSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "only sqs_smartbus type is supported in pullBus type", err.Error()) - - cr.Spec.PullBus.Type = "sqs_smartbus" - - err = validateIndexerSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pullBus sqs cannot be empty", err.Error()) - - cr.Spec.PullBus.SQS.AuthRegion = "us-west-2" - - err = validateIndexerSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pullBus sqs queueName, deadLetterQueueName cannot be empty", err.Error()) - - cr.Spec.PullBus.SQS.QueueName = "test-queue" - cr.Spec.PullBus.SQS.DeadLetterQueueName = "dlq-test" - cr.Spec.PullBus.SQS.AuthRegion = "" - - err = validateIndexerSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pullBus sqs authRegion cannot be empty", err.Error()) - - cr.Spec.PullBus.SQS.AuthRegion = "us-west-2" - - err = validateIndexerSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pullBus sqs endpoint, largeMessageStoreEndpoint must start with https://", err.Error()) - - cr.Spec.PullBus.SQS.Endpoint = "https://sqs.us-west-2.amazonaws.com" - cr.Spec.PullBus.SQS.LargeMessageStoreEndpoint = "https://s3.us-west-2.amazonaws.com" - - err = validateIndexerSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pullBus sqs largeMessageStorePath must start with s3://", err.Error()) - - cr.Spec.PullBus.SQS.LargeMessageStorePath = "ingestion/smartbus-test" - - err = validateIndexerSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pullBus sqs largeMessageStorePath must start with s3://", err.Error()) - - cr.Spec.PullBus.SQS.LargeMessageStorePath = "s3://ingestion/smartbus-test" - cr.Spec.PullBus.SQS.MaxRetriesPerPart = -1 - cr.Spec.PullBus.SQS.RetryPolicy = "" - cr.Spec.PullBus.SQS.SendInterval = "" - cr.Spec.PullBus.SQS.EncodingFormat = "" - - err = validateIndexerSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pipelineConfig spec cannot be empty", err.Error()) - - cr.Spec.PipelineConfig = enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: true, - RemoteQueueTyping: true, - RemoteQueueOutput: true, - Typing: true, - RuleSet: true, - IndexerPipe: true, - } - - err = validateIndexerSpecificInputs(cr) - assert.Nil(t, err) -} diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 826137bc7..ee64bab22 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -18,10 +18,8 @@ package enterprise import ( "context" - "errors" "fmt" "reflect" - "strings" "time" "github.com/go-logr/logr" @@ -60,12 +58,6 @@ 
func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr cr.Kind = "IngestorCluster" - // Initialize phase - cr.Status.Phase = enterpriseApi.PhaseError - - // Update the CR Status - defer updateCRStatus(ctx, client, cr, &err) - // Validate and updates defaults for CR err = validateIngestorClusterSpec(ctx, client, cr) if err != nil { @@ -74,6 +66,15 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr return result, err } + // Initialize phase + cr.Status.Phase = enterpriseApi.PhaseError + + // Update the CR Status + defer updateCRStatus(ctx, client, cr, &err) + + if cr.Status.Replicas < cr.Spec.Replicas { + cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + } cr.Status.Replicas = cr.Spec.Replicas // If needed, migrate the app framework status @@ -97,7 +98,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-ingestor", cr.GetName()) // Create or update general config resources - _, err = ApplySplunkConfig(ctx, client, cr, cr.Spec.CommonSplunkSpec, SplunkIngestor) + namespaceScopedSecret, err := ApplySplunkConfig(ctx, client, cr, cr.Spec.CommonSplunkSpec, SplunkIngestor) if err != nil { scopedLog.Error(err, "create or update general config failed", "error", err.Error()) eventPublisher.Warning(ctx, "ApplySplunkConfig", fmt.Sprintf("create or update general config failed with error %s", err.Error())) @@ -209,23 +210,34 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // No need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - namespaceScopedSecret, err := ApplySplunkConfig(ctx, client, cr, cr.Spec.CommonSplunkSpec, SplunkIngestor) - if err != nil { - scopedLog.Error(err, "create or update general config failed", "error", err.Error()) - eventPublisher.Warning(ctx, "ApplySplunkConfig", fmt.Sprintf("create or update general config failed with error %s", err.Error())) - return result, err + // Bus config + busConfig := enterpriseApi.BusConfiguration{} + if cr.Spec.BusConfigurationRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.BusConfigurationRef.Namespace != "" { + ns = cr.Spec.BusConfigurationRef.Namespace + } + err = client.Get(ctx, types.NamespacedName{ + Name: cr.Spec.BusConfigurationRef.Name, + Namespace: ns, + }, &busConfig) + if err != nil { + return result, err + } } - mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + // If bus config is updated + if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePushBusOrPipelineConfigChange(ctx, cr, client) - if err != nil { - scopedLog.Error(err, "Failed to update conf file for PushBus/Pipeline config change after pod creation") - return result, err - } + err = mgr.handlePushBusChange(ctx, cr, busConfig, client) + if err != nil { + scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + return result, err + } - cr.Status.PushBus = cr.Spec.PushBus - cr.Status.PipelineConfig = cr.Spec.PipelineConfig + cr.Status.BusConfiguration = busConfig.Spec + } // Upgrade fron automated MC to MC CRD namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(SplunkMonitoringConsole, cr.GetNamespace())} @@ -281,89 +293,9 @@ func 
validateIngestorClusterSpec(ctx context.Context, c splcommon.ControllerClie } } - err := validateIngestorSpecificInputs(cr) - if err != nil { - return err - } - return validateCommonSplunkSpec(ctx, c, &cr.Spec.CommonSplunkSpec, cr) } -func validateIngestorSpecificInputs(cr *enterpriseApi.IngestorCluster) error { - if cr.Spec.PushBus == (enterpriseApi.PushBusSpec{}) { - return errors.New("pushBus cannot be empty") - } - - // sqs_smartbus type is supported for now - if cr.Spec.PushBus.Type != "sqs_smartbus" { - return errors.New("only sqs_smartbus type is supported in pushBus type") - } - - if cr.Spec.PushBus.SQS == (enterpriseApi.SQSSpec{}) { - return errors.New("pushBus sqs cannot be empty") - } - - // Cannot be empty fields check - cannotBeEmptyFields := []string{} - if cr.Spec.PushBus.SQS.QueueName == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "queueName") - } - - if cr.Spec.PushBus.SQS.AuthRegion == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "authRegion") - } - - if cr.Spec.PushBus.SQS.DeadLetterQueueName == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "deadLetterQueueName") - } - - if len(cannotBeEmptyFields) > 0 { - return errors.New("pushBus sqs " + strings.Join(cannotBeEmptyFields, ", ") + " cannot be empty") - } - - // Have to start with https:// or s3:// checks - haveToStartWithHttps := []string{} - if !strings.HasPrefix(cr.Spec.PushBus.SQS.Endpoint, "https://") { - haveToStartWithHttps = append(haveToStartWithHttps, "endpoint") - } - - if !strings.HasPrefix(cr.Spec.PushBus.SQS.LargeMessageStoreEndpoint, "https://") { - haveToStartWithHttps = append(haveToStartWithHttps, "largeMessageStoreEndpoint") - } - - if len(haveToStartWithHttps) > 0 { - return errors.New("pushBus sqs " + strings.Join(haveToStartWithHttps, ", ") + " must start with https://") - } - - if !strings.HasPrefix(cr.Spec.PushBus.SQS.LargeMessageStorePath, "s3://") { - return errors.New("pushBus sqs largeMessageStorePath must start with s3://") - } - - // Assign default values if not provided - if cr.Spec.PushBus.SQS.MaxRetriesPerPart < 0 { - cr.Spec.PushBus.SQS.MaxRetriesPerPart = 4 - } - - if cr.Spec.PushBus.SQS.RetryPolicy == "" { - cr.Spec.PushBus.SQS.RetryPolicy = "max_count" - } - - if cr.Spec.PushBus.SQS.SendInterval == "" { - cr.Spec.PushBus.SQS.SendInterval = "5s" - } - - if cr.Spec.PushBus.SQS.EncodingFormat == "" { - cr.Spec.PushBus.SQS.EncodingFormat = "s2s" - } - - // PipelineConfig cannot be empty - if cr.Spec.PipelineConfig == (enterpriseApi.PipelineConfigSpec{}) { - return errors.New("pipelineConfig spec cannot be empty") - } - - return nil -} - // getIngestorStatefulSet returns a Kubernetes StatefulSet object for Splunk Enterprise ingestors func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IngestorCluster) (*appsv1.StatefulSet, error) { ss, err := getSplunkStatefulSet(ctx, client, cr, &cr.Spec.CommonSplunkSpec, SplunkIngestor, cr.Spec.Replicas, []corev1.EnvVar{}) @@ -377,10 +309,10 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie return ss, nil } -// Checks if only PushBus or Pipeline config changed, and updates the conf file if so -func (mgr *ingestorClusterPodManager) handlePushBusOrPipelineConfigChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, k8s client.Client) error { +// Checks if only Bus or Pipeline config changed, and updates the conf file if so +func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, newCR 
*enterpriseApi.IngestorCluster, busConfig enterpriseApi.BusConfiguration, k8s client.Client) error { // Only update config for pods that exist - readyReplicas := newCR.Status.ReadyReplicas + readyReplicas := newCR.Status.Replicas // List all pods for this IngestorCluster StatefulSet var updateErr error @@ -394,19 +326,18 @@ func (mgr *ingestorClusterPodManager) handlePushBusOrPipelineConfigChange(ctx co splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (newCR.Spec.PushBus.SQS.QueueName != "" && newCR.Status.PushBus.SQS.QueueName != "" && newCR.Spec.PushBus.SQS.QueueName != newCR.Status.PushBus.SQS.QueueName) || - (newCR.Spec.PushBus.Type != "" && newCR.Status.PushBus.Type != "" && newCR.Spec.PushBus.Type != newCR.Status.PushBus.Type) || - (newCR.Spec.PushBus.SQS.RetryPolicy != "" && newCR.Status.PushBus.SQS.RetryPolicy != "" && newCR.Spec.PushBus.SQS.RetryPolicy != newCR.Status.PushBus.SQS.RetryPolicy) { - if err := splunkClient.DeleteConfFileProperty("outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.PushBus.SQS.QueueName)); err != nil { + if (busConfig.Spec.SQS.QueueName != "" && newCR.Status.BusConfiguration.SQS.QueueName != "" && busConfig.Spec.SQS.QueueName != newCR.Status.BusConfiguration.SQS.QueueName) || + (busConfig.Spec.Type != "" && newCR.Status.BusConfiguration.Type != "" && busConfig.Spec.Type != newCR.Status.BusConfiguration.Type) { + if err := splunkClient.DeleteConfFileProperty("outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { updateErr = err } afterDelete = true } - pushBusChangedFields, pipelineChangedFields := getChangedPushBusAndPipelineFields(&newCR.Status, newCR, afterDelete) + busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&busConfig, newCR, afterDelete) - for _, pbVal := range pushBusChangedFields { - if err := splunkClient.UpdateConfFile("outputs", fmt.Sprintf("remote_queue:%s", newCR.Spec.PushBus.SQS.QueueName), [][]string{pbVal}); err != nil { + for _, pbVal := range busChangedFields { + if err := splunkClient.UpdateConfFile("outputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { updateErr = err } } @@ -422,18 +353,16 @@ func (mgr *ingestorClusterPodManager) handlePushBusOrPipelineConfigChange(ctx co return updateErr } -// Returns the names of PushBus and PipelineConfig fields that changed between oldCR and newCR. -func getChangedPushBusAndPipelineFields(oldCrStatus *enterpriseApi.IngestorClusterStatus, newCR *enterpriseApi.IngestorCluster, afterDelete bool) (pushBusChangedFields, pipelineChangedFields [][]string) { - oldPB := oldCrStatus.PushBus - newPB := newCR.Spec.PushBus - oldPC := oldCrStatus.PipelineConfig - newPC := newCR.Spec.PipelineConfig +// Returns the bus fields that differ between the referenced BusConfiguration spec and the IngestorCluster status, along with the pipeline fields to set.
+func getChangedBusFieldsForIngestor(busConfig *enterpriseApi.BusConfiguration, busConfigIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { + oldPB := &busConfigIngestorStatus.Status.BusConfiguration + newPB := &busConfig.Spec - // Push changed PushBus fields - pushBusChangedFields = pushBusChanged(oldPB, newPB, afterDelete) + // Push changed bus fields + busChangedFields = pushBusChanged(oldPB, newPB, afterDelete) // Always changed pipeline fields - pipelineChangedFields = pipelineConfigChanged(oldPC, newPC, oldCrStatus.PushBus.SQS.QueueName != "", false) + pipelineChangedFields = pipelineConfig(false) return } @@ -455,59 +384,45 @@ var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.Inges } } -func pipelineConfigChanged(oldPipelineConfig, newPipelineConfig enterpriseApi.PipelineConfigSpec, pushBusStatusExists bool, isIndexer bool) (output [][]string) { - // || !pushBusStatusExists - added to make it work for initial creation because if any field is false (which is the default of bool for Go), then it wouldn't be added to output - if oldPipelineConfig.RemoteQueueRuleset != newPipelineConfig.RemoteQueueRuleset || !pushBusStatusExists { - output = append(output, []string{"pipeline:remotequeueruleset", "disabled", fmt.Sprintf("%t", newPipelineConfig.RemoteQueueRuleset)}) - } - if oldPipelineConfig.RuleSet != newPipelineConfig.RuleSet || !pushBusStatusExists { - output = append(output, []string{"pipeline:ruleset", "disabled", fmt.Sprintf("%t", newPipelineConfig.RuleSet)}) - } - if oldPipelineConfig.RemoteQueueTyping != newPipelineConfig.RemoteQueueTyping || !pushBusStatusExists { - output = append(output, []string{"pipeline:remotequeuetyping", "disabled", fmt.Sprintf("%t", newPipelineConfig.RemoteQueueTyping)}) - } - if oldPipelineConfig.RemoteQueueOutput != newPipelineConfig.RemoteQueueOutput || !pushBusStatusExists { - output = append(output, []string{"pipeline:remotequeueoutput", "disabled", fmt.Sprintf("%t", newPipelineConfig.RemoteQueueOutput)}) - } - if oldPipelineConfig.Typing != newPipelineConfig.Typing || !pushBusStatusExists { - output = append(output, []string{"pipeline:typing", "disabled", fmt.Sprintf("%t", newPipelineConfig.Typing)}) - } - if (oldPipelineConfig.IndexerPipe != newPipelineConfig.IndexerPipe || !pushBusStatusExists) && !isIndexer { - output = append(output, []string{"pipeline:indexerPipe", "disabled", fmt.Sprintf("%t", newPipelineConfig.IndexerPipe)}) +func pipelineConfig(isIndexer bool) (output [][]string) { + output = append(output, + []string{"pipeline:remotequeueruleset", "disabled", "false"}, + []string{"pipeline:ruleset", "disabled", "true"}, + []string{"pipeline:remotequeuetyping", "disabled", "false"}, + []string{"pipeline:remotequeueoutput", "disabled", "false"}, + []string{"pipeline:typing", "disabled", "true"}, + ) + if !isIndexer { + output = append(output, []string{"pipeline:indexerPipe", "disabled", "true"}) } return output } -func pushBusChanged(oldPushBus, newPushBus enterpriseApi.PushBusSpec, afterDelete bool) (output [][]string) { - if oldPushBus.Type != newPushBus.Type || afterDelete { - output = append(output, []string{"remote_queue.type", newPushBus.Type}) - } - if oldPushBus.SQS.EncodingFormat != newPushBus.SQS.EncodingFormat || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.encoding_format", newPushBus.Type), newPushBus.SQS.EncodingFormat}) - } - if oldPushBus.SQS.AuthRegion != newPushBus.SQS.AuthRegion || afterDelete { - 
output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", newPushBus.Type), newPushBus.SQS.AuthRegion}) +func pushBusChanged(oldBus, newBus *enterpriseApi.BusConfigurationSpec, afterDelete bool) (output [][]string) { + if oldBus.Type != newBus.Type || afterDelete { + output = append(output, []string{"remote_queue.type", newBus.Type}) } - if oldPushBus.SQS.Endpoint != newPushBus.SQS.Endpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", newPushBus.Type), newPushBus.SQS.Endpoint}) + if oldBus.SQS.AuthRegion != newBus.SQS.AuthRegion || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", newBus.Type), newBus.SQS.AuthRegion}) } - if oldPushBus.SQS.LargeMessageStoreEndpoint != newPushBus.SQS.LargeMessageStoreEndpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newPushBus.Type), newPushBus.SQS.LargeMessageStoreEndpoint}) + if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", newBus.Type), newBus.SQS.Endpoint}) } - if oldPushBus.SQS.LargeMessageStorePath != newPushBus.SQS.LargeMessageStorePath || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newPushBus.Type), newPushBus.SQS.LargeMessageStorePath}) + if oldBus.SQS.LargeMessageStoreEndpoint != newBus.SQS.LargeMessageStoreEndpoint || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newBus.Type), newBus.SQS.LargeMessageStoreEndpoint}) } - if oldPushBus.SQS.DeadLetterQueueName != newPushBus.SQS.DeadLetterQueueName || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newPushBus.Type), newPushBus.SQS.DeadLetterQueueName}) + if oldBus.SQS.LargeMessageStorePath != newBus.SQS.LargeMessageStorePath || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newBus.Type), newBus.SQS.LargeMessageStorePath}) } - if oldPushBus.SQS.MaxRetriesPerPart != newPushBus.SQS.MaxRetriesPerPart || oldPushBus.SQS.RetryPolicy != newPushBus.SQS.RetryPolicy || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.%s.max_retries_per_part", newPushBus.SQS.RetryPolicy, newPushBus.Type), fmt.Sprintf("%d", newPushBus.SQS.MaxRetriesPerPart)}) - } - if oldPushBus.SQS.RetryPolicy != newPushBus.SQS.RetryPolicy || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.retry_policy", newPushBus.Type), newPushBus.SQS.RetryPolicy}) - } - if oldPushBus.SQS.SendInterval != newPushBus.SQS.SendInterval || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.send_interval", newPushBus.Type), newPushBus.SQS.SendInterval}) + if oldBus.SQS.DeadLetterQueueName != newBus.SQS.DeadLetterQueueName || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newBus.Type), newBus.SQS.DeadLetterQueueName}) } + + output = append(output, + []string{fmt.Sprintf("remote_queue.%s.encoding_format", newBus.Type), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", newBus.Type), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", newBus.Type), "max_count"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", newBus.Type), "5s"}) + return output } diff --git 
a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 99bd7a98c..bee3df4d6 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -31,7 +31,9 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { @@ -53,11 +55,42 @@ func TestApplyIngestorCluster(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") ctx := context.TODO() - c := spltest.NewMockClient() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions + busConfig := &enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + Namespace: "test", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + c.Create(ctx, busConfig) + cr := &enterpriseApi.IngestorCluster{ - TypeMeta: metav1.TypeMeta{Kind: "IngestorCluster"}, + TypeMeta: metav1.TypeMeta{ + Kind: "IngestorCluster", + APIVersion: "enterprise.splunk.com/v4", + }, ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "test", @@ -67,27 +100,9 @@ func TestApplyIngestorCluster(t *testing.T) { CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Mock: true, }, - PipelineConfig: enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: false, - RuleSet: true, - RemoteQueueTyping: false, - RemoteQueueOutput: false, - Typing: true, - IndexerPipe: true, - }, - PushBus: enterpriseApi.PushBusSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - MaxRetriesPerPart: 4, - RetryPolicy: "max_count", - SendInterval: "5s", - }, + BusConfigurationRef: corev1.ObjectReference{ + Name: busConfig.Name, + Namespace: busConfig.Namespace, }, }, } @@ -246,28 +261,28 @@ func TestApplyIngestorCluster(t *testing.T) { defer func() { newIngestorClusterPodManager = origNew }() propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.encoding_format", cr.Spec.PushBus.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", cr.Spec.PushBus.Type), cr.Spec.PushBus.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", cr.Spec.PushBus.Type), cr.Spec.PushBus.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", cr.Spec.PushBus.Type), cr.Spec.PushBus.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", cr.Spec.PushBus.Type), cr.Spec.PushBus.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", cr.Spec.PushBus.Type), cr.Spec.PushBus.SQS.DeadLetterQueueName}, - 
{fmt.Sprintf("remote_queue.%s.%s.max_retries_per_part", cr.Spec.PushBus.SQS.RetryPolicy, cr.Spec.PushBus.Type), fmt.Sprintf("%d", cr.Spec.PushBus.SQS.MaxRetriesPerPart)}, - {fmt.Sprintf("remote_queue.%s.retry_policy", cr.Spec.PushBus.Type), cr.Spec.PushBus.SQS.RetryPolicy}, - {fmt.Sprintf("remote_queue.%s.send_interval", cr.Spec.PushBus.Type), cr.Spec.PushBus.SQS.SendInterval}, + {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, + {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, cr.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, busConfig, cr.Status.ReadyReplicas, "conf-outputs", body) // default-mode.conf propertyKVList = [][]string{ - {"pipeline:remotequeueruleset", "disabled", fmt.Sprintf("%t", cr.Spec.PipelineConfig.RemoteQueueRuleset)}, - {"pipeline:ruleset", "disabled", fmt.Sprintf("%t", cr.Spec.PipelineConfig.RuleSet)}, - {"pipeline:remotequeuetyping", "disabled", fmt.Sprintf("%t", cr.Spec.PipelineConfig.RemoteQueueTyping)}, - {"pipeline:remotequeueoutput", "disabled", fmt.Sprintf("%t", cr.Spec.PipelineConfig.RemoteQueueOutput)}, - {"pipeline:typing", "disabled", fmt.Sprintf("%t", cr.Spec.PipelineConfig.Typing)}, - {"pipeline:indexerPipe", "disabled", fmt.Sprintf("%t", cr.Spec.PipelineConfig.IndexerPipe)}, + {"pipeline:remotequeueruleset", "disabled", "false"}, + {"pipeline:ruleset", "disabled", "true"}, + {"pipeline:remotequeuetyping", "disabled", "false"}, + {"pipeline:remotequeueoutput", "disabled", "false"}, + {"pipeline:typing", "disabled", "true"}, + {"pipeline:indexerPipe", "disabled", "true"}, } for i := 0; i < int(cr.Status.ReadyReplicas); i++ { @@ -295,6 +310,27 @@ func TestGetIngestorStatefulSet(t *testing.T) { // Object definitions os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + cr := enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", @@ -305,27 +341,8 @@ func TestGetIngestorStatefulSet(t *testing.T) { }, Spec: enterpriseApi.IngestorClusterSpec{ Replicas: 2, - PipelineConfig: 
enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: false, - RuleSet: true, - RemoteQueueTyping: false, - RemoteQueueOutput: false, - Typing: true, - IndexerPipe: true, - }, - PushBus: enterpriseApi.PushBusSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - MaxRetriesPerPart: 4, - RetryPolicy: "max_count", - SendInterval: "5s", - }, + BusConfigurationRef: corev1.ObjectReference{ + Name: busConfig.Name, }, }, } @@ -378,74 +395,87 @@ func TestGetIngestorStatefulSet(t *testing.T) { test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-test-ingestor","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"ownerReferences":[{"apiVersion":"","kind":"IngestorCluster","name":"test","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-test-ingestor-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"},{"name":"user-defined","containerPort":32000,"protocol":"UDP"}],"env":[{"name":"TEST_ENV_VAR","value":"test_value"},{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"s
plunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-test-ingestor"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-test-ingestor-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) } -func TestGetChangedPushBusAndPipelineFieldsIngestor(t *testing.T) { - newCR := &enterpriseApi.IngestorCluster{ - Spec: enterpriseApi.IngestorClusterSpec{ - PipelineConfig: enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: false, - RuleSet: true, - RemoteQueueTyping: false, - RemoteQueueOutput: false, - Typing: true, - IndexerPipe: true, - }, - PushBus: enterpriseApi.PushBusSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - MaxRetriesPerPart: 4, - RetryPolicy: "max_count", - SendInterval: "5s", - EncodingFormat: "s2s", - }, +func TestGetChangedBusFieldsForIngestor(t *testing.T) { + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + }, + Spec: 
enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", }, }, - Status: enterpriseApi.IngestorClusterStatus{ - PipelineConfig: enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: true, - RuleSet: false, - RemoteQueueTyping: true, - RemoteQueueOutput: true, - Typing: false, - IndexerPipe: false, + } + + newCR := &enterpriseApi.IngestorCluster{ + Spec: enterpriseApi.IngestorClusterSpec{ + BusConfigurationRef: corev1.ObjectReference{ + Name: busConfig.Name, }, }, + Status: enterpriseApi.IngestorClusterStatus{}, } - pushBusChangedFields, pipelineChangedFields := getChangedPushBusAndPipelineFields(&newCR.Status, newCR, false) + busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&busConfig, newCR, false) - assert.Equal(t, 10, len(pushBusChangedFields)) + assert.Equal(t, 10, len(busChangedFields)) assert.Equal(t, [][]string{ - {"remote_queue.type", newCR.Spec.PushBus.Type}, - {fmt.Sprintf("remote_queue.%s.encoding_format", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.EncodingFormat}, - {fmt.Sprintf("remote_queue.%s.auth_region", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.%s.max_retries_per_part", newCR.Spec.PushBus.SQS.RetryPolicy, newCR.Spec.PushBus.Type), fmt.Sprintf("%d", newCR.Spec.PushBus.SQS.MaxRetriesPerPart)}, - {fmt.Sprintf("remote_queue.%s.retry_policy", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.RetryPolicy}, - {fmt.Sprintf("remote_queue.%s.send_interval", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.SendInterval}, - }, pushBusChangedFields) + {"remote_queue.type", busConfig.Spec.Type}, + {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, + {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + }, busChangedFields) assert.Equal(t, 6, len(pipelineChangedFields)) assert.Equal(t, [][]string{ - {"pipeline:remotequeueruleset", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RemoteQueueRuleset)}, - {"pipeline:ruleset", 
"disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RuleSet)}, - {"pipeline:remotequeuetyping", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RemoteQueueTyping)}, - {"pipeline:remotequeueoutput", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RemoteQueueOutput)}, - {"pipeline:typing", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.Typing)}, - {"pipeline:indexerPipe", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.IndexerPipe)}, + {"pipeline:remotequeueruleset", "disabled", "false"}, + {"pipeline:ruleset", "disabled", "true"}, + {"pipeline:remotequeuetyping", "disabled", "false"}, + {"pipeline:remotequeueoutput", "disabled", "false"}, + {"pipeline:typing", "disabled", "true"}, + {"pipeline:indexerPipe", "disabled", "true"}, }, pipelineChangedFields) } -func TestHandlePushBusOrPipelineConfigChange(t *testing.T) { +func TestHandlePushBusChange(t *testing.T) { // Object definitions + busConfig := enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busConfig", + }, + Spec: enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://ingestion/smartbus-test", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + DeadLetterQueueName: "sqs-dlq-test", + }, + }, + } + newCR := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", @@ -455,30 +485,12 @@ func TestHandlePushBusOrPipelineConfigChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IngestorClusterSpec{ - PipelineConfig: enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: false, - RuleSet: true, - RemoteQueueTyping: false, - RemoteQueueOutput: false, - Typing: true, - IndexerPipe: true, - }, - PushBus: enterpriseApi.PushBusSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - MaxRetriesPerPart: 4, - RetryPolicy: "max_count", - SendInterval: "5s", - }, + BusConfigurationRef: corev1.ObjectReference{ + Name: busConfig.Name, }, }, Status: enterpriseApi.IngestorClusterStatus{ + Replicas: 3, ReadyReplicas: 3, }, } @@ -543,7 +555,7 @@ func TestHandlePushBusOrPipelineConfigChange(t *testing.T) { // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.handlePushBusOrPipelineConfigChange(ctx, newCR, c) + err := mgr.handlePushBusChange(ctx, newCR, busConfig, c) assert.NotNil(t, err) // Mock secret @@ -554,39 +566,39 @@ func TestHandlePushBusOrPipelineConfigChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestPushBusPipelineManager(mockHTTPClient) - err = mgr.handlePushBusOrPipelineConfigChange(ctx, newCR, c) + err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.encoding_format", newCR.Spec.PushBus.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", newCR.Spec.PushBus.Type), 
newCR.Spec.PushBus.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.%s.max_retries_per_part", newCR.Spec.PushBus.SQS.RetryPolicy, newCR.Spec.PushBus.Type), fmt.Sprintf("%d", newCR.Spec.PushBus.SQS.MaxRetriesPerPart)}, - {fmt.Sprintf("remote_queue.%s.retry_policy", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.RetryPolicy}, - {fmt.Sprintf("remote_queue.%s.send_interval", newCR.Spec.PushBus.Type), newCR.Spec.PushBus.SQS.SendInterval}, + {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, + {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &busConfig, newCR.Status.ReadyReplicas, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza mgr = newTestPushBusPipelineManager(mockHTTPClient) - err = mgr.handlePushBusOrPipelineConfigChange(ctx, newCR, c) + err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) assert.NotNil(t, err) // default-mode.conf propertyKVList = [][]string{ - {"pipeline:remotequeueruleset", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RemoteQueueRuleset)}, - {"pipeline:ruleset", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RuleSet)}, - {"pipeline:remotequeuetyping", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RemoteQueueTyping)}, - {"pipeline:remotequeueoutput", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.RemoteQueueOutput)}, - {"pipeline:typing", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.Typing)}, - {"pipeline:indexerPipe", "disabled", fmt.Sprintf("%t", newCR.Spec.PipelineConfig.IndexerPipe)}, + {"pipeline:remotequeueruleset", "disabled", "false"}, + {"pipeline:ruleset", "disabled", "true"}, + {"pipeline:remotequeuetyping", "disabled", "false"}, + {"pipeline:remotequeueoutput", "disabled", "false"}, + {"pipeline:typing", "disabled", "true"}, + {"pipeline:indexerPipe", "disabled", "true"}, } for i := 0; i < int(newCR.Status.ReadyReplicas); i++ { @@ -605,11 +617,11 @@ func TestHandlePushBusOrPipelineConfigChange(t *testing.T) { mgr = newTestPushBusPipelineManager(mockHTTPClient) - err = mgr.handlePushBusOrPipelineConfigChange(ctx, newCR, c) + err = mgr.handlePushBusChange(ctx, newCR, busConfig, c)
assert.Nil(t, err) } -func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, replicas int32, confName, body string) { +func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, busConfig *enterpriseApi.BusConfiguration, replicas int32, confName, body string) { for i := 0; i < int(replicas); i++ { podName := fmt.Sprintf("splunk-%s-ingestor-%d", cr.GetName(), i) baseURL := fmt.Sprintf( @@ -617,11 +629,11 @@ func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, c podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", cr.Spec.PushBus.SQS.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", cr.Spec.PushBus.SQS.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } @@ -640,78 +652,3 @@ func newTestPushBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *inge newSplunkClient: newSplunkClientForPushBusPipeline, } } - -func TestValidateIngestorSpecificInputs(t *testing.T) { - cr := &enterpriseApi.IngestorCluster{ - Spec: enterpriseApi.IngestorClusterSpec{ - PushBus: enterpriseApi.PushBusSpec{ - Type: "othertype", - }, - }, - } - - err := validateIngestorSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "only sqs_smartbus type is supported in pushBus type", err.Error()) - - cr.Spec.PushBus.Type = "sqs_smartbus" - - err = validateIngestorSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pushBus sqs cannot be empty", err.Error()) - - cr.Spec.PushBus.SQS.AuthRegion = "us-west-2" - - err = validateIngestorSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pushBus sqs queueName, deadLetterQueueName cannot be empty", err.Error()) - - cr.Spec.PushBus.SQS.QueueName = "test-queue" - cr.Spec.PushBus.SQS.DeadLetterQueueName = "dlq-test" - cr.Spec.PushBus.SQS.AuthRegion = "" - - err = validateIngestorSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pushBus sqs authRegion cannot be empty", err.Error()) - - cr.Spec.PushBus.SQS.AuthRegion = "us-west-2" - - err = validateIngestorSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pushBus sqs endpoint, largeMessageStoreEndpoint must start with https://", err.Error()) - - cr.Spec.PushBus.SQS.Endpoint = "https://sqs.us-west-2.amazonaws.com" - cr.Spec.PushBus.SQS.LargeMessageStoreEndpoint = "https://s3.us-west-2.amazonaws.com" - - err = validateIngestorSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pushBus sqs largeMessageStorePath must start with s3://", err.Error()) - - cr.Spec.PushBus.SQS.LargeMessageStorePath = "ingestion/smartbus-test" - - err = validateIngestorSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pushBus sqs largeMessageStorePath must start with s3://", err.Error()) - - cr.Spec.PushBus.SQS.LargeMessageStorePath = "s3://ingestion/smartbus-test" - cr.Spec.PushBus.SQS.MaxRetriesPerPart = -1 - cr.Spec.PushBus.SQS.RetryPolicy = "" - cr.Spec.PushBus.SQS.SendInterval = "" - 
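With TestValidateIngestorSpecificInputs being deleted in this hunk along with the in-spec PushBus/PipelineConfig fields, equivalent input validation presumably belongs with the new BusConfiguration CR (for example via CRD validation markers or a controller-side check); that part is not visible in this diff. The function below is only a hypothetical sketch of such a check against BusConfigurationSpec, reusing the field names introduced in api/v4 and the error semantics of the removed test; it is not the operator's actual validation code.

    package sketch

    import (
        "fmt"
        "strings"

        enterpriseApi "github.com/splunk/splunk-operator/api/v4"
    )

    // validateBusConfigurationSpec is a hypothetical stand-in for the removed
    // spec-level checks, applied to the new BusConfigurationSpec instead.
    func validateBusConfigurationSpec(spec *enterpriseApi.BusConfigurationSpec) error {
        if spec.Type != "sqs_smartbus" {
            return fmt.Errorf("only sqs_smartbus type is supported")
        }
        if spec.SQS.QueueName == "" || spec.SQS.DeadLetterQueueName == "" {
            return fmt.Errorf("queueName and deadLetterQueueName cannot be empty")
        }
        if spec.SQS.AuthRegion == "" {
            return fmt.Errorf("authRegion cannot be empty")
        }
        if !strings.HasPrefix(spec.SQS.Endpoint, "https://") ||
            !strings.HasPrefix(spec.SQS.LargeMessageStoreEndpoint, "https://") {
            return fmt.Errorf("endpoint and largeMessageStoreEndpoint must start with https://")
        }
        if !strings.HasPrefix(spec.SQS.LargeMessageStorePath, "s3://") {
            return fmt.Errorf("largeMessageStorePath must start with s3://")
        }
        return nil
    }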
cr.Spec.PushBus.SQS.EncodingFormat = "" - - err = validateIngestorSpecificInputs(cr) - assert.NotNil(t, err) - assert.Equal(t, "pipelineConfig spec cannot be empty", err.Error()) - - cr.Spec.PipelineConfig = enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: true, - RemoteQueueTyping: true, - RemoteQueueOutput: true, - Typing: true, - RuleSet: true, - IndexerPipe: true, - } - - err = validateIngestorSpecificInputs(cr) - assert.Nil(t, err) -} diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index 98b9a08d3..6ebd3df34 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -63,6 +63,9 @@ const ( // SplunkIngestor may be a standalone or clustered ingestion peer SplunkIngestor InstanceType = "ingestor" + // SplunkBusConfiguration is the bus configuration instance + SplunkBusConfiguration InstanceType = "busconfiguration" + // SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members SplunkDeployer InstanceType = "deployer" @@ -291,6 +294,8 @@ func KindToInstanceString(kind string) string { return SplunkIndexer.ToString() case "IngestorCluster": return SplunkIngestor.ToString() + case "BusConfiguration": + return SplunkBusConfiguration.ToString() case "LicenseManager": return SplunkLicenseManager.ToString() case "LicenseMaster": diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 5f5fd765a..3df285264 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2288,6 +2288,20 @@ func fetchCurrentCRWithStatusUpdate(ctx context.Context, client splcommon.Contro origCR.(*enterpriseApi.IngestorCluster).Status.DeepCopyInto(&latestIngCR.Status) return latestIngCR, nil + case "BusConfiguration": + latestBusCR := &enterpriseApi.BusConfiguration{} + err = client.Get(ctx, namespacedName, latestBusCR) + if err != nil { + return nil, err + } + + origCR.(*enterpriseApi.BusConfiguration).Status.Message = "" + if (crError != nil) && ((*crError) != nil) { + origCR.(*enterpriseApi.BusConfiguration).Status.Message = (*crError).Error() + } + origCR.(*enterpriseApi.BusConfiguration).Status.DeepCopyInto(&latestBusCR.Status) + return latestBusCR, nil + case "LicenseMaster": latestLmCR := &enterpriseApiV3.LicenseMaster{} err = client.Get(ctx, namespacedName, latestLmCR) diff --git a/pkg/splunk/test/controller.go b/pkg/splunk/test/controller.go index b4cd59507..b1adff420 100644 --- a/pkg/splunk/test/controller.go +++ b/pkg/splunk/test/controller.go @@ -359,63 +359,63 @@ func (c MockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Ob // List returns mock client's Err field func (c MockClient) List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { - // Check for induced errors - if value, ok := c.InduceErrorKind[splcommon.MockClientInduceErrorList]; ok && value != nil { - return value - } - c.Calls["List"] = append(c.Calls["List"], MockFuncCall{ - CTX: ctx, - ListOpts: opts, - ObjList: obj, - }) - - // Only handle PodList for this test - podList, ok := obj.(*corev1.PodList) - if !ok { - // fallback to old logic - listObj := c.ListObj - if listObj != nil { - srcObj := listObj - copyMockObjectList(&obj, &srcObj) - return nil - } - return c.NotFoundError - } - - // Gather label selector and namespace from opts - var ns string - var matchLabels map[string]string - for _, opt := range opts { - switch v := opt.(type) { - case client.InNamespace: - ns = string(v) - case client.MatchingLabels: - matchLabels = v - } - } - 
- // Filter pods in State - for _, v := range c.State { - pod, ok := v.(*corev1.Pod) - if !ok { - continue - } - if ns != "" && pod.Namespace != ns { - continue - } - matches := true - for k, val := range matchLabels { - if pod.Labels[k] != val { - matches = false - break - } - } - if matches { - podList.Items = append(podList.Items, *pod) - } - } - - return nil + // Check for induced errors + if value, ok := c.InduceErrorKind[splcommon.MockClientInduceErrorList]; ok && value != nil { + return value + } + c.Calls["List"] = append(c.Calls["List"], MockFuncCall{ + CTX: ctx, + ListOpts: opts, + ObjList: obj, + }) + + // Only handle PodList for this test + podList, ok := obj.(*corev1.PodList) + if !ok { + // fallback to old logic + listObj := c.ListObj + if listObj != nil { + srcObj := listObj + copyMockObjectList(&obj, &srcObj) + return nil + } + return c.NotFoundError + } + + // Gather label selector and namespace from opts + var ns string + var matchLabels map[string]string + for _, opt := range opts { + switch v := opt.(type) { + case client.InNamespace: + ns = string(v) + case client.MatchingLabels: + matchLabels = v + } + } + + // Filter pods in State + for _, v := range c.State { + pod, ok := v.(*corev1.Pod) + if !ok { + continue + } + if ns != "" && pod.Namespace != ns { + continue + } + matches := true + for k, val := range matchLabels { + if pod.Labels[k] != val { + matches = false + break + } + } + if matches { + podList.Items = append(podList.Items, *pod) + } + } + + return nil } // Create returns mock client's Err field diff --git a/test/appframework_aws/c3/appframework_aws_test.go b/test/appframework_aws/c3/appframework_aws_test.go index 8a7d267bc..ba0162ffa 100644 --- a/test/appframework_aws/c3/appframework_aws_test.go +++ b/test/appframework_aws/c3/appframework_aws_test.go @@ -3182,7 +3182,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_aws/c3/manager_appframework_test.go b/test/appframework_aws/c3/manager_appframework_test.go index 46cfc94b6..afc7abae6 100644 --- a/test/appframework_aws/c3/manager_appframework_test.go +++ b/test/appframework_aws/c3/manager_appframework_test.go @@ -355,7 +355,7 @@ var _ = Describe("c3appfw test", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName) - idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, "") // Wait for License Manager to be in READY phase testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) @@ -3324,7 +3324,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer 
Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_az/c3/appframework_azure_test.go b/test/appframework_az/c3/appframework_azure_test.go index 19c365517..0622700a4 100644 --- a/test/appframework_az/c3/appframework_azure_test.go +++ b/test/appframework_az/c3/appframework_azure_test.go @@ -993,7 +993,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_az/c3/manager_appframework_azure_test.go b/test/appframework_az/c3/manager_appframework_azure_test.go index 05b652d16..2a0af0b3b 100644 --- a/test/appframework_az/c3/manager_appframework_azure_test.go +++ b/test/appframework_az/c3/manager_appframework_azure_test.go @@ -991,7 +991,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_gcp/c3/manager_appframework_test.go b/test/appframework_gcp/c3/manager_appframework_test.go index 756f962c4..02ad17cfb 100644 --- a/test/appframework_gcp/c3/manager_appframework_test.go +++ b/test/appframework_gcp/c3/manager_appframework_test.go @@ -361,7 +361,7 @@ var _ = Describe("c3appfw test", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName) - idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, "") // Wait for License Manager to be in READY phase testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) @@ -3327,7 +3327,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, 
deployment.GetName(), "", enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index 106a0d221..c040802f8 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -11,15 +11,18 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -package indexingestsep +package indingsep import ( + "os" + "path/filepath" "testing" "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" "github.com/splunk/splunk-operator/test/testenv" ) @@ -34,7 +37,97 @@ const ( var ( testenvInstance *testenv.TestEnv - testSuiteName = "indexingestsep-" + testenv.RandomDNSName(3) + testSuiteName = "indingsep-" + testenv.RandomDNSName(3) + + bus = enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://test-bucket/smartbus-test", + DeadLetterQueueName: "test-dead-letter-queue", + }, + } + serviceAccountName = "index-ingest-sa" + + inputs = []string{ + "[remote_queue:test-queue]", + "remote_queue.type = sqs_smartbus", + "remote_queue.sqs_smartbus.auth_region = us-west-2", + "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue", + "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", + "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", + "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket/smartbus-test", + "remote_queue.sqs_smartbus.retry_policy = max_count", + "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"} + outputs = append(inputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 5s") + defaultsAll = []string{ + "[pipeline:remotequeueruleset]\ndisabled = false", + "[pipeline:ruleset]\ndisabled = true", + "[pipeline:remotequeuetyping]\ndisabled = false", + "[pipeline:remotequeueoutput]\ndisabled = false", + "[pipeline:typing]\ndisabled = true", + } + defaultsIngest = append(defaultsAll, "[pipeline:indexerPipe]\ndisabled = true") + + awsEnvVars = []string{ + "AWS_REGION=us-west-2", + "AWS_DEFAULT_REGION=us-west-2", + "AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token", + "AWS_ROLE_ARN=arn:aws:iam::", + "AWS_STS_REGIONAL_ENDPOINTS=regional", + } + + updateBus = enterpriseApi.BusConfigurationSpec{ + Type: "sqs_smartbus", + SQS: enterpriseApi.SQSSpec{ + QueueName: "test-queue-updated", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", + LargeMessageStorePath: "s3://test-bucket-updated/smartbus-test", + DeadLetterQueueName: 
"test-dead-letter-queue-updated", + }, + } + + updatedInputs = []string{ + "[remote_queue:test-queue-updated]", + "remote_queue.type = sqs_smartbus", + "remote_queue.sqs_smartbus.auth_region = us-west-2", + "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue-updated", + "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", + "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", + "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket-updated/smartbus-test", + "remote_queue.sqs_smartbus.retry_policy = max", + "remote_queue.max.sqs_smartbus.max_retries_per_part = 5"} + updatedOutputs = append(updatedInputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 4s") + updatedDefaultsAll = []string{ + "[pipeline:remotequeueruleset]\ndisabled = false", + "[pipeline:ruleset]\ndisabled = false", + "[pipeline:remotequeuetyping]\ndisabled = false", + "[pipeline:remotequeueoutput]\ndisabled = false", + "[pipeline:typing]\ndisabled = true", + } + updatedDefaultsIngest = append(updatedDefaultsAll, "[pipeline:indexerPipe]\ndisabled = true") + + inputsShouldNotContain = []string{ + "[remote_queue:test-queue]", + "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue", + "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket/smartbus-test", + "remote_queue.sqs_smartbus.retry_policy = max_count", + "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"} + outputsShouldNotContain = append(inputs, "remote_queue.sqs_smartbus.send_interval = 5s") + + testDataS3Bucket = os.Getenv("TEST_BUCKET") + testS3Bucket = os.Getenv("TEST_INDEXES_S3_BUCKET") + currDir, _ = os.Getwd() + downloadDirV1 = filepath.Join(currDir, "icappfwV1-"+testenv.RandomDNSName(4)) + appSourceVolumeName = "appframework-test-volume-" + testenv.RandomDNSName(3) + s3TestDir = "icappfw-" + testenv.RandomDNSName(4) + appListV1 = testenv.BasicApps + s3AppDirV1 = testenv.AppLocationV1 ) // TestBasic is the main entry point @@ -48,10 +141,20 @@ var _ = BeforeSuite(func() { var err error testenvInstance, err = testenv.NewDefaultTestEnv(testSuiteName) Expect(err).ToNot(HaveOccurred()) + + appListV1 = testenv.BasicApps + appFileList := testenv.GetAppFileList(appListV1) + + // Download V1 Apps from S3 + err = testenv.DownloadFilesFromS3(testDataS3Bucket, s3AppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download V1 app files") }) var _ = AfterSuite(func() { if testenvInstance != nil { Expect(testenvInstance.Teardown()).ToNot(HaveOccurred()) } + + err := os.RemoveAll(downloadDirV1) + Expect(err).To(Succeed(), "Unable to delete locally downloaded V1 app files") }) diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index bf6ce87a2..8bccddb47 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -11,37 +11,53 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -package indexingestsep +package indingsep import ( "context" "fmt" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/onsi/ginkgo/types" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/pkg/splunk/enterprise" "github.com/splunk/splunk-operator/test/testenv" ) -var _ = Describe("indexingestsep test", func() { +var _ = Describe("indingsep test", func() { var testcaseEnvInst *testenv.TestCaseEnv var deployment *testenv.Deployment + var cmSpec enterpriseApi.ClusterManagerSpec + ctx := context.TODO() BeforeEach(func() { var err error - name := fmt.Sprintf("%s-%s", "master"+testenvInstance.GetName(), testenv.RandomDNSName(3)) + name := fmt.Sprintf("%s-%s", testenvInstance.GetName(), testenv.RandomDNSName(3)) testcaseEnvInst, err = testenv.NewDefaultTestCaseEnv(testenvInstance.GetKubeClient(), name) Expect(err).To(Succeed(), "Unable to create testcaseenv") deployment, err = testcaseEnvInst.NewDeployment(testenv.RandomDNSName(3)) Expect(err).To(Succeed(), "Unable to create deployment") + + cmSpec = enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: testcaseEnvInst.GetSplunkImage(), + }, + }, + } }) AfterEach(func() { @@ -58,65 +74,431 @@ var _ = Describe("indexingestsep test", func() { }) Context("Ingestor and Indexer deployment", func() { - It("indexingestsep, smoke, indexingestseparation: Splunk Operator can configure Ingestors and Indexers", func() { + It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers", func() { + // Create Service Account + testcaseEnvInst.Log.Info("Create Service Account") + testcaseEnvInst.CreateServiceAccount(serviceAccountName) + + // Deploy Bus Configuration + testcaseEnvInst.Log.Info("Deploy Bus Configuration") + bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + + // Deploy Ingestor Cluster + testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") + // Deploy Cluster Manager testcaseEnvInst.Log.Info("Deploy Cluster Manager") - cmSpec := enterpriseApi.ClusterManagerSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", - Image: testcaseEnvInst.GetSplunkImage(), + _, err = deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec) + Expect(err).To(Succeed(), "Unable to deploy Cluster Manager") + + // Deploy Indexer Cluster + testcaseEnvInst.Log.Info("Deploy Indexer Cluster") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") + + // Ensure that Ingestor Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") + testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + + // Ensure that Cluster Manager is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Cluster Manager is in Ready phase") + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure that Indexer Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Delete the Indexer Cluster + idxc := &enterpriseApi.IndexerCluster{} + 
err = deployment.GetInstance(ctx, deployment.GetName()+"-idxc", idxc) + Expect(err).To(Succeed(), "Unable to get Indexer Cluster instance", "Indexer Cluster Name", idxc) + err = deployment.DeleteCR(ctx, idxc) + Expect(err).To(Succeed(), "Unable to delete Indexer Cluster instance", "Indexer Cluster Name", idxc) + + // Delete the Ingestor Cluster + ingest := &enterpriseApi.IngestorCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-ingest", ingest) + Expect(err).To(Succeed(), "Unable to get Ingestor Cluster instance", "Ingestor Cluster Name", ingest) + err = deployment.DeleteCR(ctx, ingest) + Expect(err).To(Succeed(), "Unable to delete Ingestor Cluster instance", "Ingestor Cluster Name", ingest) + + // Delete the Bus Configuration + busConfiguration := &enterpriseApi.BusConfiguration{} + err = deployment.GetInstance(ctx, "bus-config", busConfiguration) + Expect(err).To(Succeed(), "Unable to get Bus Configuration instance", "Bus Configuration Name", busConfiguration) + err = deployment.DeleteCR(ctx, busConfiguration) + Expect(err).To(Succeed(), "Unable to delete Bus Configuration", "Bus Configuration Name", busConfiguration) + }) + }) + + Context("Ingestor and Indexer deployment", func() { + It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers with additional configurations", func() { + // Create Service Account + testcaseEnvInst.Log.Info("Create Service Account") + testcaseEnvInst.CreateServiceAccount(serviceAccountName) + + // Deploy Bus Configuration + testcaseEnvInst.Log.Info("Deploy Bus Configuration") + bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + + // Upload apps to S3 + testcaseEnvInst.Log.Info("Upload apps to S3") + appFileList := testenv.GetAppFileList(appListV1) + _, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload V1 apps to S3 test directory for IngestorCluster") + + // Deploy Ingestor Cluster with additional configurations (similar to standalone app framework test) + appSourceName := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, s3TestDir, 60) + appFrameworkSpec.MaxConcurrentAppDownloads = uint64(5) + ic := &enterpriseApi.IngestorCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: deployment.GetName() + "-ingest", + Namespace: testcaseEnvInst.GetName(), + }, + Spec: enterpriseApi.IngestorClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + ServiceAccount: serviceAccountName, + LivenessInitialDelaySeconds: 600, + ReadinessInitialDelaySeconds: 50, + StartupProbe: &enterpriseApi.Probe{ + InitialDelaySeconds: 40, + TimeoutSeconds: 30, + PeriodSeconds: 30, + FailureThreshold: 12, + }, + LivenessProbe: &enterpriseApi.Probe{ + InitialDelaySeconds: 400, + TimeoutSeconds: 30, + PeriodSeconds: 30, + FailureThreshold: 12, + }, + ReadinessProbe: &enterpriseApi.Probe{ + InitialDelaySeconds: 20, + TimeoutSeconds: 30, + PeriodSeconds: 30, + FailureThreshold: 12, + }, + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: testcaseEnvInst.GetSplunkImage(), + }, }, + BusConfigurationRef: v1.ObjectReference{Name: bc.Name}, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpec, }, } - _, err := deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec) - Expect(err).To(Succeed(), 
"Unable to deploy Cluster Manager") + testcaseEnvInst.Log.Info("Deploy Ingestor Cluster with additional configurations") + _, err = deployment.DeployIngestorClusterWithAdditionalConfiguration(ctx, ic) + Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") + + // Ensure that Ingestor Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") + testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + + // Verify Ingestor Cluster Pods have apps installed + testcaseEnvInst.Log.Info("Verify Ingestor Cluster Pods have apps installed") + ingestorPod := []string{fmt.Sprintf(testenv.IngestorPod, deployment.GetName()+"-ingest", 0)} + ingestorAppSourceInfo := testenv.AppSourceInfo{ + CrKind: ic.Kind, + CrName: ic.Name, + CrAppSourceName: appSourceName, + CrPod: ingestorPod, + CrAppVersion: "V1", + CrAppScope: enterpriseApi.ScopeLocal, + CrAppList: testenv.BasicApps, + CrAppFileList: testenv.GetAppFileList(testenv.BasicApps), + CrReplicas: 3, + } + allAppSourceInfo := []testenv.AppSourceInfo{ingestorAppSourceInfo} + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify probe configuration + testcaseEnvInst.Log.Info("Get config map for probes") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for probes", "ConfigMap", ConfigMapName) + testcaseEnvInst.Log.Info("Verify probe configurations on Ingestor pods") + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + }) + }) + + Context("Ingestor and Indexer deployment", func() { + It("indingsep, integration, indingsep: Splunk Operator can deploy Ingestors and Indexers with correct setup", func() { // Create Service Account - serviceAccountName := "index-ingest-sa" + testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // Deploy Bus Configuration + testcaseEnvInst.Log.Info("Deploy Bus Configuration") + bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + + // Deploy Ingestor Cluster + testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") + + // Deploy Cluster Manager + testcaseEnvInst.Log.Info("Deploy Cluster Manager") + _, err = deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec) + Expect(err).To(Succeed(), "Unable to deploy Cluster Manager") + // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - pullBus := enterpriseApi.PushBusSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - LargeMessageStorePath: 
"s3://test-bucket/smartbus-test", - DeadLetterQueueName: "test-dead-letter-queue", - MaxRetriesPerPart: 4, - RetryPolicy: "max_count", - SendInterval: "5s", - EncodingFormat: "s2s", - }, - } - pipelineConfig := enterpriseApi.PipelineConfigSpec{ - RemoteQueueRuleset: false, - RuleSet: true, - RemoteQueueTyping: false, - RemoteQueueOutput: false, - Typing: true, - IndexerPipe: true, - } - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", pullBus, pipelineConfig, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") + // Ensure that Ingestor Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") + testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + + // Ensure that Cluster Manager is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Cluster Manager is in Ready phase") + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure that Indexer Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Get instance of current Ingestor Cluster CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config") + ingest := &enterpriseApi.IngestorCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-ingest", ingest) + Expect(err).To(Succeed(), "Failed to get instance of Ingestor Cluster") + + // Verify Ingestor Cluster Status + testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") + Expect(ingest.Status.BusConfiguration).To(Equal(bus), "Ingestor bus configuration status is not the same as provided as input") + + // Get instance of current Indexer Cluster CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") + index := &enterpriseApi.IndexerCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-idxc", index) + Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") + + // Verify Indexer Cluster Status + testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") + Expect(index.Status.BusConfiguration).To(Equal(bus), "Indexer bus configuration status is not the same as provided as input") + + // Verify conf files + testcaseEnvInst.Log.Info("Verify conf files") + pods := testenv.DumpGetPods(deployment.GetName()) + for _, pod := range pods { + defaultsConf := "" + + if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") { + // Verify outputs.conf + testcaseEnvInst.Log.Info("Verify outputs.conf") + outputsPath := "opt/splunk/etc/system/local/outputs.conf" + outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") + testenv.ValidateContent(outputsConf, outputs, true) + + // Verify default-mode.conf + testcaseEnvInst.Log.Info("Verify default-mode.conf") + defaultsPath := "opt/splunk/etc/system/local/default-mode.conf" + defaultsConf, err := testenv.GetConfFile(pod, defaultsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod") + testenv.ValidateContent(defaultsConf, defaultsAll, true) + + // Verify AWS env variables + 
testcaseEnvInst.Log.Info("Verify AWS env variables") + envVars, err := testenv.GetAWSEnv(pod, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod") + testenv.ValidateContent(envVars, awsEnvVars, true) + } + + if strings.Contains(pod, "ingest") { + // Verify default-mode.conf + testcaseEnvInst.Log.Info("Verify default-mode.conf") + testenv.ValidateContent(defaultsConf, defaultsIngest, true) + } else if strings.Contains(pod, "idxc") { + // Verify inputs.conf + testcaseEnvInst.Log.Info("Verify inputs.conf") + inputsPath := "opt/splunk/etc/system/local/inputs.conf" + inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod") + testenv.ValidateContent(inputsConf, inputs, true) + } + } + }) + }) + + Context("Ingestor and Indexer deployment", func() { + It("indingsep, integration, indingsep: Splunk Operator can update Ingestors and Indexers with correct setup", func() { + // Create Service Account + testcaseEnvInst.Log.Info("Create Service Account") + testcaseEnvInst.CreateServiceAccount(serviceAccountName) + + // Deploy Bus Configuration + testcaseEnvInst.Log.Info("Deploy Bus Configuration") + bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, pullBus, pipelineConfig, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") + // Deploy Cluster Manager + testcaseEnvInst.Log.Info("Deploy Cluster Manager") + _, err = deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec) + Expect(err).To(Succeed(), "Unable to deploy Cluster Manager") + + // Deploy Indexer Cluster + testcaseEnvInst.Log.Info("Deploy Indexer Cluster") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") + + // Ensure that Ingestor Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") + testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + // Ensure that Cluster Manager is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Cluster Manager is in Ready phase") testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) // Ensure that Indexer Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - // Ensure that Ingestor Cluster is in Ready phase + // Get instance of current Bus Configuration CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Bus Configuration CR with latest config") + bus := &enterpriseApi.BusConfiguration{} + err = deployment.GetInstance(ctx, bc.Name, bus) + Expect(err).To(Succeed(), "Failed to get instance of Bus Configuration") + + // Update instance of BusConfiguration CR with new bus configuration + testcaseEnvInst.Log.Info("Update instance of BusConfiguration CR with new bus configuration") + bus.Spec = updateBus + err = deployment.UpdateCR(ctx, bus) + 
Expect(err).To(Succeed(), "Unable to deploy Bus Configuration with updated CR") + + // Ensure that Ingestor Cluster has not been restarted + testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster has not been restarted") testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + + // Ensure that Indexer Cluster has not been restarted + testcaseEnvInst.Log.Info("Ensure that Indexer Cluster has not been restarted") + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Get instance of current Ingestor Cluster CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config") + ingest := &enterpriseApi.IngestorCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-ingest", ingest) + Expect(err).To(Succeed(), "Failed to get instance of Ingestor Cluster") + + // Verify Ingestor Cluster Status + testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") + Expect(ingest.Status.BusConfiguration).To(Equal(updateBus), "Ingestor bus configuration status is not the same as provided as input") + + // Get instance of current Indexer Cluster CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") + index := &enterpriseApi.IndexerCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-idxc", index) + Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") + + // Verify Indexer Cluster Status + testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") + Expect(index.Status.BusConfiguration).To(Equal(updateBus), "Indexer bus configuration status is not the same as provided as input") + + // Verify conf files + testcaseEnvInst.Log.Info("Verify conf files") + pods := testenv.DumpGetPods(deployment.GetName()) + for _, pod := range pods { + defaultsConf := "" + + if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") { + // Verify outputs.conf + testcaseEnvInst.Log.Info("Verify outputs.conf") + outputsPath := "opt/splunk/etc/system/local/outputs.conf" + outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") + testenv.ValidateContent(outputsConf, updatedOutputs, true) + testenv.ValidateContent(outputsConf, outputsShouldNotContain, false) + + // Verify default-mode.conf + testcaseEnvInst.Log.Info("Verify default-mode.conf") + defaultsPath := "opt/splunk/etc/system/local/default-mode.conf" + defaultsConf, err = testenv.GetConfFile(pod, defaultsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod") + testenv.ValidateContent(defaultsConf, defaultsAll, true) + + // Verify AWS env variables + testcaseEnvInst.Log.Info("Verify AWS env variables") + envVars, err := testenv.GetAWSEnv(pod, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod") + testenv.ValidateContent(envVars, awsEnvVars, true) + } + + if strings.Contains(pod, "ingest") { + // Verify default-mode.conf + testcaseEnvInst.Log.Info("Verify default-mode.conf") + testenv.ValidateContent(defaultsConf, defaultsIngest, true) + } else if strings.Contains(pod, "idxc") { + // Verify inputs.conf + testcaseEnvInst.Log.Info("Verify inputs.conf") + inputsPath := "opt/splunk/etc/system/local/inputs.conf" + inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod") + 
testenv.ValidateContent(inputsConf, updatedInputs, true) + testenv.ValidateContent(inputsConf, inputsShouldNotContain, false) + } + } + + // Verify conf files + testcaseEnvInst.Log.Info("Verify conf files") + pods = testenv.DumpGetPods(deployment.GetName()) + for _, pod := range pods { + defaultsConf := "" + + if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") { + // Verify outputs.conf + testcaseEnvInst.Log.Info("Verify outputs.conf") + outputsPath := "opt/splunk/etc/system/local/outputs.conf" + outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") + testenv.ValidateContent(outputsConf, updatedOutputs, true) + testenv.ValidateContent(outputsConf, outputsShouldNotContain, false) + + // Verify default-mode.conf + testcaseEnvInst.Log.Info("Verify default-mode.conf") + defaultsPath := "opt/splunk/etc/system/local/default-mode.conf" + defaultsConf, err = testenv.GetConfFile(pod, defaultsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod") + testenv.ValidateContent(defaultsConf, updatedDefaultsAll, true) + + // Verify AWS env variables + testcaseEnvInst.Log.Info("Verify AWS env variables") + envVars, err := testenv.GetAWSEnv(pod, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod") + testenv.ValidateContent(envVars, awsEnvVars, true) + } + + if strings.Contains(pod, "ingest") { + // Verify default-mode.conf + testcaseEnvInst.Log.Info("Verify default-mode.conf") + testenv.ValidateContent(defaultsConf, updatedDefaultsIngest, true) + } else if strings.Contains(pod, "idxc") { + // Verify inputs.conf + testcaseEnvInst.Log.Info("Verify inputs.conf") + inputsPath := "opt/splunk/etc/system/local/inputs.conf" + inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) + Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod") + testenv.ValidateContent(inputsConf, updatedInputs, true) + testenv.ValidateContent(inputsConf, inputsShouldNotContain, false) + } + } }) }) }) diff --git a/test/testenv/appframework_utils.go b/test/testenv/appframework_utils.go index d1f2f938c..e9879679b 100644 --- a/test/testenv/appframework_utils.go +++ b/test/testenv/appframework_utils.go @@ -250,6 +250,28 @@ func GetAppDeploymentInfoStandalone(ctx context.Context, deployment *Deployment, return appDeploymentInfo, err } +// GetAppDeploymentInfoIngestorCluster returns AppDeploymentInfo for given IngestorCluster, appSourceName and appName +func GetAppDeploymentInfoIngestorCluster(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, name string, appSourceName string, appName string) (enterpriseApi.AppDeploymentInfo, error) { + ingestor := &enterpriseApi.IngestorCluster{} + appDeploymentInfo := enterpriseApi.AppDeploymentInfo{} + err := deployment.GetInstance(ctx, name, ingestor) + if err != nil { + testenvInstance.Log.Error(err, "Failed to get CR ", "CR Name", name) + return appDeploymentInfo, err + } + appInfoList := ingestor.Status.AppContext.AppsSrcDeployStatus[appSourceName].AppDeploymentInfoList + for _, appInfo := range appInfoList { + testenvInstance.Log.Info("Checking Ingestor AppInfo Struct", "App Name", appName, "App Source", appSourceName, "Ingestor Name", name, "AppDeploymentInfo", appInfo) + if strings.Contains(appName, appInfo.AppName) { + testenvInstance.Log.Info("App Deployment Info found.", "App Name", 
appName, "App Source", appSourceName, "Ingestor Name", name, "AppDeploymentInfo", appInfo) + appDeploymentInfo = appInfo + return appDeploymentInfo, nil + } + } + testenvInstance.Log.Info("App Info not found in App Info List", "App Name", appName, "App Source", appSourceName, "Ingestor Name", name, "App Info List", appInfoList) + return appDeploymentInfo, err +} + // GetAppDeploymentInfoMonitoringConsole returns AppDeploymentInfo for given Monitoring Console, appSourceName and appName func GetAppDeploymentInfoMonitoringConsole(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, name string, appSourceName string, appName string) (enterpriseApi.AppDeploymentInfo, error) { mc := &enterpriseApi.MonitoringConsole{} @@ -345,6 +367,8 @@ func GetAppDeploymentInfo(ctx context.Context, deployment *Deployment, testenvIn switch crKind { case "Standalone": appDeploymentInfo, err = GetAppDeploymentInfoStandalone(ctx, deployment, testenvInstance, name, appSourceName, appName) + case "IngestorCluster": + appDeploymentInfo, err = GetAppDeploymentInfoIngestorCluster(ctx, deployment, testenvInstance, name, appSourceName, appName) case "MonitoringConsole": appDeploymentInfo, err = GetAppDeploymentInfoMonitoringConsole(ctx, deployment, testenvInstance, name, appSourceName, appName) case "SearchHeadCluster": diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 85ea4ada9..2e312c652 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -431,9 +431,9 @@ func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(ctx context.Contex } // DeployIndexerCluster deploys the indexer cluster -func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, busSpec enterpriseApi.PushBusSpec, pipelineConfig enterpriseApi.PipelineConfigSpec, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { +func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, busConfig corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { d.testenv.Log.Info("Deploying indexer cluster", "name", name, "CM", clusterManagerRef) - indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, busSpec, pipelineConfig, serviceAccountName) + indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, busConfig, serviceAccountName) pdata, _ := json.Marshal(indexer) d.testenv.Log.Info("indexer cluster spec", "cr", string(pdata)) deployed, err := d.deployCR(ctx, name, indexer) @@ -445,10 +445,10 @@ func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseMana } // DeployIngestorCluster deploys the ingestor cluster -func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, busSpec enterpriseApi.PushBusSpec, pipelineConfig enterpriseApi.PipelineConfigSpec, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { +func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, busConfig corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { d.testenv.Log.Info("Deploying ingestor cluster", "name", name) - ingestor := newIngestorCluster(name, d.testenv.namespace, count, 
d.testenv.splunkImage, busSpec, pipelineConfig, serviceAccountName) + ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, busConfig, serviceAccountName) pdata, _ := json.Marshal(ingestor) d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata)) @@ -460,6 +460,37 @@ func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, cou return deployed.(*enterpriseApi.IngestorCluster), err } +// DeployBusConfiguration deploys the bus configuration +func (d *Deployment) DeployBusConfiguration(ctx context.Context, name string, busConfig enterpriseApi.BusConfigurationSpec) (*enterpriseApi.BusConfiguration, error) { + d.testenv.Log.Info("Deploying bus configuration", "name", name) + + busCfg := newBusConfiguration(name, d.testenv.namespace, busConfig) + pdata, _ := json.Marshal(busCfg) + + d.testenv.Log.Info("bus configuration spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, busCfg) + if err != nil { + return nil, err + } + + return deployed.(*enterpriseApi.BusConfiguration), err +} + +// DeployIngestorClusterWithAdditionalConfiguration deploys the ingestor cluster with additional configuration +func (d *Deployment) DeployIngestorClusterWithAdditionalConfiguration(ctx context.Context, ic *enterpriseApi.IngestorCluster) (*enterpriseApi.IngestorCluster, error) { + d.testenv.Log.Info("Deploying ingestor cluster with additional configuration", "name", ic.Name) + + pdata, _ := json.Marshal(ic) + + d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, ic.Name, ic) + if err != nil { + return nil, err + } + + return deployed.(*enterpriseApi.IngestorCluster), err +} + // DeploySearchHeadCluster deploys a search head cluster func (d *Deployment) DeploySearchHeadCluster(ctx context.Context, name, ClusterManagerRef, LicenseManagerName string, ansibleConfig string, mcRef string) (*enterpriseApi.SearchHeadCluster, error) { d.testenv.Log.Info("Deploying search head cluster", "name", name) @@ -592,6 +623,24 @@ func (d *Deployment) UpdateCR(ctx context.Context, cr client.Object) error { ucr := cr.(*enterpriseApi.IndexerCluster) current.Spec = ucr.Spec cobject = current + case "IngestorCluster": + current := &enterpriseApi.IngestorCluster{} + err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) + if err != nil { + return err + } + ucr := cr.(*enterpriseApi.IngestorCluster) + current.Spec = ucr.Spec + cobject = current + case "BusConfiguration": + current := &enterpriseApi.BusConfiguration{} + err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) + if err != nil { + return err + } + ucr := cr.(*enterpriseApi.BusConfiguration) + current.Spec = ucr.Spec + cobject = current case "ClusterMaster": current := &enterpriseApiV3.ClusterMaster{} err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) @@ -691,7 +740,7 @@ func (d *Deployment) DeploySingleSiteCluster(ctx context.Context, name string, i } // Deploy the indexer cluster - _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "", enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") if err != nil { return err } @@ -749,7 +798,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHead(ctx context.Cont multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, 
name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults, enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } @@ -821,7 +870,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHead(ctx context.Context, n multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } @@ -882,7 +931,7 @@ func (d *Deployment) DeployMultisiteCluster(ctx context.Context, name string, in multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1018,7 +1067,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndIndexes(ctx context. multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1073,7 +1122,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndIndexes(ctx co multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1178,7 +1227,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx contex } // Deploy the indexer cluster - idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1256,7 +1305,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenAppFrameworkSpec(ctx } // Deploy the indexer cluster - idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1356,7 +1405,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx con multisite_master: 
splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1460,7 +1509,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(c multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1541,7 +1590,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenMonitoringConsole(ctx conte } // Deploy the indexer cluster - _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") if err != nil { return err } @@ -1613,7 +1662,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenMonitoringConsole(ctx } // Deploy the indexer cluster - _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, "") if err != nil { return err } @@ -1707,7 +1756,7 @@ func (d *Deployment) DeployMultisiteClusterWithMonitoringConsole(ctx context.Con multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1807,7 +1856,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithMonitoringConsole(ctx conte multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, enterpriseApi.PushBusSpec{}, enterpriseApi.PipelineConfigSpec{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") if err != nil { return err } diff --git a/test/testenv/testenv.go b/test/testenv/testenv.go index 7e4579ee2..f82310015 100644 --- a/test/testenv/testenv.go +++ b/test/testenv/testenv.go @@ -20,9 +20,10 @@ import ( "fmt" "net" "os" - "sigs.k8s.io/controller-runtime/pkg/metrics/server" "time" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" + enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -77,6 +78,9 @@ const ( // LicenseMasterPod Template String for standalone pod LicenseMasterPod = "splunk-%s-" + 
splcommon.LicenseManager + "-%d" + // IngestorPod Template String for ingestor pod + IngestorPod = "splunk-%s-ingestor-%d" + // IndexerPod Template String for indexer pod IndexerPod = "splunk-%s-idxc-indexer-%d" diff --git a/test/testenv/util.go b/test/testenv/util.go index 357af2b0f..b779ab3c3 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -30,6 +30,8 @@ import ( enterpriseApi "github.com/splunk/splunk-operator/api/v4" + . "github.com/onsi/gomega" + "github.com/onsi/ginkgo/v2" enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" @@ -357,7 +359,7 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseManagerName, ansibleConfi } // newIndexerCluster creates and initialize the CR for IndexerCluster Kind -func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, busSpec enterpriseApi.PushBusSpec, pipelineConfig enterpriseApi.PipelineConfigSpec, serviceAccountName string) *enterpriseApi.IndexerCluster { +func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, busConfig corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName) clusterMasterRef, clusterManagerRef := swapClusterManager(name, clusterManagerRef) @@ -394,9 +396,8 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste }, Defaults: ansibleConfig, }, - Replicas: int32(replicas), - PipelineConfig: pipelineConfig, - PullBus: busSpec, + Replicas: int32(replicas), + BusConfigurationRef: busConfig, }, } @@ -404,7 +405,7 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste } // newIngestorCluster creates and initialize the CR for IngestorCluster Kind -func newIngestorCluster(name, ns string, replicas int, splunkImage string, busSpec enterpriseApi.PushBusSpec, pipelineConfig enterpriseApi.PipelineConfigSpec, serviceAccountName string) *enterpriseApi.IngestorCluster { +func newIngestorCluster(name, ns string, replicas int, splunkImage string, busConfig corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { return &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", @@ -424,10 +425,24 @@ func newIngestorCluster(name, ns string, replicas int, splunkImage string, busSp Image: splunkImage, }, }, - Replicas: int32(replicas), - PipelineConfig: pipelineConfig, - PushBus: busSpec, + Replicas: int32(replicas), + BusConfigurationRef: busConfig, + }, + } +} + +// newBusConfiguration creates and initializes the CR for BusConfiguration Kind +func newBusConfiguration(name, ns string, busConfig enterpriseApi.BusConfigurationSpec) *enterpriseApi.BusConfiguration { + return &enterpriseApi.BusConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "BusConfiguration", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, }, + Spec: busConfig, + } } @@ -1219,3 +1234,47 @@ func DeleteConfigMap(ns string, ConfigMapName string) error { } return nil } + +// GetConfFile gets config file from pod +func GetConfFile(podName, filePath, ns string) (string, error) { + var config string + var err error + + output, err := exec.Command("kubectl", "exec", "-n", ns, podName, "--", "cat", filePath).Output() + if err != nil { + cmd := fmt.Sprintf("kubectl exec -n %s %s -- cat %s", ns, podName, filePath) + 
logf.Log.Error(err, "Failed to execute command", "command", cmd) + return config, err + } + + return string(output), err +} + +// GetAWSEnv gets AWS environment variables from pod +func GetAWSEnv(podName, ns string) (string, error) { + var config string + var err error + + // Run through a shell inside the pod so the pipe is interpreted; kubectl exec does not handle "|" itself. + output, err := exec.Command("kubectl", "exec", "-n", ns, podName, "--", "sh", "-c", "env | grep -i aws").Output() + if err != nil { + cmd := fmt.Sprintf("kubectl exec -n %s %s -- sh -c 'env | grep -i aws'", ns, podName) + logf.Log.Error(err, "Failed to execute command", "command", cmd) + return config, err + } + + return string(output), err +} + +// ValidateContent asserts that confFileContent contains every string in listOfStringsForValidation when shouldContain is true, and none of them when it is false +func ValidateContent(confFileContent string, listOfStringsForValidation []string, shouldContain bool) { + for _, str := range listOfStringsForValidation { + if shouldContain { + Expect(confFileContent).To(ContainSubstring(str), "Failed to find string "+str+" in conf file") + } else { + Expect(confFileContent).ToNot(ContainSubstring(str), "Found string "+str+" in conf file, but it should not be there") + } + } +}
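
For reviewers, a minimal usage sketch of the pieces this diff adds (not part of the change itself). It assumes the suite-level fixtures referenced by the tests above (ctx, deployment, the bus BusConfigurationSpec, serviceAccountName, outputs) and a hypothetical podName; the BusConfigurationSpec fields are elided because they live in api/v4/busconfiguration_types.go, and corev1 is the k8s.io/api/core/v1 alias used in deployment.go (imported as v1 in the test files).

// Sketch: wire one BusConfiguration CR to both an IngestorCluster and an IndexerCluster,
// then validate a rendered conf file with the new testenv helpers.
bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus)
Expect(err).To(Succeed(), "Unable to deploy Bus Configuration")

// Both CR kinds now take an ObjectReference to the BusConfiguration CR in place of the
// removed inline PushBus/PullBus and PipelineConfig specs.
busRef := corev1.ObjectReference{Name: bc.Name}
_, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, busRef, serviceAccountName)
Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster")
_, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", busRef, serviceAccountName)
Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster")

// Conf-file validation via the new helpers (podName is illustrative).
outputsConf, err := testenv.GetConfFile(podName, "opt/splunk/etc/system/local/outputs.conf", deployment.GetName())
Expect(err).To(Succeed(), "Failed to get outputs.conf")
testenv.ValidateContent(outputsConf, outputs, true)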