diff --git a/cmd/main.go b/cmd/main.go
index 6a152ce16..f8aba0ae1 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -107,11 +107,11 @@ func main() {
// as certificates issued by a trusted Certificate Authority (CA). The primary risk is potentially allowing
// unauthorized access to sensitive metrics data. Consider replacing with CertDir, CertName, and KeyName
// to provide certificates, ensuring the server communicates using trusted and secure certificates.
- TLSOpts: tlsOpts,
+ TLSOpts: tlsOpts,
FilterProvider: filters.WithAuthenticationAndAuthorization,
}
- // TODO: enable https for /metrics endpoint by default
+ // TODO: enable https for /metrics endpoint by default
// if secureMetrics {
// // FilterProvider is used to protect the metrics endpoint with authn/authz.
// // These configurations ensure that only authorized users and service accounts
diff --git a/go.mod b/go.mod
index 526789b3c..1d4123394 100644
--- a/go.mod
+++ b/go.mod
@@ -12,13 +12,13 @@ require (
github.com/aws/aws-sdk-go-v2/credentials v1.17.71
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.85
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.1
- github.com/go-logr/logr v1.4.2
+ github.com/go-logr/logr v1.4.3
github.com/google/go-cmp v0.7.0
github.com/google/uuid v1.6.0
github.com/joho/godotenv v1.5.1
github.com/minio/minio-go/v7 v7.0.16
- github.com/onsi/ginkgo/v2 v2.23.4
- github.com/onsi/gomega v1.38.0
+ github.com/onsi/ginkgo/v2 v2.27.2
+ github.com/onsi/gomega v1.38.2
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.19.1
github.com/stretchr/testify v1.9.0
@@ -39,6 +39,7 @@ require (
cloud.google.com/go/iam v1.1.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
+ github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
@@ -126,8 +127,10 @@ require (
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
+ go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
+ golang.org/x/mod v0.29.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sync v0.18.0 // indirect
@@ -143,7 +146,7 @@ require (
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect
google.golang.org/grpc v1.65.0 // indirect
- google.golang.org/protobuf v1.36.6 // indirect
+ google.golang.org/protobuf v1.36.7 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
diff --git a/go.sum b/go.sum
index 03aed3514..3c67aa5d5 100644
--- a/go.sum
+++ b/go.sum
@@ -21,6 +21,8 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1/go.mod h1:ap1dmS6vQK
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
@@ -118,6 +120,8 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
@@ -247,8 +251,12 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
+github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
+github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY=
github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o=
+github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
+github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -333,6 +341,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -354,6 +364,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -501,6 +513,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
+google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
diff --git a/internal/controller/clustermanager_controller.go b/internal/controller/clustermanager_controller.go
index f149bf129..a36cd240e 100644
--- a/internal/controller/clustermanager_controller.go
+++ b/internal/controller/clustermanager_controller.go
@@ -26,6 +26,7 @@ import (
"github.com/pkg/errors"
metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics"
enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise"
+ splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
@@ -103,7 +104,7 @@ func (r *ClusterManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
reqLogger.Info("start", "CR version", instance.GetResourceVersion())
- result, err := ApplyClusterManager(ctx, r.Client, instance)
+ result, err := ApplyClusterManager(ctx, r.Client, instance, nil)
if result.Requeue && result.RequeueAfter != 0 {
reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second))
}
@@ -112,8 +113,8 @@ func (r *ClusterManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}
// ApplyClusterManager is a package-level variable so unit tests can mock it
-var ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) {
- return enterprise.ApplyClusterManager(ctx, client, instance)
+var ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) (reconcile.Result, error) {
+ return enterprise.ApplyClusterManager(ctx, client, instance, podExecClient)
}
func (r *ClusterManagerReconciler) SetupWithManager(mgr ctrl.Manager) error {
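The controller change above leans on a common Go testing seam: the reconciler invokes ApplyClusterManager through a package-level function variable, which unit tests can reassign to a stub and restore afterwards. A minimal sketch of the pattern, with hypothetical names rather than the operator's real API:

package seam

import "context"

// applyFn is the seam: the real implementation by default; tests may swap it.
var applyFn = func(ctx context.Context, name string) error {
	// real reconcile work would run here
	return nil
}

// Reconcile calls through the variable, so the call is mockable from tests.
func Reconcile(ctx context.Context, name string) error {
	return applyFn(ctx, name)
}

A test typically saves the original value, assigns a stub, and restores it with defer, exactly as the test file below does for ApplyClusterManager.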
diff --git a/internal/controller/clustermanager_controller_test.go b/internal/controller/clustermanager_controller_test.go
index 99cd1289a..771d2f4f6 100644
--- a/internal/controller/clustermanager_controller_test.go
+++ b/internal/controller/clustermanager_controller_test.go
@@ -3,9 +3,11 @@ package controller
import (
"context"
"fmt"
+
"github.com/splunk/splunk-operator/internal/controller/testutils"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+ splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
"time"
@@ -35,7 +37,7 @@ var _ = Describe("ClusterManager Controller", func() {
It("Get ClusterManager custom resource should failed", func() {
namespace := "ns-splunk-cm-1"
- ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) {
+ ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) (reconcile.Result, error) {
return reconcile.Result{}, nil
}
nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
@@ -51,7 +53,7 @@ var _ = Describe("ClusterManager Controller", func() {
It("Create ClusterManager custom resource with annotations should pause", func() {
namespace := "ns-splunk-cm-2"
- ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) {
+ ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) (reconcile.Result, error) {
return reconcile.Result{}, nil
}
nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
@@ -71,7 +73,7 @@ var _ = Describe("ClusterManager Controller", func() {
Context("ClusterManager Management", func() {
It("Create ClusterManager custom resource should succeeded", func() {
namespace := "ns-splunk-cm-3"
- ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) {
+ ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) (reconcile.Result, error) {
return reconcile.Result{}, nil
}
nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
@@ -84,7 +86,7 @@ var _ = Describe("ClusterManager Controller", func() {
It("Cover Unused methods", func() {
namespace := "ns-splunk-cm-4"
- ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) {
+ ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) (reconcile.Result, error) {
return reconcile.Result{}, nil
}
nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
diff --git a/pkg/splunk/enterprise/afwscheduler.go b/pkg/splunk/enterprise/afwscheduler.go
index 2dd2fd667..81174850f 100644
--- a/pkg/splunk/enterprise/afwscheduler.go
+++ b/pkg/splunk/enterprise/afwscheduler.go
@@ -35,6 +35,11 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
)
+var (
+ phaseManagerBusyWaitDuration = 1 * time.Second
+ phaseManagerLoopSleepDuration = 200 * time.Millisecond
+)
+
var appPhaseInfoStatuses = map[enterpriseApi.AppPhaseStatusType]bool{
enterpriseApi.AppPkgDownloadPending: true,
enterpriseApi.AppPkgDownloadInProgress: true,
@@ -597,10 +602,10 @@ downloadWork:
default:
// All the workers are busy, check after one second
scopedLog.Info("All the workers are busy, we will check again after one second")
- time.Sleep(1 * time.Second)
+ time.Sleep(phaseManagerBusyWaitDuration)
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(phaseManagerLoopSleepDuration)
}
// wait for all the download threads to finish
@@ -680,7 +685,7 @@ downloadPhase:
}
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(phaseManagerLoopSleepDuration)
}
}
@@ -1002,7 +1007,11 @@ func runPodCopyWorker(ctx context.Context, worker *PipelineWorker, ch chan struc
}
// get the podExecClient to be used for copying file to pod
- podExecClient := splutil.GetPodExecClient(worker.client, cr, worker.targetPodName)
+ // Use injected client if available (for testing), otherwise create real client
+ podExecClient := worker.podExecClient
+ if podExecClient == nil {
+ podExecClient = splutil.GetPodExecClient(worker.client, cr, worker.targetPodName)
+ }
stdOut, stdErr, err := CopyFileToPod(ctx, worker.client, cr.GetNamespace(), appPkgLocalPath, appPkgPathOnPod, podExecClient)
if err != nil {
phaseInfo.FailCount++
@@ -1062,10 +1071,10 @@ podCopyHandler:
}
default:
// All the workers are busy, check after one second
- time.Sleep(1 * time.Second)
+ time.Sleep(phaseManagerBusyWaitDuration)
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(phaseManagerLoopSleepDuration)
}
// Wait for all the workers to finish
@@ -1131,7 +1140,7 @@ podCopyPhase:
}
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(phaseManagerLoopSleepDuration)
}
}
@@ -1231,9 +1240,12 @@ installHandler:
// Install workers can exist for local scope and premium app scopes
if installWorker != nil {
- podExecClient := splutil.GetPodExecClient(installWorker.client, installWorker.cr, installWorker.targetPodName)
+ // Use injected client if available (for testing), otherwise create real client
+ podExecClient := installWorker.podExecClient
+ if podExecClient == nil {
+ podExecClient = splutil.GetPodExecClient(installWorker.client, installWorker.cr, installWorker.targetPodName)
+ }
podID, _ := getOrdinalValFromPodName(installWorker.targetPodName)
-
// Get app source spec
appSrcSpec, err := getAppSrcSpec(installWorker.afwConfig.AppSources, installWorker.appSrcName)
if err != nil {
@@ -1264,10 +1276,10 @@ installHandler:
}
default:
- time.Sleep(1 * time.Second)
+ time.Sleep(phaseManagerBusyWaitDuration)
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(phaseManagerLoopSleepDuration)
}
for {
@@ -1287,7 +1299,7 @@ installHandler:
}
// Sleep for a second before retry
- time.Sleep(1 * time.Second)
+ time.Sleep(phaseManagerBusyWaitDuration)
}
// Wait for all the workers to finish
@@ -1383,7 +1395,7 @@ installPhase:
}
}
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(phaseManagerLoopSleepDuration)
}
}
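Hoisting the literal sleep intervals into the phaseManager* package variables is what lets the tests below cut them from hundreds of milliseconds down to one. A generic sketch of the override-and-restore idiom these tests use (names hypothetical):

package timing

import (
	"testing"
	"time"
)

var pollInterval = 200 * time.Millisecond // package-level so tests can tune it

func TestFastPolling(t *testing.T) {
	orig := pollInterval
	pollInterval = time.Millisecond        // keep the loop fast under test
	defer func() { pollInterval = orig }() // restore for subsequent tests
	// ... exercise code that sleeps on pollInterval ...
}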
diff --git a/pkg/splunk/enterprise/afwscheduler_test.go b/pkg/splunk/enterprise/afwscheduler_test.go
index e3b1f336c..2f1e3916f 100644
--- a/pkg/splunk/enterprise/afwscheduler_test.go
+++ b/pkg/splunk/enterprise/afwscheduler_test.go
@@ -713,6 +713,16 @@ func TestPhaseManagersTermination(t *testing.T) {
}
func TestPhaseManagersMsgChannels(t *testing.T) {
+ // Override timing variables for faster test execution
+ origBusyWait := phaseManagerBusyWaitDuration
+ origLoopSleep := phaseManagerLoopSleepDuration
+ phaseManagerBusyWaitDuration = 1 * time.Millisecond
+ phaseManagerLoopSleepDuration = 1 * time.Millisecond
+ defer func() {
+ phaseManagerBusyWaitDuration = origBusyWait
+ phaseManagerLoopSleepDuration = origLoopSleep
+ }()
+
ctx := context.TODO()
appDeployContext := &enterpriseApi.AppDeploymentContext{
AppsStatusMaxConcurrentAppDownloads: 1,
@@ -789,6 +799,18 @@ func TestPhaseManagersMsgChannels(t *testing.T) {
t.Errorf("unable to apply statefulset")
}
+ // Create mock PodExecClient for all workers
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: &cr,
+ TargetPodName: "splunk-stack1-standalone-0",
+ }
+ mockClient.AddMockPodExecReturnContext(ctx, "", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+
// Just make the lint conversion checks happy
capacity := 1
var workerList []*PipelineWorker = make([]*PipelineWorker, capacity)
@@ -806,9 +828,10 @@ func TestPhaseManagersMsgChannels(t *testing.T) {
FailCount: 2,
},
},
- afwConfig: &cr.Spec.AppFrameworkConfig,
- client: client,
- fanOut: cr.GetObjectKind().GroupVersionKind().Kind == "Standalone",
+ afwConfig: &cr.Spec.AppFrameworkConfig,
+ client: client,
+ fanOut: cr.GetObjectKind().GroupVersionKind().Kind == "Standalone",
+ podExecClient: mockClient,
}
}
@@ -832,7 +855,7 @@ func TestPhaseManagersMsgChannels(t *testing.T) {
}
worker.appDeployInfo.PhaseInfo.FailCount = 4
// Let the phase hop on empty channel, to get more coverage
- time.Sleep(600 * time.Millisecond)
+ time.Sleep(10 * time.Millisecond)
ppln.pplnPhases[enterpriseApi.PhaseDownload].q = nil
// add the worker to the pod copy phase
@@ -859,7 +882,7 @@ func TestPhaseManagersMsgChannels(t *testing.T) {
}
worker.appDeployInfo.PhaseInfo.FailCount = 4
// Let the phase hop on empty channel, to get more coverage
- time.Sleep(600 * time.Millisecond)
+ time.Sleep(10 * time.Millisecond)
ppln.pplnPhases[enterpriseApi.PhasePodCopy].q = nil
// add the worker to the install phase
@@ -879,7 +902,7 @@ func TestPhaseManagersMsgChannels(t *testing.T) {
worker.appDeployInfo.PhaseInfo.FailCount = 4
// Let the phase hop on empty channel, to get more coverage
- time.Sleep(600 * time.Millisecond)
+ time.Sleep(10 * time.Millisecond)
close(ppln.sigTerm)
@@ -2127,7 +2150,10 @@ func TestExtractClusterScopedAppOnPod(t *testing.T) {
}
func TestRunPodCopyWorker(t *testing.T) {
- ctx := context.TODO()
+ // Use context with timeout to prevent workers from hanging indefinitely
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
cr := enterpriseApi.ClusterManager{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterManager",
@@ -2189,6 +2215,21 @@ func TestRunPodCopyWorker(t *testing.T) {
var client splcommon.ControllerClient = getConvertedClient(c)
var waiter sync.WaitGroup
+ // Create MockPodExecClient to avoid real network I/O
+ mockPodExecClient := &spltest.MockPodExecClient{
+ Client: c,
+ Cr: &cr,
+ TargetPodName: "splunk-stack1-clustermanager-0",
+ }
+
+ // Setup mock responses for CopyFileToPod operations
+ dirCheckCmd := fmt.Sprintf("test -d %s; echo -n $?", "/operator-staging/appframework/adminApps")
+ mockPodExecClient.AddMockPodExecReturnContext(ctx, dirCheckCmd, &spltest.MockPodExecReturnContext{
+ StdOut: "0",
+ StdErr: "",
+ Err: nil,
+ })
+
worker := &PipelineWorker{
cr: &cr,
targetPodName: "splunk-stack1-clustermanager-0",
@@ -2201,10 +2242,11 @@ func TestRunPodCopyWorker(t *testing.T) {
},
ObjectHash: "abcd1234abcd",
},
- client: client,
- afwConfig: appFrameworkConfig,
- waiter: &waiter,
- appSrcName: appFrameworkConfig.AppSources[0].Name,
+ client: client,
+ afwConfig: appFrameworkConfig,
+ waiter: &waiter,
+ appSrcName: appFrameworkConfig.AppSources[0].Name,
+ podExecClient: mockPodExecClient, // Inject the mock to avoid real network I/O
}
var ch chan struct{} = make(chan struct{}, 1)
@@ -2252,7 +2294,19 @@ func TestRunPodCopyWorker(t *testing.T) {
}
func TestPodCopyWorkerHandler(t *testing.T) {
- ctx := context.TODO()
+ // Override timing variables for faster test execution
+ origBusyWait := phaseManagerBusyWaitDuration
+ origLoopSleep := phaseManagerLoopSleepDuration
+ phaseManagerBusyWaitDuration = 1 * time.Millisecond
+ phaseManagerLoopSleepDuration = 1 * time.Millisecond
+ defer func() {
+ phaseManagerBusyWaitDuration = origBusyWait
+ phaseManagerLoopSleepDuration = origLoopSleep
+ }()
+
+ // Use context with timeout to prevent workers from hanging indefinitely
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
cr := enterpriseApi.ClusterManager{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterManager",
@@ -2310,6 +2364,28 @@ func TestPodCopyWorkerHandler(t *testing.T) {
// Add object
client.AddObject(pod)
+ // Create MockPodExecClient to avoid real network I/O
+ mockPodExecClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: &cr,
+ TargetPodName: "splunk-stack1-clustermanager-0",
+ }
+
+ // Setup mock responses for CopyFileToPod operations
+ // CopyFileToPod makes 2 exec calls:
+ // 1. Directory existence check: "test -d <path>; echo -n $?"
+ // 2. Tar extraction: ["tar", "-xf", "-", "-C", "<path>"]
+
+ // Mock response for directory check (should return "0" for success)
+ dirCheckCmd := fmt.Sprintf("test -d %s; echo -n $?", "/operator-staging/appframework/adminApps")
+ mockPodExecClient.AddMockPodExecReturnContext(ctx, dirCheckCmd, &spltest.MockPodExecReturnContext{
+ StdOut: "0",
+ StdErr: "",
+ Err: nil,
+ })
+
+ // Note: tar command will be handled by the default case in MockPodExecClient (returns empty with nil error)
+
worker := &PipelineWorker{
cr: &cr,
targetPodName: "splunk-stack1-clustermanager-0",
@@ -2322,9 +2398,10 @@ func TestPodCopyWorkerHandler(t *testing.T) {
},
ObjectHash: "abcd1234abcd",
},
- client: client,
- afwConfig: appFrameworkConfig,
- appSrcName: appFrameworkConfig.AppSources[0].Name,
+ client: client,
+ afwConfig: appFrameworkConfig,
+ appSrcName: appFrameworkConfig.AppSources[0].Name,
+ podExecClient: mockPodExecClient, // Inject the mock to avoid real network I/O
}
defaultVol := splcommon.AppDownloadVolume
@@ -2371,7 +2448,7 @@ func TestPodCopyWorkerHandler(t *testing.T) {
ppln.pplnPhases[enterpriseApi.PhaseInstall].msgChannel <- worker
- time.Sleep(2 * time.Second)
+ time.Sleep(10 * time.Millisecond)
// sending null worker should not cause a crash
ppln.pplnPhases[enterpriseApi.PhaseInstall].msgChannel <- nil
@@ -2382,7 +2459,7 @@ func TestPodCopyWorkerHandler(t *testing.T) {
}
// wait for the handler to consume the worker
- time.Sleep(2 * time.Second)
+ time.Sleep(10 * time.Millisecond)
// Closing the channels should exit podCopyWorkerHandler test cleanly
close(ppln.pplnPhases[enterpriseApi.PhaseInstall].msgChannel)
@@ -3064,7 +3141,7 @@ func TestRunLocalScopedPlaybook(t *testing.T) {
// Test3: get installed app name passes but isAppAlreadyInstalled fails with real error (not "Could not find object")
mockPodExecReturnContexts[1].StdErr = ""
mockPodExecReturnContexts[2].StdErr = "Some other real error message" // Real error, not "Could not find object"
- mockPodExecReturnContexts[2].Err = fmt.Errorf("exit status 2") // Real error, not grep exit code 1
+ mockPodExecReturnContexts[2].Err = fmt.Errorf("exit status 2") // Real error, not grep exit code 1
localInstallCtxt.sem <- struct{}{}
waiter.Add(1)
err = localInstallCtxt.runPlaybook(ctx)
@@ -3073,10 +3150,10 @@ func TestRunLocalScopedPlaybook(t *testing.T) {
}
// Test4: isAppAlreadyInstalled returns app not enabled (grep exit code 1), then install fails
- mockPodExecReturnContexts[2].StdOut = "" // No stdout means grep didn't find ENABLED
- mockPodExecReturnContexts[2].StdErr = "" // No stderr
- mockPodExecReturnContexts[2].Err = fmt.Errorf("exit status 1") // grep exit code 1 = pattern not found
- mockPodExecReturnContexts[3].StdErr = "real installation error" // This is just logged now
+ mockPodExecReturnContexts[2].StdOut = "" // No stdout means grep didn't find ENABLED
+ mockPodExecReturnContexts[2].StdErr = "" // No stderr
+ mockPodExecReturnContexts[2].Err = fmt.Errorf("exit status 1") // grep exit code 1 = pattern not found
+ mockPodExecReturnContexts[3].StdErr = "real installation error" // This is just logged now
mockPodExecReturnContexts[3].Err = fmt.Errorf("install command failed") // This causes the actual failure
localInstallCtxt.sem <- struct{}{}
@@ -3103,7 +3180,7 @@ func TestRunLocalScopedPlaybook(t *testing.T) {
// Test6: Install succeeds with stderr content (should be ignored), but cleanup fails
mockPodExecReturnContexts[3].StdErr = "Some informational message in stderr" // Stderr content should be ignored
- mockPodExecReturnContexts[3].Err = nil // No actual error for install
+ mockPodExecReturnContexts[3].Err = nil // No actual error for install
// Keep cleanup failure from previous test setup to make overall test fail
// mockPodExecReturnContexts[4] still has error from earlier
@@ -3326,10 +3403,10 @@ func TestPremiumAppScopedPlaybook(t *testing.T) {
// Test4: isAppAlreadyInstalled returns app is not enabled (grep exit code 1)
// so app install will run and it should fail with real error
- mockPodExecReturnContexts[2].StdOut = "" // No stdout means grep didn't find ENABLED
- mockPodExecReturnContexts[2].StdErr = "" // No stderr
- mockPodExecReturnContexts[2].Err = fmt.Errorf("exit status 1") // grep exit code 1 = pattern not found
- mockPodExecReturnContexts[3].StdErr = "real installation error" // This is just logged now
+ mockPodExecReturnContexts[2].StdOut = "" // No stdout means grep didn't find ENABLED
+ mockPodExecReturnContexts[2].StdErr = "" // No stderr
+ mockPodExecReturnContexts[2].Err = fmt.Errorf("exit status 1") // grep exit code 1 = pattern not found
+ mockPodExecReturnContexts[3].StdErr = "real installation error" // This is just logged now
mockPodExecReturnContexts[3].Err = fmt.Errorf("install command failed") // This causes the actual failure
localInstallCtxt.sem <- struct{}{}
@@ -3341,7 +3418,7 @@ func TestPremiumAppScopedPlaybook(t *testing.T) {
// Test5: Install succeeds with stderr content (should be ignored), but post install fails
mockPodExecReturnContexts[3].StdErr = "Some informational message in stderr" // Stderr content should be ignored
- mockPodExecReturnContexts[3].Err = nil // No actual error for install
+ mockPodExecReturnContexts[3].Err = nil // No actual error for install
localInstallCtxt.sem <- struct{}{}
waiter.Add(1)
@@ -3871,6 +3948,16 @@ func TestHandleAppPkgInstallComplete(t *testing.T) {
}
func TestInstallWorkerHandler(t *testing.T) {
+ // Override timing variables for faster test execution
+ origBusyWait := phaseManagerBusyWaitDuration
+ origLoopSleep := phaseManagerLoopSleepDuration
+ phaseManagerBusyWaitDuration = 1 * time.Millisecond
+ phaseManagerLoopSleepDuration = 1 * time.Millisecond
+ defer func() {
+ phaseManagerBusyWaitDuration = origBusyWait
+ phaseManagerLoopSleepDuration = origLoopSleep
+ }()
+
ctx := context.TODO()
cr := enterpriseApi.ClusterManager{
TypeMeta: metav1.TypeMeta{
@@ -3948,6 +4035,18 @@ func TestInstallWorkerHandler(t *testing.T) {
t.Errorf("unable to apply statefulset")
}
+ // Create mock PodExecClient to avoid real pod command execution
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: &cr,
+ TargetPodName: "splunk-stack1-clustermanager-0",
+ }
+ mockClient.AddMockPodExecReturnContext(ctx, "", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+
worker := &PipelineWorker{
cr: &cr,
targetPodName: "splunk-stack1-clustermanager-0",
@@ -3960,10 +4059,11 @@ func TestInstallWorkerHandler(t *testing.T) {
},
ObjectHash: "abcd1234abcd",
},
- client: client,
- afwConfig: appFrameworkConfig,
- sts: sts,
- appSrcName: appFrameworkConfig.AppSources[0].Name,
+ client: client,
+ afwConfig: appFrameworkConfig,
+ sts: sts,
+ appSrcName: appFrameworkConfig.AppSources[0].Name,
+ podExecClient: mockClient,
}
var appDeployContext *enterpriseApi.AppDeploymentContext = &enterpriseApi.AppDeploymentContext{
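The MockPodExecClient used throughout these tests keys canned stdout/stderr/err tuples by command, and falls back to an empty successful result for anything unregistered (per the tar-command note above). A rough, self-contained sketch of that shape, offered as an assumption about the mock's mechanics rather than its actual definition:

package execmock

import "strings"

type result struct {
	stdout, stderr string
	err            error
}

// fakeExec maps a command prefix to a canned result.
type fakeExec struct {
	responses map[string]result
}

func (f *fakeExec) RunPodExecCommand(cmd string) (string, string, error) {
	for prefix, r := range f.responses {
		if prefix != "" && strings.HasPrefix(cmd, prefix) {
			return r.stdout, r.stderr, r.err
		}
	}
	return "", "", nil // default case: succeed with empty output
}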
diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go
index 269753c5c..b3f1521f5 100644
--- a/pkg/splunk/enterprise/clustermanager.go
+++ b/pkg/splunk/enterprise/clustermanager.go
@@ -40,10 +40,11 @@ import (
)
// ApplyClusterManager reconciles the state of a Splunk Enterprise cluster manager.
-func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) {
-
+// podExecClient parameter is optional - if nil, a real PodExecClient will be created.
+// This allows tests to inject a mock client.
+func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) (result reconcile.Result, err error) {
// unless modified, reconcile for this object will be requeued after 5 seconds
- result := reconcile.Result{
+ result = reconcile.Result{
Requeue: true,
RequeueAfter: time.Second * 5,
}
@@ -56,8 +57,6 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient,
if cr.Status.ResourceRevMap == nil {
cr.Status.ResourceRevMap = make(map[string]string)
}
-
- var err error
// Initialize phase
cr.Status.Phase = enterpriseApi.PhaseError
@@ -226,8 +225,10 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient,
scopedLog.Error(err, "Error in deleting automated monitoring console resource")
}
- // Create podExecClient
- podExecClient := splutil.GetPodExecClient(client, cr, "")
+ // Create podExecClient (use injected one if provided, otherwise create real one)
+ if podExecClient == nil {
+ podExecClient = splutil.GetPodExecClient(client, cr, "")
+ }
// Add a splunk operator telemetry app
if cr.Spec.EtcVolumeStorageConfig.EphemeralStorage || !cr.Status.TelAppInstalled {
@@ -242,7 +243,7 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient,
// Manager apps bundle push requires multiple reconcile iterations in order to reflect the configMap on the CM pod.
// So keep PerformCmBundlePush() as the last call in this block of code, so that other functionalities are not blocked
- err = PerformCmBundlePush(ctx, client, cr)
+ err = PerformCmBundlePush(ctx, client, cr, podExecClient)
if err != nil {
return result, err
}
@@ -349,8 +350,9 @@ func CheckIfsmartstoreConfigMapUpdatedToPod(ctx context.Context, c splcommon.Con
return fmt.Errorf("smartstore ConfigMap is missing")
}
-// PerformCmBundlePush initiates the bundle push from cluster manager
-func PerformCmBundlePush(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error {
+// PerformCmBundlePush initiates the bundle push from the cluster manager.
+// It is defined as a variable to allow mocking in unit tests.
+var PerformCmBundlePush = func(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) error {
if !cr.Status.BundlePushTracker.NeedToPushManagerApps {
return nil
}
@@ -375,8 +377,11 @@ func PerformCmBundlePush(ctx context.Context, c splcommon.ControllerClient, cr *
// for the configMap update to the Pod before proceeding for the manager apps
// bundle push.
- cmPodName := fmt.Sprintf("splunk-%s-%s-0", cr.GetName(), "cluster-manager")
- podExecClient := splutil.GetPodExecClient(c, cr, cmPodName)
+ // Create podExecClient if not provided
+ if podExecClient == nil {
+ cmPodName := fmt.Sprintf("splunk-%s-%s-0", cr.GetName(), "cluster-manager")
+ podExecClient = splutil.GetPodExecClient(c, cr, cmPodName)
+ }
err := CheckIfsmartstoreConfigMapUpdatedToPod(ctx, c, cr, podExecClient)
if err != nil {
return err
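The recurring "if podExecClient == nil" guard is plain optional-dependency injection: production callers pass nil and get the real client, while tests inject a mock. Sketched generically below; the interface and constructor names are illustrative stand-ins for splutil.PodExecClientImpl and splutil.GetPodExecClient:

package inject

import "context"

type PodExec interface {
	RunPodExecCommand(ctx context.Context, cmd string) (stdout, stderr string, err error)
}

type realExec struct{}

func (realExec) RunPodExecCommand(ctx context.Context, cmd string) (string, string, error) {
	// the real client would stream cmd to the target pod here
	return "", "", nil
}

func newRealPodExec() PodExec { return realExec{} }

func Apply(ctx context.Context, exec PodExec) error {
	if exec == nil {
		exec = newRealPodExec() // constructed only when nothing was injected
	}
	_, _, err := exec.RunPodExecCommand(ctx, "echo ready")
	return err
}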
diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go
index 586adb316..837a9570a 100644
--- a/pkg/splunk/enterprise/clustermanager_test.go
+++ b/pkg/splunk/enterprise/clustermanager_test.go
@@ -137,7 +137,7 @@ func TestApplyClusterManager(t *testing.T) {
revised.Spec.Image = "splunk/test"
revised.SetGroupVersionKind(gvk)
reconcile := func(c *spltest.MockClient, cr interface{}) error {
- _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager))
+ _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager), nil)
return err
}
spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyClusterManager", &current, revised, createCalls, updateCalls, reconcile, true)
@@ -147,7 +147,7 @@ func TestApplyClusterManager(t *testing.T) {
revised.ObjectMeta.DeletionTimestamp = &currentTime
revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"}
deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) {
- _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager))
+ _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager), nil)
return true, err
}
splunkDeletionTester(t, revised, deleteFunc)
@@ -159,7 +159,7 @@ func TestApplyClusterManager(t *testing.T) {
c := spltest.NewMockClient()
_ = errors.New(splcommon.Rerr)
current.Kind = "ClusterManager"
- _, err := ApplyClusterManager(ctx, c, &current)
+ _, err := ApplyClusterManager(ctx, c, &current, nil)
if err == nil {
t.Errorf("Expected error")
}
@@ -226,7 +226,7 @@ func TestApplyClusterManager(t *testing.T) {
}
current.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, &current)
+ _, err = ApplyClusterManager(ctx, c, &current, nil)
if err == nil {
t.Errorf("Expected error")
}
@@ -243,7 +243,7 @@ func TestApplyClusterManager(t *testing.T) {
current.Status.SmartStore.VolList[0].SecretRef = "s3-secret"
current.Status.ResourceRevMap["s3-secret"] = "v2"
current.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, &current)
+ _, err = ApplyClusterManager(ctx, c, &current, nil)
if err == nil {
t.Errorf("Expected error")
}
@@ -258,7 +258,7 @@ func TestApplyClusterManager(t *testing.T) {
current.Spec.SmartStore.VolList[0].SecretRef = ""
current.Spec.SmartStore.Defaults.IndexAndGlobalCommonSpec.VolName = "msos_s2s3_vol"
current.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, &current)
+ _, err = ApplyClusterManager(ctx, c, &current, nil)
if err != nil {
t.Errorf("Don't expected error here")
}
@@ -315,7 +315,7 @@ func TestApplyClusterManager(t *testing.T) {
},
}
current.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, &current)
+ _, err = ApplyClusterManager(ctx, c, &current, nil)
if err == nil {
t.Errorf("Expected error")
}
@@ -333,7 +333,7 @@ func TestApplyClusterManager(t *testing.T) {
rerr := errors.New(splcommon.Rerr)
c.InduceErrorKind[splcommon.MockClientInduceErrorGet] = rerr
current.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, &current)
+ _, err = ApplyClusterManager(ctx, c, &current, nil)
if err == nil {
t.Errorf("Expected error")
}
@@ -535,7 +535,7 @@ func TestClusterManagerSpecNotCreatedWithoutGeneralTerms(t *testing.T) {
c := spltest.NewMockClient()
// Attempt to apply the cluster manager spec
- _, err := ApplyClusterManager(ctx, c, &cm)
+ _, err := ApplyClusterManager(ctx, c, &cm, nil)
// Assert that an error is returned
if err == nil {
@@ -663,7 +663,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
// Without S3 keys, ApplyClusterManager should fail
current.Kind = "ClusterManager"
- _, err := ApplyClusterManager(ctx, client, &current)
+ _, err := ApplyClusterManager(ctx, client, &current, nil)
if err == nil {
t.Errorf("ApplyClusterManager should fail without S3 secrets configured")
}
@@ -693,7 +693,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
revised.Spec.Image = "splunk/test"
reconcile := func(c *spltest.MockClient, cr interface{}) error {
current.Kind = "ClusterManager"
- _, err := ApplyClusterManager(context.Background(), c, cr.(*enterpriseApi.ClusterManager))
+ _, err := ApplyClusterManager(context.Background(), c, cr.(*enterpriseApi.ClusterManager), nil)
return err
}
@@ -721,12 +721,12 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
current.Status.BundlePushTracker.NeedToPushManagerApps = true
current.Kind = "ClusterManager"
- if _, err = ApplyClusterManager(context.Background(), client, &current); err != nil {
+ if _, err = ApplyClusterManager(context.Background(), client, &current, nil); err != nil {
t.Errorf("ApplyClusterManager() should not have returned error")
}
current.Spec.CommonSplunkSpec.EtcVolumeStorageConfig.StorageCapacity = "-abcd"
- if _, err := ApplyClusterManager(context.Background(), client, &current); err == nil {
+ if _, err := ApplyClusterManager(context.Background(), client, &current, nil); err == nil {
t.Errorf("ApplyClusterManager() should have returned error")
}
@@ -736,7 +736,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
ss.Spec.Replicas = &replicas
ss.Spec.Template.Spec.Containers[0].Image = "splunk/splunk"
client.AddObject(ss)
- if result, err := ApplyClusterManager(context.Background(), client, &current); err == nil && !result.Requeue {
+ if result, err := ApplyClusterManager(context.Background(), client, &current, nil); err == nil && !result.Requeue {
t.Errorf("ApplyClusterManager() should have returned error or result.requeue should have been false")
}
@@ -746,7 +746,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) {
client.AddObjects(objects)
current.Spec.CommonSplunkSpec.Mock = false
- if _, err := ApplyClusterManager(context.Background(), client, &current); err == nil {
+ if _, err := ApplyClusterManager(context.Background(), client, &current, nil); err == nil {
t.Errorf("ApplyClusterManager() should have returned error")
}
}
@@ -774,7 +774,7 @@ func TestPerformCmBundlePush(t *testing.T) {
// When the secret object is not present, should return an error
current.Status.BundlePushTracker.NeedToPushManagerApps = true
- err := PerformCmBundlePush(ctx, client, &current)
+ err := PerformCmBundlePush(ctx, client, &current, nil)
if err == nil {
t.Errorf("Should return error, when the secret object is not present")
}
@@ -806,28 +806,28 @@ func TestPerformCmBundlePush(t *testing.T) {
//Re-attempting to push the CM bundle in less than 5 seconds should return an error
current.Status.BundlePushTracker.LastCheckInterval = time.Now().Unix() - 1
- err = PerformCmBundlePush(ctx, client, &current)
+ err = PerformCmBundlePush(ctx, client, &current, nil)
if err == nil {
t.Errorf("Bundle Push Should fail, if attempted to push within 5 seconds interval")
}
//Re-attempting to push the CM bundle after 5 seconds passed, should not return an error
current.Status.BundlePushTracker.LastCheckInterval = time.Now().Unix() - 10
- err = PerformCmBundlePush(ctx, client, &current)
+ err = PerformCmBundlePush(ctx, client, &current, nil)
if err != nil && strings.HasPrefix(err.Error(), "Will re-attempt to push the bundle after the 5 seconds") {
t.Errorf("Bundle Push Should not fail if reattempted after 5 seconds interval passed. Error: %s", err.Error())
}
// When the CM Bundle push is not pending, should not return an error
current.Status.BundlePushTracker.NeedToPushManagerApps = false
- err = PerformCmBundlePush(ctx, client, &current)
+ err = PerformCmBundlePush(ctx, client, &current, nil)
if err != nil {
t.Errorf("Should not return an error when the Bundle push is not required. Error: %s", err.Error())
}
// Negative testing
current.Status.BundlePushTracker.NeedToPushManagerApps = true
- err = PerformCmBundlePush(ctx, client, &current)
+ err = PerformCmBundlePush(ctx, client, &current, nil)
if err != nil && strings.HasPrefix(err.Error(), "Will re-attempt to push the bundle after the 5 seconds") {
t.Errorf("Bundle Push Should not fail if reattempted after 5 seconds interval passed. Error: %s", err.Error())
}
@@ -958,7 +958,7 @@ func TestAppFrameworkApplyClusterManagerShouldNotFail(t *testing.T) {
}
cm.Kind = "ClusterManager"
- _, err = ApplyClusterManager(context.Background(), client, &cm)
+ _, err = ApplyClusterManager(context.Background(), client, &cm, nil)
if err != nil {
t.Errorf("ApplyClusterManager should not have returned error here.")
}
@@ -1061,7 +1061,7 @@ func TestApplyClusterManagerDeletion(t *testing.T) {
t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume)
}
cm.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, &cm)
+ _, err = ApplyClusterManager(ctx, c, &cm, nil)
if err != nil {
t.Errorf("ApplyClusterManager should not have returned error here.")
}
@@ -1576,7 +1576,7 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) {
cm.Kind = "ClusterManager"
client.Create(ctx, &cm)
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
}
@@ -1715,7 +1715,7 @@ func TestChangeClusterManagerAnnotations(t *testing.T) {
cm.Kind = "ClusterManager"
client.Create(ctx, cm)
- _, err = ApplyClusterManager(ctx, client, cm)
+ _, err = ApplyClusterManager(ctx, client, cm, nil)
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
}
@@ -1746,6 +1746,41 @@ func TestClusterManagerWitReadyState(t *testing.T) {
newpath := filepath.Join("/tmp", "appframework")
_ = os.MkdirAll(newpath, os.ModePerm)
+ // Mock GetCMMultisiteEnvVarsCall to avoid 5-second HTTP timeout
+ // This function tries to connect to Splunk REST API which doesn't exist in unit tests
+ GetCMMultisiteEnvVarsCall = func(ctx context.Context, cr *enterpriseApi.ClusterManager, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
+ extraEnv := getClusterManagerExtraEnv(cr, &cr.Spec.CommonSplunkSpec)
+ return extraEnv, nil
+ }
+
+ savedPerformCmBundlePush := PerformCmBundlePush
+ PerformCmBundlePush = func(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, podExecClient splutil.PodExecClientImpl) error {
+ // Just set the flag to false to simulate successful bundle push
+ cr.Status.BundlePushTracker.NeedToPushManagerApps = false
+ return nil
+ }
+ defer func() { PerformCmBundlePush = savedPerformCmBundlePush }()
+
+ // Mock GetPodExecClient to return a mock client that simulates pod operations locally
+ savedGetPodExecClient := splutil.GetPodExecClient
+ splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl {
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: cr,
+ TargetPodName: targetPodName,
+ }
+ // Add mock responses for common commands
+ ctx := context.TODO()
+ // Mock mkdir command (used by createDirOnSplunkPods)
+ mockClient.AddMockPodExecReturnContext(ctx, "", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+ return mockClient
+ }
+ defer func() { splutil.GetPodExecClient = savedGetPodExecClient }()
+
// adding getapplist to fix test case
GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) {
remoteDataListResponse := splclient.RemoteDataListResponse{}
@@ -1870,7 +1905,7 @@ func TestClusterManagerWitReadyState(t *testing.T) {
// simulate create clustermanager instance before reconcilation
c.Create(ctx, clustermanager)
- _, err := ApplyClusterManager(ctx, c, clustermanager)
+ _, err := ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for clustermanager with app framework %v", err)
debug.PrintStack()
@@ -1913,7 +1948,7 @@ func TestClusterManagerWitReadyState(t *testing.T) {
// call reconciliation
clustermanager.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, clustermanager)
+ _, err = ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
@@ -2032,7 +2067,7 @@ func TestClusterManagerWitReadyState(t *testing.T) {
// call reconciliation
clustermanager.Kind = "ClusterManager"
- _, err = ApplyClusterManager(ctx, c, clustermanager)
+ _, err = ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
diff --git a/pkg/splunk/enterprise/clustermaster.go b/pkg/splunk/enterprise/clustermaster.go
index 85fcb70cc..4db87ddf4 100644
--- a/pkg/splunk/enterprise/clustermaster.go
+++ b/pkg/splunk/enterprise/clustermaster.go
@@ -331,7 +331,8 @@ func CheckIfMastersmartstoreConfigMapUpdatedToPod(ctx context.Context, c splcomm
}
// PerformCmasterBundlePush initiates the bundle push from cluster manager
-func PerformCmasterBundlePush(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApiV3.ClusterMaster) error {
+// Defined as a variable to allow mocking in unit tests
+var PerformCmasterBundlePush = func(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApiV3.ClusterMaster) error {
if !cr.Status.BundlePushTracker.NeedToPushMasterApps {
return nil
}
@@ -428,7 +429,8 @@ func getClusterMasterList(ctx context.Context, c splcommon.ControllerClient, cr
}
// VerifyCMasterisMultisite checks if it is a multisite cluster
-func VerifyCMasterisMultisite(ctx context.Context, cr *enterpriseApiV3.ClusterMaster, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
+// Defined as a variable to allow mocking in unit tests
+var VerifyCMasterisMultisite = func(ctx context.Context, cr *enterpriseApiV3.ClusterMaster, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
var err error
reqLogger := log.FromContext(ctx)
scopedLog := reqLogger.WithName("Verify if Multisite Indexer Cluster").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
diff --git a/pkg/splunk/enterprise/clustermaster_test.go b/pkg/splunk/enterprise/clustermaster_test.go
index fea79b90d..f9b49b496 100644
--- a/pkg/splunk/enterprise/clustermaster_test.go
+++ b/pkg/splunk/enterprise/clustermaster_test.go
@@ -241,6 +241,27 @@ func TestClusterMasterSpecNotCreatedWithoutGeneralTerms(t *testing.T) {
func TestApplyClusterMasterWithSmartstore(t *testing.T) {
os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ // Mock VerifyCMasterisMultisite to avoid 5-second HTTP timeout
+ // This function tries to connect to Splunk REST API which doesn't exist in unit tests
+ savedVerifyCMasterisMultisite := VerifyCMasterisMultisite
+ defer func() { VerifyCMasterisMultisite = savedVerifyCMasterisMultisite }()
+ VerifyCMasterisMultisite = func(ctx context.Context, cr *enterpriseApiV3.ClusterMaster, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
+ extraEnv := getClusterMasterExtraEnv(cr, &cr.Spec.CommonSplunkSpec)
+ return extraEnv, nil
+ }
+
+ // Mock PerformCmasterBundlePush to avoid pod exec operations
+ // When Mock=false and NeedToPushMasterApps=true, return error to simulate test expectations
+ savedPerformCmasterBundlePush := PerformCmasterBundlePush
+ defer func() { PerformCmasterBundlePush = savedPerformCmasterBundlePush }()
+ PerformCmasterBundlePush = func(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApiV3.ClusterMaster) error {
+ if !cr.Spec.CommonSplunkSpec.Mock && cr.Status.BundlePushTracker.NeedToPushMasterApps {
+ return fmt.Errorf("simulated bundle push error when Mock=false")
+ }
+ return nil
+ }
+
ctx := context.TODO()
funcCalls := []spltest.MockFuncCall{
{MetaName: "*v1.Secret-test-splunk-test-secret"},
@@ -1161,16 +1182,51 @@ func TestCheckIfMastersmartstoreConfigMapUpdatedToPod(t *testing.T) {
func TestClusterMasterWitReadyState(t *testing.T) {
os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+ // Mock VerifyCMasterisMultisite to avoid 5-second HTTP timeout
+ // This function tries to connect to Splunk REST API which doesn't exist in unit tests
+ savedVerifyCMasterisMultisiteForReadyState := VerifyCMasterisMultisite
+ defer func() { VerifyCMasterisMultisite = savedVerifyCMasterisMultisiteForReadyState }()
+ VerifyCMasterisMultisite = func(ctx context.Context, cr *enterpriseApiV3.ClusterMaster, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
+ extraEnv := getClusterMasterExtraEnv(cr, &cr.Spec.CommonSplunkSpec)
+ return extraEnv, nil
+ }
+
+ // Initialize GlobalResourceTracker to enable app framework
+ initGlobalResourceTracker()
+
// create directory for app framework
newpath := filepath.Join("/tmp", "appframework")
_ = os.MkdirAll(newpath, os.ModePerm)
// adding getapplist to fix test case
+ savedGetAppsListForReadyState := GetAppsList
+ defer func() { GetAppsList = savedGetAppsListForReadyState }()
GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) {
RemoteDataListResponse := splclient.RemoteDataListResponse{}
return RemoteDataListResponse, nil
}
+ // Mock GetPodExecClient to return a mock client that simulates pod operations locally
+ savedGetPodExecClient := splutil.GetPodExecClient
+ splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl {
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: cr,
+ TargetPodName: targetPodName,
+ }
+ // Add mock responses for common commands
+ ctx := context.TODO()
+ // Mock mkdir command (used by createDirOnSplunkPods)
+ mockClient.AddMockPodExecReturnContext(ctx, "mkdir -p", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+ return mockClient
+ }
+ defer func() { splutil.GetPodExecClient = savedGetPodExecClient }()
+
sch := pkgruntime.NewScheme()
utilruntime.Must(clientgoscheme.AddToScheme(sch))
utilruntime.Must(corev1.AddToScheme(sch))
diff --git a/pkg/splunk/enterprise/configuration_test.go b/pkg/splunk/enterprise/configuration_test.go
index 3be6d0393..3340b7cfa 100644
--- a/pkg/splunk/enterprise/configuration_test.go
+++ b/pkg/splunk/enterprise/configuration_test.go
@@ -254,7 +254,7 @@ func TestSmartstoreApplyClusterManagerFailsOnInvalidSmartStoreConfig(t *testing.
client := spltest.NewMockClient()
- _, err := ApplyClusterManager(context.TODO(), client, &cr)
+ _, err := ApplyClusterManager(context.TODO(), client, &cr, nil)
if err == nil {
t.Errorf("ApplyClusterManager should fail on invalid smartstore config")
}
diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go
index 92f562c5a..13cc02fc4 100644
--- a/pkg/splunk/enterprise/indexercluster_test.go
+++ b/pkg/splunk/enterprise/indexercluster_test.go
@@ -1533,18 +1533,44 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
},
}
+ // Mock cluster config endpoint for VerifyRFPeers
+ type ClusterInfoEntry struct {
+ Content splclient.ClusterInfo `json:"content"`
+ }
+ clusterInfoResponse := struct {
+ Entry []ClusterInfoEntry `json:"entry"`
+ }{
+ Entry: []ClusterInfoEntry{
+ {
+ Content: splclient.ClusterInfo{
+ MultiSite: "false",
+ ReplicationFactor: 3,
+ SiteReplicationFactor: "",
+ },
+ },
+ },
+ }
+ response3, _ := json.Marshal(clusterInfoResponse)
+
response1, _ := json.Marshal(apiResponse1)
response2, _ := json.Marshal(apiResponse2)
wantRequest1, _ := http.NewRequest("GET", "https://splunk-test-cluster-manager-service.default.svc.cluster.local:8089/services/cluster/manager/info?count=0&output_mode=json", nil)
wantRequest2, _ := http.NewRequest("GET", "https://splunk-test-cluster-manager-service.default.svc.cluster.local:8089/services/cluster/manager/peers?count=0&output_mode=json", nil)
+ wantRequest3, _ := http.NewRequest("GET", "https://splunk-test-cluster-manager-service.default.svc.cluster.local:8089/services/cluster/config?count=0&output_mode=json", nil)
mclient.AddHandler(wantRequest1, 200, string(response1), nil)
mclient.AddHandler(wantRequest2, 200, string(response2), nil)
+ mclient.AddHandler(wantRequest3, 200, string(response3), nil)
- // mock the verify RF peer function
- VerifyRFPeers = func(ctx context.Context, mgr indexerClusterPodManager, client splcommon.ControllerClient) error {
- return nil
+ // Mock GetSpecificSecretTokenFromPod to return a dummy password
+ // This allows VerifyRFPeers to execute its real logic with HTTP calls mocked via MockHTTPClient
+ savedGetSpecificSecretTokenFromPod := splutil.GetSpecificSecretTokenFromPod
+ defer func() { splutil.GetSpecificSecretTokenFromPod = savedGetSpecificSecretTokenFromPod }()
+ splutil.GetSpecificSecretTokenFromPod = func(ctx context.Context, c splcommon.ControllerClient, podName string, namespace string, secretToken string) (string, error) {
+ return "dummypassword", nil
}
+ savedNewIndexerClusterPodManager := newIndexerClusterPodManager
+ defer func() { newIndexerClusterPodManager = savedNewIndexerClusterPodManager }()
newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) indexerClusterPodManager {
return indexerClusterPodManager{
log: log,
@@ -1558,16 +1584,41 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
}
}
+ // Initialize GlobalResourceTracker to enable app framework
+ initGlobalResourceTracker()
+
// create directory for app framework
newpath := filepath.Join("/tmp", "appframework")
_ = os.MkdirAll(newpath, os.ModePerm)
// adding getapplist to fix test case
+ savedGetAppsList := GetAppsList
+ defer func() { GetAppsList = savedGetAppsList }()
GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) {
RemoteDataListResponse := splclient.RemoteDataListResponse{}
return RemoteDataListResponse, nil
}
+ // Mock GetPodExecClient to return a mock client that simulates pod operations locally
+ savedGetPodExecClient := splutil.GetPodExecClient
+ splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl {
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: cr,
+ TargetPodName: targetPodName,
+ }
+ // Add mock responses for common commands
+ ctx := context.TODO()
+ // Mock mkdir command (used by createDirOnSplunkPods)
+ mockClient.AddMockPodExecReturnContext(ctx, "mkdir -p", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+ return mockClient
+ }
+ defer func() { splutil.GetPodExecClient = savedGetPodExecClient }()
+
sch := pkgruntime.NewScheme()
utilruntime.Must(clientgoscheme.AddToScheme(sch))
utilruntime.Must(corev1.AddToScheme(sch))
@@ -1707,7 +1758,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
}
// call reconciliation
- _, err = ApplyClusterManager(ctx, c, clustermanager)
+ _, err = ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
@@ -1786,7 +1837,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
}
// call reconciliation
- _, err = ApplyClusterManager(ctx, c, clustermanager)
+ _, err = ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
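The new wantRequest3/response3 pair stubs the /services/cluster/config endpoint that VerifyRFPeers now exercises for real. For readers unfamiliar with the project's MockHTTPClient, here is the same idea expressed with the standard library's httptest; this is an analogue only, and the JSON keys are assumptions about the response shape, not confirmed Splunk API fields:

package httpmock

import (
	"io"
	"net/http"
	"net/http/httptest"
)

// newClusterConfigServer serves a canned single-site cluster config.
func newClusterConfigServer() *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/services/cluster/config" {
			io.WriteString(w, `{"entry":[{"content":{"multisite":"false","replication_factor":3}}]}`)
			return
		}
		http.NotFound(w, r)
	}))
}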
diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go
index ae5afb98a..55ad14292 100644
--- a/pkg/splunk/enterprise/licensemanager_test.go
+++ b/pkg/splunk/enterprise/licensemanager_test.go
@@ -828,25 +828,54 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
mclient.AddHandler(wantRequest2, 200, string(response2), nil)
// mock the verify RF peer function
+ savedVerifyRFPeers := VerifyRFPeers
+ defer func() { VerifyRFPeers = savedVerifyRFPeers }()
VerifyRFPeers = func(ctx context.Context, mgr indexerClusterPodManager, client splcommon.ControllerClient) error {
return nil
}
// Mock the addTelApp function for unit tests
+ savedAddTelApp := addTelApp
+ defer func() { addTelApp = savedAddTelApp }()
addTelApp = func(ctx context.Context, podExecClient splutil.PodExecClientImpl, replicas int32, cr splcommon.MetaObject) error {
return nil
}
+ // Initialize GlobalResourceTracker to enable app framework
+ initGlobalResourceTracker()
+
// create directory for app framework
newpath := filepath.Join("/tmp", "appframework")
_ = os.MkdirAll(newpath, os.ModePerm)
// mock GetAppsList so the test case passes
+ savedGetAppsList := GetAppsList
+ defer func() { GetAppsList = savedGetAppsList }()
GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) {
RemoteDataListResponse := splclient.RemoteDataListResponse{}
return RemoteDataListResponse, nil
}
+ // Mock GetPodExecClient to return a mock client that simulates pod operations locally
+ savedGetPodExecClient := splutil.GetPodExecClient
+ splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl {
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: cr,
+ TargetPodName: targetPodName,
+ }
+ // Add mock responses for common commands
+ ctx := context.TODO()
+ // Mock mkdir command (used by createDirOnSplunkPods)
+ mockClient.AddMockPodExecReturnContext(ctx, "mkdir -p", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+ return mockClient
+ }
+ defer func() { splutil.GetPodExecClient = savedGetPodExecClient }()
+
sch := pkgruntime.NewScheme()
utilruntime.Must(clientgoscheme.AddToScheme(sch))
utilruntime.Must(corev1.AddToScheme(sch))
@@ -1190,7 +1219,7 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
}
// call reconciliation
- _, err = ApplyClusterManager(ctx, c, clustermanager)
+ _, err = ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
@@ -1264,7 +1293,7 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
}
// call reconciliation
- _, err = ApplyClusterManager(ctx, c, clustermanager)
+ _, err = ApplyClusterManager(ctx, c, clustermanager, nil)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
diff --git a/pkg/splunk/enterprise/licensemaster_test.go b/pkg/splunk/enterprise/licensemaster_test.go
index 9044dff89..467c561d0 100644
--- a/pkg/splunk/enterprise/licensemaster_test.go
+++ b/pkg/splunk/enterprise/licensemaster_test.go
@@ -844,20 +844,55 @@ func TestLicenseMasterWithReadyState(t *testing.T) {
mclient.AddHandler(wantRequest2, 200, string(response2), nil)
// mock the verify RF peer function
+ savedVerifyRFPeers := VerifyRFPeers
+ defer func() { VerifyRFPeers = savedVerifyRFPeers }()
VerifyRFPeers = func(ctx context.Context, mgr indexerClusterPodManager, client splcommon.ControllerClient) error {
return nil
}
+ // Mock VerifyCMasterisMultisite to avoid HTTP timeout when ApplyClusterMaster is called
+ savedVerifyCMasterisMultisite := VerifyCMasterisMultisite
+ defer func() { VerifyCMasterisMultisite = savedVerifyCMasterisMultisite }()
+ VerifyCMasterisMultisite = func(ctx context.Context, cr *enterpriseApiV3.ClusterMaster, namespaceScopedSecret *corev1.Secret) ([]corev1.EnvVar, error) {
+ extraEnv := getClusterMasterExtraEnv(cr, &cr.Spec.CommonSplunkSpec)
+ return extraEnv, nil
+ }
+
+ // Initialize GlobalResourceTracker to enable app framework
+ initGlobalResourceTracker()
+
// create directory for app framework
newpath := filepath.Join("/tmp", "appframework")
_ = os.MkdirAll(newpath, os.ModePerm)
// mock GetAppsList so the test case passes
+ savedGetAppsList := GetAppsList
+ defer func() { GetAppsList = savedGetAppsList }()
GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) {
RemoteDataListResponse := splclient.RemoteDataListResponse{}
return RemoteDataListResponse, nil
}
+ // Mock GetPodExecClient to return a mock client that simulates pod operations locally
+ savedGetPodExecClient := splutil.GetPodExecClient
+ splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl {
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: cr,
+ TargetPodName: targetPodName,
+ }
+ // Add mock responses for common commands
+ ctx := context.TODO()
+ // Mock mkdir command (used by createDirOnSplunkPods)
+ mockClient.AddMockPodExecReturnContext(ctx, "mkdir -p", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+ return mockClient
+ }
+ defer func() { splutil.GetPodExecClient = savedGetPodExecClient }()
+
sch := pkgruntime.NewScheme()
utilruntime.Must(clientgoscheme.AddToScheme(sch))
utilruntime.Must(corev1.AddToScheme(sch))
@@ -914,6 +949,8 @@ func TestLicenseMasterWithReadyState(t *testing.T) {
}
// Mock the addTelApp function for unit tests
+ savedAddTelApp := addTelApp
+ defer func() { addTelApp = savedAddTelApp }()
addTelApp = func(ctx context.Context, podExecClient splutil.PodExecClientImpl, replicas int32, cr splcommon.MetaObject) error {
return nil
}
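
The same saved/override/defer triple recurs in every test in this patch. With Go 1.14+ it can be folded into a helper that registers the restore via t.Cleanup; a sketch under that assumption, reusing the VerifyRFPeers signature shown above (the helper name is hypothetical):

```go
package enterprise

import (
	"context"
	"testing"

	splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
)

// stubVerifyRFPeers replaces VerifyRFPeers with a no-op for the duration of
// a test and registers the restore with t.Cleanup, collapsing the manual
// saved/defer pair into a single call.
func stubVerifyRFPeers(t *testing.T) {
	t.Helper()
	saved := VerifyRFPeers
	t.Cleanup(func() { VerifyRFPeers = saved })
	VerifyRFPeers = func(ctx context.Context, mgr indexerClusterPodManager, client splcommon.ControllerClient) error {
		return nil
	}
}
```
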
diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go
index 3108c7d6a..f3a56fef1 100644
--- a/pkg/splunk/enterprise/monitoringconsole_test.go
+++ b/pkg/splunk/enterprise/monitoringconsole_test.go
@@ -1180,7 +1180,7 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) {
cm.Spec.Image = "splunk/splunk:latest"
// Create the instances
client.Create(ctx, cm)
- _, err := ApplyClusterManager(ctx, client, cm)
+ _, err := ApplyClusterManager(ctx, client, cm, nil)
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
}
diff --git a/pkg/splunk/enterprise/standalone_test.go b/pkg/splunk/enterprise/standalone_test.go
index acdb07515..f933ca08d 100644
--- a/pkg/splunk/enterprise/standalone_test.go
+++ b/pkg/splunk/enterprise/standalone_test.go
@@ -1236,15 +1236,49 @@ func TestStandaloneWitAppFramework(t *testing.T) {
func TestStandaloneWithReadyState(t *testing.T) {
os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
- // create directory for app framework
+
+ // Initialize the global resource tracker to allow app framework to run
+ initGlobalResourceTracker()
+
+ // Create temporary directory for app framework operations
newpath := filepath.Join("/tmp", "appframework")
_ = os.MkdirAll(newpath, os.ModePerm)
+ defer os.RemoveAll(newpath)
- // adding getapplist to fix test case
+ // Create app download directory required by app framework
+ err := os.MkdirAll(splcommon.AppDownloadVolume, 0755)
+ if err != nil {
+ t.Fatalf("Unable to create download directory for apps %s: %v", splcommon.AppDownloadVolume, err)
+ }
+ defer os.RemoveAll(splcommon.AppDownloadVolume)
+
+ // Mock GetAppsList to return empty list (no apps to download)
+ savedGetAppsList := GetAppsList
GetAppsList = func(ctx context.Context, remoteDataClientMgr RemoteDataClientManager) (splclient.RemoteDataListResponse, error) {
RemoteDataListResponse := splclient.RemoteDataListResponse{}
return RemoteDataListResponse, nil
}
+ defer func() { GetAppsList = savedGetAppsList }()
+
+ // Mock GetPodExecClient to return a mock client that simulates pod operations locally
+ savedGetPodExecClient := splutil.GetPodExecClient
+ splutil.GetPodExecClient = func(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) splutil.PodExecClientImpl {
+ mockClient := &spltest.MockPodExecClient{
+ Client: client,
+ Cr: cr,
+ TargetPodName: targetPodName,
+ }
+ // Add mock responses for common commands
+ ctx := context.TODO()
+ // Mock mkdir command (used by createDirOnSplunkPods)
+ mockClient.AddMockPodExecReturnContext(ctx, "mkdir -p", &spltest.MockPodExecReturnContext{
+ StdOut: "",
+ StdErr: "",
+ Err: nil,
+ })
+ return mockClient
+ }
+ defer func() { splutil.GetPodExecClient = savedGetPodExecClient }()
sch := pkgruntime.NewScheme()
utilruntime.Must(clientgoscheme.AddToScheme(sch))
@@ -1352,7 +1386,7 @@ func TestStandaloneWithReadyState(t *testing.T) {
// simulate create standalone instance before reconciliation
c.Create(ctx, &standalone)
- _, err := ApplyStandalone(ctx, c, &standalone)
+ _, err = ApplyStandalone(ctx, c, &standalone)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for standalone with app framework %v", err)
debug.PrintStack()
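
A note on the directory setup above: the fixed /tmp/appframework path plus defer os.RemoveAll works, but where a path is not hard-coded by production code (as splcommon.AppDownloadVolume is), t.TempDir gives per-test isolation with automatic cleanup. A hedged sketch of the alternative (helper name is hypothetical):

```go
package enterprise

import (
	"os"
	"path/filepath"
	"testing"
)

// setupAppFrameworkDir uses t.TempDir, which returns a fresh directory that
// the testing package removes automatically, so no defer os.RemoveAll is
// needed and parallel tests cannot collide on a shared /tmp path.
func setupAppFrameworkDir(t *testing.T) string {
	t.Helper()
	newpath := filepath.Join(t.TempDir(), "appframework")
	if err := os.MkdirAll(newpath, os.ModePerm); err != nil {
		t.Fatalf("unable to create app framework directory %s: %v", newpath, err)
	}
	return newpath
}
```
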
diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go
index 7b34c5eeb..1bf622f1e 100644
--- a/pkg/splunk/enterprise/types.go
+++ b/pkg/splunk/enterprise/types.go
@@ -129,6 +129,10 @@ type PipelineWorker struct {
// indicates a fan out worker
fanOut bool
+
+ // Optional injected pod exec client for testing (avoids real network I/O)
+ // If nil, runPodCopyWorker will create a real client
+ podExecClient splutil.PodExecClientImpl
}
// PipelinePhase represents one phase in the overall installation pipeline
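
The new podExecClient field on PipelineWorker follows a common injection pattern: a nil field means "build the real dependency", a non-nil field means a test supplied one. A self-contained sketch with stand-in types (execClient, realClient, and worker are hypothetical, not the operator's actual types):

```go
package main

import "fmt"

// execClient is a stand-in for splutil.PodExecClientImpl.
type execClient interface {
	Run(cmd string) (string, error)
}

type realClient struct{}

func (realClient) Run(cmd string) (string, error) {
	return "", fmt.Errorf("real exec would run here: %s", cmd)
}

// worker mirrors the PipelineWorker pattern: an optional injected client,
// with a nil check falling back to the real implementation.
type worker struct {
	podExecClient execClient // nil in production; set by tests
}

func (w *worker) run() (string, error) {
	client := w.podExecClient
	if client == nil {
		client = realClient{} // production path, as runPodCopyWorker would take
	}
	return client.Run("mkdir -p /opt/splunk/appframework")
}

func main() {
	out, err := (&worker{}).run()
	fmt.Println(out, err)
}
```
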
diff --git a/pkg/splunk/enterprise/upgrade_test.go b/pkg/splunk/enterprise/upgrade_test.go
index b501527fd..7551d10ee 100644
--- a/pkg/splunk/enterprise/upgrade_test.go
+++ b/pkg/splunk/enterprise/upgrade_test.go
@@ -214,7 +214,7 @@ func TestUpgradePathValidation(t *testing.T) {
t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err)
}
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
// license manager statefulset is not created
if err != nil && !k8serrors.IsNotFound(err) {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
@@ -268,7 +268,7 @@ func TestUpgradePathValidation(t *testing.T) {
t.Errorf("lm is not in ready state")
}
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
// lm statefulset should have been created by now, this should pass
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
@@ -279,7 +279,7 @@ func TestUpgradePathValidation(t *testing.T) {
updateStatefulSetsInTest(t, ctx, client, 1, fmt.Sprintf("splunk-%s-cluster-manager", cm.Name), cm.Namespace)
cm.Status.TelAppInstalled = true
// cluster manager is found and created
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
// lm statefulset should have been created by now, this should pass
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
@@ -532,7 +532,7 @@ func TestUpgradePathValidation(t *testing.T) {
}
cm.Status.TelAppInstalled = true
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
if err != nil {
t.Errorf("applyClusterManager after update should not have returned error; err=%v", err)
}
@@ -585,13 +585,13 @@ func TestUpgradePathValidation(t *testing.T) {
}
cm.Status.TelAppInstalled = true
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
if err != nil {
t.Errorf("applyClusterManager after update should not have returned error; err=%v", err)
}
cm.Status.TelAppInstalled = true
- _, err = ApplyClusterManager(ctx, client, &cm)
+ _, err = ApplyClusterManager(ctx, client, &cm, nil)
if err != nil {
t.Errorf("applyClusterManager after update should not have returned error; err=%v", err)
}
diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go
index 70b9b8f8a..abd96482a 100644
--- a/pkg/splunk/enterprise/util.go
+++ b/pkg/splunk/enterprise/util.go
@@ -2503,7 +2503,7 @@ func loadFixture(t *testing.T, filename string) string {
if err != nil {
t.Fatalf("Failed to load fixture %s: %v", filename, err)
}
-
+
// Compact the JSON to match the output from json.Marshal
var compactJSON bytes.Buffer
if err := json.Compact(&compactJSON, data); err != nil {
diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go
index e717e82da..a02cefb61 100644
--- a/pkg/splunk/enterprise/util_test.go
+++ b/pkg/splunk/enterprise/util_test.go
@@ -3261,7 +3261,7 @@ func TestGetCurrentImage(t *testing.T) {
WithStatusSubresource(&enterpriseApi.SearchHeadCluster{})
client := builder.Build()
client.Create(ctx, &current)
- _, err := ApplyClusterManager(ctx, client, &current)
+ _, err := ApplyClusterManager(ctx, client, &current, nil)
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
}
diff --git a/pkg/splunk/test/controller.go b/pkg/splunk/test/controller.go
index 6e5871cc4..bfdf891e0 100644
--- a/pkg/splunk/test/controller.go
+++ b/pkg/splunk/test/controller.go
@@ -349,11 +349,32 @@ func (c MockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Ob
return nil
}
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ if gvk.Empty() {
+ // Infer GVK from object type
+ typeName := reflect.TypeOf(obj).Elem().Name()
+ // Determine group based on type
+ var group, version string
+ switch obj.(type) {
+ case *corev1.Pod, *corev1.Service, *corev1.ConfigMap, *corev1.Secret:
+ group, version = "", "v1"
+ case *appsv1.StatefulSet, *appsv1.Deployment:
+ group, version = "apps", "v1"
+ default:
+ group, version = "enterprise.splunk.com", "v4"
+ }
+ gvk = schema.GroupVersionKind{
+ Group: group,
+ Version: version,
+ Kind: typeName,
+ }
+ }
+
dummySchemaResource := schema.GroupResource{
- Group: obj.GetObjectKind().GroupVersionKind().Group,
- Resource: obj.GetObjectKind().GroupVersionKind().Kind,
+ Group: gvk.Group,
+ Resource: gvk.Kind,
}
- c.NotFoundError = k8serrors.NewNotFound(dummySchemaResource, obj.GetName())
+ c.NotFoundError = k8serrors.NewNotFound(dummySchemaResource, key.Name)
return c.NotFoundError
}
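
This change matters because k8serrors.NewNotFound builds its message from the GroupResource and the name; with an empty GVK and an unnamed freshly allocated object, the old code produced an error with blank fields. A small sketch of what the inferred values now yield (the resource and object names are illustrative, and the sketch mirrors the mock's use of Kind as the Resource):

```go
package main

import (
	"fmt"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// A NotFound error built from an inferred group/resource, as MockClient.Get
	// now does. Using the lookup key's name (rather than obj.GetName(), which is
	// empty on a zero-value object) yields a meaningful message.
	gr := schema.GroupResource{Group: "apps", Resource: "StatefulSet"}
	err := k8serrors.NewNotFound(gr, "splunk-test-cluster-manager")

	fmt.Println(k8serrors.IsNotFound(err)) // true
	fmt.Println(err)                       // StatefulSet.apps "splunk-test-cluster-manager" not found
}
```
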
diff --git a/pkg/splunk/util/secrets.go b/pkg/splunk/util/secrets.go
index 35071e80a..2b2febddb 100644
--- a/pkg/splunk/util/secrets.go
+++ b/pkg/splunk/util/secrets.go
@@ -34,7 +34,7 @@ import (
)
-// GetSpecificSecretTokenFromPod retrieves a specific secret token's value from a Pod
-func GetSpecificSecretTokenFromPod(ctx context.Context, c splcommon.ControllerClient, PodName string, namespace string, secretToken string) (string, error) {
+// getSpecificSecretTokenFromPodImpl retrieves a specific secret token's value from a Pod
+func getSpecificSecretTokenFromPodImpl(ctx context.Context, c splcommon.ControllerClient, PodName string, namespace string, secretToken string) (string, error) {
// Get Pod data
secret, err := GetSecretFromPod(ctx, c, PodName, namespace)
if err != nil {
@@ -57,6 +57,9 @@ func GetSpecificSecretTokenFromPod(ctx context.Context, c splcommon.ControllerCl
return string(secret.Data[secretToken]), nil
}
+// GetSpecificSecretTokenFromPod is a function variable so tests can substitute a mock implementation
+var GetSpecificSecretTokenFromPod = getSpecificSecretTokenFromPodImpl
+
// GetSecretFromPod retrieves secret data from a pod
func GetSecretFromPod(ctx context.Context, c splcommon.ControllerClient, PodName string, namespace string) (*corev1.Secret, error) {
var currentPod corev1.Pod
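
With GetSpecificSecretTokenFromPod now a function variable, any test in the operator can stub it without touching a pod. A hypothetical test sketch (the test name and token values are illustrative; import paths follow the repository's conventions):

```go
package enterprise

import (
	"context"
	"testing"

	splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
	splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
)

// TestWithStubbedSecretToken shows the new seam in use: the stub's signature
// must match getSpecificSecretTokenFromPodImpl exactly.
func TestWithStubbedSecretToken(t *testing.T) {
	saved := splutil.GetSpecificSecretTokenFromPod
	defer func() { splutil.GetSpecificSecretTokenFromPod = saved }()

	splutil.GetSpecificSecretTokenFromPod = func(ctx context.Context, c splcommon.ControllerClient, podName string, namespace string, secretToken string) (string, error) {
		return "fake-hec-token", nil // no pod lookup, no API server
	}

	token, err := splutil.GetSpecificSecretTokenFromPod(context.TODO(), nil, "splunk-s1-standalone-0", "default", "hec_token")
	if err != nil || token != "fake-hec-token" {
		t.Errorf("stub not in effect: token=%q err=%v", token, err)
	}
}
```
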
diff --git a/pkg/splunk/util/util.go b/pkg/splunk/util/util.go
index a393d7703..57a6442b0 100644
--- a/pkg/splunk/util/util.go
+++ b/pkg/splunk/util/util.go
@@ -228,8 +228,8 @@ type PodExecClient struct {
targetPodName string
}
-// GetPodExecClient returns the client object used to execute pod exec commands
-func GetPodExecClient(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) *PodExecClient {
+// getPodExecClientImpl is the actual implementation that creates a real PodExecClient
+func getPodExecClientImpl(client splcommon.ControllerClient, cr splcommon.MetaObject, targetPodName string) PodExecClientImpl {
return &PodExecClient{
client: client,
cr: cr,
@@ -238,6 +238,10 @@ func GetPodExecClient(client splcommon.ControllerClient, cr splcommon.MetaObject
}
}
+// GetPodExecClient is a function variable so tests can substitute a mock PodExecClient.
+// By default it returns a real PodExecClient.
+var GetPodExecClient = getPodExecClientImpl
+
// suppressHarmlessErrorMessages suppresses harmless error messages
func suppressHarmlessErrorMessages(values ...*string) {
for _, val := range values {
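
Since GetPodExecClient now returns the PodExecClientImpl interface rather than *PodExecClient, a compile-time assertion is a cheap way to guarantee the concrete type keeps satisfying it. A sketch with stand-in declarations (the real interface's method set lives in this package and is not reproduced here):

```go
package main

import "fmt"

// Stand-ins for the real types; only the assertion idiom is the point.
type PodExecClientImpl interface {
	TargetPodName() string
}

type PodExecClient struct {
	targetPodName string
}

func (p *PodExecClient) TargetPodName() string { return p.targetPodName }

// Compile-time check: the build breaks if *PodExecClient ever stops
// implementing PodExecClientImpl.
var _ PodExecClientImpl = &PodExecClient{}

func main() {
	var c PodExecClientImpl = &PodExecClient{targetPodName: "splunk-s1-standalone-0"}
	fmt.Println(c.TargetPodName())
}
```
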
diff --git a/pkg/splunk/util/util_test.go b/pkg/splunk/util/util_test.go
index 5e61f1676..1f933c452 100644
--- a/pkg/splunk/util/util_test.go
+++ b/pkg/splunk/util/util_test.go
@@ -214,6 +214,18 @@ func TestDeepCopy(t *testing.T) {
func TestPodExecCommand(t *testing.T) {
ctx := context.TODO()
+
+ // Mock podExecGetConfig to return a config with localhost as server
+ // This prevents the test from trying to connect to a real Kubernetes cluster
+ // and timing out. Instead, it will fail fast with connection refused.
+ savedPodExecGetConfig := podExecGetConfig
+ defer func() { podExecGetConfig = savedPodExecGetConfig }()
+ podExecGetConfig = func() (*rest.Config, error) {
+ return &rest.Config{
+ Host: "http://127.0.0.1:1", // closed port on loopback, so the dial fails fast
+ }, nil
+ }
+
// Create pod
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
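
Why the stubbed config makes the test fail fast: dialing a closed loopback port is rejected immediately with "connection refused", whereas in-cluster discovery would otherwise block until a timeout. A minimal sketch using client-go (assuming versions compatible with this repository's go.mod):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Point the client at a port nothing listens on, as the podExecGetConfig
	// stub above does; any request fails immediately instead of hanging.
	cfg := &rest.Config{Host: "http://127.0.0.1:1"}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		fmt.Println("config error:", err)
		return
	}
	_, err = clientset.Discovery().ServerVersion()
	fmt.Println(err) // connection refused, returned quickly
}
```
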
diff --git a/test/secret/manager_secret_m4_test.go b/test/secret/manager_secret_m4_test.go
index 526af6d31..fdf2d2a31 100644
--- a/test/secret/manager_secret_m4_test.go
+++ b/test/secret/manager_secret_m4_test.go
@@ -17,8 +17,8 @@ import (
"context"
"fmt"
- "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/gomega"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
diff --git a/test/secret/manager_secret_s1_test.go b/test/secret/manager_secret_s1_test.go
index d51e004fd..123538317 100644
--- a/test/secret/manager_secret_s1_test.go
+++ b/test/secret/manager_secret_s1_test.go
@@ -19,8 +19,8 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
- "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
diff --git a/test/secret/secret_c3_test.go b/test/secret/secret_c3_test.go
index 90bb9fe9c..698c84786 100644
--- a/test/secret/secret_c3_test.go
+++ b/test/secret/secret_c3_test.go
@@ -19,8 +19,8 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
- "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/gomega"
"github.com/splunk/splunk-operator/test/testenv"
diff --git a/test/secret/secret_m4_test.go b/test/secret/secret_m4_test.go
index f257e70ce..e40d94cfd 100644
--- a/test/secret/secret_m4_test.go
+++ b/test/secret/secret_m4_test.go
@@ -19,8 +19,8 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
- "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/gomega"
"github.com/splunk/splunk-operator/test/testenv"
diff --git a/test/secret/secret_s1_test.go b/test/secret/secret_s1_test.go
index 11c621815..fc7a0e47d 100644
--- a/test/secret/secret_s1_test.go
+++ b/test/secret/secret_s1_test.go
@@ -19,8 +19,8 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
- "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
diff --git a/test/smartstore/manager_smartstore_test.go b/test/smartstore/manager_smartstore_test.go
index 45db78875..b90a68337 100644
--- a/test/smartstore/manager_smartstore_test.go
+++ b/test/smartstore/manager_smartstore_test.go
@@ -7,8 +7,8 @@ import (
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
- "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/gomega"
"github.com/splunk/splunk-operator/test/testenv"
diff --git a/test/smartstore/smartstore_test.go b/test/smartstore/smartstore_test.go
index f1c330a66..c2d550411 100644
--- a/test/smartstore/smartstore_test.go
+++ b/test/smartstore/smartstore_test.go
@@ -5,8 +5,8 @@ import (
"fmt"
"time"
- "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/gomega"
enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
diff --git a/test/smoke/smoke_test.go b/test/smoke/smoke_test.go
index 9c0a609e6..de4d26e88 100644
--- a/test/smoke/smoke_test.go
+++ b/test/smoke/smoke_test.go
@@ -17,8 +17,8 @@ import (
"context"
"fmt"
- "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/ginkgo/v2/types"
. "github.com/onsi/gomega"
"github.com/splunk/splunk-operator/test/testenv"