From dfb8f851f12f29501bdf22590aaddcbd8d541ed2 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 22 Jul 2025 15:47:57 +0200 Subject: [PATCH 1/2] replace loggers with zaptest logger --- controllers/om/automation_status_test.go | 4 +- controllers/om/backup_test.go | 7 +- .../om/deployment/om_deployment_test.go | 9 +-- controllers/om/deployment_test.go | 75 +++++++++---------- controllers/om/depshardedcluster_test.go | 15 ++-- .../appdbreplicaset_controller_multi_test.go | 25 ++++--- .../appdbreplicaset_controller_test.go | 71 +++++++++--------- .../configure_authentication_test.go | 30 +++----- .../operator/authentication/ldap_test.go | 16 ++-- .../operator/authentication/oidc_test.go | 10 +-- .../operator/authentication/scramsha_test.go | 6 +- .../operator/authentication/x509_test.go | 9 +-- controllers/operator/authentication_test.go | 12 +-- .../operator/common_controller_test.go | 25 ++++--- .../construct/backup_construction_test.go | 13 ++-- .../operator/construct/construction_test.go | 41 +++++----- .../construct/database_construction_test.go | 21 +++--- .../construct/opsmanager_construction_test.go | 33 ++++---- controllers/operator/create/create_test.go | 25 ++++--- .../mongodbmultireplicaset_controller_test.go | 3 +- ...mongodbopsmanager_controller_multi_test.go | 4 +- .../mongodbopsmanager_controller_test.go | 44 +++++------ .../mongodbreplicaset_controller_test.go | 17 +++-- ...odbshardedcluster_controller_multi_test.go | 52 ++++++------- .../mongodbshardedcluster_controller_test.go | 51 ++++++------- .../mongodbstandalone_controller_test.go | 16 ++-- controllers/operator/pem_test.go | 10 +-- .../pkg/agent/agent_readiness_test.go | 23 +++--- .../agent/replica_set_port_manager_test.go | 4 +- .../pkg/util/state/statemachine_test.go | 33 +++----- .../memberwatch/clusterhealth_test.go | 19 ++--- 31 files changed, 352 insertions(+), 371 deletions(-) diff --git a/controllers/om/automation_status_test.go b/controllers/om/automation_status_test.go index 7ac7f3dcf..e857d7183 100644 --- a/controllers/om/automation_status_test.go +++ b/controllers/om/automation_status_test.go @@ -1,10 +1,10 @@ package om import ( + "go.uber.org/zap/zaptest" "testing" "github.com/stretchr/testify/assert" - "go.uber.org/zap" ) func TestCheckAutomationStatusIsGoal(t *testing.T) { @@ -113,7 +113,7 @@ func TestCheckAutomationStatusIsGoal(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - goal, msg := checkAutomationStatusIsGoal(tt.args.as, tt.args.relevantProcesses, zap.S()) + goal, msg := checkAutomationStatusIsGoal(tt.args.as, tt.args.relevantProcesses, zaptest.NewLogger(t).Sugar()) assert.Equalf(t, tt.expectedResult, goal, "checkAutomationStatusIsGoal(%v, %v)", tt.args.as, tt.args.relevantProcesses) assert.Contains(t, msg, tt.expectedMsg) }) diff --git a/controllers/om/backup_test.go b/controllers/om/backup_test.go index f2988fb7f..0121eee89 100644 --- a/controllers/om/backup_test.go +++ b/controllers/om/backup_test.go @@ -1,14 +1,13 @@ package om import ( + "go.uber.org/zap/zaptest" "testing" "github.com/google/uuid" - "github.com/stretchr/testify/assert" - "go.uber.org/zap" - "github.com/mongodb/mongodb-kubernetes/controllers/om/backup" "github.com/mongodb/mongodb-kubernetes/pkg/util" + "github.com/stretchr/testify/assert" ) // TestBackupWaitsForTermination tests that 'StopBackupIfEnabled' procedure waits for backup statuses on each stage @@ -19,7 +18,7 @@ func TestBackupWaitsForTermination(t *testing.T) { connection := NewMockedOmConnection(NewDeployment()) 
connection.EnableBackup("test", backup.ReplicaSetType, uuid.New().String()) - err := backup.StopBackupIfEnabled(connection, connection, "test", backup.ReplicaSetType, zap.S()) + err := backup.StopBackupIfEnabled(connection, connection, "test", backup.ReplicaSetType, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) connection.CheckResourcesAndBackupDeleted(t, "test") diff --git a/controllers/om/deployment/om_deployment_test.go b/controllers/om/deployment/om_deployment_test.go index 9826188aa..e5b7eceff 100644 --- a/controllers/om/deployment/om_deployment_test.go +++ b/controllers/om/deployment/om_deployment_test.go @@ -1,20 +1,17 @@ package deployment import ( + "go.uber.org/zap/zaptest" "testing" - "github.com/stretchr/testify/assert" - "go.uber.org/zap" - mdbv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" "github.com/mongodb/mongodb-kubernetes/controllers/om" "github.com/mongodb/mongodb-kubernetes/controllers/om/replicaset" "github.com/mongodb/mongodb-kubernetes/controllers/operator/mock" + "github.com/stretchr/testify/assert" ) func init() { - logger, _ := zap.NewDevelopment() - zap.ReplaceGlobals(logger) mock.InitDefaultEnvVariables() } @@ -29,7 +26,7 @@ func TestPrepareScaleDown_OpsManagerRemovedMember(t *testing.T) { // We try to prepare two members for scale down, but one of them will fail (bam-2) rsWithThreeMembers := map[string][]string{"bam": {"bam-1", "bam-2"}} - assert.NoError(t, replicaset.PrepareScaleDownFromMap(mockedOmConnection, rsWithThreeMembers, rsWithThreeMembers["bam"], zap.S())) + assert.NoError(t, replicaset.PrepareScaleDownFromMap(mockedOmConnection, rsWithThreeMembers, rsWithThreeMembers["bam"], zaptest.NewLogger(t).Sugar())) expectedDeployment := CreateFromReplicaSet("fake-mongoDBImage", false, rs) diff --git a/controllers/om/deployment_test.go b/controllers/om/deployment_test.go index 8f0808dae..a94f4f1a1 100644 --- a/controllers/om/deployment_test.go +++ b/controllers/om/deployment_test.go @@ -2,6 +2,7 @@ package om import ( "fmt" + "go.uber.org/zap/zaptest" "os" "strconv" "strings" @@ -9,23 +10,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/zap" mdbv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/automationconfig" "github.com/mongodb/mongodb-kubernetes/pkg/util" ) -func init() { - logger, _ := zap.NewDevelopment() - zap.ReplaceGlobals(logger) -} - // First time merge adds the new standalone // second invocation doesn't add new node as the existing standalone is found (by name) and the data is merged func TestMergeStandalone(t *testing.T) { d := NewDeployment() - mergeStandalone(d, createStandalone()) + mergeStandalone(t, d, createStandalone()) assert.Len(t, d.getProcesses(), 1) @@ -35,7 +30,7 @@ func TestMergeStandalone(t *testing.T) { d.getProcesses()[0]["authSchemaVersion"] = 10 d.getProcesses()[0]["featureCompatibilityVersion"] = "bla" - mergeStandalone(d, createStandalone()) + mergeStandalone(t, d, createStandalone()) assert.Len(t, d.getProcesses(), 1) @@ -52,7 +47,7 @@ func TestMergeStandalone(t *testing.T) { // Second merge performs real merge operation func TestMergeReplicaSet(t *testing.T) { d := NewDeployment() - mergeReplicaSet(d, "fooRs", createReplicaSetProcesses("fooRs")) + mergeReplicaSet(t, d, "fooRs", createReplicaSetProcesses("fooRs")) expectedRs := buildRsByProcesses("fooRs", createReplicaSetProcesses("fooRs")) assert.Len(t, d.getProcesses(), 3) @@ -71,7 +66,7 @@ func TestMergeReplicaSet(t *testing.T) { 
d.GetReplicaSets()[0].addMember(newProcess, "", automationconfig.MemberOptions{}) // "adding" some new node d.GetReplicaSets()[0].Members()[0]["arbiterOnly"] = true // changing data for first node - mergeReplicaSet(d, "fooRs", createReplicaSetProcesses("fooRs")) + mergeReplicaSet(t, d, "fooRs", createReplicaSetProcesses("fooRs")) assert.Len(t, d.getProcesses(), 3) assert.Len(t, d.GetReplicaSets(), 1) @@ -87,13 +82,13 @@ func TestMergeReplicaSet(t *testing.T) { func TestMergeReplica_ScaleDown(t *testing.T) { d := NewDeployment() - mergeReplicaSet(d, "someRs", createReplicaSetProcesses("someRs")) + mergeReplicaSet(t, d, "someRs", createReplicaSetProcesses("someRs")) assert.Len(t, d.getProcesses(), 3) assert.Len(t, d.GetReplicaSets()[0].Members(), 3) // "scale down" scaledDownRsProcesses := createReplicaSetProcesses("someRs")[0:2] - mergeReplicaSet(d, "someRs", scaledDownRsProcesses) + mergeReplicaSet(t, d, "someRs", scaledDownRsProcesses) assert.Len(t, d.getProcesses(), 2) assert.Len(t, d.GetReplicaSets()[0].Members(), 2) @@ -110,8 +105,8 @@ func TestMergeReplica_ScaleDown(t *testing.T) { func TestMergeReplicaSet_MergeFirstProcess(t *testing.T) { d := NewDeployment() - mergeReplicaSet(d, "fooRs", createReplicaSetProcesses("fooRs")) - mergeReplicaSet(d, "anotherRs", createReplicaSetProcesses("anotherRs")) + mergeReplicaSet(t, d, "fooRs", createReplicaSetProcesses("fooRs")) + mergeReplicaSet(t, d, "anotherRs", createReplicaSetProcesses("anotherRs")) // Now the first process (and usually all others in practice) are changed by OM d.getProcesses()[0].EnsureNetConfig()["MaxIncomingConnections"] = 20 @@ -120,7 +115,7 @@ func TestMergeReplicaSet_MergeFirstProcess(t *testing.T) { d.getProcesses()[0]["kerberos"] = map[string]string{"keytab": "123456"} // Now we merged the scaled up RS - mergeReplicaSet(d, "fooRs", createReplicaSetProcessesCount(5, "fooRs")) + mergeReplicaSet(t, d, "fooRs", createReplicaSetProcessesCount(5, "fooRs")) assert.Len(t, d.getProcesses(), 8) assert.Len(t, d.GetReplicaSets(), 2) @@ -173,14 +168,14 @@ func TestMergeDeployment_BigReplicaset(t *testing.T) { rs := buildRsByProcesses("my-rs", createReplicaSetProcessesCount(8, "my-rs")) checkNumberOfVotingMembers(t, rs, 8, 8) - omDeployment.MergeReplicaSet(rs, nil, nil, zap.S()) + omDeployment.MergeReplicaSet(rs, nil, nil, zaptest.NewLogger(t).Sugar()) checkNumberOfVotingMembers(t, rs, 7, 8) // Now OM user "has changed" votes for some of the members - this must stay the same after merge omDeployment.GetReplicaSets()[0].Members()[2].setVotes(0).setPriority(0) omDeployment.GetReplicaSets()[0].Members()[4].setVotes(0).setPriority(0) - omDeployment.MergeReplicaSet(rs, nil, nil, zap.S()) + omDeployment.MergeReplicaSet(rs, nil, nil, zaptest.NewLogger(t).Sugar()) checkNumberOfVotingMembers(t, rs, 5, 8) // Now operator scales up by one - the "OM votes" should not suffer, but total number of votes will increase by one @@ -188,7 +183,7 @@ func TestMergeDeployment_BigReplicaset(t *testing.T) { rsToMerge.Rs.Members()[2].setVotes(0).setPriority(0) rsToMerge.Rs.Members()[4].setVotes(0).setPriority(0) rsToMerge.Rs.Members()[7].setVotes(0).setPriority(0) - omDeployment.MergeReplicaSet(rsToMerge, nil, nil, zap.S()) + omDeployment.MergeReplicaSet(rsToMerge, nil, nil, zaptest.NewLogger(t).Sugar()) checkNumberOfVotingMembers(t, rs, 6, 9) // Now operator scales up by two - the "OM votes" should not suffer, but total number of votes will increase by one @@ -197,7 +192,7 @@ func TestMergeDeployment_BigReplicaset(t *testing.T) { 
rsToMerge.Rs.Members()[2].setVotes(0).setPriority(0) rsToMerge.Rs.Members()[4].setVotes(0).setPriority(0) - omDeployment.MergeReplicaSet(rsToMerge, nil, nil, zap.S()) + omDeployment.MergeReplicaSet(rsToMerge, nil, nil, zaptest.NewLogger(t).Sugar()) checkNumberOfVotingMembers(t, rs, 7, 11) assert.Equal(t, 0, omDeployment.GetReplicaSets()[0].Members()[2].Votes()) assert.Equal(t, 0, omDeployment.GetReplicaSets()[0].Members()[4].Votes()) @@ -209,11 +204,11 @@ func TestGetAllProcessNames_MergedReplicaSetsAndShardedClusters(t *testing.T) { d := NewDeployment() rs0 := buildRsByProcesses("my-rs", createReplicaSetProcessesCount(3, "my-rs")) - d.MergeReplicaSet(rs0, nil, nil, zap.S()) + d.MergeReplicaSet(rs0, nil, nil, zaptest.NewLogger(t).Sugar()) assert.Equal(t, []string{"my-rs-0", "my-rs-1", "my-rs-2"}, d.GetAllProcessNames()) rs1 := buildRsByProcesses("another-rs", createReplicaSetProcessesCount(5, "another-rs")) - d.MergeReplicaSet(rs1, nil, nil, zap.S()) + d.MergeReplicaSet(rs1, nil, nil, zaptest.NewLogger(t).Sugar()) assert.Equal( t, @@ -304,14 +299,14 @@ func TestDeploymentCountIsCorrect(t *testing.T) { d := NewDeployment() rs0 := buildRsByProcesses("my-rs", createReplicaSetProcessesCount(3, "my-rs")) - d.MergeReplicaSet(rs0, nil, nil, zap.S()) + d.MergeReplicaSet(rs0, nil, nil, zaptest.NewLogger(t).Sugar()) excessProcesses := d.GetNumberOfExcessProcesses("my-rs") // There's only one resource in this deployment assert.Equal(t, 0, excessProcesses) rs1 := buildRsByProcesses("my-rs-second", createReplicaSetProcessesCount(3, "my-rs-second")) - d.MergeReplicaSet(rs1, nil, nil, zap.S()) + d.MergeReplicaSet(rs1, nil, nil, zaptest.NewLogger(t).Sugar()) excessProcesses = d.GetNumberOfExcessProcesses("my-rs") // another replica set was added to the deployment. 
3 processes do not belong to this one @@ -402,7 +397,7 @@ func TestIsShardOf(t *testing.T) { func TestProcessBelongsToReplicaSet(t *testing.T) { d := NewDeployment() rs0 := buildRsByProcesses("my-rs", createReplicaSetProcessesCount(3, "my-rs")) - d.MergeReplicaSet(rs0, nil, nil, zap.S()) + d.MergeReplicaSet(rs0, nil, nil, zaptest.NewLogger(t).Sugar()) assert.True(t, d.ProcessBelongsToResource("my-rs-0", "my-rs")) assert.True(t, d.ProcessBelongsToResource("my-rs-1", "my-rs")) @@ -468,7 +463,7 @@ func TestDeploymentMinimumMajorVersion(t *testing.T) { d0 := NewDeployment() rs0Processes := createReplicaSetProcessesCount(3, "my-rs") rs0 := buildRsByProcesses("my-rs", rs0Processes) - d0.MergeReplicaSet(rs0, nil, nil, zap.S()) + d0.MergeReplicaSet(rs0, nil, nil, zaptest.NewLogger(t).Sugar()) assert.Equal(t, uint64(3), d0.MinimumMajorVersion()) @@ -476,14 +471,14 @@ func TestDeploymentMinimumMajorVersion(t *testing.T) { rs1Processes := createReplicaSetProcessesCount(3, "my-rs") rs1Processes[0]["featureCompatibilityVersion"] = "2.4" rs1 := buildRsByProcesses("my-rs", rs1Processes) - d1.MergeReplicaSet(rs1, nil, nil, zap.S()) + d1.MergeReplicaSet(rs1, nil, nil, zaptest.NewLogger(t).Sugar()) assert.Equal(t, uint64(2), d1.MinimumMajorVersion()) d2 := NewDeployment() rs2Processes := createReplicaSetProcessesCountEnt(3, "my-rs") rs2 := buildRsByProcesses("my-rs", rs2Processes) - d2.MergeReplicaSet(rs2, nil, nil, zap.S()) + d2.MergeReplicaSet(rs2, nil, nil, zaptest.NewLogger(t).Sugar()) assert.Equal(t, uint64(3), d2.MinimumMajorVersion()) } @@ -507,8 +502,8 @@ func TestAddMonitoring(t *testing.T) { d := NewDeployment() rs0 := buildRsByProcesses("my-rs", createReplicaSetProcessesCount(3, "my-rs")) - d.MergeReplicaSet(rs0, nil, nil, zap.S()) - d.AddMonitoring(zap.S(), false, util.CAFilePathInContainer) + d.MergeReplicaSet(rs0, nil, nil, zaptest.NewLogger(t).Sugar()) + d.AddMonitoring(zaptest.NewLogger(t).Sugar(), false, util.CAFilePathInContainer) expectedMonitoringVersions := []interface{}{ map[string]interface{}{"hostname": "my-rs-0.some.host", "name": MonitoringAgentDefaultVersion}, @@ -518,7 +513,7 @@ func TestAddMonitoring(t *testing.T) { assert.Equal(t, expectedMonitoringVersions, d.getMonitoringVersions()) // adding again - nothing changes - d.AddMonitoring(zap.S(), false, util.CAFilePathInContainer) + d.AddMonitoring(zaptest.NewLogger(t).Sugar(), false, util.CAFilePathInContainer) assert.Equal(t, expectedMonitoringVersions, d.getMonitoringVersions()) } @@ -526,8 +521,8 @@ func TestAddMonitoringTls(t *testing.T) { d := NewDeployment() rs0 := buildRsByProcesses("my-rs", createReplicaSetProcessesCount(3, "my-rs")) - d.MergeReplicaSet(rs0, nil, nil, zap.S()) - d.AddMonitoring(zap.S(), true, util.CAFilePathInContainer) + d.MergeReplicaSet(rs0, nil, nil, zaptest.NewLogger(t).Sugar()) + d.AddMonitoring(zaptest.NewLogger(t).Sugar(), true, util.CAFilePathInContainer) expectedAdditionalParams := map[string]string{ "useSslForAllConnections": "true", @@ -542,7 +537,7 @@ func TestAddMonitoringTls(t *testing.T) { assert.Equal(t, expectedMonitoringVersions, d.getMonitoringVersions()) // adding again - nothing changes - d.AddMonitoring(zap.S(), false, util.CAFilePathInContainer) + d.AddMonitoring(zaptest.NewLogger(t).Sugar(), false, util.CAFilePathInContainer) assert.Equal(t, expectedMonitoringVersions, d.getMonitoringVersions()) } @@ -550,8 +545,8 @@ func TestAddBackup(t *testing.T) { d := NewDeployment() rs0 := buildRsByProcesses("my-rs", createReplicaSetProcessesCount(3, "my-rs")) - d.MergeReplicaSet(rs0, nil, 
nil, zap.S()) - d.addBackup(zap.S()) + d.MergeReplicaSet(rs0, nil, nil, zaptest.NewLogger(t).Sugar()) + d.addBackup(zaptest.NewLogger(t).Sugar()) expectedBackupVersions := []interface{}{ map[string]interface{}{"hostname": "my-rs-0.some.host", "name": BackupAgentDefaultVersion}, @@ -561,7 +556,7 @@ func TestAddBackup(t *testing.T) { assert.Equal(t, expectedBackupVersions, d.getBackupVersions()) // adding again - nothing changes - d.addBackup(zap.S()) + d.addBackup(zaptest.NewLogger(t).Sugar()) assert.Equal(t, expectedBackupVersions, d.getBackupVersions()) } @@ -813,14 +808,14 @@ func createConfigSrvRsCount(count int, name string, check bool) ReplicaSetWithPr return replicaSetWithProcesses } -func mergeReplicaSet(d Deployment, rsName string, rsProcesses []Process) ReplicaSetWithProcesses { +func mergeReplicaSet(t *testing.T, d Deployment, rsName string, rsProcesses []Process) ReplicaSetWithProcesses { rs := buildRsByProcesses(rsName, rsProcesses) - d.MergeReplicaSet(rs, nil, nil, zap.S()) + d.MergeReplicaSet(rs, nil, nil, zaptest.NewLogger(t).Sugar()) return rs } -func mergeStandalone(d Deployment, s Process) Process { - d.MergeStandalone(s, nil, nil, zap.S()) +func mergeStandalone(t *testing.T, d Deployment, s Process) Process { + d.MergeStandalone(s, nil, nil, zaptest.NewLogger(t).Sugar()) return s } diff --git a/controllers/om/depshardedcluster_test.go b/controllers/om/depshardedcluster_test.go index e9f025dd0..5eef78b6c 100644 --- a/controllers/om/depshardedcluster_test.go +++ b/controllers/om/depshardedcluster_test.go @@ -1,15 +1,14 @@ package om import ( + "go.uber.org/zap/zaptest" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - mdbv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/automationconfig" "github.com/mongodb/mongodb-kubernetes/pkg/util" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // TestMergeShardedClusterNoExisting that just merges the Sharded cluster into an empty deployment @@ -164,7 +163,7 @@ func TestMergeShardedCluster_ShardedClusterModified(t *testing.T) { (*d.getShardedClusterByName("cluster")).setShards(d.getShardedClusterByName("cluster").shards()[0:2]) (*d.getShardedClusterByName("cluster")).setShards(append(d.getShardedClusterByName("cluster").shards(), newShard("fakeShard"))) - mergeReplicaSet(d, "fakeShard", createReplicaSetProcesses("fakeShard")) + mergeReplicaSet(t, d, "fakeShard", createReplicaSetProcesses("fakeShard")) require.Len(t, d.GetReplicaSets(), 5) @@ -509,11 +508,11 @@ func TestRemoveShardedClusterByName(t *testing.T) { _, err = d.MergeShardedCluster(mergeOpts) assert.NoError(t, err) - mergeStandalone(d, createStandalone()) + mergeStandalone(t, d, createStandalone()) - rs := mergeReplicaSet(d, "fooRs", createReplicaSetProcesses("fooRs")) + rs := mergeReplicaSet(t, d, "fooRs", createReplicaSetProcesses("fooRs")) - err = d.RemoveShardedClusterByName("otherCluster", zap.S()) + err = d.RemoveShardedClusterByName("otherCluster", zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) // First check that all other entities stay untouched diff --git a/controllers/operator/appdbreplicaset_controller_multi_test.go b/controllers/operator/appdbreplicaset_controller_multi_test.go index cedc58dd7..bc2d7513b 100644 --- a/controllers/operator/appdbreplicaset_controller_multi_test.go +++ b/controllers/operator/appdbreplicaset_controller_multi_test.go @@ -37,6 +37,7 @@ import ( 
"github.com/mongodb/mongodb-kubernetes/pkg/kube" "github.com/mongodb/mongodb-kubernetes/pkg/multicluster" "github.com/mongodb/mongodb-kubernetes/pkg/util" + "go.uber.org/zap/zaptest" ) const opsManagerUserPassword = "MBPYfkAj5ZM0l9uw6C7ggw" //nolint @@ -79,7 +80,7 @@ func TestAppDB_MultiCluster(t *testing.T) { tlsCertSecretName, tlsSecretPemHash := createAppDBTLSCert(ctx, t, kubeClient, appdb) pemSecretName := tlsCertSecretName + "-pem" - reconciler, err := newAppDbMultiReconciler(ctx, kubeClient, opsManager, memberClusterMap, zap.S(), omConnectionFactory.GetConnectionFunc) + reconciler, err := newAppDbMultiReconciler(ctx, kubeClient, opsManager, memberClusterMap, zaptest.NewLogger(t).Sugar(), omConnectionFactory.GetConnectionFunc) require.NoError(t, err) err = createOpsManagerUserPasswordSecret(ctx, kubeClient, opsManager, opsManagerUserPassword) @@ -146,7 +147,7 @@ func agentAPIKeySecretName(projectID string) string { func TestAppDB_MultiCluster_AutomationConfig(t *testing.T) { ctx := context.Background() - log := zap.S() + log := zaptest.NewLogger(t).Sugar() centralClusterName := multicluster.LegacyCentralClusterName memberClusterName := "member-cluster-1" memberClusterName2 := "member-cluster-2" @@ -492,7 +493,7 @@ func makeClusterSpecList(clusters ...string) mdbv1.ClusterSpecList { func TestAppDB_MultiCluster_ClusterMapping(t *testing.T) { ctx := context.Background() - log := zap.S() + log := zaptest.NewLogger(t).Sugar() centralClusterName := multicluster.LegacyCentralClusterName memberClusterName1 := "member-cluster-1" memberClusterName2 := "member-cluster-2" @@ -613,7 +614,7 @@ func TestAppDB_MultiCluster_ClusterMapping(t *testing.T) { func TestAppDB_MultiCluster_ClusterMappingMigrationToDeploymentState(t *testing.T) { ctx := context.Background() - log := zap.S() + log := zaptest.NewLogger(t).Sugar() centralClusterName := multicluster.LegacyCentralClusterName memberClusterName1 := "member-cluster-1" memberClusterName2 := "member-cluster-2" @@ -673,7 +674,7 @@ func TestAppDB_MultiCluster_ClusterMappingMigrationToDeploymentState(t *testing. 
// This test ensures that we update legacy Config Maps on top of the new Deployment State func TestAppDB_MultiCluster_KeepUpdatingLegacyState(t *testing.T) { ctx := context.Background() - log := zap.S() + log := zaptest.NewLogger(t).Sugar() centralClusterName := multicluster.LegacyCentralClusterName memberClusterName1 := "member-cluster-1" memberClusterName2 := "member-cluster-2" @@ -850,7 +851,7 @@ func createAppDBTLSCert(ctx context.Context, t *testing.T, k8sClient client.Clie err := k8sClient.Create(ctx, tlsSecret) require.NoError(t, err) - pemHash := enterprisepem.ReadHashFromData(secrets.DataToStringData(tlsSecret.Data), zap.S()) + pemHash := enterprisepem.ReadHashFromData(secrets.DataToStringData(tlsSecret.Data), zaptest.NewLogger(t).Sugar()) require.NotEmpty(t, pemHash) return tlsSecret.Name, pemHash @@ -868,7 +869,7 @@ func TestAppDB_MultiCluster_ReconcilerFailsWhenThereIsNoClusterListConfigured(t SetAppDBTopology(mdbv1.ClusterTopologyMultiCluster) opsManager := builder.Build() kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(opsManager) - _, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zap.S()) + _, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) assert.Error(t, err) } @@ -898,7 +899,7 @@ func TestAppDBMultiClusterRemoveResources(t *testing.T) { reconciler, _, _ := defaultTestOmReconciler(ctx, t, nil, "", "", opsManager, memberClusterMap, omConnectionFactory) // create opsmanager reconciler - appDBReconciler, _ := newAppDbMultiReconciler(ctx, kubeClient, opsManager, memberClusterMap, zap.S(), omConnectionFactory.GetConnectionFunc) + appDBReconciler, _ := newAppDbMultiReconciler(ctx, kubeClient, opsManager, memberClusterMap, zaptest.NewLogger(t).Sugar(), omConnectionFactory.GetConnectionFunc) // initially requeued as monitoring needs to be configured _, err := appDBReconciler.ReconcileAppDB(ctx, opsManager) @@ -912,7 +913,7 @@ func TestAppDBMultiClusterRemoveResources(t *testing.T) { } // delete the OM resource - reconciler.OnDelete(ctx, opsManager, zap.S()) + reconciler.OnDelete(ctx, opsManager, zaptest.NewLogger(t).Sugar()) assert.Zero(t, len(reconciler.resourceWatcher.GetWatchedResources())) // assert STS objects in member cluster @@ -925,7 +926,7 @@ func TestAppDBMultiClusterRemoveResources(t *testing.T) { func TestAppDBMultiClusterMonitoringHostnames(t *testing.T) { ctx := context.Background() - log := zap.S() + log := zaptest.NewLogger(t).Sugar() centralClusterName := multicluster.LegacyCentralClusterName memberClusterName := "member-cluster-1" memberClusterName2 := "member-cluster-2" @@ -1020,7 +1021,7 @@ func TestAppDBMultiClusterMonitoringHostnames(t *testing.T) { func TestAppDBMultiClusterTryConfigureMonitoring(t *testing.T) { ctx := context.Background() - log := zap.S() + log := zaptest.NewLogger(t).Sugar() centralClusterName := multicluster.LegacyCentralClusterName memberClusterName1 := "member-cluster-1" memberClusterName2 := "member-cluster-2" @@ -1746,7 +1747,7 @@ func TestAppDBMultiClusterServiceCreation_WithExternalName(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(opsManager) memberClusterMap := getFakeMultiClusterMapWithClusters(memberClusters, omConnectionFactory) - reconciler, err := newAppDbMultiReconciler(ctx, kubeClient, opsManager, memberClusterMap, zap.S(), omConnectionFactory.GetConnectionFunc) + reconciler, err := newAppDbMultiReconciler(ctx, kubeClient, opsManager, memberClusterMap, 
zaptest.NewLogger(t).Sugar(), omConnectionFactory.GetConnectionFunc) require.NoError(t, err) err = createOpsManagerUserPasswordSecret(ctx, kubeClient, opsManager, opsManagerUserPassword) diff --git a/controllers/operator/appdbreplicaset_controller_test.go b/controllers/operator/appdbreplicaset_controller_test.go index 8559cf824..b813e9ab5 100644 --- a/controllers/operator/appdbreplicaset_controller_test.go +++ b/controllers/operator/appdbreplicaset_controller_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "go.uber.org/zap/zaptest" "os" "path/filepath" "strings" @@ -130,7 +131,7 @@ func TestAutomationConfig_IsCreatedInSecret(t *testing.T) { opsManager := builder.Build() appdb := opsManager.Spec.AppDB kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(opsManager) - reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zap.S()) + reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) err = createOpsManagerUserPasswordSecret(ctx, kubeClient, opsManager, "MBPYfkAj5ZM0l9uw6C7ggw") @@ -152,18 +153,18 @@ func TestPublishAutomationConfigCreate(t *testing.T) { appdb := opsManager.Spec.AppDB omConnectionFactory := om.NewDefaultCachedOMConnectionFactory() kubeClient := mock.NewEmptyFakeClientWithInterceptor(omConnectionFactory) - reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zap.S()) + reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) memberCluster := multicluster.GetLegacyCentralMemberCluster(opsManager.Spec.Replicas, 0, reconciler.client, reconciler.SecretClient) - automationConfig, err := buildAutomationConfigForAppDb(ctx, builder, kubeClient, omConnectionFactory.GetConnectionFunc, automation, zap.S()) + automationConfig, err := buildAutomationConfigForAppDb(ctx, builder, kubeClient, omConnectionFactory.GetConnectionFunc, automation, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) version, err := reconciler.publishAutomationConfig(ctx, opsManager, automationConfig, appdb.AutomationConfigSecretName(), memberCluster.SecretClient) assert.NoError(t, err) assert.Equal(t, 1, version) - monitoringAutomationConfig, err := buildAutomationConfigForAppDb(ctx, builder, kubeClient, omConnectionFactory.GetConnectionFunc, monitoring, zap.S()) + monitoringAutomationConfig, err := buildAutomationConfigForAppDb(ctx, builder, kubeClient, omConnectionFactory.GetConnectionFunc, monitoring, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) version, err = reconciler.publishAutomationConfig(ctx, opsManager, monitoringAutomationConfig, appdb.MonitoringAutomationConfigSecretName(), memberCluster.SecretClient) assert.NoError(t, err) @@ -217,7 +218,7 @@ func TestPublishAutomationConfig_Update(t *testing.T) { opsManager := builder.Build() appdb := opsManager.Spec.AppDB kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(opsManager) - reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zap.S()) + reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) // create @@ -276,9 +277,9 @@ func TestBuildAppDbAutomationConfig(t *testing.T) { err := createOpsManagerUserPasswordSecret(ctx, kubeClient, om, "omPass") 
assert.NoError(t, err) - automationConfig, err := buildAutomationConfigForAppDb(ctx, builder, kubeClient, omConnectionFactory.GetConnectionFunc, automation, zap.S()) + automationConfig, err := buildAutomationConfigForAppDb(ctx, builder, kubeClient, omConnectionFactory.GetConnectionFunc, automation, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) - monitoringAutomationConfig, err := buildAutomationConfigForAppDb(ctx, builder, kubeClient, omConnectionFactory.GetConnectionFunc, monitoring, zap.S()) + monitoringAutomationConfig, err := buildAutomationConfigForAppDb(ctx, builder, kubeClient, omConnectionFactory.GetConnectionFunc, monitoring, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) // processes assert.Len(t, automationConfig.Processes, 2) @@ -320,14 +321,14 @@ func TestRegisterAppDBHostsWithProject(t *testing.T) { return mock.GetFakeClientInterceptorGetFunc(omConnectionFactory, true, false)(ctx, client, key, obj, opts...) }, }) - reconciler, err := newAppDbReconciler(ctx, fakeClient, opsManager, omConnectionFactory.GetConnectionFunc, zap.S()) + reconciler, err := newAppDbReconciler(ctx, fakeClient, opsManager, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) t.Run("Ensure all hosts are added", func(t *testing.T) { _, err = reconciler.ReconcileAppDB(ctx, opsManager) hostnames := reconciler.getCurrentStatefulsetHostnames(opsManager) - err = reconciler.registerAppDBHostsWithProject(hostnames, omConnectionFactory.GetConnection(), "password", zap.S()) + err = reconciler.registerAppDBHostsWithProject(hostnames, omConnectionFactory.GetConnection(), "password", zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) hosts, _ := omConnectionFactory.GetConnection().(*om.MockedOmConnection).GetHosts() @@ -339,7 +340,7 @@ func TestRegisterAppDBHostsWithProject(t *testing.T) { _, err = reconciler.ReconcileAppDB(ctx, opsManager) hostnames := reconciler.getCurrentStatefulsetHostnames(opsManager) - err = reconciler.registerAppDBHostsWithProject(hostnames, omConnectionFactory.GetConnection(), "password", zap.S()) + err = reconciler.registerAppDBHostsWithProject(hostnames, omConnectionFactory.GetConnection(), "password", zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) hosts, _ := omConnectionFactory.GetConnection().GetHosts() @@ -354,11 +355,11 @@ func TestEnsureAppDbAgentApiKey(t *testing.T) { // we need to pre-initialize connection as we don't call full reconciler in this test and connection is never created by calling connection factory func omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(om.NewDeployment())) fakeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory) - reconciler, err := newAppDbReconciler(ctx, fakeClient, opsManager, omConnectionFactory.GetConnectionFunc, zap.S()) + reconciler, err := newAppDbReconciler(ctx, fakeClient, opsManager, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) omConnectionFactory.GetConnection().(*om.MockedOmConnection).AgentAPIKey = "my-api-key" - _, err = reconciler.ensureAppDbAgentApiKey(ctx, opsManager, omConnectionFactory.GetConnection(), omConnectionFactory.GetConnection().GroupID(), zap.S()) + _, err = reconciler.ensureAppDbAgentApiKey(ctx, opsManager, omConnectionFactory.GetConnection(), omConnectionFactory.GetConnection().GroupID(), zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) secretName := agents.ApiKeySecretName(omConnectionFactory.GetConnection().GroupID()) @@ 
-373,18 +374,18 @@ func TestTryConfigureMonitoringInOpsManager(t *testing.T) { opsManager := builder.Build() kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() appdbScaler := scalers.GetAppDBScaler(opsManager, multicluster.LegacyCentralClusterName, 0, nil) - reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zap.S()) + reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) // attempt configuring monitoring when there is no api key secret - podVars, err := reconciler.tryConfigureMonitoringInOpsManager(ctx, opsManager, "password", zap.S()) + podVars, err := reconciler.tryConfigureMonitoringInOpsManager(ctx, opsManager, "password", zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.Empty(t, podVars.ProjectID) assert.Empty(t, podVars.User) opsManager.Spec.AppDB.Members = 5 - appDbSts, err := construct.AppDbStatefulSet(*opsManager, &podVars, construct.AppDBStatefulSetOptions{}, appdbScaler, appsv1.OnDeleteStatefulSetStrategyType, zap.S()) + appDbSts, err := construct.AppDbStatefulSet(*opsManager, &podVars, construct.AppDBStatefulSetOptions{}, appdbScaler, appsv1.OnDeleteStatefulSetStrategyType, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.Nil(t, findVolumeByName(appDbSts.Spec.Template.Spec.Volumes, construct.AgentAPIKeyVolumeName)) @@ -408,7 +409,7 @@ func TestTryConfigureMonitoringInOpsManager(t *testing.T) { assert.NoError(t, err) // once the secret exists, monitoring should be fully configured - podVars, err = reconciler.tryConfigureMonitoringInOpsManager(ctx, opsManager, "password", zap.S()) + podVars, err = reconciler.tryConfigureMonitoringInOpsManager(ctx, opsManager, "password", zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.Equal(t, om.TestGroupID, podVars.ProjectID) @@ -424,7 +425,7 @@ func TestTryConfigureMonitoringInOpsManager(t *testing.T) { assertExpectedHostnamesAndPreferred(t, omConnectionFactory.GetConnection().(*om.MockedOmConnection), expectedHostnames) - appDbSts, err = construct.AppDbStatefulSet(*opsManager, &podVars, construct.AppDBStatefulSetOptions{}, appdbScaler, appsv1.OnDeleteStatefulSetStrategyType, zap.S()) + appDbSts, err = construct.AppDbStatefulSet(*opsManager, &podVars, construct.AppDBStatefulSetOptions{}, appdbScaler, appsv1.OnDeleteStatefulSetStrategyType, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.NotNil(t, findVolumeByName(appDbSts.Spec.Template.Spec.Volumes, construct.AgentAPIKeyVolumeName)) @@ -459,7 +460,7 @@ func TestTryConfigureMonitoringInOpsManagerWithCustomTemplate(t *testing.T) { t.Run("do not override images while activating monitoring", func(t *testing.T) { podVars := env.PodEnvVars{ProjectID: "something"} - appDbSts, err := construct.AppDbStatefulSet(*opsManager, &podVars, construct.AppDBStatefulSetOptions{}, appdbScaler, appsv1.OnDeleteStatefulSetStrategyType, zap.S()) + appDbSts, err := construct.AppDbStatefulSet(*opsManager, &podVars, construct.AppDBStatefulSetOptions{}, appdbScaler, appsv1.OnDeleteStatefulSetStrategyType, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.NotNil(t, appDbSts) @@ -485,7 +486,7 @@ func TestTryConfigureMonitoringInOpsManagerWithCustomTemplate(t *testing.T) { t.Run("do not override images, but remove monitoring if not activated", func(t *testing.T) { podVars := env.PodEnvVars{} - appDbSts, err := construct.AppDbStatefulSet(*opsManager, &podVars, construct.AppDBStatefulSetOptions{}, 
appdbScaler, appsv1.OnDeleteStatefulSetStrategyType, zap.S()) + appDbSts, err := construct.AppDbStatefulSet(*opsManager, &podVars, construct.AppDBStatefulSetOptions{}, appdbScaler, appsv1.OnDeleteStatefulSetStrategyType, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.NotNil(t, appDbSts) @@ -518,18 +519,18 @@ func TestTryConfigureMonitoringInOpsManagerWithExternalDomains(t *testing.T) { }).Build() kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() appdbScaler := scalers.GetAppDBScaler(opsManager, multicluster.LegacyCentralClusterName, 0, nil) - reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zap.S()) + reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) // attempt configuring monitoring when there is no api key secret - podVars, err := reconciler.tryConfigureMonitoringInOpsManager(ctx, opsManager, "password", zap.S()) + podVars, err := reconciler.tryConfigureMonitoringInOpsManager(ctx, opsManager, "password", zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.Empty(t, podVars.ProjectID) assert.Empty(t, podVars.User) opsManager.Spec.AppDB.Members = 5 - appDbSts, err := construct.AppDbStatefulSet(*opsManager, &podVars, construct.AppDBStatefulSetOptions{}, appdbScaler, appsv1.OnDeleteStatefulSetStrategyType, zap.S()) + appDbSts, err := construct.AppDbStatefulSet(*opsManager, &podVars, construct.AppDBStatefulSetOptions{}, appdbScaler, appsv1.OnDeleteStatefulSetStrategyType, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.Nil(t, findVolumeByName(appDbSts.Spec.Template.Spec.Volumes, construct.AgentAPIKeyVolumeName)) @@ -553,7 +554,7 @@ func TestTryConfigureMonitoringInOpsManagerWithExternalDomains(t *testing.T) { assert.NoError(t, err) // once the secret exists, monitoring should be fully configured - podVars, err = reconciler.tryConfigureMonitoringInOpsManager(ctx, opsManager, "password", zap.S()) + podVars, err = reconciler.tryConfigureMonitoringInOpsManager(ctx, opsManager, "password", zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.Equal(t, om.TestGroupID, podVars.ProjectID) @@ -569,7 +570,7 @@ func TestTryConfigureMonitoringInOpsManagerWithExternalDomains(t *testing.T) { assertExpectedHostnamesAndPreferred(t, omConnectionFactory.GetConnection().(*om.MockedOmConnection), expectedHostnames) - appDbSts, err = construct.AppDbStatefulSet(*opsManager, &podVars, construct.AppDBStatefulSetOptions{}, appdbScaler, appsv1.OnDeleteStatefulSetStrategyType, zap.S()) + appDbSts, err = construct.AppDbStatefulSet(*opsManager, &podVars, construct.AppDBStatefulSetOptions{}, appdbScaler, appsv1.OnDeleteStatefulSetStrategyType, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.NotNil(t, findVolumeByName(appDbSts.Spec.Template.Spec.Volumes, construct.AgentAPIKeyVolumeName)) @@ -1045,7 +1046,7 @@ func TestAppDBServiceCreation_WithExternalName(t *testing.T) { opsManager := opsManagerBuilder.Build() kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() - reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zap.S()) + reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) err = createOpsManagerUserPasswordSecret(ctx, kubeClient, opsManager, opsManagerUserPassword) @@ -1157,7 +1158,7 @@ func 
TestAppDBSkipsReconciliation_IfAnyProcessesAreDisabled(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() err := createOpsManagerUserPasswordSecret(ctx, kubeClient, opsManager, "my-password") assert.NoError(t, err) - reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zap.S()) + reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) memberCluster := multicluster.GetLegacyCentralMemberCluster(opsManager.Spec.Replicas, 0, reconciler.client, reconciler.SecretClient) @@ -1165,7 +1166,7 @@ func TestAppDBSkipsReconciliation_IfAnyProcessesAreDisabled(t *testing.T) { // if the automation is not there, we will always want to reconcile. Otherwise, we may not reconcile // based on whether or not there are disabled processes. if createAutomationConfig { - ac, err := reconciler.buildAppDbAutomationConfig(ctx, opsManager, automation, UnusedPrometheusConfiguration, multicluster.LegacyCentralClusterName, zap.S()) + ac, err := reconciler.buildAppDbAutomationConfig(ctx, opsManager, automation, UnusedPrometheusConfiguration, multicluster.LegacyCentralClusterName, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) _, err = reconciler.publishAutomationConfig(ctx, opsManager, ac, opsManager.Spec.AppDB.AutomationConfigSecretName(), memberCluster.SecretClient) @@ -1193,7 +1194,7 @@ func TestAppDBSkipsReconciliation_IfAnyProcessesAreDisabled(t *testing.T) { }, }).Build() - shouldReconcile, err := reconciler.shouldReconcileAppDB(ctx, opsManager, zap.S()) + shouldReconcile, err := reconciler.shouldReconcileAppDB(ctx, opsManager, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.True(t, shouldReconcile) }) @@ -1215,7 +1216,7 @@ func TestAppDBSkipsReconciliation_IfAnyProcessesAreDisabled(t *testing.T) { reconciler := createReconcilerWithAllRequiredSecrets(opsManager, true) - shouldReconcile, err := reconciler.shouldReconcileAppDB(ctx, opsManager, zap.S()) + shouldReconcile, err := reconciler.shouldReconcileAppDB(ctx, opsManager, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.False(t, shouldReconcile) }) @@ -1234,7 +1235,7 @@ func TestAppDBSkipsReconciliation_IfAnyProcessesAreDisabled(t *testing.T) { reconciler := createReconcilerWithAllRequiredSecrets(opsManager, false) - shouldReconcile, err := reconciler.shouldReconcileAppDB(ctx, opsManager, zap.S()) + shouldReconcile, err := reconciler.shouldReconcileAppDB(ctx, opsManager, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.True(t, shouldReconcile) }) @@ -1255,7 +1256,7 @@ func TestAppDBSkipsReconciliation_IfAnyProcessesAreDisabled(t *testing.T) { opsManager = DefaultOpsManagerBuilder().SetName(omName).Build() - shouldReconcile, err := reconciler.shouldReconcileAppDB(ctx, opsManager, zap.S()) + shouldReconcile, err := reconciler.shouldReconcileAppDB(ctx, opsManager, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.True(t, shouldReconcile) }) @@ -1333,11 +1334,11 @@ func buildAutomationConfigForAppDb(ctx context.Context, builder *omv1.OpsManager // Ensure the password exists for the Ops Manager User. The Ops Manager controller will have ensured this. // We are ignoring this err on purpose since the secret might already exist. 
_ = createOpsManagerUserPasswordSecret(ctx, kubeClient, opsManager, "my-password") - reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactoryFunc, zap.S()) + reconciler, err := newAppDbReconciler(ctx, kubeClient, opsManager, omConnectionFactoryFunc, log) if err != nil { return automationconfig.AutomationConfig{}, err } - return reconciler.buildAppDbAutomationConfig(ctx, opsManager, acType, UnusedPrometheusConfiguration, multicluster.LegacyCentralClusterName, zap.S()) + return reconciler.buildAppDbAutomationConfig(ctx, opsManager, acType, UnusedPrometheusConfiguration, multicluster.LegacyCentralClusterName, log) } func checkDeploymentEqualToPublished(t *testing.T, expected automationconfig.AutomationConfig, s *corev1.Secret) { @@ -1353,7 +1354,7 @@ func checkDeploymentEqualToPublished(t *testing.T, expected automationconfig.Aut func newAppDbReconciler(ctx context.Context, c client.Client, opsManager *omv1.MongoDBOpsManager, omConnectionFactoryFunc om.ConnectionFactory, log *zap.SugaredLogger) (*ReconcileAppDbReplicaSet, error) { commonController := NewReconcileCommonController(ctx, c) - return NewAppDBReplicaSetReconciler(ctx, nil, "", opsManager.Spec.AppDB, commonController, omConnectionFactoryFunc, opsManager.Annotations, nil, zap.S()) + return NewAppDBReplicaSetReconciler(ctx, nil, "", opsManager.Spec.AppDB, commonController, omConnectionFactoryFunc, opsManager.Annotations, nil, log) } func newAppDbMultiReconciler(ctx context.Context, c client.Client, opsManager *omv1.MongoDBOpsManager, memberClusterMap map[string]client.Client, log *zap.SugaredLogger, omConnectionFactoryFunc om.ConnectionFactory) (*ReconcileAppDbReplicaSet, error) { @@ -1412,7 +1413,7 @@ func readAutomationConfigMonitoringSecret(ctx context.Context, t *testing.T, kub func createRunningAppDB(ctx context.Context, t *testing.T, startingMembers int, fakeClient kubernetesClient.Client, opsManager *omv1.MongoDBOpsManager, omConnectionFactory *om.CachedOMConnectionFactory) *ReconcileAppDbReplicaSet { err := createOpsManagerUserPasswordSecret(ctx, fakeClient, opsManager, "pass") assert.NoError(t, err) - reconciler, err := newAppDbReconciler(ctx, fakeClient, opsManager, omConnectionFactory.GetConnectionFunc, zap.S()) + reconciler, err := newAppDbReconciler(ctx, fakeClient, opsManager, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) // create the apiKey and OM user diff --git a/controllers/operator/authentication/configure_authentication_test.go b/controllers/operator/authentication/configure_authentication_test.go index 8e8ae95df..7f2bc5250 100644 --- a/controllers/operator/authentication/configure_authentication_test.go +++ b/controllers/operator/authentication/configure_authentication_test.go @@ -1,21 +1,15 @@ package authentication import ( + "go.uber.org/zap/zaptest" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "github.com/mongodb/mongodb-kubernetes/controllers/om" "github.com/mongodb/mongodb-kubernetes/pkg/util" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func init() { - logger, _ := zap.NewDevelopment() - zap.ReplaceGlobals(logger) -} - func TestConfigureScramSha256(t *testing.T) { dep := om.NewDeployment() conn := om.NewMockedOmConnection(dep) @@ -27,7 +21,7 @@ func TestConfigureScramSha256(t *testing.T) { AgentMechanism: "SCRAM", } - if err := Configure(conn, opts, false, zap.S()); err != nil { + if err := Configure(conn, opts, false, 
zaptest.NewLogger(t).Sugar()); err != nil { t.Fatal(err) } @@ -55,7 +49,7 @@ func TestConfigureX509(t *testing.T) { }, } - if err := Configure(conn, opts, false, zap.S()); err != nil { + if err := Configure(conn, opts, false, zaptest.NewLogger(t).Sugar()); err != nil { t.Fatal(err) } @@ -79,7 +73,7 @@ func TestConfigureScramSha1(t *testing.T) { AgentMechanism: "SCRAM-SHA-1", } - if err := Configure(conn, opts, false, zap.S()); err != nil { + if err := Configure(conn, opts, false, zaptest.NewLogger(t).Sugar()); err != nil { t.Fatal(err) } @@ -104,7 +98,7 @@ func TestConfigureMultipleAuthenticationMechanisms(t *testing.T) { }, } - if err := Configure(conn, opts, false, zap.S()); err != nil { + if err := Configure(conn, opts, false, zaptest.NewLogger(t).Sugar()); err != nil { t.Fatal(err) } @@ -131,9 +125,9 @@ func TestDisableAuthentication(t *testing.T) { _ = conn.ReadUpdateAutomationConfig(func(ac *om.AutomationConfig) error { ac.Auth.Enable() return nil - }, zap.S()) + }, zaptest.NewLogger(t).Sugar()) - if err := Disable(conn, Options{}, true, zap.S()); err != nil { + if err := Disable(conn, Options{}, true, zaptest.NewLogger(t).Sugar()); err != nil { t.Fatal(err) } @@ -203,7 +197,7 @@ func assertAuthenticationMechanism(t *testing.T, auth *om.Auth, mechanism string } func assertDeploymentMechanismsConfigured(t *testing.T, authMechanism Mechanism, conn om.Connection, opts Options) { - err := authMechanism.EnableDeploymentAuthentication(conn, opts, zap.S()) + err := authMechanism.EnableDeploymentAuthentication(conn, opts, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) ac, err := conn.ReadAutomationConfig() @@ -212,14 +206,14 @@ func assertDeploymentMechanismsConfigured(t *testing.T, authMechanism Mechanism, } func assertAgentAuthenticationDisabled(t *testing.T, authMechanism Mechanism, conn om.Connection, opts Options) { - err := authMechanism.EnableAgentAuthentication(conn, opts, zap.S()) + err := authMechanism.EnableAgentAuthentication(conn, opts, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) ac, err := conn.ReadAutomationConfig() require.NoError(t, err) assert.True(t, authMechanism.IsAgentAuthenticationConfigured(ac, opts)) - err = authMechanism.DisableAgentAuthentication(conn, zap.S()) + err = authMechanism.DisableAgentAuthentication(conn, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) ac, err = conn.ReadAutomationConfig() diff --git a/controllers/operator/authentication/ldap_test.go b/controllers/operator/authentication/ldap_test.go index 0b619e3df..4be36d329 100644 --- a/controllers/operator/authentication/ldap_test.go +++ b/controllers/operator/authentication/ldap_test.go @@ -1,20 +1,20 @@ package authentication import ( + "go.uber.org/zap/zaptest" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "github.com/mongodb/mongodb-kubernetes/controllers/om" "github.com/mongodb/mongodb-kubernetes/controllers/operator/ldap" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ldapPlainMechanism = getMechanismByName(LDAPPlain) func TestLdapDeploymentMechanism(t *testing.T) { conn := om.NewMockedOmConnection(om.NewDeployment()) + logger := zaptest.NewLogger(t).Sugar() opts := Options{ Ldap: &ldap.Ldap{ @@ -24,7 +24,7 @@ }, } - err := ldapPlainMechanism.EnableDeploymentAuthentication(conn, opts, zap.S()) + err := ldapPlainMechanism.EnableDeploymentAuthentication(conn, opts, logger) require.NoError(t, 
err) ac, err := conn.ReadAutomationConfig() @@ -34,7 +34,7 @@ assert.Equal(t, "Servers", ac.Ldap.Servers) assert.Equal(t, "BindMethod", ac.Ldap.BindMethod) - err = ldapPlainMechanism.DisableDeploymentAuthentication(conn, zap.S()) + err = ldapPlainMechanism.DisableDeploymentAuthentication(conn, logger) require.NoError(t, err) ac, err = conn.ReadAutomationConfig() @@ -55,7 +55,7 @@ AutoPwd: "LDAPPassword.", } - err := ldapPlainMechanism.EnableAgentAuthentication(conn, opts, zap.S()) + err := ldapPlainMechanism.EnableAgentAuthentication(conn, opts, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) ac, err := conn.ReadAutomationConfig() diff --git a/controllers/operator/authentication/oidc_test.go b/controllers/operator/authentication/oidc_test.go index 6460db803..2e99bab20 100644 --- a/controllers/operator/authentication/oidc_test.go +++ b/controllers/operator/authentication/oidc_test.go @@ -1,11 +1,11 @@ package authentication import ( + "go.uber.org/zap/zaptest" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/zap" "k8s.io/utils/ptr" "github.com/mongodb/mongodb-kubernetes/controllers/om" @@ -52,7 +52,7 @@ func TestOIDC_EnableDeploymentAuthentication(t *testing.T) { configured := mongoDBOIDCMechanism.IsDeploymentAuthenticationConfigured(ac, opts) assert.False(t, configured) - err = mongoDBOIDCMechanism.EnableDeploymentAuthentication(conn, opts, zap.S()) + err = mongoDBOIDCMechanism.EnableDeploymentAuthentication(conn, opts, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) ac, err = conn.ReadAutomationConfig() @@ -63,7 +63,7 @@ func TestOIDC_EnableDeploymentAuthentication(t *testing.T) { configured = mongoDBOIDCMechanism.IsDeploymentAuthenticationConfigured(ac, opts) assert.True(t, configured) - err = mongoDBOIDCMechanism.DisableDeploymentAuthentication(conn, zap.S()) + err = mongoDBOIDCMechanism.DisableDeploymentAuthentication(conn, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) ac, err = conn.ReadAutomationConfig() @@ -88,9 +88,9 @@ func TestOIDC_EnableAgentAuthentication(t *testing.T) { configured := mongoDBOIDCMechanism.IsAgentAuthenticationConfigured(ac, opts) assert.False(t, configured) - err = mongoDBOIDCMechanism.EnableAgentAuthentication(conn, opts, zap.S()) + err = mongoDBOIDCMechanism.EnableAgentAuthentication(conn, opts, zaptest.NewLogger(t).Sugar()) require.Error(t, err) - err = mongoDBOIDCMechanism.DisableAgentAuthentication(conn, zap.S()) + err = mongoDBOIDCMechanism.DisableAgentAuthentication(conn, zaptest.NewLogger(t).Sugar()) require.Error(t, err) } diff --git a/controllers/operator/authentication/scramsha_test.go b/controllers/operator/authentication/scramsha_test.go index 1c97e9943..39909741e 100644 --- a/controllers/operator/authentication/scramsha_test.go +++ b/controllers/operator/authentication/scramsha_test.go @@ -1,11 +1,11 @@ package authentication import ( + "go.uber.org/zap/zaptest" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/zap" "github.com/mongodb/mongodb-kubernetes/controllers/om" "github.com/mongodb/mongodb-kubernetes/pkg/util" @@ -43,10 +43,10 @@ func TestAgentsAuthentication(t *testing.T) { CAFilePath: util.CAFilePathInContainer, } - err := s.EnableAgentAuthentication(conn, opts, zap.S()) + err := s.EnableAgentAuthentication(conn, opts, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) - err = 
s.EnableDeploymentAuthentication(conn, opts, zap.S()) + err = s.EnableDeploymentAuthentication(conn, opts, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) ac, err := conn.ReadAutomationConfig() diff --git a/controllers/operator/authentication/x509_test.go b/controllers/operator/authentication/x509_test.go index f342b6702..1034f0ae0 100644 --- a/controllers/operator/authentication/x509_test.go +++ b/controllers/operator/authentication/x509_test.go @@ -2,14 +2,13 @@ package authentication import ( "fmt" + "go.uber.org/zap/zaptest" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "github.com/mongodb/mongodb-kubernetes/controllers/om" "github.com/mongodb/mongodb-kubernetes/pkg/util" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var mongoDBX509Mechanism = getMechanismByName(MongoDBX509) @@ -25,7 +24,7 @@ func TestX509EnableAgentAuthentication(t *testing.T) { }, AuthoritativeSet: true, } - if err := mongoDBX509Mechanism.EnableAgentAuthentication(conn, options, zap.S()); err != nil { + if err := mongoDBX509Mechanism.EnableAgentAuthentication(conn, options, zaptest.NewLogger(t).Sugar()); err != nil { t.Fatal(err) } diff --git a/controllers/operator/authentication_test.go b/controllers/operator/authentication_test.go index 6b47167de..95551aef7 100644 --- a/controllers/operator/authentication_test.go +++ b/controllers/operator/authentication_test.go @@ -10,6 +10,7 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" + "go.uber.org/zap/zaptest" "math/big" "os" "testing" @@ -17,7 +18,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/zap" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -91,7 +91,7 @@ func TestUpdateOmAuthentication_NoAuthenticationEnabled(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) - r.updateOmAuthentication(ctx, conn, processNames, rs, "", "", "", false, zap.S()) + r.updateOmAuthentication(ctx, conn, processNames, rs, "", "", "", false, zaptest.NewLogger(t).Sugar()) ac, _ := conn.ReadAutomationConfig() @@ -112,7 +112,7 @@ func TestUpdateOmAuthentication_EnableX509_TlsNotEnabled(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) - status, isMultiStageReconciliation := r.updateOmAuthentication(ctx, conn, []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zap.S()) + status, isMultiStageReconciliation := r.updateOmAuthentication(ctx, conn, []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zaptest.NewLogger(t).Sugar()) assert.True(t, status.IsOK(), "configuring both options at once should not result in a failed status") assert.True(t, isMultiStageReconciliation, "configuring both tls and x509 at once should result in a multi stage reconciliation") @@ -124,7 +124,7 @@ func TestUpdateOmAuthentication_EnableX509_WithTlsAlreadyEnabled(t *testing.T) { omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(deployment.CreateFromReplicaSet("fake-mongoDBImage", false, rs))) kubeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, 
omConnectionFactory.GetConnectionFunc) - status, isMultiStageReconciliation := r.updateOmAuthentication(ctx, omConnectionFactory.GetConnection(), []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zap.S()) + status, isMultiStageReconciliation := r.updateOmAuthentication(ctx, omConnectionFactory.GetConnection(), []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zaptest.NewLogger(t).Sugar()) assert.True(t, status.IsOK(), "configuring x509 when tls has already been enabled should not result in a failed status") assert.False(t, isMultiStageReconciliation, "if tls is already enabled, we should be able to configure x509 is a single reconciliation") @@ -140,7 +140,7 @@ func TestUpdateOmAuthentication_AuthenticationIsNotConfigured_IfAuthIsNotSet(t * kubeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) - status, _ := r.updateOmAuthentication(ctx, omConnectionFactory.GetConnection(), []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zap.S()) + status, _ := r.updateOmAuthentication(ctx, omConnectionFactory.GetConnection(), []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zaptest.NewLogger(t).Sugar()) assert.True(t, status.IsOK(), "no authentication should have been configured") ac, _ := omConnectionFactory.GetConnection().ReadAutomationConfig() @@ -211,7 +211,7 @@ func TestUpdateOmAuthentication_EnableX509_FromEmptyDeployment(t *testing.T) { r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) createAgentCSRs(t, ctx, 1, r.client, certsv1.CertificateApproved) - status, isMultiStageReconciliation := r.updateOmAuthentication(ctx, omConnectionFactory.GetConnection(), []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zap.S()) + status, isMultiStageReconciliation := r.updateOmAuthentication(ctx, omConnectionFactory.GetConnection(), []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zaptest.NewLogger(t).Sugar()) assert.True(t, status.IsOK(), "configuring x509 and tls when there are no processes should not result in a failed status") assert.False(t, isMultiStageReconciliation, "if we are enabling tls and x509 at once, this should be done in a single reconciliation") } diff --git a/controllers/operator/common_controller_test.go b/controllers/operator/common_controller_test.go index da0928f67..d2070c776 100644 --- a/controllers/operator/common_controller_test.go +++ b/controllers/operator/common_controller_test.go @@ -3,6 +3,7 @@ package operator import ( "context" "fmt" + "go.uber.org/zap/zaptest" "os" "reflect" "strings" @@ -55,11 +56,11 @@ func TestEnsureTagAdded(t *testing.T) { mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) // normal tag - err := connection.EnsureTagAdded(mockOm, mockOm.FindGroup(om.TestGroupName), "myTag", zap.S()) + err := connection.EnsureTagAdded(mockOm, mockOm.FindGroup(om.TestGroupName), "myTag", zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) // long tag - err = connection.EnsureTagAdded(mockOm, mockOm.FindGroup(om.TestGroupName), "LOOKATTHISTRINGTHATISTOOLONGFORTHEFIELD", zap.S()) + err = connection.EnsureTagAdded(mockOm, mockOm.FindGroup(om.TestGroupName), "LOOKATTHISTRINGTHATISTOOLONGFORTHEFIELD", zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) expected := []string{"EXTERNALLY_MANAGED_BY_KUBERNETES", "MY-NAMESPACE", "MYTAG", 
"LOOKATTHISTRINGTHATISTOOLONGFORT"} @@ -72,11 +73,11 @@ func TestEnsureTagAddedDuplicates(t *testing.T) { opsManagerController := NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, opsManagerController, omConnectionFactory.GetConnectionFunc, t) - err := connection.EnsureTagAdded(mockOm, mockOm.FindGroup(om.TestGroupName), "MYTAG", zap.S()) + err := connection.EnsureTagAdded(mockOm, mockOm.FindGroup(om.TestGroupName), "MYTAG", zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) - err = connection.EnsureTagAdded(mockOm, mockOm.FindGroup(om.TestGroupName), "MYTAG", zap.S()) + err = connection.EnsureTagAdded(mockOm, mockOm.FindGroup(om.TestGroupName), "MYTAG", zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) - err = connection.EnsureTagAdded(mockOm, mockOm.FindGroup(om.TestGroupName), "MYOTHERTAG", zap.S()) + err = connection.EnsureTagAdded(mockOm, mockOm.FindGroup(om.TestGroupName), "MYOTHERTAG", zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) expected := []string{"EXTERNALLY_MANAGED_BY_KUBERNETES", "MY-NAMESPACE", "MYTAG", "MYOTHERTAG"} assert.Equal(t, expected, mockOm.FindGroup(om.TestGroupName).Tags) @@ -229,7 +230,7 @@ func TestUpdateStatus_Patched(t *testing.T) { reconciledObject := rs.DeepCopy() // The current reconciled object "has diverged" from the one in API server reconciledObject.Spec.Version = "10.0.0" - _, err := controller.updateStatus(ctx, reconciledObject, workflow.Pending("Waiting for secret..."), zap.S()) + _, err := controller.updateStatus(ctx, reconciledObject, workflow.Pending("Waiting for secret..."), zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) // Verifying that the resource in API server still has the correct spec @@ -289,7 +290,7 @@ func TestFailWhenRoleAndRoleRefsAreConfigured(t *testing.T) { controller := NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) - result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zaptest.NewLogger(t).Sugar()) assert.False(t, result.IsOK()) assert.Equal(t, status.PhaseFailed, result.Phase()) @@ -317,7 +318,7 @@ func TestRoleRefsAreAdded(t *testing.T) { _ = kubeClient.Create(ctx, roleResource) - controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zaptest.NewLogger(t).Sugar()) ac, err := mockOm.ReadAutomationConfig() assert.NoError(t, err) @@ -344,7 +345,7 @@ func TestErrorWhenRoleRefIsWrong(t *testing.T) { _ = kubeClient.Create(ctx, roleResource) - result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zaptest.NewLogger(t).Sugar()) assert.False(t, result.IsOK()) assert.Equal(t, status.PhaseFailed, result.Phase()) @@ -370,7 +371,7 @@ func TestErrorWhenRoleDoesNotExist(t *testing.T) { controller := NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) - result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, 
kube.ObjectKeyFromApiObject(rs), zaptest.NewLogger(t).Sugar()) assert.False(t, result.IsOK()) assert.Equal(t, status.PhaseFailed, result.Phase()) @@ -397,7 +398,7 @@ func TestDontSendNilPrivileges(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() controller := NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) - controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zaptest.NewLogger(t).Sugar()) ac, err := mockOm.ReadAutomationConfig() assert.NoError(t, err) roles, ok := ac.Deployment["roles"].([]mdbv1.MongoDBRole) @@ -480,7 +481,7 @@ func prepareConnection(ctx context.Context, controller *ReconcileCommonControlle credsConfig, err := project.ReadCredentials(ctx, controller.SecretClient, kube.ObjectKey(mock.TestNamespace, mock.TestCredentialsSecretName), &zap.SugaredLogger{}) assert.NoError(t, err) - conn, _, e := connection.PrepareOpsManagerConnection(ctx, controller.SecretClient, projectConfig, credsConfig, omConnectionFunc, mock.TestNamespace, zap.S()) + conn, _, e := connection.PrepareOpsManagerConnection(ctx, controller.SecretClient, projectConfig, credsConfig, omConnectionFunc, mock.TestNamespace, zaptest.NewLogger(t).Sugar()) mockOm := conn.(*om.MockedOmConnection) assert.NoError(t, e) return mockOm, newPodVars(conn, projectConfig, mdbv1.Warn) diff --git a/controllers/operator/construct/backup_construction_test.go b/controllers/operator/construct/backup_construction_test.go index 19d4d7ee5..af4da5e3b 100644 --- a/controllers/operator/construct/backup_construction_test.go +++ b/controllers/operator/construct/backup_construction_test.go @@ -2,11 +2,9 @@ package construct import ( "context" + "go.uber.org/zap/zaptest" "testing" - "github.com/stretchr/testify/assert" - "go.uber.org/zap" - omv1 "github.com/mongodb/mongodb-kubernetes/api/v1/om" "github.com/mongodb/mongodb-kubernetes/controllers/operator/mock" "github.com/mongodb/mongodb-kubernetes/controllers/operator/secrets" @@ -14,6 +12,7 @@ import ( "github.com/mongodb/mongodb-kubernetes/pkg/multicluster" "github.com/mongodb/mongodb-kubernetes/pkg/util" "github.com/mongodb/mongodb-kubernetes/pkg/vault" + "github.com/stretchr/testify/assert" ) func TestBuildBackupDaemonStatefulSet(t *testing.T) { @@ -23,7 +22,7 @@ func TestBuildBackupDaemonStatefulSet(t *testing.T) { VaultClient: &vault.VaultClient{}, KubeClient: client, } - sts, err := BackupDaemonStatefulSet(ctx, secretsClient, omv1.NewOpsManagerBuilderDefault().SetName("test-om").Build(), multicluster.GetLegacyCentralMemberCluster(1, 0, client, secretsClient), zap.S()) + sts, err := BackupDaemonStatefulSet(ctx, secretsClient, omv1.NewOpsManagerBuilderDefault().SetName("test-om").Build(), multicluster.GetLegacyCentralMemberCluster(1, 0, client, secretsClient), zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.Equal(t, "test-om-backup-daemon", sts.Name) assert.Equal(t, util.BackupDaemonContainerName, sts.Spec.Template.Spec.Containers[0].Name) @@ -37,7 +36,7 @@ func TestBackupPodTemplate_TerminationTimeout(t *testing.T) { VaultClient: &vault.VaultClient{}, KubeClient: client, } - set, err := BackupDaemonStatefulSet(ctx, secretsClient, omv1.NewOpsManagerBuilderDefault().SetName("test-om").Build(), multicluster.GetLegacyCentralMemberCluster(1, 0, client, secretsClient), zap.S()) + set, err := 
BackupDaemonStatefulSet(ctx, secretsClient, omv1.NewOpsManagerBuilderDefault().SetName("test-om").Build(), multicluster.GetLegacyCentralMemberCluster(1, 0, client, secretsClient), zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) podSpecTemplate := set.Spec.Template assert.Equal(t, int64(4200), *podSpecTemplate.Spec.TerminationGracePeriodSeconds) @@ -50,7 +49,7 @@ func TestBuildBackupDaemonContainer(t *testing.T) { VaultClient: &vault.VaultClient{}, KubeClient: client, } - sts, err := BackupDaemonStatefulSet(ctx, secretsClient, omv1.NewOpsManagerBuilderDefault().SetVersion("4.2.0").Build(), multicluster.GetLegacyCentralMemberCluster(1, 0, client, secretsClient), zap.S(), + sts, err := BackupDaemonStatefulSet(ctx, secretsClient, omv1.NewOpsManagerBuilderDefault().SetVersion("4.2.0").Build(), multicluster.GetLegacyCentralMemberCluster(1, 0, client, secretsClient), zaptest.NewLogger(t).Sugar(), WithOpsManagerImage("quay.io/mongodb/mongodb-enterprise-ops-manager:4.2.0"), ) assert.NoError(t, err) @@ -80,7 +79,7 @@ func TestMultipleBackupDaemons(t *testing.T) { VaultClient: &vault.VaultClient{}, KubeClient: client, } - sts, err := BackupDaemonStatefulSet(ctx, secretsClient, omv1.NewOpsManagerBuilderDefault().SetVersion("4.2.0").SetBackupMembers(3).Build(), multicluster.GetLegacyCentralMemberCluster(1, 0, client, secretsClient), zap.S()) + sts, err := BackupDaemonStatefulSet(ctx, secretsClient, omv1.NewOpsManagerBuilderDefault().SetVersion("4.2.0").SetBackupMembers(3).Build(), multicluster.GetLegacyCentralMemberCluster(1, 0, client, secretsClient), zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.Equal(t, 3, int(*sts.Spec.Replicas)) } diff --git a/controllers/operator/construct/construction_test.go b/controllers/operator/construct/construction_test.go index 6108b1c8e..a4dc9418c 100644 --- a/controllers/operator/construct/construction_test.go +++ b/controllers/operator/construct/construction_test.go @@ -1,10 +1,10 @@ package construct import ( + "go.uber.org/zap/zaptest" "testing" "github.com/stretchr/testify/assert" - "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/resource" appsv1 "k8s.io/api/apps/v1" @@ -24,22 +24,23 @@ import ( func TestBuildStatefulSet_PersistentFlagStatic(t *testing.T) { t.Setenv(architectures.DefaultEnvArchitecture, string(architectures.Static)) + logger := zaptest.NewLogger(t).Sugar() mdb := mdbv1.NewReplicaSetBuilder().SetPersistent(nil).Build() - set := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + set := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), logger) assert.Len(t, set.Spec.VolumeClaimTemplates, 1) assert.Len(t, set.Spec.Template.Spec.Containers[0].VolumeMounts, 7) assert.Len(t, set.Spec.Template.Spec.Containers[1].VolumeMounts, 7) mdb = mdbv1.NewReplicaSetBuilder().SetPersistent(util.BooleanRef(true)).Build() - set = DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + set = DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), logger) assert.Len(t, set.Spec.VolumeClaimTemplates, 1) assert.Len(t, set.Spec.Template.Spec.Containers[0].VolumeMounts, 7) assert.Len(t, set.Spec.Template.Spec.Containers[1].VolumeMounts, 7) // If no persistence is set then we still mount init scripts mdb = mdbv1.NewReplicaSetBuilder().SetPersistent(util.BooleanRef(false)).Build() - set = DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + set = DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), logger) assert.Len(t, set.Spec.VolumeClaimTemplates, 0) 
assert.Len(t, set.Spec.Template.Spec.Containers[0].VolumeMounts, 7) assert.Len(t, set.Spec.Template.Spec.Containers[1].VolumeMounts, 7) @@ -49,18 +50,18 @@ func TestBuildStatefulSet_PersistentFlag(t *testing.T) { t.Setenv(architectures.DefaultEnvArchitecture, string(architectures.NonStatic)) mdb := mdbv1.NewReplicaSetBuilder().SetPersistent(nil).Build() - set := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + set := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) assert.Len(t, set.Spec.VolumeClaimTemplates, 1) assert.Len(t, set.Spec.Template.Spec.Containers[0].VolumeMounts, 8) mdb = mdbv1.NewReplicaSetBuilder().SetPersistent(util.BooleanRef(true)).Build() - set = DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + set = DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) assert.Len(t, set.Spec.VolumeClaimTemplates, 1) assert.Len(t, set.Spec.Template.Spec.Containers[0].VolumeMounts, 8) // If no persistence is set then we still mount init scripts mdb = mdbv1.NewReplicaSetBuilder().SetPersistent(util.BooleanRef(false)).Build() - set = DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + set = DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) assert.Len(t, set.Spec.VolumeClaimTemplates, 0) assert.Len(t, set.Spec.Template.Spec.Containers[0].VolumeMounts, 8) } @@ -74,7 +75,7 @@ func TestBuildStatefulSet_PersistentVolumeClaimSingle(t *testing.T) { persistence := mdbv1.NewPersistenceBuilder("40G").SetStorageClass("fast").SetLabelSelector(labels) podSpec := mdbv1.NewPodSpecWrapperBuilder().SetSinglePersistence(persistence).Build().MongoDbPodSpec rs := mdbv1.NewReplicaSetBuilder().SetPersistent(nil).SetPodSpec(&podSpec).Build() - set := DatabaseStatefulSet(*rs, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + set := DatabaseStatefulSet(*rs, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) checkPvClaims(t, set, []corev1.PersistentVolumeClaim{pvClaim(util.PvcNameData, "40G", stringutil.Ref("fast"), labels)}) @@ -99,7 +100,7 @@ func TestBuildStatefulSet_PersistentVolumeClaimSingleStatic(t *testing.T) { persistence := mdbv1.NewPersistenceBuilder("40G").SetStorageClass("fast").SetLabelSelector(labels) podSpec := mdbv1.NewPodSpecWrapperBuilder().SetSinglePersistence(persistence).Build().MongoDbPodSpec rs := mdbv1.NewReplicaSetBuilder().SetPersistent(nil).SetPodSpec(&podSpec).Build() - set := DatabaseStatefulSet(*rs, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + set := DatabaseStatefulSet(*rs, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) checkPvClaims(t, set, []corev1.PersistentVolumeClaim{pvClaim(util.PvcNameData, "40G", stringutil.Ref("fast"), labels)}) @@ -126,7 +127,7 @@ func TestBuildStatefulSet_PersistentVolumeClaimMultiple(t *testing.T) { ).Build() mdb := mdbv1.NewReplicaSetBuilder().SetPersistent(nil).SetPodSpec(&podSpec.MongoDbPodSpec).Build() - set := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + set := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) checkPvClaims(t, set, []corev1.PersistentVolumeClaim{ pvClaim(util.PvcNameData, "40G", stringutil.Ref("fast"), nil), @@ -155,7 +156,7 @@ func TestBuildStatefulSet_PersistentVolumeClaimMultipleDefaults(t *testing.T) { nil). 
Build() mdb := mdbv1.NewReplicaSetBuilder().SetPersistent(nil).SetPodSpec(&podSpec.MongoDbPodSpec).Build() - set := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + set := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) checkPvClaims(t, set, []corev1.PersistentVolumeClaim{ pvClaim(util.PvcNameData, "40G", stringutil.Ref("fast"), nil), @@ -201,7 +202,7 @@ func TestBasePodSpec_Affinity(t *testing.T) { SetName("s"). SetPodSpec(&podSpec.MongoDbPodSpec). Build() - sts := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + sts := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) spec := sts.Spec.Template.Spec assert.Equal(t, nodeAffinity, *spec.Affinity.NodeAffinity) @@ -217,7 +218,7 @@ func TestBasePodSpec_Affinity(t *testing.T) { // TestBasePodSpec_AntiAffinityDefaultTopology checks that the default topology key is created if the topology key is // not specified func TestBasePodSpec_AntiAffinityDefaultTopology(t *testing.T) { - sts := DatabaseStatefulSet(*mdbv1.NewStandaloneBuilder().SetName("my-standalone").Build(), StandaloneOptions(GetPodEnvOptions()), zap.S()) + sts := DatabaseStatefulSet(*mdbv1.NewStandaloneBuilder().SetName("my-standalone").Build(), StandaloneOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) spec := sts.Spec.Template.Spec term := spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0] @@ -232,14 +233,14 @@ func TestBasePodSpec_ImagePullSecrets(t *testing.T) { // Cleaning the state (there is no tear down in go test :( ) defer mock.InitDefaultEnvVariables() - sts := DatabaseStatefulSet(*mdbv1.NewStandaloneBuilder().Build(), StandaloneOptions(GetPodEnvOptions()), zap.S()) + sts := DatabaseStatefulSet(*mdbv1.NewStandaloneBuilder().Build(), StandaloneOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) template := sts.Spec.Template assert.Nil(t, template.Spec.ImagePullSecrets) t.Setenv(util.ImagePullSecrets, "foo") - sts = DatabaseStatefulSet(*mdbv1.NewStandaloneBuilder().Build(), StandaloneOptions(GetPodEnvOptions()), zap.S()) + sts = DatabaseStatefulSet(*mdbv1.NewStandaloneBuilder().Build(), StandaloneOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) template = sts.Spec.Template assert.Equal(t, []corev1.LocalObjectReference{{Name: "foo"}}, template.Spec.ImagePullSecrets) @@ -247,7 +248,7 @@ func TestBasePodSpec_ImagePullSecrets(t *testing.T) { // TestBasePodSpec_TerminationGracePeriodSeconds verifies that the TerminationGracePeriodSeconds is set to 600 seconds func TestBasePodSpec_TerminationGracePeriodSeconds(t *testing.T) { - sts := DatabaseStatefulSet(*mdbv1.NewReplicaSetBuilder().Build(), ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + sts := DatabaseStatefulSet(*mdbv1.NewReplicaSetBuilder().Build(), ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) assert.Equal(t, util.Int64Ref(600), sts.Spec.Template.Spec.TerminationGracePeriodSeconds) } @@ -290,7 +291,7 @@ func pvClaim(pvName, size string, storageClass *string, labels map[string]string func TestDefaultPodSpec_SecurityContext(t *testing.T) { defer mock.InitDefaultEnvVariables() - sts := DatabaseStatefulSet(*mdbv1.NewStandaloneBuilder().Build(), StandaloneOptions(GetPodEnvOptions()), zap.S()) + sts := DatabaseStatefulSet(*mdbv1.NewStandaloneBuilder().Build(), StandaloneOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) spec := sts.Spec.Template.Spec assert.Len(t, spec.InitContainers, 1) @@ -304,7 
+305,7 @@ func TestDefaultPodSpec_SecurityContext(t *testing.T) { t.Setenv(util.ManagedSecurityContextEnv, "true") - sts = DatabaseStatefulSet(*mdbv1.NewStandaloneBuilder().Build(), StandaloneOptions(GetPodEnvOptions()), zap.S()) + sts = DatabaseStatefulSet(*mdbv1.NewStandaloneBuilder().Build(), StandaloneOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) assert.Nil(t, sts.Spec.Template.Spec.SecurityContext) } @@ -316,7 +317,7 @@ func TestPodSpec_Requirements(t *testing.T) { SetMemoryLimit("1012M"). Build() - sts := DatabaseStatefulSet(*mdbv1.NewReplicaSetBuilder().SetPodSpec(&podSpec.MongoDbPodSpec).Build(), ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + sts := DatabaseStatefulSet(*mdbv1.NewReplicaSetBuilder().SetPodSpec(&podSpec.MongoDbPodSpec).Build(), ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) podSpecTemplate := sts.Spec.Template container := podSpecTemplate.Spec.Containers[0] @@ -343,7 +344,7 @@ func TestPodAntiAffinityOverride(t *testing.T) { SetName("s"). SetPodSpec(&podSpec.MongoDbPodSpec). Build() - sts := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + sts := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) spec := sts.Spec.Template.Spec assert.Equal(t, podAntiAffinity, *spec.Affinity.PodAntiAffinity) } diff --git a/controllers/operator/construct/database_construction_test.go b/controllers/operator/construct/database_construction_test.go index 585a2c140..40165047d 100644 --- a/controllers/operator/construct/database_construction_test.go +++ b/controllers/operator/construct/database_construction_test.go @@ -1,6 +1,7 @@ package construct import ( + "go.uber.org/zap/zaptest" "path" "slices" "testing" @@ -97,13 +98,13 @@ func TestStatefulsetCreationPanicsIfEnvVariablesAreNotSet(t *testing.T) { mongosSpec := createMongosSpec(sc) assert.Panics(t, func() { - DatabaseStatefulSet(*sc, ShardOptions(0, shardSpec, memberCluster.Name), zap.S()) + DatabaseStatefulSet(*sc, ShardOptions(0, shardSpec, memberCluster.Name), zaptest.NewLogger(t).Sugar()) }) assert.Panics(t, func() { - DatabaseStatefulSet(*sc, ConfigServerOptions(configServerSpec, memberCluster.Name), zap.S()) + DatabaseStatefulSet(*sc, ConfigServerOptions(configServerSpec, memberCluster.Name), zaptest.NewLogger(t).Sugar()) }) assert.Panics(t, func() { - DatabaseStatefulSet(*sc, MongosOptions(mongosSpec, memberCluster.Name), zap.S()) + DatabaseStatefulSet(*sc, MongosOptions(mongosSpec, memberCluster.Name), zaptest.NewLogger(t).Sugar()) }) }) } @@ -118,13 +119,13 @@ func TestStatefulsetCreationPanicsIfEnvVariablesAreNotSetStatic(t *testing.T) { configServerSpec := createConfigSrvSpec(sc) mongosSpec := createMongosSpec(sc) assert.Panics(t, func() { - DatabaseStatefulSet(*sc, ShardOptions(0, shardSpec, memberCluster.Name), zap.S()) + DatabaseStatefulSet(*sc, ShardOptions(0, shardSpec, memberCluster.Name), zaptest.NewLogger(t).Sugar()) }) assert.Panics(t, func() { - DatabaseStatefulSet(*sc, ConfigServerOptions(configServerSpec, memberCluster.Name), zap.S()) + DatabaseStatefulSet(*sc, ConfigServerOptions(configServerSpec, memberCluster.Name), zaptest.NewLogger(t).Sugar()) }) assert.Panics(t, func() { - DatabaseStatefulSet(*sc, MongosOptions(mongosSpec, memberCluster.Name), zap.S()) + DatabaseStatefulSet(*sc, MongosOptions(mongosSpec, memberCluster.Name), zaptest.NewLogger(t).Sugar()) }) }) } @@ -133,7 +134,7 @@ func TestStatefulsetCreationSuccessful(t *testing.T) { start := time.Now() rs := mdbv1.NewReplicaSetBuilder().Build() - 
_ = DatabaseStatefulSet(*rs, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + _ = DatabaseStatefulSet(*rs, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) assert.True(t, time.Since(start) < time.Second*4) // we waited only a little (considering 2 seconds of wait as well) } @@ -195,7 +196,7 @@ func TestAgentFlags(t *testing.T) { } mdb := mdbv1.NewReplicaSetBuilder().SetAgentConfig(mdbv1.AgentConfig{StartupParameters: agentStartupParameters}).Build() - sts := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + sts := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) variablesMap := env.ToMap(sts.Spec.Template.Spec.Containers[0].Env...) val, ok := variablesMap["AGENT_FLAGS"] assert.True(t, ok) @@ -208,7 +209,7 @@ func TestLabelsAndAnotations(t *testing.T) { annotations := map[string]string{"a1": "val1", "a2": "val2"} mdb := mdbv1.NewReplicaSetBuilder().SetAnnotations(annotations).SetLabels(labels).Build() - sts := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + sts := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) // add the default label to the map labels["app"] = "test-mdb-svc" @@ -372,7 +373,7 @@ func TestDatabaseStatefulSet_StaticContainersEnvVars(t *testing.T) { t.Setenv(architectures.DefaultEnvArchitecture, tt.defaultArchitecture) mdb := mdbv1.NewReplicaSetBuilder().SetAnnotations(tt.annotations).Build() - sts := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) + sts := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) agentContainerIdx := slices.IndexFunc(sts.Spec.Template.Spec.Containers, func(container corev1.Container) bool { return container.Name == util.AgentContainerName diff --git a/controllers/operator/construct/opsmanager_construction_test.go b/controllers/operator/construct/opsmanager_construction_test.go index d653c617f..e579127e7 100644 --- a/controllers/operator/construct/opsmanager_construction_test.go +++ b/controllers/operator/construct/opsmanager_construction_test.go @@ -2,6 +2,7 @@ package construct import ( "context" + "go.uber.org/zap/zaptest" "testing" "github.com/stretchr/testify/assert" @@ -59,7 +60,7 @@ func TestBuildJvmParamsEnvVars_FromCustomContainerResource(t *testing.T) { Build() om.Spec.JVMParams = []string{"-DFakeOptionEnabled"} - omSts, err := createOpsManagerStatefulset(ctx, om) + omSts, err := createOpsManagerStatefulset(t, ctx, om) assert.NoError(t, err) template := omSts.Spec.Template @@ -92,14 +93,14 @@ func TestBuildJvmParamsEnvVars_FromCustomContainerResource(t *testing.T) { assert.Equal(t, "-DFakeOptionEnabled", envVarsNoLimitsOrReqs[0].Value) } -func createOpsManagerStatefulset(ctx context.Context, om *omv1.MongoDBOpsManager, additionalOpts ...func(*OpsManagerStatefulSetOptions)) (appsv1.StatefulSet, error) { +func createOpsManagerStatefulset(t *testing.T, ctx context.Context, om *omv1.MongoDBOpsManager, additionalOpts ...func(*OpsManagerStatefulSetOptions)) (appsv1.StatefulSet, error) { client, _ := mock.NewDefaultFakeClient() secretsClient := secrets.SecretClient{ VaultClient: &vault.VaultClient{}, KubeClient: client, } - omSts, err := OpsManagerStatefulSet(ctx, secretsClient, om, multicluster.GetLegacyCentralMemberCluster(om.Spec.Replicas, 0, client, secretsClient), zap.S(), additionalOpts...) 
+ omSts, err := OpsManagerStatefulSet(ctx, secretsClient, om, multicluster.GetLegacyCentralMemberCluster(om.Spec.Replicas, 0, client, secretsClient), zaptest.NewLogger(t).Sugar(), additionalOpts...) return omSts, err } @@ -116,7 +117,7 @@ func TestBuildJvmParamsEnvVars_FromDefaultPodSpec(t *testing.T) { KubeClient: client, } - omSts, err := OpsManagerStatefulSet(ctx, secretsClient, om, multicluster.GetLegacyCentralMemberCluster(om.Spec.Replicas, 0, client, secretsClient), zap.S()) + omSts, err := OpsManagerStatefulSet(ctx, secretsClient, om, multicluster.GetLegacyCentralMemberCluster(om.Spec.Replicas, 0, client, secretsClient), zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) template := omSts.Spec.Template @@ -138,7 +139,7 @@ func TestBuildOpsManagerStatefulSet(t *testing.T) { AddConfiguration("mms.adminEmailAddr", "cloud-manager-support@mongodb.com"). Build() - sts, err := createOpsManagerStatefulset(ctx, om) + sts, err := createOpsManagerStatefulset(t, ctx, om) assert.NoError(t, err) @@ -176,7 +177,7 @@ func TestBuildOpsManagerStatefulSet(t *testing.T) { SetStatefulSetSpec(statefulSet.Spec). Build() - sts, err := createOpsManagerStatefulset(ctx, om) + sts, err := createOpsManagerStatefulset(t, ctx, om) assert.NoError(t, err) expectedVars := []corev1.EnvVar{ {Name: "ENABLE_IRP", Value: "true"}, @@ -190,7 +191,7 @@ func TestBuildOpsManagerStatefulSet(t *testing.T) { func Test_buildOpsManagerStatefulSet(t *testing.T) { ctx := context.Background() - sts, err := createOpsManagerStatefulset(ctx, omv1.NewOpsManagerBuilderDefault().SetName("test-om").Build()) + sts, err := createOpsManagerStatefulset(t, ctx, omv1.NewOpsManagerBuilderDefault().SetName("test-om").Build()) assert.NoError(t, err) assert.Equal(t, "test-om", sts.Name) assert.Equal(t, util.OpsManagerContainerName, sts.Spec.Template.Spec.Containers[0].Name) @@ -201,7 +202,7 @@ func Test_buildOpsManagerStatefulSet(t *testing.T) { func Test_buildOpsManagerStatefulSet_Secrets(t *testing.T) { ctx := context.Background() opsManager := omv1.NewOpsManagerBuilderDefault().SetName("test-om").Build() - sts, err := createOpsManagerStatefulset(ctx, opsManager) + sts, err := createOpsManagerStatefulset(t, ctx, opsManager) assert.NoError(t, err) expectedSecretVolumeNames := []string{"test-om-gen-key", opsManager.AppDBMongoConnectionStringSecretName()} @@ -239,7 +240,7 @@ func TestOpsManagerPodTemplate_MergePodTemplate(t *testing.T) { om := omv1.NewOpsManagerBuilderDefault().Build() - omSts, err := createOpsManagerStatefulset(ctx, om) + omSts, err := createOpsManagerStatefulset(t, ctx, om) assert.NoError(t, err) template := omSts.Spec.Template @@ -273,7 +274,7 @@ func TestOpsManagerPodTemplate_MergePodTemplate(t *testing.T) { // TestOpsManagerPodTemplate_PodSpec verifies that StatefulSetSpec is applied correctly to OpsManager/Backup pod template. 
func TestOpsManagerPodTemplate_PodSpec(t *testing.T) { ctx := context.Background() - omSts, err := createOpsManagerStatefulset(ctx, omv1.NewOpsManagerBuilderDefault().Build()) + omSts, err := createOpsManagerStatefulset(t, ctx, omv1.NewOpsManagerBuilderDefault().Build()) assert.NoError(t, err) resourceLimits := buildSafeResourceList("1.0", "500M") @@ -331,7 +332,7 @@ func TestOpsManagerPodTemplate_SecurityContext(t *testing.T) { ctx := context.Background() defer mock.InitDefaultEnvVariables() - omSts, err := createOpsManagerStatefulset(ctx, omv1.NewOpsManagerBuilderDefault().Build()) + omSts, err := createOpsManagerStatefulset(t, ctx, omv1.NewOpsManagerBuilderDefault().Build()) assert.NoError(t, err) podSpecTemplate := omSts.Spec.Template @@ -343,7 +344,7 @@ func TestOpsManagerPodTemplate_SecurityContext(t *testing.T) { t.Setenv(util.ManagedSecurityContextEnv, "true") - omSts, err = createOpsManagerStatefulset(ctx, omv1.NewOpsManagerBuilderDefault().Build()) + omSts, err = createOpsManagerStatefulset(t, ctx, omv1.NewOpsManagerBuilderDefault().Build()) assert.NoError(t, err) podSpecTemplate = omSts.Spec.Template assert.Nil(t, podSpecTemplate.Spec.SecurityContext) @@ -351,7 +352,7 @@ func TestOpsManagerPodTemplate_SecurityContext(t *testing.T) { func TestOpsManagerPodTemplate_TerminationTimeout(t *testing.T) { ctx := context.Background() - omSts, err := createOpsManagerStatefulset(ctx, omv1.NewOpsManagerBuilderDefault().Build()) + omSts, err := createOpsManagerStatefulset(t, ctx, omv1.NewOpsManagerBuilderDefault().Build()) assert.NoError(t, err) podSpecTemplate := omSts.Spec.Template assert.Equal(t, int64(300), *podSpecTemplate.Spec.TerminationGracePeriodSeconds) @@ -361,7 +362,7 @@ func TestOpsManagerPodTemplate_ImagePullPolicy(t *testing.T) { ctx := context.Background() defer mock.InitDefaultEnvVariables() - omSts, err := createOpsManagerStatefulset(ctx, omv1.NewOpsManagerBuilderDefault().Build()) + omSts, err := createOpsManagerStatefulset(t, ctx, omv1.NewOpsManagerBuilderDefault().Build()) assert.NoError(t, err) podSpecTemplate := omSts.Spec.Template @@ -370,7 +371,7 @@ func TestOpsManagerPodTemplate_ImagePullPolicy(t *testing.T) { assert.Nil(t, spec.ImagePullSecrets) t.Setenv(util.ImagePullSecrets, "my-cool-secret") - omSts, err = createOpsManagerStatefulset(ctx, omv1.NewOpsManagerBuilderDefault().Build()) + omSts, err = createOpsManagerStatefulset(t, ctx, omv1.NewOpsManagerBuilderDefault().Build()) assert.NoError(t, err) podSpecTemplate = omSts.Spec.Template spec = podSpecTemplate.Spec @@ -385,7 +386,7 @@ func TestOpsManagerPodTemplate_Container(t *testing.T) { ctx := context.Background() om := omv1.NewOpsManagerBuilderDefault().SetVersion("4.2.0").Build() - sts, err := createOpsManagerStatefulset(ctx, om, WithOpsManagerImage(opsManagerImage)) + sts, err := createOpsManagerStatefulset(t, ctx, om, WithOpsManagerImage(opsManagerImage)) assert.NoError(t, err) template := sts.Spec.Template diff --git a/controllers/operator/create/create_test.go b/controllers/operator/create/create_test.go index a9c852f3a..b715e8f5d 100644 --- a/controllers/operator/create/create_test.go +++ b/controllers/operator/create/create_test.go @@ -3,6 +3,7 @@ package create import ( "context" "fmt" + "go.uber.org/zap/zaptest" "strings" "sync" "testing" @@ -99,10 +100,10 @@ func TestOpsManagerInKubernetes_InternalConnectivityOverride(t *testing.T) { } memberCluster := multicluster.GetLegacyCentralMemberCluster(testOm.Spec.Replicas, 0, fakeClient, secretsClient) - sts, err := construct.OpsManagerStatefulSet(ctx, 
secretsClient, testOm, memberCluster, zap.S()) + sts, err := construct.OpsManagerStatefulSet(ctx, secretsClient, testOm, memberCluster, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) - err = OpsManagerInKubernetes(ctx, memberCluster, testOm, sts, zap.S()) + err = OpsManagerInKubernetes(ctx, memberCluster, testOm, sts, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) svc, err := fakeClient.GetService(ctx, kube.ObjectKey(testOm.Namespace, testOm.SvcName())) @@ -138,10 +139,10 @@ func TestOpsManagerInKubernetes_DefaultInternalServiceForMultiCluster(t *testing } memberCluster := multicluster.GetLegacyCentralMemberCluster(testOm.Spec.Replicas, 0, fakeClient, secretsClient) - sts, err := construct.OpsManagerStatefulSet(ctx, secretsClient, testOm, memberCluster, zap.S()) + sts, err := construct.OpsManagerStatefulSet(ctx, secretsClient, testOm, memberCluster, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) - err = OpsManagerInKubernetes(ctx, memberCluster, testOm, sts, zap.S()) + err = OpsManagerInKubernetes(ctx, memberCluster, testOm, sts, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) svc, err := fakeClient.GetService(ctx, kube.ObjectKey(testOm.Namespace, testOm.SvcName())) @@ -473,10 +474,10 @@ func TestOpsManagerInKubernetes_ClusterSpecificExternalConnectivity(t *testing.T for _, memberCluster := range memberClusters { ctx := context.Background() - sts, err := construct.OpsManagerStatefulSet(ctx, memberCluster.SecretClient, testOm, memberCluster, zap.S()) + sts, err := construct.OpsManagerStatefulSet(ctx, memberCluster.SecretClient, testOm, memberCluster, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) - err = OpsManagerInKubernetes(ctx, memberCluster, testOm, sts, zap.S()) + err = OpsManagerInKubernetes(ctx, memberCluster, testOm, sts, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) expectedService, ok := tc.expectedServices[memberCluster.Name] @@ -508,10 +509,10 @@ func TestBackupServiceCreated_NoExternalConnectivity(t *testing.T) { } memberCluster := multicluster.GetLegacyCentralMemberCluster(testOm.Spec.Replicas, 0, fakeClient, secretsClient) - sts, err := construct.OpsManagerStatefulSet(ctx, secretsClient, testOm, memberCluster, zap.S()) + sts, err := construct.OpsManagerStatefulSet(ctx, secretsClient, testOm, memberCluster, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) - err = OpsManagerInKubernetes(ctx, memberCluster, testOm, sts, zap.S()) + err = OpsManagerInKubernetes(ctx, memberCluster, testOm, sts, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) _, err = fakeClient.GetService(ctx, kube.ObjectKey(testOm.Namespace, testOm.SvcName()+"-ext")) @@ -552,10 +553,10 @@ func TestBackupServiceCreated_ExternalConnectivity(t *testing.T) { KubeClient: fakeClient, } memberCluster := multicluster.GetLegacyCentralMemberCluster(testOm.Spec.Replicas, 0, fakeClient, secretsClient) - sts, err := construct.OpsManagerStatefulSet(ctx, secretsClient, testOm, memberCluster, zap.S()) + sts, err := construct.OpsManagerStatefulSet(ctx, secretsClient, testOm, memberCluster, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) - err = OpsManagerInKubernetes(ctx, memberCluster, testOm, sts, zap.S()) + err = OpsManagerInKubernetes(ctx, memberCluster, testOm, sts, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) externalService, err := fakeClient.GetService(ctx, kube.ObjectKey(testOm.Namespace, testOm.SvcName()+"-ext")) @@ -827,7 +828,7 @@ func TestDatabaseInKubernetes_ExternalServicesWithPlaceholders_WithExternalDomai } func testDatabaseInKubernetesExternalServices(ctx 
context.Context, t *testing.T, externalAccessConfiguration mdbv1.ExternalAccessConfiguration, expectedServices []corev1.Service) { - log := zap.S() + log := zaptest.NewLogger(t).Sugar() fakeClient, _ := mock.NewDefaultFakeClient() mdb := mdbv1.NewReplicaSetBuilder(). SetName(defaultResourceName). @@ -871,7 +872,7 @@ func testDatabaseInKubernetesExternalServices(ctx context.Context, t *testing.T, func TestDatabaseInKubernetesExternalServicesSharded(t *testing.T) { ctx := context.Background() - log := zap.S() + log := zaptest.NewLogger(t).Sugar() fakeClient, _ := mock.NewDefaultFakeClient() mdb := mdbv1.NewDefaultShardedClusterBuilder(). SetName("mdb"). diff --git a/controllers/operator/mongodbmultireplicaset_controller_test.go b/controllers/operator/mongodbmultireplicaset_controller_test.go index 806ace443..9d1298f40 100644 --- a/controllers/operator/mongodbmultireplicaset_controller_test.go +++ b/controllers/operator/mongodbmultireplicaset_controller_test.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "fmt" + "go.uber.org/zap/zaptest" "sort" "testing" @@ -593,7 +594,7 @@ func TestResourceDeletion(t *testing.T) { } }) - err := reconciler.deleteManagedResources(ctx, *mrs, zap.S()) + err := reconciler.deleteManagedResources(ctx, *mrs, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) clusterSpecs, err := mrs.GetClusterSpecItems() diff --git a/controllers/operator/mongodbopsmanager_controller_multi_test.go b/controllers/operator/mongodbopsmanager_controller_multi_test.go index 0e9161d00..063fc9185 100644 --- a/controllers/operator/mongodbopsmanager_controller_multi_test.go +++ b/controllers/operator/mongodbopsmanager_controller_multi_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/zap" + "go.uber.org/zap/zaptest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -104,7 +104,7 @@ func createOMTLSCert(ctx context.Context, t *testing.T, kubeClient client.Client err := kubeClient.Create(ctx, secret) require.NoError(t, err) - pemHash := enterprisepem.ReadHashFromData(secrets.DataToStringData(secret.Data), zap.S()) + pemHash := enterprisepem.ReadHashFromData(secrets.DataToStringData(secret.Data), zaptest.NewLogger(t).Sugar()) require.NotEmpty(t, pemHash) return secret.Name, pemHash diff --git a/controllers/operator/mongodbopsmanager_controller_test.go b/controllers/operator/mongodbopsmanager_controller_test.go index 83b26b4df..02cc1e130 100644 --- a/controllers/operator/mongodbopsmanager_controller_test.go +++ b/controllers/operator/mongodbopsmanager_controller_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/zap" + "go.uber.org/zap/zaptest" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -62,8 +62,8 @@ func TestOpsManagerReconciler_watchedResources(t *testing.T) { omConnectionFactory := om.NewDefaultCachedOMConnectionFactory() reconciler, _, _ := defaultTestOmReconciler(ctx, t, nil, "", "", testOm, nil, omConnectionFactory) - reconciler.watchMongoDBResourcesReferencedByBackup(ctx, testOm, zap.S()) - reconciler.watchMongoDBResourcesReferencedByBackup(ctx, otherTestOm, zap.S()) + reconciler.watchMongoDBResourcesReferencedByBackup(ctx, testOm, zaptest.NewLogger(t).Sugar()) + reconciler.watchMongoDBResourcesReferencedByBackup(ctx, otherTestOm, zaptest.NewLogger(t).Sugar()) key := watch.Object{ ResourceType: watch.MongoDB, @@ -203,7 +203,7 @@ func 
TestOpsManagerReconciler_removeWatchedResources(t *testing.T) { omConnectionFactory := om.NewDefaultCachedOMConnectionFactory() reconciler, _, _ := defaultTestOmReconciler(ctx, t, nil, "", "", testOm, nil, omConnectionFactory) - reconciler.watchMongoDBResourcesReferencedByBackup(ctx, testOm, zap.S()) + reconciler.watchMongoDBResourcesReferencedByBackup(ctx, testOm, zaptest.NewLogger(t).Sugar()) key := watch.Object{ ResourceType: watch.MongoDB, @@ -215,7 +215,7 @@ func TestOpsManagerReconciler_removeWatchedResources(t *testing.T) { assert.Contains(t, reconciler.resourceWatcher.GetWatchedResources()[key], mock.ObjectKeyFromApiObject(testOm)) // watched resources list is cleared when CR is deleted - reconciler.OnDelete(ctx, testOm, zap.S()) + reconciler.OnDelete(ctx, testOm, zaptest.NewLogger(t).Sugar()) assert.Zero(t, len(reconciler.resourceWatcher.GetWatchedResources())) } @@ -225,7 +225,7 @@ func TestOpsManagerReconciler_prepareOpsManager(t *testing.T) { omConnectionFactory := om.NewDefaultCachedOMConnectionFactory() reconciler, client, initializer := defaultTestOmReconciler(ctx, t, nil, "", "", testOm, nil, omConnectionFactory) - reconcileStatus, _ := reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zap.S()) + reconcileStatus, _ := reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zaptest.NewLogger(t).Sugar()) assert.Equal(t, workflow.OK(), reconcileStatus) assert.Equal(t, "jane.doe@g.com", api.CurrMockedAdmin.PublicKey) @@ -263,7 +263,7 @@ func TestOpsManagerReconcilerPrepareOpsManagerWithTLS(t *testing.T) { addOmCACm(ctx, t, testOm, reconciler) - reconcileStatus, _ := reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zap.S()) + reconcileStatus, _ := reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zaptest.NewLogger(t).Sugar()) assert.Equal(t, workflow.OK(), reconcileStatus) } @@ -278,7 +278,7 @@ func TestOpsManagerReconcilePrepareOpsManagerWithTLSHostCA(t *testing.T) { omConnectionFactory := om.NewDefaultCachedOMConnectionFactory() reconciler, _, _ := defaultTestOmReconciler(ctx, t, nil, "", "", testOm, nil, omConnectionFactory) - reconcileStatus, _ := reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zap.S()) + reconcileStatus, _ := reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zaptest.NewLogger(t).Sugar()) assert.Equal(t, workflow.OK(), reconcileStatus) } @@ -301,7 +301,7 @@ func TestOpsManagerReconciler_prepareOpsManagerTwoCalls(t *testing.T) { omConnectionFactory := om.NewDefaultCachedOMConnectionFactory() reconciler, client, initializer := defaultTestOmReconciler(ctx, t, nil, "", "", testOm, nil, omConnectionFactory) - reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zap.S()) + reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zaptest.NewLogger(t).Sugar()) APIKeySecretName, err := testOm.APIKeySecretName(ctx, secrets.SecretClient{KubeClient: client}, "") assert.NoError(t, err) @@ -313,7 +313,7 @@ func TestOpsManagerReconciler_prepareOpsManagerTwoCalls(t *testing.T) { assert.NoError(t, err) // second call is ok - we just don't create the admin user in OM and don't add new secrets - reconcileStatus, _ := reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zap.S()) + reconcileStatus, _ := reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zaptest.NewLogger(t).Sugar()) assert.Equal(t, workflow.OK(), reconcileStatus) assert.Equal(t, "jane.doe@g.com-key", api.CurrMockedAdmin.PrivateKey) @@ -336,7 +336,7 @@ func 
TestOpsManagerReconciler_prepareOpsManagerDuplicatedUser(t *testing.T) { omConnectionFactory := om.NewDefaultCachedOMConnectionFactory() reconciler, client, initializer := defaultTestOmReconciler(ctx, t, nil, "", "", testOm, nil, omConnectionFactory) - reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zap.S()) + reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zaptest.NewLogger(t).Sugar()) APIKeySecretName, err := testOm.APIKeySecretName(ctx, secrets.SecretClient{KubeClient: client}, "") assert.NoError(t, err) @@ -347,7 +347,7 @@ func TestOpsManagerReconciler_prepareOpsManagerDuplicatedUser(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Namespace: OperatorNamespace, Name: APIKeySecretName}, }) - reconcileStatus, admin := reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zap.S()) + reconcileStatus, admin := reconciler.prepareOpsManager(ctx, testOm, testOm.CentralURL(), zaptest.NewLogger(t).Sugar()) assert.Equal(t, status.PhaseFailed, reconcileStatus.Phase()) option, exists := status.GetOption(reconcileStatus.StatusOptions(), status.MessageOption{}) @@ -372,17 +372,17 @@ func TestOpsManagerGeneratesAppDBPassword_IfNotProvided(t *testing.T) { testOm := DefaultOpsManagerBuilder().Build() kubeManager, omConnectionFactory := mock.NewDefaultFakeClient(testOm) - appDBReconciler, err := newAppDbReconciler(ctx, kubeManager, testOm, omConnectionFactory.GetConnectionFunc, zap.S()) + appDBReconciler, err := newAppDbReconciler(ctx, kubeManager, testOm, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) - password, err := appDBReconciler.ensureAppDbPassword(ctx, testOm, zap.S()) + password, err := appDBReconciler.ensureAppDbPassword(ctx, testOm, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.Len(t, password, 12, "auto generated password should have a size of 12") } func TestOpsManagerUsersPassword_SpecifiedInSpec(t *testing.T) { ctx := context.Background() - log := zap.S() + log := zaptest.NewLogger(t).Sugar() testOm := DefaultOpsManagerBuilder().SetAppDBPassword("my-secret", "password").Build() omConnectionFactory := om.NewDefaultCachedOMConnectionFactory() reconciler, client, _ := defaultTestOmReconciler(ctx, t, nil, "", "", testOm, nil, omConnectionFactory) @@ -402,7 +402,7 @@ func TestOpsManagerUsersPassword_SpecifiedInSpec(t *testing.T) { appDBReconciler, err := reconciler.createNewAppDBReconciler(ctx, testOm, log) require.NoError(t, err) - password, err := appDBReconciler.ensureAppDbPassword(ctx, testOm, zap.S()) + password, err := appDBReconciler.ensureAppDbPassword(ctx, testOm, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.Equal(t, password, "my-password", "the password specified by the SecretRef should have been returned when specified") @@ -735,11 +735,11 @@ func TestOpsManagerBackupAssignmentLabels(t *testing.T) { mockedAdmin := api.NewMockedAdminProvider("testUrl", "publicApiKey", "privateApiKey", true) defer mockedAdmin.(*api.MockedOmAdmin).Reset() - reconcilerHelper, err := NewOpsManagerReconcilerHelper(ctx, reconciler, testOm, nil, zap.S()) + reconcilerHelper, err := NewOpsManagerReconcilerHelper(ctx, reconciler, testOm, nil, zaptest.NewLogger(t).Sugar()) require.NoError(t, err) // when - reconciler.prepareBackupInOpsManager(ctx, reconcilerHelper, testOm, mockedAdmin, "", zap.S()) + reconciler.prepareBackupInOpsManager(ctx, reconcilerHelper, testOm, mockedAdmin, "", zaptest.NewLogger(t).Sugar()) blockStoreConfigs, _ := mockedAdmin.ReadBlockStoreConfigs() oplogConfigs, _ := 
mockedAdmin.ReadOplogStoreConfigs() s3Configs, _ := mockedAdmin.ReadS3Configs() @@ -756,22 +756,22 @@ func TestTriggerOmChangedEventIfNeeded(t *testing.T) { ctx := context.Background() t.Run("Om changed event got triggered, major version update", func(t *testing.T) { nextScheduledTime := agents.NextScheduledUpgradeTime() - assert.NoError(t, triggerOmChangedEventIfNeeded(ctx, omv1.NewOpsManagerBuilder().SetVersion("5.2.13").SetOMStatusVersion("4.2.13").Build(), nil, zap.S())) + assert.NoError(t, triggerOmChangedEventIfNeeded(ctx, omv1.NewOpsManagerBuilder().SetVersion("5.2.13").SetOMStatusVersion("4.2.13").Build(), nil, zaptest.NewLogger(t).Sugar())) assert.NotEqual(t, nextScheduledTime, agents.NextScheduledUpgradeTime()) }) t.Run("Om changed event got triggered, minor version update", func(t *testing.T) { nextScheduledTime := agents.NextScheduledUpgradeTime() - assert.NoError(t, triggerOmChangedEventIfNeeded(ctx, omv1.NewOpsManagerBuilder().SetVersion("4.4.0").SetOMStatusVersion("4.2.13").Build(), nil, zap.S())) + assert.NoError(t, triggerOmChangedEventIfNeeded(ctx, omv1.NewOpsManagerBuilder().SetVersion("4.4.0").SetOMStatusVersion("4.2.13").Build(), nil, zaptest.NewLogger(t).Sugar())) assert.NotEqual(t, nextScheduledTime, agents.NextScheduledUpgradeTime()) }) t.Run("Om changed event got triggered, minor version update, candidate version", func(t *testing.T) { nextScheduledTime := agents.NextScheduledUpgradeTime() - assert.NoError(t, triggerOmChangedEventIfNeeded(ctx, omv1.NewOpsManagerBuilder().SetVersion("4.4.0-rc2").SetOMStatusVersion("4.2.13").Build(), nil, zap.S())) + assert.NoError(t, triggerOmChangedEventIfNeeded(ctx, omv1.NewOpsManagerBuilder().SetVersion("4.4.0-rc2").SetOMStatusVersion("4.2.13").Build(), nil, zaptest.NewLogger(t).Sugar())) assert.NotEqual(t, nextScheduledTime, agents.NextScheduledUpgradeTime()) }) t.Run("Om changed event not triggered, patch version update", func(t *testing.T) { nextScheduledTime := agents.NextScheduledUpgradeTime() - assert.NoError(t, triggerOmChangedEventIfNeeded(ctx, omv1.NewOpsManagerBuilder().SetVersion("4.4.10").SetOMStatusVersion("4.4.0").Build(), nil, zap.S())) + assert.NoError(t, triggerOmChangedEventIfNeeded(ctx, omv1.NewOpsManagerBuilder().SetVersion("4.4.10").SetOMStatusVersion("4.4.0").Build(), nil, zaptest.NewLogger(t).Sugar())) assert.Equal(t, nextScheduledTime, agents.NextScheduledUpgradeTime()) }) } diff --git a/controllers/operator/mongodbreplicaset_controller_test.go b/controllers/operator/mongodbreplicaset_controller_test.go index 4c9abb64c..55111398a 100644 --- a/controllers/operator/mongodbreplicaset_controller_test.go +++ b/controllers/operator/mongodbreplicaset_controller_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" + "go.uber.org/zap/zaptest" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" @@ -358,7 +359,7 @@ func TestCreateReplicaSet_TLS(t *testing.T) { for _, v := range processes { assert.NotNil(t, v.TLSConfig()) assert.Len(t, v.TLSConfig(), 2) - assert.Equal(t, fmt.Sprintf("%s/%s", util.TLSCertMountPath, pem.ReadHashFromSecret(ctx, reconciler.SecretClient, rs.Namespace, fmt.Sprintf("%s-cert", rs.Name), "", zap.S())), v.TLSConfig()["certificateKeyFile"]) + assert.Equal(t, fmt.Sprintf("%s/%s", util.TLSCertMountPath, pem.ReadHashFromSecret(ctx, reconciler.SecretClient, rs.Namespace, fmt.Sprintf("%s-cert", rs.Name), "", zaptest.NewLogger(t).Sugar())), v.TLSConfig()["certificateKeyFile"]) 
assert.Equal(t, "requireTLS", v.TLSConfig()["mode"]) } @@ -378,26 +379,26 @@ func TestUpdateDeploymentTLSConfiguration(t *testing.T) { rsNoTLS := mdbv1.NewReplicaSetBuilder().Build() deploymentWithTLS := deployment.CreateFromReplicaSet("fake-mongoDBImage", false, rsWithTLS) deploymentNoTLS := deployment.CreateFromReplicaSet("fake-mongoDBImage", false, rsNoTLS) - stsWithTLS := construct.DatabaseStatefulSet(*rsWithTLS, construct.ReplicaSetOptions(construct.GetPodEnvOptions()), zap.S()) - stsNoTLS := construct.DatabaseStatefulSet(*rsNoTLS, construct.ReplicaSetOptions(construct.GetPodEnvOptions()), zap.S()) + stsWithTLS := construct.DatabaseStatefulSet(*rsWithTLS, construct.ReplicaSetOptions(construct.GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) + stsNoTLS := construct.DatabaseStatefulSet(*rsNoTLS, construct.ReplicaSetOptions(construct.GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) // TLS Disabled -> TLS Disabled - shouldLockMembers, err := updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentNoTLS), "fake-mongoDBImage", false, 3, rsNoTLS, stsNoTLS, zap.S(), util.CAFilePathInContainer) + shouldLockMembers, err := updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentNoTLS), "fake-mongoDBImage", false, 3, rsNoTLS, stsNoTLS, zaptest.NewLogger(t).Sugar(), util.CAFilePathInContainer) assert.NoError(t, err) assert.False(t, shouldLockMembers) // TLS Disabled -> TLS Enabled - shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentNoTLS), "fake-mongoDBImage", false, 3, rsWithTLS, stsWithTLS, zap.S(), util.CAFilePathInContainer) + shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentNoTLS), "fake-mongoDBImage", false, 3, rsWithTLS, stsWithTLS, zaptest.NewLogger(t).Sugar(), util.CAFilePathInContainer) assert.NoError(t, err) assert.False(t, shouldLockMembers) // TLS Enabled -> TLS Enabled - shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentWithTLS), "fake-mongoDBImage", false, 3, rsWithTLS, stsWithTLS, zap.S(), util.CAFilePathInContainer) + shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentWithTLS), "fake-mongoDBImage", false, 3, rsWithTLS, stsWithTLS, zaptest.NewLogger(t).Sugar(), util.CAFilePathInContainer) assert.NoError(t, err) assert.False(t, shouldLockMembers) // TLS Enabled -> TLS Disabled - shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentWithTLS), "fake-mongoDBImage", false, 3, rsNoTLS, stsNoTLS, zap.S(), util.CAFilePathInContainer) + shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentWithTLS), "fake-mongoDBImage", false, 3, rsNoTLS, stsNoTLS, zaptest.NewLogger(t).Sugar(), util.CAFilePathInContainer) assert.NoError(t, err) assert.True(t, shouldLockMembers) } @@ -418,7 +419,7 @@ func TestCreateDeleteReplicaSet(t *testing.T) { mockedOmConn.CleanHistory() // Now delete it - assert.NoError(t, reconciler.OnDelete(ctx, rs, zap.S())) + assert.NoError(t, reconciler.OnDelete(ctx, rs, zaptest.NewLogger(t).Sugar())) // Operator doesn't mutate K8s state, so we don't check its changes, only OM mockedOmConn.CheckResourcesDeleted(t) diff --git a/controllers/operator/mongodbshardedcluster_controller_multi_test.go b/controllers/operator/mongodbshardedcluster_controller_multi_test.go index 158665f3b..e34c28dab 100644 --- 
a/controllers/operator/mongodbshardedcluster_controller_multi_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_multi_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/require" "github.com/yudai/gojsondiff" "github.com/yudai/gojsondiff/formatter" - "go.uber.org/zap" + "go.uber.org/zap/zaptest" "golang.org/x/exp/constraints" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" @@ -42,9 +42,9 @@ import ( "github.com/mongodb/mongodb-kubernetes/pkg/util" ) -func newShardedClusterReconcilerForMultiCluster(ctx context.Context, forceEnterprise bool, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client, kubeClient kubernetesClient.Client, omConnectionFactory *om.CachedOMConnectionFactory) (*ReconcileMongoDbShardedCluster, *ShardedClusterReconcileHelper, error) { +func newShardedClusterReconcilerForMultiCluster(ctx context.Context, t *testing.T, forceEnterprise bool, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client, kubeClient kubernetesClient.Client, omConnectionFactory *om.CachedOMConnectionFactory) (*ReconcileMongoDbShardedCluster, *ShardedClusterReconcileHelper, error) { r := newShardedClusterReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) - reconcileHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", forceEnterprise, false, sc, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc, zap.S()) + reconcileHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", forceEnterprise, false, sc, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) if err != nil { return nil, nil, err } @@ -369,7 +369,7 @@ func BlockReconcileScalingBothWaysCase(t *testing.T, tc BlockReconcileScalingBot require.NoError(t, err) // Checking that we don't scale both ways is done when we initiate the reconciler, not in the reconcile loop. - reconciler, _, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, _, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) // The validation happens at the beginning of the reconciliation loop. We expect to fail immediately when scaling is // invalid, or stay in pending phase otherwise. @@ -411,7 +411,7 @@ func TestReconcileCreateMultiClusterShardedClusterWithExternalDomain(t *testing. 
kubeClient := kubernetesClient.NewClient(fakeClient) memberClusterMap := getFakeMultiClusterMapWithConfiguredInterceptor(memberClusters.ClusterNames, omConnectionFactory, true, true) - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { @@ -482,7 +482,7 @@ func TestReconcileCreateMultiClusterShardedClusterWithExternalAccessAndOnlyTopLe kubeClient := kubernetesClient.NewClient(fakeClient) memberClusterMap := getFakeMultiClusterMapWithConfiguredInterceptor(memberClusters.ClusterNames, omConnectionFactory, true, true) - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { allHostnames, _ := generateAllHosts(sc, memberClusters.MongosDistribution, clusterMapping, memberClusters.ConfigServerDistribution, memberClusters.ShardDistribution, test.ClusterLocalDomains, test.SingleExternalClusterDomains) @@ -549,7 +549,7 @@ func TestReconcileCreateMultiClusterShardedClusterWithExternalAccessAndNoExterna kubeClient := kubernetesClient.NewClient(fakeClient) memberClusterMap := getFakeMultiClusterMapWithConfiguredInterceptor(memberClusters.ClusterNames, omConnectionFactory, true, true) - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { allHostnames, _ := generateAllHosts(sc, memberClusters.MongosDistribution, clusterMapping, memberClusters.ConfigServerDistribution, memberClusters.ShardDistribution, test.ClusterLocalDomains, test.NoneExternalClusterDomains) @@ -616,7 +616,7 @@ func TestReconcileCreateMultiClusterShardedCluster(t *testing.T) { kubeClient := kubernetesClient.NewClient(fakeClient) memberClusterMap := getFakeMultiClusterMapWithConfiguredInterceptor(memberClusters.ClusterNames, omConnectionFactory, true, true) - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { allHostnames, _ := generateAllHosts(sc, memberClusters.MongosDistribution, clusterMapping, memberClusters.ConfigServerDistribution, memberClusters.ShardDistribution, test.ClusterLocalDomains, test.NoneExternalClusterDomains) @@ -814,7 +814,7 @@ func 
TestReconcileMultiClusterShardedClusterCertsAndSecretsReplication(t *testin memberClusterMap := getFakeMultiClusterMapWithConfiguredInterceptor(memberClusterNames, omConnectionFactory, true, false) ctx := context.Background() - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { allHostnames, _ := generateAllHosts(sc, mongosDistribution, clusterMapping, configSrvDistribution, shardDistribution, test.ClusterLocalDomains, test.NoneExternalClusterDomains) @@ -985,7 +985,7 @@ func TestReconcileForComplexMultiClusterYaml(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(sc) memberClusterMap := getFakeMultiClusterMapWithClusters(memberClusterNames, omConnectionFactory) - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping require.NoError(t, err) @@ -1076,7 +1076,7 @@ func TestMigrateToNewDeploymentState(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(sc) memberClusterMap := getFakeMultiClusterMapWithClusters([]string{multicluster.LegacyCentralClusterName}, omConnectionFactory) - reconciler, _, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, _, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) // Migration is performed at reconciliation, when needed @@ -1130,7 +1130,7 @@ func testDesiredConfigurationFromYAML[T *mdbv1.ShardedClusterComponentSpec | map kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(sc) memberClusterMap := getFakeMultiClusterMapWithClusters(memberClusterNames, omConnectionFactory) - _, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + _, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) var actual interface{} @@ -1251,9 +1251,9 @@ func TestMultiClusterShardedSetRace(t *testing.T) { ctx := context.Background() reconciler := newShardedClusterReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) - allHostnames := generateHostsForCluster(ctx, reconciler, false, sc, mongosDistribution, configSrvDistribution, shardDistribution) - allHostnames1 := generateHostsForCluster(ctx, reconciler, false, sc1, mongosDistribution, configSrvDistribution, shardDistribution) - allHostnames2 := generateHostsForCluster(ctx, reconciler, false, sc2, mongosDistribution, configSrvDistribution, shardDistribution) + allHostnames := generateHostsForCluster(ctx, t, reconciler, false, sc, mongosDistribution, configSrvDistribution, shardDistribution) + 
allHostnames1 := generateHostsForCluster(ctx, t, reconciler, false, sc1, mongosDistribution, configSrvDistribution, shardDistribution) + allHostnames2 := generateHostsForCluster(ctx, t, reconciler, false, sc2, mongosDistribution, configSrvDistribution, shardDistribution) projectHostMapping := map[string][]string{ projectName: allHostnames, @@ -1645,7 +1645,7 @@ func TestMultiClusterShardedMongosDeadlock(t *testing.T) { // TODO: statuses in OM mock // TODO: OM mock: set agent ready depending on a clusterDown parameter ? + set mongos not ready if anything is not ready - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping @@ -1851,7 +1851,7 @@ func TestCheckForMongosDeadlock(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - isDeadLocked, processStates := checkForMongosDeadlock(tc.clusterState, tc.mongosReplicaSetName, tc.isScaling, zap.S()) + isDeadLocked, processStates := checkForMongosDeadlock(tc.clusterState, tc.mongosReplicaSetName, tc.isScaling, zaptest.NewLogger(t).Sugar()) assert.Equal(t, tc.expectedDeadlock, isDeadLocked) assert.Equal(t, tc.expectedProcessStatesSize, len(processStates)) }) @@ -1952,7 +1952,7 @@ func MultiClusterShardedScalingWithOverridesTestCase(t *testing.T, tc MultiClust for _, scalingStep := range tc.scalingSteps { t.Run(scalingStep.name, func(t *testing.T) { - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping @@ -2295,7 +2295,7 @@ func TestMultiClusterShardedScaling(t *testing.T) { memberClusterClients = append(memberClusterClients, c) } - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping addAllHostsWithDistribution := func(connection om.Connection, mongosDistribution map[string]int, clusterMapping map[string]int, configSrvDistribution map[string]int, shardDistribution []map[string]int) { @@ -2341,7 +2341,7 @@ func TestMultiClusterShardedScaling(t *testing.T) { err = kubeClient.Update(ctx, sc) require.NoError(t, err) - reconciler, reconcilerHelper, err = newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err = newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) clusterMapping = reconcilerHelper.deploymentState.ClusterMapping addAllHostsWithDistribution(omConnectionFactory.GetConnection(), mongosDistribution, clusterMapping, configSrvDistribution, shardDistribution) @@ -2368,7 +2368,7 @@ func TestMultiClusterShardedScaling(t *testing.T) { err = 
kubeClient.Update(ctx, sc) require.NoError(t, err) - reconciler, reconcilerHelper, err = newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err = newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) clusterMapping = reconcilerHelper.deploymentState.ClusterMapping addAllHostsWithDistribution(omConnectionFactory.GetConnection(), mongosDistribution, clusterMapping, configSrvDistribution, shardDistribution) @@ -2399,7 +2399,7 @@ func reconcileUntilSuccessful(ctx context.Context, t *testing.T, reconciler reco if expectedReconciles != nil { assert.Equal(t, *expectedReconciles, actualReconciles) } - zap.S().Debugf("Reconcile successful on %d try", actualReconciles) + zaptest.NewLogger(t).Sugar().Debugf("Reconcile successful on %d try", actualReconciles) return } else if object.Status.Phase == status.PhaseFailed { if !ignoreFailures { @@ -2409,8 +2409,8 @@ func reconcileUntilSuccessful(ctx context.Context, t *testing.T, reconciler reco } } -func generateHostsForCluster(ctx context.Context, reconciler *ReconcileMongoDbShardedCluster, forceEnterprise bool, sc *mdbv1.MongoDB, mongosDistribution map[string]int, configSrvDistribution map[string]int, shardDistribution []map[string]int) []string { - reconcileHelper, _ := NewShardedClusterReconcilerHelper(ctx, reconciler.ReconcileCommonController, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", forceEnterprise, false, sc, reconciler.memberClustersMap, reconciler.omConnectionFactory, zap.S()) +func generateHostsForCluster(ctx context.Context, t *testing.T, reconciler *ReconcileMongoDbShardedCluster, forceEnterprise bool, sc *mdbv1.MongoDB, mongosDistribution map[string]int, configSrvDistribution map[string]int, shardDistribution []map[string]int) []string { + reconcileHelper, _ := NewShardedClusterReconcilerHelper(ctx, reconciler.ReconcileCommonController, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", forceEnterprise, false, sc, reconciler.memberClustersMap, reconciler.omConnectionFactory, zaptest.NewLogger(t).Sugar()) allHostnames, _ := generateAllHosts(sc, mongosDistribution, reconcileHelper.deploymentState.ClusterMapping, configSrvDistribution, shardDistribution, test.ClusterLocalDomains, test.NoneExternalClusterDomains) return allHostnames } @@ -2604,7 +2604,7 @@ func TestComputeMembersToScaleDown(t *testing.T) { _, reconcileHelper, _, _, err := defaultClusterReconciler(ctx, nil, "", "", targetSpec, memberClusterMap) assert.NoError(t, err) - membersToScaleDown := reconcileHelper.computeMembersToScaleDown(tc.cfgServerCurrentClusters, tc.shardsCurrentClusters, zap.S()) + membersToScaleDown := reconcileHelper.computeMembersToScaleDown(tc.cfgServerCurrentClusters, tc.shardsCurrentClusters, zaptest.NewLogger(t).Sugar()) assert.Equal(t, tc.expected, membersToScaleDown) }) @@ -3492,7 +3492,7 @@ func TestMultiClusterShardedServiceCreation_WithExternalName(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(sc) memberClusterMap := getFakeMultiClusterMapWithClusters(memberClusters, omConnectionFactory) - reconciler, reconcileHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcileHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, t, false, sc, memberClusterMap, kubeClient, 
omConnectionFactory) require.NoError(t, err) mongosDistribution := clusterSpecListToDistribution(tc.mongosClusterSpecList) diff --git a/controllers/operator/mongodbshardedcluster_controller_test.go b/controllers/operator/mongodbshardedcluster_controller_test.go index 8404ebabb..9553c0fc4 100644 --- a/controllers/operator/mongodbshardedcluster_controller_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_test.go @@ -3,6 +3,7 @@ package operator import ( "context" "fmt" + "go.uber.org/zap/zaptest" "reflect" "strings" "testing" @@ -122,7 +123,7 @@ func TestReconcileCreateSingleClusterShardedClusterWithExternalDomainSimplest(t Build() kubeClient := kubernetesClient.NewClient(fakeClient) - reconciler, _, _ := newShardedClusterReconcilerFromResource(ctx, nil, "", "", sc, nil, kubeClient, omConnectionFactory) + reconciler, _, _ := newShardedClusterReconcilerFromResource(t, ctx, nil, "", "", sc, nil, kubeClient, omConnectionFactory) omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { var allHostnames []string @@ -509,7 +510,7 @@ func TestAddDeleteShardedCluster(t *testing.T) { checkReconcileSuccessful(ctx, t, reconciler, sc, clusterClient) // Now delete it - assert.NoError(t, reconciler.OnDelete(ctx, sc, zap.S())) + assert.NoError(t, reconciler.OnDelete(ctx, sc, zaptest.NewLogger(t).Sugar())) // Operator doesn't mutate K8s state, so we don't check its changes, only OM mockedOmConnection := omConnectionFactory.GetConnection().(*om.MockedOmConnection) @@ -557,9 +558,9 @@ func TestPrepareScaleDownShardedCluster_ConfigMongodsUp(t *testing.T) { kubeClient, _ := mock.NewDefaultFakeClient(scAfterScale) // Store the initial scaling status in state configmap assert.NoError(t, createMockStateConfigMap(kubeClient, mock.TestNamespace, scBeforeScale.Name, initialState)) - _, reconcileHelper, err := newShardedClusterReconcilerFromResource(ctx, nil, "", "", scAfterScale, nil, kubeClient, omConnectionFactory) + _, reconcileHelper, err := newShardedClusterReconcilerFromResource(nil, ctx, nil, "", "", scAfterScale, nil, kubeClient, omConnectionFactory) assert.NoError(t, err) - assert.NoError(t, reconcileHelper.prepareScaleDownShardedCluster(omConnectionFactory.GetConnection(), zap.S())) + assert.NoError(t, reconcileHelper.prepareScaleDownShardedCluster(omConnectionFactory.GetConnection(), zaptest.NewLogger(t).Sugar())) // create the expected deployment from the sharded cluster that has not yet scaled // expected change of state: rs members are marked unvoted @@ -604,7 +605,7 @@ func TestPrepareScaleDownShardedCluster_ShardsUpMongodsDown(t *testing.T) { omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(createDeploymentFromShardedCluster(t, scBeforeScale))) kubeClient, _ := mock.NewDefaultFakeClient(scAfterScale) assert.NoError(t, createMockStateConfigMap(kubeClient, mock.TestNamespace, scBeforeScale.Name, initialState)) - _, reconcileHelper, err := newShardedClusterReconcilerFromResource(ctx, nil, "", "", scAfterScale, nil, kubeClient, omConnectionFactory) + _, reconcileHelper, err := newShardedClusterReconcilerFromResource(nil, ctx, nil, "", "", scAfterScale, nil, kubeClient, omConnectionFactory) assert.NoError(t, err) omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { @@ -616,7 +617,7 @@ func TestPrepareScaleDownShardedCluster_ShardsUpMongodsDown(t *testing.T) { connection.(*om.MockedOmConnection).CleanHistory() }) - assert.NoError(t, 
reconcileHelper.prepareScaleDownShardedCluster(omConnectionFactory.GetConnection(), zap.S())) + assert.NoError(t, reconcileHelper.prepareScaleDownShardedCluster(omConnectionFactory.GetConnection(), zaptest.NewLogger(t).Sugar())) // expected change of state: rs members are marked unvoted only for two shards (old state) expectedDeployment := createDeploymentFromShardedCluster(t, scBeforeScale) @@ -641,7 +642,7 @@ func TestConstructConfigSrv(t *testing.T) { sc := test.DefaultClusterBuilder().Build() configSrvSpec := createConfigSrvSpec(sc) assert.NotPanics(t, func() { - construct.DatabaseStatefulSet(*sc, construct.ConfigServerOptions(configSrvSpec, multicluster.LegacyCentralClusterName, construct.GetPodEnvOptions()), zap.S()) + construct.DatabaseStatefulSet(*sc, construct.ConfigServerOptions(configSrvSpec, multicluster.LegacyCentralClusterName, construct.GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) }) } @@ -660,9 +661,9 @@ func TestPrepareScaleDownShardedCluster_OnlyMongos(t *testing.T) { }) // necessary otherwise next omConnectionFactory.GetConnection() will return nil as the connectionFactoryFunc hasn't been called yet - initializeOMConnection(t, ctx, reconcileHelper, sc, zap.S(), omConnectionFactory) + initializeOMConnection(t, ctx, reconcileHelper, sc, zaptest.NewLogger(t).Sugar(), omConnectionFactory) - assert.NoError(t, reconcileHelper.prepareScaleDownShardedCluster(omConnectionFactory.GetConnection(), zap.S())) + assert.NoError(t, reconcileHelper.prepareScaleDownShardedCluster(omConnectionFactory.GetConnection(), zaptest.NewLogger(t).Sugar())) mockedOmConnection := omConnectionFactory.GetConnection().(*om.MockedOmConnection) mockedOmConnection.CheckNumberOfUpdateRequests(t, 0) mockedOmConnection.CheckDeployment(t, createDeploymentFromShardedCluster(t, sc)) @@ -707,7 +708,7 @@ func TestUpdateOmDeploymentShardedCluster_HostsRemovedFromMonitoring(t *testing. omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(createDeploymentFromShardedCluster(t, sc))) kubeClient, _ := mock.NewDefaultFakeClient(sc) assert.NoError(t, createMockStateConfigMap(kubeClient, mock.TestNamespace, sc.Name, initialState)) - _, reconcileHelper, err := newShardedClusterReconcilerFromResource(ctx, nil, "", "", scScaledDown, nil, kubeClient, omConnectionFactory) + _, reconcileHelper, err := newShardedClusterReconcilerFromResource(nil, ctx, nil, "", "", scScaledDown, nil, kubeClient, omConnectionFactory) assert.NoError(t, err) omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { @@ -728,7 +729,7 @@ func TestUpdateOmDeploymentShardedCluster_HostsRemovedFromMonitoring(t *testing. 
}, nil) mockOm := omConnectionFactory.GetConnection().(*om.MockedOmConnection) - assert.Equal(t, workflow.OK(), reconcileHelper.updateOmDeploymentShardedCluster(ctx, mockOm, scScaledDown, deploymentOptions{podEnvVars: &env.PodEnvVars{ProjectID: "abcd"}}, false, zap.S())) + assert.Equal(t, workflow.OK(), reconcileHelper.updateOmDeploymentShardedCluster(ctx, mockOm, scScaledDown, deploymentOptions{podEnvVars: &env.PodEnvVars{ProjectID: "abcd"}}, false, zaptest.NewLogger(t).Sugar())) mockOm.CheckOrderOfOperations(t, reflect.ValueOf(mockOm.ReadUpdateDeployment), reflect.ValueOf(mockOm.RemoveHost)) @@ -748,8 +749,8 @@ func TestPodAntiaffinity_MongodsInsideShardAreSpread(t *testing.T) { kubeClient, _ := mock.NewDefaultFakeClient(sc) shardSpec, memberCluster := createShardSpecAndDefaultCluster(kubeClient, sc) - firstShardSet := construct.DatabaseStatefulSet(*sc, construct.ShardOptions(0, shardSpec, memberCluster.Name, construct.GetPodEnvOptions()), zap.S()) - secondShardSet := construct.DatabaseStatefulSet(*sc, construct.ShardOptions(1, shardSpec, memberCluster.Name, construct.GetPodEnvOptions()), zap.S()) + firstShardSet := construct.DatabaseStatefulSet(*sc, construct.ShardOptions(0, shardSpec, memberCluster.Name, construct.GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) + secondShardSet := construct.DatabaseStatefulSet(*sc, construct.ShardOptions(1, shardSpec, memberCluster.Name, construct.GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) assert.Equal(t, sc.ShardRsName(0), firstShardSet.Spec.Selector.MatchLabels[construct.PodAntiAffinityLabelKey]) assert.Equal(t, sc.ShardRsName(1), secondShardSet.Spec.Selector.MatchLabels[construct.PodAntiAffinityLabelKey]) @@ -833,9 +834,9 @@ func TestShardedCluster_NeedToPublishState(t *testing.T) { assert.Equal(t, expectedResult, actualResult) assert.Nil(t, err) - allConfigs := reconcilerHelper.getAllConfigs(ctx, *sc, getEmptyDeploymentOptions(), zap.S()) + allConfigs := reconcilerHelper.getAllConfigs(ctx, *sc, getEmptyDeploymentOptions(), zaptest.NewLogger(t).Sugar()) - assert.False(t, anyStatefulSetNeedsToPublishStateToOM(ctx, *sc, clusterClient, reconcilerHelper.deploymentState.LastAchievedSpec, allConfigs, zap.S())) + assert.False(t, anyStatefulSetNeedsToPublishStateToOM(ctx, *sc, clusterClient, reconcilerHelper.deploymentState.LastAchievedSpec, allConfigs, zaptest.NewLogger(t).Sugar())) // attempting to set tls to false require.NoError(t, clusterClient.Get(ctx, kube.ObjectKeyFromApiObject(sc), sc)) @@ -846,8 +847,8 @@ func TestShardedCluster_NeedToPublishState(t *testing.T) { assert.NoError(t, err) // Ops Manager state needs to be published first as we want to reach goal state before unmounting certificates - allConfigs = reconcilerHelper.getAllConfigs(ctx, *sc, getEmptyDeploymentOptions(), zap.S()) - assert.True(t, anyStatefulSetNeedsToPublishStateToOM(ctx, *sc, clusterClient, reconcilerHelper.deploymentState.LastAchievedSpec, allConfigs, zap.S())) + allConfigs = reconcilerHelper.getAllConfigs(ctx, *sc, getEmptyDeploymentOptions(), zaptest.NewLogger(t).Sugar()) + assert.True(t, anyStatefulSetNeedsToPublishStateToOM(ctx, *sc, clusterClient, reconcilerHelper.deploymentState.LastAchievedSpec, allConfigs, zaptest.NewLogger(t).Sugar())) } func TestShardedCustomPodSpecTemplate(t *testing.T) { @@ -1081,7 +1082,7 @@ func TestScalingShardedCluster_ScalesOneMemberAtATime_WhenScalingUp(t *testing.T Build() clusterClient, omConnectionFactory := mock.NewDefaultFakeClient(sc) - reconciler, _, err := newShardedClusterReconcilerFromResource(ctx, nil, "", "", sc, 
nil, clusterClient, omConnectionFactory) + reconciler, _, err := newShardedClusterReconcilerFromResource(nil, ctx, nil, "", "", sc, nil, clusterClient, omConnectionFactory) require.NoError(t, err) // perform initial reconciliation, so we are not creating a new resource @@ -1643,7 +1644,7 @@ func createDeploymentFromShardedCluster(t *testing.T, updatable v1.CustomResourc Replicas(sh.Spec.MongodsPerShardCount), construct.GetPodEnvOptions(), ) - shardSts := construct.DatabaseStatefulSet(*sh, shardOptions, zap.S()) + shardSts := construct.DatabaseStatefulSet(*sh, shardOptions, zaptest.NewLogger(t).Sugar()) shards[i], _ = buildReplicaSetFromProcesses(shardSts.Name, createShardProcesses("fake-mongoDBImage", false, shardSts, sh, ""), sh, sh.Spec.GetMemberOptions(), om.NewDeployment()) } @@ -1654,7 +1655,7 @@ func createDeploymentFromShardedCluster(t *testing.T, updatable v1.CustomResourc Replicas(sh.Spec.MongosCount), construct.GetPodEnvOptions(), ) - mongosSts := construct.DatabaseStatefulSet(*sh, mongosOptions, zap.S()) + mongosSts := construct.DatabaseStatefulSet(*sh, mongosOptions, zaptest.NewLogger(t).Sugar()) mongosProcesses := createMongosProcesses("fake-mongoDBImage", false, mongosSts, sh, util.PEMKeyFilePathInContainer) desiredConfigSrvConfig := createConfigSrvSpec(sh) @@ -1664,7 +1665,7 @@ func createDeploymentFromShardedCluster(t *testing.T, updatable v1.CustomResourc Replicas(sh.Spec.ConfigServerCount), construct.GetPodEnvOptions(), ) - configSvrSts := construct.DatabaseStatefulSet(*sh, configServerOptions, zap.S()) + configSvrSts := construct.DatabaseStatefulSet(*sh, configServerOptions, zaptest.NewLogger(t).Sugar()) configRs, _ := buildReplicaSetFromProcesses(configSvrSts.Name, createConfigSrvProcesses("fake-mongoDBImage", false, configSvrSts, sh, ""), sh, sh.Spec.GetMemberOptions(), om.NewDeployment()) d := om.NewDeployment() @@ -1676,22 +1677,22 @@ func createDeploymentFromShardedCluster(t *testing.T, updatable v1.CustomResourc Finalizing: false, }) assert.NoError(t, err) - d.AddMonitoringAndBackup(zap.S(), sh.Spec.GetSecurity().IsTLSEnabled(), util.CAFilePathInContainer) + d.AddMonitoringAndBackup(zaptest.NewLogger(t).Sugar(), sh.Spec.GetSecurity().IsTLSEnabled(), util.CAFilePathInContainer) return d } func defaultClusterReconciler(ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client) (*ReconcileMongoDbShardedCluster, *ShardedClusterReconcileHelper, kubernetesClient.Client, *om.CachedOMConnectionFactory, error) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(sc) - r, reconcileHelper, err := newShardedClusterReconcilerFromResource(ctx, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, sc, globalMemberClustersMap, kubeClient, omConnectionFactory) + r, reconcileHelper, err := newShardedClusterReconcilerFromResource(nil, ctx, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, sc, globalMemberClustersMap, kubeClient, omConnectionFactory) if err != nil { return nil, nil, nil, nil, err } return r, reconcileHelper, kubeClient, omConnectionFactory, nil } -func newShardedClusterReconcilerFromResource(ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client, kubeClient kubernetesClient.Client, omConnectionFactory *om.CachedOMConnectionFactory) 
(*ReconcileMongoDbShardedCluster, *ShardedClusterReconcileHelper, error) { +func newShardedClusterReconcilerFromResource(t *testing.T, ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client, kubeClient kubernetesClient.Client, omConnectionFactory *om.CachedOMConnectionFactory) (*ReconcileMongoDbShardedCluster, *ShardedClusterReconcileHelper, error) { r := newShardedClusterReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) - reconcileHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, sc, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc, zap.S()) + reconcileHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, sc, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc, zaptest.NewLogger(t).Sugar()) if err != nil { return nil, nil, err } diff --git a/controllers/operator/mongodbstandalone_controller_test.go b/controllers/operator/mongodbstandalone_controller_test.go index 1663b24bd..70693e08f 100644 --- a/controllers/operator/mongodbstandalone_controller_test.go +++ b/controllers/operator/mongodbstandalone_controller_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/zap" + "go.uber.org/zap/zaptest" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/interceptor" @@ -36,7 +36,7 @@ import ( func TestCreateOmProcess(t *testing.T) { const mongodbImage = "quay.io/mongodb/mongodb-enterprise-server" - sts := construct.DatabaseStatefulSet(*DefaultReplicaSetBuilder().SetName("dublin").Build(), construct.StandaloneOptions(construct.GetPodEnvOptions()), zap.S()) + sts := construct.DatabaseStatefulSet(*DefaultReplicaSetBuilder().SetName("dublin").Build(), construct.StandaloneOptions(construct.GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) process := createProcess(mongodbImage, false, sts, util.AgentContainerName, DefaultStandaloneBuilder().Build()) // Note, that for standalone the name of process is the name of statefulset - not the pod inside it. assert.Equal(t, "dublin", process.Name()) @@ -48,7 +48,7 @@ func TestCreateOmProcesStatic(t *testing.T) { const mongodbImage = "quay.io/mongodb/mongodb-enterprise-server" t.Setenv(architectures.DefaultEnvArchitecture, string(architectures.Static)) - sts := construct.DatabaseStatefulSet(*DefaultReplicaSetBuilder().SetName("dublin").Build(), construct.StandaloneOptions(construct.GetPodEnvOptions()), zap.S()) + sts := construct.DatabaseStatefulSet(*DefaultReplicaSetBuilder().SetName("dublin").Build(), construct.StandaloneOptions(construct.GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) process := createProcess(mongodbImage, false, sts, util.AgentContainerName, DefaultStandaloneBuilder().Build()) // Note, that for standalone the name of process is the name of statefulset - not the pod inside it. 
assert.Equal(t, "dublin", process.Name()) @@ -73,7 +73,7 @@ func TestOnAddStandalone(t *testing.T) { assert.Equal(t, *mock.GetMapForObject(kubeClient, &appsv1.StatefulSet{})[st.ObjectKey()].(*appsv1.StatefulSet).Spec.Replicas, int32(1)) assert.Len(t, mock.GetMapForObject(kubeClient, &corev1.Secret{}), 2) - omConn.(*om.MockedOmConnection).CheckDeployment(t, createDeploymentFromStandalone(st), "auth", "tls") + omConn.(*om.MockedOmConnection).CheckDeployment(t, createDeploymentFromStandalone(t, st), "auth", "tls") omConn.(*om.MockedOmConnection).CheckNumberOfUpdateRequests(t, 1) } @@ -171,7 +171,7 @@ func TestAddDeleteStandalone(t *testing.T) { checkReconcileSuccessful(ctx, t, reconciler, st, kubeClient) // Now delete it - assert.NoError(t, reconciler.OnDelete(ctx, st, zap.S())) + assert.NoError(t, reconciler.OnDelete(ctx, st, zaptest.NewLogger(t).Sugar())) mockedConn := omConnectionFactory.GetConnection().(*om.MockedOmConnection) // Operator doesn't mutate K8s state, so we don't check its changes, only OM @@ -400,9 +400,9 @@ func (b *StandaloneBuilder) Build() *mdbv1.MongoDB { return b.DeepCopy() } -func createDeploymentFromStandalone(st *mdbv1.MongoDB) om.Deployment { +func createDeploymentFromStandalone(t *testing.T, st *mdbv1.MongoDB) om.Deployment { d := om.NewDeployment() - sts := construct.DatabaseStatefulSet(*st, construct.StandaloneOptions(construct.GetPodEnvOptions()), zap.S()) + sts := construct.DatabaseStatefulSet(*st, construct.StandaloneOptions(construct.GetPodEnvOptions()), zaptest.NewLogger(t).Sugar()) hostnames, _ := dns.GetDnsForStatefulSet(sts, st.Spec.GetClusterDomain(), nil) process := om.NewMongodProcess(st.Name, hostnames[0], "fake-mongoDBImage", false, st.Spec.AdditionalMongodConfig, st.GetSpec(), "", nil, st.Status.FeatureCompatibilityVersion) @@ -412,6 +412,6 @@ func createDeploymentFromStandalone(st *mdbv1.MongoDB) om.Deployment { } d.MergeStandalone(process, st.Spec.AdditionalMongodConfig.ToMap(), lastConfig.ToMap(), nil) - d.AddMonitoringAndBackup(zap.S(), st.Spec.GetSecurity().IsTLSEnabled(), util.CAFilePathInContainer) + d.AddMonitoringAndBackup(zaptest.NewLogger(t).Sugar(), st.Spec.GetSecurity().IsTLSEnabled(), util.CAFilePathInContainer) return d } diff --git a/controllers/operator/pem_test.go b/controllers/operator/pem_test.go index f7a9e181c..800707244 100644 --- a/controllers/operator/pem_test.go +++ b/controllers/operator/pem_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.uber.org/zap" + "go.uber.org/zap/zaptest" "golang.org/x/xerrors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -146,17 +146,17 @@ func TestReadPemHashFromSecret(t *testing.T) { assert.Empty(t, pem.ReadHashFromSecret(ctx, secrets.SecretClient{ VaultClient: nil, KubeClient: mockSecretGetter{}, - }, mock.TestNamespace, name, "", zap.S()), "secret does not exist so pem hash should be empty") + }, mock.TestNamespace, name, "", zaptest.NewLogger(t).Sugar()), "secret does not exist so pem hash should be empty") hash := pem.ReadHashFromSecret(ctx, secrets.SecretClient{ VaultClient: nil, KubeClient: mockSecretGetter{secret: secret}, - }, mock.TestNamespace, name, "", zap.S()) + }, mock.TestNamespace, name, "", zaptest.NewLogger(t).Sugar()) hash2 := pem.ReadHashFromSecret(ctx, secrets.SecretClient{ VaultClient: nil, KubeClient: mockSecretGetter{secret: secret}, - }, mock.TestNamespace, name, "", zap.S()) + }, mock.TestNamespace, name, "", zaptest.NewLogger(t).Sugar()) assert.NotEmpty(t, hash, "pem hash should be read from the 
secret") assert.Equal(t, hash, hash2, "hash creation should be idempotent") @@ -175,5 +175,5 @@ func TestReadPemHashFromSecretOpaqueType(t *testing.T) { assert.Empty(t, pem.ReadHashFromSecret(ctx, secrets.SecretClient{ VaultClient: nil, KubeClient: mockSecretGetter{secret: secret}, - }, mock.TestNamespace, name, "", zap.S()), "if secret type is not TLS the empty string should be returned") + }, mock.TestNamespace, name, "", zaptest.NewLogger(t).Sugar()), "if secret type is not TLS the empty string should be returned") } diff --git a/mongodb-community-operator/pkg/agent/agent_readiness_test.go b/mongodb-community-operator/pkg/agent/agent_readiness_test.go index 9807a6a6c..ff2cdcc13 100644 --- a/mongodb-community-operator/pkg/agent/agent_readiness_test.go +++ b/mongodb-community-operator/pkg/agent/agent_readiness_test.go @@ -2,6 +2,7 @@ package agent import ( "context" + "go.uber.org/zap/zaptest" "os" "testing" @@ -30,7 +31,7 @@ func TestAllReachedGoalState(t *testing.T) { assert.NoError(t, err) t.Run("Returns true if all pods are not found", func(t *testing.T) { - ready, err := AllReachedGoalState(ctx, sts, mockPodGetter{}, 3, 3, zap.S()) + ready, err := AllReachedGoalState(ctx, sts, mockPodGetter{}, 3, 3, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.True(t, ready) }) @@ -40,7 +41,7 @@ func TestAllReachedGoalState(t *testing.T) { createPodWithAgentAnnotation("3"), createPodWithAgentAnnotation("3"), createPodWithAgentAnnotation("3"), - }}, 3, 3, zap.S()) + }}, 3, 3, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.True(t, ready) }) @@ -50,13 +51,13 @@ func TestAllReachedGoalState(t *testing.T) { createPodWithAgentAnnotation("2"), createPodWithAgentAnnotation("3"), createPodWithAgentAnnotation("3"), - }}, 3, 3, zap.S()) + }}, 3, 3, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.False(t, ready) }) t.Run("Returns true when the pods are not found", func(t *testing.T) { - ready, err := AllReachedGoalState(ctx, sts, mockPodGetter{shouldReturnNotFoundError: true}, 3, 3, zap.S()) + ready, err := AllReachedGoalState(ctx, sts, mockPodGetter{shouldReturnNotFoundError: true}, 3, 3, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.True(t, ready) }) @@ -64,19 +65,19 @@ func TestAllReachedGoalState(t *testing.T) { func TestReachedGoalState(t *testing.T) { t.Run("Pod reaches goal state when annotation is present", func(t *testing.T) { - assert.True(t, ReachedGoalState(createPodWithAgentAnnotation("2"), 2, zap.S())) - assert.True(t, ReachedGoalState(createPodWithAgentAnnotation("4"), 4, zap.S())) - assert.True(t, ReachedGoalState(createPodWithAgentAnnotation("20"), 20, zap.S())) + assert.True(t, ReachedGoalState(createPodWithAgentAnnotation("2"), 2, zaptest.NewLogger(t).Sugar())) + assert.True(t, ReachedGoalState(createPodWithAgentAnnotation("4"), 4, zaptest.NewLogger(t).Sugar())) + assert.True(t, ReachedGoalState(createPodWithAgentAnnotation("20"), 20, zaptest.NewLogger(t).Sugar())) }) t.Run("Pod does not reach goal state when there is a mismatch", func(t *testing.T) { - assert.False(t, ReachedGoalState(createPodWithAgentAnnotation("2"), 4, zap.S())) - assert.False(t, ReachedGoalState(createPodWithAgentAnnotation("3"), 7, zap.S())) - assert.False(t, ReachedGoalState(createPodWithAgentAnnotation("10"), 1, zap.S())) + assert.False(t, ReachedGoalState(createPodWithAgentAnnotation("2"), 4, zaptest.NewLogger(t).Sugar())) + assert.False(t, ReachedGoalState(createPodWithAgentAnnotation("3"), 7, zaptest.NewLogger(t).Sugar())) + assert.False(t, 
ReachedGoalState(createPodWithAgentAnnotation("10"), 1, zaptest.NewLogger(t).Sugar())) }) t.Run("Pod does not reach goal state when annotation is not present", func(t *testing.T) { - assert.False(t, ReachedGoalState(corev1.Pod{}, 10, zap.S())) + assert.False(t, ReachedGoalState(corev1.Pod{}, 10, zaptest.NewLogger(t).Sugar())) }) } diff --git a/mongodb-community-operator/pkg/agent/replica_set_port_manager_test.go b/mongodb-community-operator/pkg/agent/replica_set_port_manager_test.go index 16e7a40e6..3a50fc696 100644 --- a/mongodb-community-operator/pkg/agent/replica_set_port_manager_test.go +++ b/mongodb-community-operator/pkg/agent/replica_set_port_manager_test.go @@ -2,11 +2,11 @@ package agent import ( "fmt" + "go.uber.org/zap/zaptest" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.uber.org/zap" "k8s.io/apimachinery/pkg/types" "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/automationconfig" @@ -200,7 +200,7 @@ func TestReplicaSetPortManagerCalculateExpectedPorts(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - portManager := NewReplicaSetPortManager(zap.S(), tc.in.expectedPort, tc.in.currentPodStates, tc.in.currentAC.Processes) + portManager := NewReplicaSetPortManager(zaptest.NewLogger(t).Sugar(), tc.in.expectedPort, tc.in.currentPodStates, tc.in.currentAC.Processes) portMap, portChangeRequired, oldPort := portManager.calculateExpectedPorts() actualOutput := output{ portMap: portMap, diff --git a/mongodb-community-operator/pkg/util/state/statemachine_test.go b/mongodb-community-operator/pkg/util/state/statemachine_test.go index 38b9e6214..116d4ee3f 100644 --- a/mongodb-community-operator/pkg/util/state/statemachine_test.go +++ b/mongodb-community-operator/pkg/util/state/statemachine_test.go @@ -2,26 +2,17 @@ package state import ( "errors" - "os" + "go.uber.org/zap/zaptest" "testing" "time" "github.com/stretchr/testify/assert" - "go.uber.org/zap" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/util/result" ) -func init() { - logger, err := zap.NewDevelopment() - if err != nil { - os.Exit(1) - } - zap.ReplaceGlobals(logger) -} - // inMemorySaveLoader stores and loads states to member fields // and maintains a history of all the fields saved. 
type inMemorySaveLoader struct { @@ -52,7 +43,7 @@ func newInMemorySaveLoader(startingState string) *inMemorySaveLoader { func TestOrderOfStatesIsCorrect(t *testing.T) { in := newInMemorySaveLoader("State0") - s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + s := NewStateMachine(in, types.NamespacedName{}, zaptest.NewLogger(t).Sugar()) state0 := newAlwaysCompletingState("State0") state1 := newAlwaysCompletingState("State1") @@ -70,7 +61,7 @@ func TestOrderOfStatesIsCorrect(t *testing.T) { func TestOrderOfStatesIsCorrectIfAddedInDifferentOrder(t *testing.T) { in := newInMemorySaveLoader("State0") - s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + s := NewStateMachine(in, types.NamespacedName{}, zaptest.NewLogger(t).Sugar()) state0 := newAlwaysCompletingState("State0") state1 := newAlwaysCompletingState("State1") @@ -92,7 +83,7 @@ func TestOrderOfStatesIsCorrectIfAddedInDifferentOrder(t *testing.T) { func TestPredicateReturningFalse_PreventsStateTransition(t *testing.T) { in := newInMemorySaveLoader("State0") - s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + s := NewStateMachine(in, types.NamespacedName{}, zaptest.NewLogger(t).Sugar()) state0 := newAlwaysCompletingState("State0") state1 := newAlwaysCompletingState("State1") @@ -116,7 +107,7 @@ func TestPredicateReturningFalse_PreventsStateTransition(t *testing.T) { func TestAddTransition(t *testing.T) { in := newInMemorySaveLoader("State0") - s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + s := NewStateMachine(in, types.NamespacedName{}, zaptest.NewLogger(t).Sugar()) state0 := newAlwaysCompletingState("State0") state1 := newAlwaysCompletingState("State1") @@ -144,7 +135,7 @@ func TestIfStateFails_ItIsRunAgain(t *testing.T) { succeeds := newAlwaysCompletingState("SucceedsState") in := newInMemorySaveLoader(fails.Name) - s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + s := NewStateMachine(in, types.NamespacedName{}, zaptest.NewLogger(t).Sugar()) s.AddDirectTransition(fails, succeeds) @@ -180,7 +171,7 @@ func TestStateReconcileValue_IsReturnedFromStateMachine(t *testing.T) { s1 := newAlwaysCompletingState("State1") in := newInMemorySaveLoader(s0.Name) - s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + s := NewStateMachine(in, types.NamespacedName{}, zaptest.NewLogger(t).Sugar()) s.AddDirectTransition(s0, s1) @@ -201,7 +192,7 @@ func TestStateReconcileValue_IsReturnedFromStateMachine(t *testing.T) { s1 := newAlwaysCompletingState("State1") in := newInMemorySaveLoader(s0.Name) - s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + s := NewStateMachine(in, types.NamespacedName{}, zaptest.NewLogger(t).Sugar()) s.AddDirectTransition(s0, s1) @@ -220,7 +211,7 @@ func TestCycleInStateMachine(t *testing.T) { s4 := newAlwaysCompletingState("State4") in := newInMemorySaveLoader("State0") - s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + s := NewStateMachine(in, types.NamespacedName{}, zaptest.NewLogger(t).Sugar()) flag := true s.AddDirectTransition(s0, s1) @@ -259,7 +250,7 @@ func TestBranchingPath(t *testing.T) { right2 := newAlwaysCompletingState("Right2") in := newInMemorySaveLoader(root.Name) - s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + s := NewStateMachine(in, types.NamespacedName{}, zaptest.NewLogger(t).Sugar()) goLeft := true @@ -306,7 +297,7 @@ func TestDetermineStartingState_ReadsFromLoader(t *testing.T) { s1 := newAlwaysCompletingState("State1") in := newInMemorySaveLoader(s0.Name) - s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + 
s := NewStateMachine(in, types.NamespacedName{}, zaptest.NewLogger(t).Sugar()) // State must be added before it can be returned in determine state s.AddDirectTransition(s0, s1) @@ -321,7 +312,7 @@ func TestDetermineStartingState_ReadsFromLoader(t *testing.T) { s0 := newAlwaysCompletingState("State0") in := newInMemorySaveLoader(s0.Name) - s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + s := NewStateMachine(in, types.NamespacedName{}, zaptest.NewLogger(t).Sugar()) assert.Nil(t, s.currentState) err := s.determineState() diff --git a/pkg/multicluster/memberwatch/clusterhealth_test.go b/pkg/multicluster/memberwatch/clusterhealth_test.go index 63d93aa32..9dd99f9ff 100644 --- a/pkg/multicluster/memberwatch/clusterhealth_test.go +++ b/pkg/multicluster/memberwatch/clusterhealth_test.go @@ -1,28 +1,23 @@ package memberwatch import ( + "go.uber.org/zap/zaptest" "net/http" "net/http/httptest" "testing" "time" "github.com/stretchr/testify/assert" - "go.uber.org/zap" ) -func init() { - logger, _ := zap.NewDevelopment() - zap.ReplaceGlobals(logger) -} - func TestIsMemberClusterHealthy(t *testing.T) { // mark cluster as healthy because "200" status code server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { rw.WriteHeader(200) })) - memberHealthCheck := NewMemberHealthCheck(server.URL, []byte("ca-data"), "bhjkb", zap.S()) - healthy := memberHealthCheck.IsClusterHealthy(zap.S()) + memberHealthCheck := NewMemberHealthCheck(server.URL, []byte("ca-data"), "bhjkb", zaptest.NewLogger(t).Sugar()) + healthy := memberHealthCheck.IsClusterHealthy(zaptest.NewLogger(t).Sugar()) assert.Equal(t, true, healthy) // mark cluster unhealthy because != "200" status code @@ -36,8 +31,8 @@ func TestIsMemberClusterHealthy(t *testing.T) { DefaultRetryMax = 2 startTime := time.Now() - memberHealthCheck = NewMemberHealthCheck(server.URL, []byte("ca-data"), "hhfhj", zap.S()) - healthy = memberHealthCheck.IsClusterHealthy(zap.S()) + memberHealthCheck = NewMemberHealthCheck(server.URL, []byte("ca-data"), "hhfhj", zaptest.NewLogger(t).Sugar()) + healthy = memberHealthCheck.IsClusterHealthy(zaptest.NewLogger(t).Sugar()) endTime := time.Since(startTime) assert.Equal(t, false, healthy) @@ -45,7 +40,7 @@ func TestIsMemberClusterHealthy(t *testing.T) { assert.LessOrEqual(t, endTime, DefaultRetryWaitMax*2+time.Second) // mark cluster unhealthy because of error - memberHealthCheck = NewMemberHealthCheck("", []byte("ca-data"), "bhdjbh", zap.S()) - healthy = memberHealthCheck.IsClusterHealthy(zap.S()) + memberHealthCheck = NewMemberHealthCheck("", []byte("ca-data"), "bhdjbh", zaptest.NewLogger(t).Sugar()) + healthy = memberHealthCheck.IsClusterHealthy(zaptest.NewLogger(t).Sugar()) assert.Equal(t, false, healthy) } From 6cb204b8b077804a01a5264f0a0dc6db572c682e Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 22 Jul 2025 15:59:45 +0200 Subject: [PATCH 2/2] fix unit test --- controllers/operator/authentication_test.go | 8 +-- ...odbshardedcluster_controller_multi_test.go | 2 +- .../mongodbshardedcluster_controller_test.go | 54 +++++++++---------- scripts/evergreen/unit-tests.sh | 4 +- 4 files changed, 34 insertions(+), 34 deletions(-) diff --git a/controllers/operator/authentication_test.go b/controllers/operator/authentication_test.go index 95551aef7..a7b7527e6 100644 --- a/controllers/operator/authentication_test.go +++ b/controllers/operator/authentication_test.go @@ -65,7 +65,7 @@ func TestX509ClusterAuthentication_CanBeEnabled_IfX509AuthenticationIsEnabled_Sh ctx := 
context.Background() scWithTls := test.DefaultClusterBuilder().EnableTLS().EnableX509().SetName("sc-with-tls").SetTLSCA("custom-ca").Build() - reconciler, _, client, _, err := defaultClusterReconciler(ctx, nil, "", "", scWithTls, nil) + reconciler, _, client, _, err := defaultClusterReconciler(t, ctx, nil, "", "", scWithTls, nil) require.NoError(t, err) addKubernetesTlsResources(ctx, client, scWithTls) @@ -76,7 +76,7 @@ func TestX509CanBeEnabled_WhenThereAreOnlyTlsDeployments_ShardedCluster(t *testi ctx := context.Background() scWithTls := test.DefaultClusterBuilder().EnableTLS().EnableX509().SetName("sc-with-tls").SetTLSCA("custom-ca").Build() - reconciler, _, client, _, err := defaultClusterReconciler(ctx, nil, "", "", scWithTls, nil) + reconciler, _, client, _, err := defaultClusterReconciler(t, ctx, nil, "", "", scWithTls, nil) require.NoError(t, err) addKubernetesTlsResources(ctx, client, scWithTls) @@ -332,7 +332,7 @@ func TestX509InternalClusterAuthentication_CanBeEnabledWithScram_ShardedCluster( EnableX509InternalClusterAuth(). Build() - r, _, kubeClient, omConnectionFactory, _ := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + r, _, kubeClient, omConnectionFactory, _ := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) addKubernetesTlsResources(ctx, r.client, sc) checkReconcileSuccessful(ctx, t, r, sc, kubeClient) @@ -769,7 +769,7 @@ func Test_NoAdditionalDomainsPresent(t *testing.T) { // The default secret we create does not contain additional domains so it will not be valid for this RS rs.Spec.Security.TLSConfig.AdditionalCertificateDomains = []string{"foo"} - reconciler, _, client, _, err := defaultClusterReconciler(ctx, nil, "", "", rs, nil) + reconciler, _, client, _, err := defaultClusterReconciler(t, ctx, nil, "", "", rs, nil) require.NoError(t, err) addKubernetesTlsResources(ctx, client, rs) diff --git a/controllers/operator/mongodbshardedcluster_controller_multi_test.go b/controllers/operator/mongodbshardedcluster_controller_multi_test.go index e34c28dab..c07b9d781 100644 --- a/controllers/operator/mongodbshardedcluster_controller_multi_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_multi_test.go @@ -2601,7 +2601,7 @@ func TestComputeMembersToScaleDown(t *testing.T) { _, omConnectionFactory := mock.NewDefaultFakeClient(targetSpec) memberClusterMap := getFakeMultiClusterMapWithClusters(memberClusterNames, omConnectionFactory) - _, reconcileHelper, _, _, err := defaultClusterReconciler(ctx, nil, "", "", targetSpec, memberClusterMap) + _, reconcileHelper, _, _, err := defaultClusterReconciler(t, ctx, nil, "", "", targetSpec, memberClusterMap) assert.NoError(t, err) membersToScaleDown := reconcileHelper.computeMembersToScaleDown(tc.cfgServerCurrentClusters, tc.shardsCurrentClusters, zaptest.NewLogger(t).Sugar()) diff --git a/controllers/operator/mongodbshardedcluster_controller_test.go b/controllers/operator/mongodbshardedcluster_controller_test.go index 9553c0fc4..1e69c0fa8 100644 --- a/controllers/operator/mongodbshardedcluster_controller_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_test.go @@ -55,7 +55,7 @@ import ( func TestChangingFCVShardedCluster(t *testing.T) { ctx := context.Background() sc := test.DefaultClusterBuilder().Build() - reconciler, _, cl, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, cl, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) require.NoError(t, err) // Helper function to update and verify FCV @@ -77,7 +77,7 @@ func 
TestReconcileCreateShardedCluster(t *testing.T) { ctx := context.Background() sc := test.DefaultClusterBuilder().Build() - reconciler, _, kubeClient, omConnectionFactory, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, kubeClient, omConnectionFactory, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) c := kubeClient require.NoError(t, err) @@ -221,7 +221,7 @@ func TestReconcileCreateShardedCluster_ScaleDown(t *testing.T) { ctx := context.Background() // First creation sc := test.DefaultClusterBuilder().SetShardCountSpec(4).SetShardCountStatus(4).Build() - reconciler, _, clusterClient, omConnectionFactory, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, clusterClient, omConnectionFactory, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) require.NoError(t, err) checkReconcileSuccessful(ctx, t, reconciler, sc, clusterClient) @@ -262,7 +262,7 @@ func TestShardedClusterReconcileContainerImages(t *testing.T) { ctx := context.Background() sc := test.DefaultClusterBuilder().SetVersion("8.0.0").SetShardCountSpec(1).Build() - reconciler, _, kubeClient, _, err := defaultClusterReconciler(ctx, imageUrlsMock, "2.0.0", "1.0.0", sc, nil) + reconciler, _, kubeClient, _, err := defaultClusterReconciler(t, ctx, imageUrlsMock, "2.0.0", "1.0.0", sc, nil) require.NoError(t, err) checkReconcileSuccessful(ctx, t, reconciler, sc, kubeClient) @@ -300,7 +300,7 @@ func TestShardedClusterReconcileContainerImagesWithStaticArchitecture(t *testing databaseRelatedImageEnv: "quay.io/mongodb/mongodb-enterprise-server:@sha256:MONGODB_DATABASE", } - reconciler, _, kubeClient, omConnectionFactory, err := defaultClusterReconciler(ctx, imageUrlsMock, "", "", sc, nil) + reconciler, _, kubeClient, omConnectionFactory, err := defaultClusterReconciler(t, ctx, imageUrlsMock, "", "", sc, nil) require.NoError(t, err) omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { @@ -341,7 +341,7 @@ func TestReconcilePVCResizeShardedCluster(t *testing.T) { sc.Spec.Persistent = util.BooleanRef(true) sc.Spec.ConfigSrvPodSpec.Persistence = &persistence sc.Spec.ShardPodSpec.Persistence = &persistence - reconciler, _, c, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, c, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) assert.NoError(t, err) // first, we create the shardedCluster with sts and pvc, @@ -502,7 +502,7 @@ func TestAddDeleteShardedCluster(t *testing.T) { // First we need to create a sharded cluster sc := test.DefaultClusterBuilder().Build() - reconciler, _, clusterClient, omConnectionFactory, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, clusterClient, omConnectionFactory, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { connection.(*om.MockedOmConnection).AgentsDelayCount = 1 }) @@ -558,7 +558,7 @@ func TestPrepareScaleDownShardedCluster_ConfigMongodsUp(t *testing.T) { kubeClient, _ := mock.NewDefaultFakeClient(scAfterScale) // Store the initial scaling status in state configmap assert.NoError(t, createMockStateConfigMap(kubeClient, mock.TestNamespace, scBeforeScale.Name, initialState)) - _, reconcileHelper, err := newShardedClusterReconcilerFromResource(nil, ctx, nil, "", "", scAfterScale, nil, kubeClient, omConnectionFactory) + _, reconcileHelper, err := newShardedClusterReconcilerFromResource(t, ctx, nil, "", "", scAfterScale, nil, kubeClient, omConnectionFactory) 
assert.NoError(t, err) assert.NoError(t, reconcileHelper.prepareScaleDownShardedCluster(omConnectionFactory.GetConnection(), zaptest.NewLogger(t).Sugar())) @@ -605,7 +605,7 @@ func TestPrepareScaleDownShardedCluster_ShardsUpMongodsDown(t *testing.T) { omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(createDeploymentFromShardedCluster(t, scBeforeScale))) kubeClient, _ := mock.NewDefaultFakeClient(scAfterScale) assert.NoError(t, createMockStateConfigMap(kubeClient, mock.TestNamespace, scBeforeScale.Name, initialState)) - _, reconcileHelper, err := newShardedClusterReconcilerFromResource(nil, ctx, nil, "", "", scAfterScale, nil, kubeClient, omConnectionFactory) + _, reconcileHelper, err := newShardedClusterReconcilerFromResource(t, ctx, nil, "", "", scAfterScale, nil, kubeClient, omConnectionFactory) assert.NoError(t, err) omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { @@ -651,7 +651,7 @@ func TestConstructConfigSrv(t *testing.T) { func TestPrepareScaleDownShardedCluster_OnlyMongos(t *testing.T) { ctx := context.Background() sc := test.DefaultClusterBuilder().SetMongosCountStatus(4).SetMongosCountSpec(2).Build() - _, reconcileHelper, _, omConnectionFactory, _ := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + _, reconcileHelper, _, omConnectionFactory, _ := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) oldDeployment := createDeploymentFromShardedCluster(t, sc) omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { if _, err := connection.UpdateDeployment(oldDeployment); err != nil { @@ -708,7 +708,7 @@ func TestUpdateOmDeploymentShardedCluster_HostsRemovedFromMonitoring(t *testing. omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(createDeploymentFromShardedCluster(t, sc))) kubeClient, _ := mock.NewDefaultFakeClient(sc) assert.NoError(t, createMockStateConfigMap(kubeClient, mock.TestNamespace, sc.Name, initialState)) - _, reconcileHelper, err := newShardedClusterReconcilerFromResource(nil, ctx, nil, "", "", scScaledDown, nil, kubeClient, omConnectionFactory) + _, reconcileHelper, err := newShardedClusterReconcilerFromResource(t, ctx, nil, "", "", scScaledDown, nil, kubeClient, omConnectionFactory) assert.NoError(t, err) omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { @@ -806,7 +806,7 @@ func TestShardedCluster_WithTLSEnabled_AndX509Enabled_Succeeds(t *testing.T) { SetTLSCA("custom-ca"). 
Build() - reconciler, _, clusterClient, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, clusterClient, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) require.NoError(t, err) addKubernetesTlsResources(ctx, clusterClient, sc) @@ -825,7 +825,7 @@ func TestShardedCluster_NeedToPublishState(t *testing.T) { Build() // perform successful reconciliation to populate all the stateful sets in the mocked client - reconciler, reconcilerHelper, clusterClient, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, reconcilerHelper, clusterClient, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) require.NoError(t, err) addKubernetesTlsResources(ctx, clusterClient, sc) actualResult, err := reconciler.Reconcile(ctx, requestFromObject(sc)) @@ -901,7 +901,7 @@ func TestShardedCustomPodSpecTemplate(t *testing.T) { Spec: configSrvPodSpec, }).Build() - reconciler, _, kubeClient, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, kubeClient, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) require.NoError(t, err) addKubernetesTlsResources(ctx, kubeClient, sc) @@ -1000,7 +1000,7 @@ func TestShardedCustomPodStaticSpecTemplate(t *testing.T) { Spec: configSrvPodSpec, }).Build() - reconciler, _, kubeClient, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, kubeClient, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) require.NoError(t, err) addKubernetesTlsResources(ctx, kubeClient, sc) @@ -1082,7 +1082,7 @@ func TestScalingShardedCluster_ScalesOneMemberAtATime_WhenScalingUp(t *testing.T Build() clusterClient, omConnectionFactory := mock.NewDefaultFakeClient(sc) - reconciler, _, err := newShardedClusterReconcilerFromResource(nil, ctx, nil, "", "", sc, nil, clusterClient, omConnectionFactory) + reconciler, _, err := newShardedClusterReconcilerFromResource(t, ctx, nil, "", "", sc, nil, clusterClient, omConnectionFactory) require.NoError(t, err) // perform initial reconciliation, so we are not creating a new resource @@ -1171,7 +1171,7 @@ func TestScalingShardedCluster_ScalesOneMemberAtATime_WhenScalingDown(t *testing SetShardCountStatus(3). Build() - reconciler, _, clusterClient, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, clusterClient, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) require.NoError(t, err) // perform initial reconciliation so we are not creating a new resource checkReconcileSuccessful(ctx, t, reconciler, sc, clusterClient) @@ -1289,7 +1289,7 @@ func TestShardedClusterPortsAreConfigurable_WithAdditionalMongoConfig(t *testing SetShardAdditionalConfig(shardConfig). 
Build() - reconciler, _, clusterClient, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, clusterClient, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) require.NoError(t, err) checkReconcileSuccessful(ctx, t, reconciler, sc, clusterClient) @@ -1319,7 +1319,7 @@ func TestShardedCluster_ConfigMapAndSecretWatched(t *testing.T) { ctx := context.Background() sc := test.DefaultClusterBuilder().Build() - reconciler, _, clusterClient, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, clusterClient, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) require.NoError(t, err) checkReconcileSuccessful(ctx, t, reconciler, sc, clusterClient) @@ -1338,7 +1338,7 @@ func TestShardedClusterTLSAndInternalAuthResourcesWatched(t *testing.T) { ctx := context.Background() sc := test.DefaultClusterBuilder().SetShardCountSpec(1).EnableTLS().SetTLSCA("custom-ca").Build() sc.Spec.Security.Authentication.InternalCluster = "x509" - reconciler, _, clusterClient, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, clusterClient, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) require.NoError(t, err) addKubernetesTlsResources(ctx, clusterClient, sc) @@ -1386,7 +1386,7 @@ func TestBackupConfiguration_ShardedCluster(t *testing.T) { }). Build() - reconciler, _, clusterClient, omConnectionFactory, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil) + reconciler, _, clusterClient, omConnectionFactory, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil) require.NoError(t, err) omConnectionFactory.SetPostCreateHook(func(c om.Connection) { // 4 because config server + num shards + 1 for entity to represent the sharded cluster itself @@ -1511,7 +1511,7 @@ func TestTlsConfigPrefix_ForShardedCluster(t *testing.T) { }). 
 		Build()
 
-	reconciler, _, clusterClient, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil)
+	reconciler, _, clusterClient, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil)
 	require.NoError(t, err)
 
 	createShardedClusterTLSSecretsFromCustomCerts(ctx, sc, "my-prefix", clusterClient)
@@ -1555,7 +1555,7 @@ func TestShardSpecificPodSpec(t *testing.T) {
 		},
 	}).Build()
 
-	reconciler, _, clusterClient, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil)
+	reconciler, _, clusterClient, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil)
 	require.NoError(t, err)
 	addKubernetesTlsResources(ctx, clusterClient, sc)
 	checkReconcileSuccessful(ctx, t, reconciler, sc, clusterClient)
@@ -1579,7 +1579,7 @@ func TestShardedClusterAgentVersionMapping(t *testing.T) {
 	reconcilerFactory := func(sc *mdbv1.MongoDB) (reconcile.Reconciler, kubernetesClient.Client) {
 		// Go couldn't infer correctly that *ReconcileMongoDbShardedCluster implemented *reconciler.Reconciler interface
 		// without this anonymous function
-		reconciler, _, mockClient, _, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil)
+		reconciler, _, mockClient, _, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil)
 		require.NoError(t, err)
 		return reconciler, mockClient
 	}
@@ -1681,9 +1681,9 @@ func createDeploymentFromShardedCluster(t *testing.T, updatable v1.CustomResourc
 	return d
 }
 
-func defaultClusterReconciler(ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client) (*ReconcileMongoDbShardedCluster, *ShardedClusterReconcileHelper, kubernetesClient.Client, *om.CachedOMConnectionFactory, error) {
+func defaultClusterReconciler(t *testing.T, ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client) (*ReconcileMongoDbShardedCluster, *ShardedClusterReconcileHelper, kubernetesClient.Client, *om.CachedOMConnectionFactory, error) {
 	kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(sc)
-	r, reconcileHelper, err := newShardedClusterReconcilerFromResource(nil, ctx, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, sc, globalMemberClustersMap, kubeClient, omConnectionFactory)
+	r, reconcileHelper, err := newShardedClusterReconcilerFromResource(t, ctx, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, sc, globalMemberClustersMap, kubeClient, omConnectionFactory)
 	if err != nil {
 		return nil, nil, nil, nil, err
 	}
@@ -1767,7 +1767,7 @@ func SingleClusterShardedScalingWithOverridesTestCase(t *testing.T, tc SingleClu
 	for _, scalingStep := range tc.scalingSteps {
 		t.Run(scalingStep.name, func(t *testing.T) {
-			reconciler, reconcilerHelper, kubeClient, omConnectionFactory, err := defaultClusterReconciler(ctx, nil, "", "", sc, nil)
+			reconciler, reconcilerHelper, kubeClient, omConnectionFactory, err := defaultClusterReconciler(t, ctx, nil, "", "", sc, nil)
 			_ = omConnectionFactory.GetConnectionFunc(&om.OMContext{GroupName: om.TestGroupName})
 			require.NoError(t, err)
 			clusterMapping := reconcilerHelper.deploymentState.ClusterMapping
diff --git a/scripts/evergreen/unit-tests.sh b/scripts/evergreen/unit-tests.sh
index 1c005e8e2..3774d2a70 100755
--- a/scripts/evergreen/unit-tests.sh
+++ b/scripts/evergreen/unit-tests.sh
@@ -11,10 +11,10 @@ echo "testing $0"
 rm -f result.suite
 if [ "$USE_RACE" = "true" ]; then
   echo "running test with race enabled"
-  GO_TEST_CMD="go test -v -coverprofile cover.out \$(go list ./... | grep -v \"mongodb-community-operator/test/e2e\")"
+  GO_TEST_CMD="go test -coverprofile cover.out \$(go list ./... | grep -v \"mongodb-community-operator/test/e2e\")"
 else
   echo "running test without race enabled"
-  GO_TEST_CMD="go test -v -coverprofile cover.out \$(go list ./... | grep -v \"mongodb-community-operator/test/e2e\")"
+  GO_TEST_CMD="go test -coverprofile cover.out \$(go list ./... | grep -v \"mongodb-community-operator/test/e2e\")"
 fi
 echo "running $GO_TEST_CMD"
 eval "$GO_TEST_CMD" | tee -a result.suite