@@ -46,6 +46,7 @@ import (
     corev1 "k8s.io/api/core/v1"
     apiErrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/types"
+
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/reconcile"
 
@@ -73,6 +74,7 @@ import (
     "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret"
     "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset"
     "go.uber.org/zap"
+    "k8s.io/utils/ptr"
 )
 
 type agentType string
@@ -647,7 +649,7 @@ func (r *ReconcileAppDbReplicaSet) ReconcileAppDB(ctx context.Context, opsManage
     // this doesn't requeue the reconciliation immediately, the calling OM controller
     // requeues after Ops Manager has been fully configured.
     log.Infof("Requeuing reconciliation to configure Monitoring in Ops Manager.")
-    // FIXME: use correct MembersOption for scaler
+
     return r.updateStatus(ctx, opsManager, workflow.Pending("Enabling monitoring").Requeue(), log, appDbStatusOption, status.AppDBMemberOptions(appDBScalers...))
 }
 
@@ -990,12 +992,9 @@ func (r *ReconcileAppDbReplicaSet) buildAppDbAutomationConfig(ctx context.Contex
 
     }
 
-    // get member options from appDB spec
-    appDBSpec := opsManager.Spec.AppDB
-    memberOptions := appDBSpec.GetMemberOptions()
-
     processList := r.generateProcessList(opsManager)
-    existingAutomationMemberIds, nextId := getExistingAutomationMemberIds(existingAutomationConfig)
+    existingAutomationMembers, nextId := getExistingAutomationReplicaSetMembers(existingAutomationConfig)
+    memberOptions := r.generateMemberOptions(opsManager, existingAutomationMembers)
     replicasThisReconciliation := 0
     // we want to use all member clusters to maintain the same process list despite having some clusters down
     for _, memberCluster := range r.getAllMemberClusters() {
@@ -1075,8 +1074,8 @@ func (r *ReconcileAppDbReplicaSet) buildAppDbAutomationConfig(ctx context.Contex
         AddModifications(func(automationConfig *automationconfig.AutomationConfig) {
             if len(automationConfig.ReplicaSets) == 1 {
                 for idx, member := range automationConfig.ReplicaSets[0].Members {
-                    if existingId, ok := existingAutomationMemberIds[member.Host]; ok {
-                        automationConfig.ReplicaSets[0].Members[idx].Id = existingId
+                    if existingMember, ok := existingAutomationMembers[member.Host]; ok {
+                        automationConfig.ReplicaSets[0].Members[idx].Id = existingMember.Id
                     } else {
                         automationConfig.ReplicaSets[0].Members[idx].Id = nextId
                         nextId = nextId + 1
@@ -1145,48 +1144,95 @@ func shouldPerformForcedReconfigure(annotations map[string]string) bool {
     return false
 }
 
-func getExistingAutomationMemberIds(automationConfig automationconfig.AutomationConfig) (map[string]int, int) {
+func getExistingAutomationReplicaSetMembers(automationConfig automationconfig.AutomationConfig) (map[string]automationconfig.ReplicaSetMember, int) {
     nextId := 0
-    existingIds := map[string]int{}
+    existingMembers := map[string]automationconfig.ReplicaSetMember{}
     if len(automationConfig.ReplicaSets) != 1 {
-        return existingIds, nextId
+        return existingMembers, nextId
     }
     for _, member := range automationConfig.ReplicaSets[0].Members {
-        existingIds[member.Host] = member.Id
+        existingMembers[member.Host] = member
         if member.Id >= nextId {
             nextId = member.Id + 1
         }
     }
-    return existingIds, nextId
+    return existingMembers, nextId
+}
+
+func (r *ReconcileAppDbReplicaSet) generateProcessHostnames(opsManager *omv1.MongoDBOpsManager, memberCluster multicluster.MemberCluster) []string {
+    members := scale.ReplicasThisReconciliation(scalers.GetAppDBScaler(opsManager, memberCluster.Name, r.getMemberClusterIndex(memberCluster.Name), r.memberClusters))
+    var hostnames []string
+    if opsManager.Spec.AppDB.IsMultiCluster() {
+        hostnames = dns.GetMultiClusterProcessHostnames(opsManager.Spec.AppDB.GetName(), opsManager.GetNamespace(), memberCluster.Index, members, opsManager.Spec.GetClusterDomain(), nil)
+    } else {
+        hostnames, _ = dns.GetDNSNames(opsManager.Spec.AppDB.GetName(), opsManager.Spec.AppDB.ServiceName(), opsManager.GetNamespace(), opsManager.Spec.GetClusterDomain(), members, nil)
+    }
+    return hostnames
 }
 
 func (r *ReconcileAppDbReplicaSet) generateProcessList(opsManager *omv1.MongoDBOpsManager) []automationconfig.Process {
     var processList []automationconfig.Process
     // We want all clusters to generate stable process list in case of some clusters being down. Process list cannot change regardless of the cluster health.
     for _, memberCluster := range r.getAllMemberClusters() {
-        members := scale.ReplicasThisReconciliation(scalers.GetAppDBScaler(opsManager, memberCluster.Name, r.getMemberClusterIndex(memberCluster.Name), r.memberClusters))
-        var hostnames []string
-        if opsManager.Spec.AppDB.IsMultiCluster() {
-            hostnames = dns.GetMultiClusterProcessHostnames(opsManager.Spec.AppDB.GetName(), opsManager.GetNamespace(), memberCluster.Index, members, opsManager.Spec.GetClusterDomain(), nil)
-        } else {
-            hostnames, _ = dns.GetDNSNames(opsManager.Spec.AppDB.GetName(), opsManager.Spec.AppDB.ServiceName(), opsManager.GetNamespace(), opsManager.Spec.GetClusterDomain(), members, nil)
-        }
-
+        hostnames := r.generateProcessHostnames(opsManager, memberCluster)
         for idx, hostname := range hostnames {
-            processList = append(processList, automationconfig.Process{
+            process := automationconfig.Process{
                 Name:     fmt.Sprintf("%s-%d", opsManager.Spec.AppDB.NameForCluster(memberCluster.Index), idx),
                 HostName: hostname,
-            })
+            }
+            processList = append(processList, process)
         }
     }
     return processList
 }
 
+func (r *ReconcileAppDbReplicaSet) generateMemberOptions(opsManager *omv1.MongoDBOpsManager, previousMembers map[string]automationconfig.ReplicaSetMember) []automationconfig.MemberOptions {
+    var memberOptionsList []automationconfig.MemberOptions
+    for _, memberCluster := range r.getAllMemberClusters() {
+        hostnames := r.generateProcessHostnames(opsManager, memberCluster)
+        memberConfig := make([]automationconfig.MemberOptions, 0)
+        if memberCluster.Active {
+            memberConfigForCluster := opsManager.Spec.AppDB.GetMemberClusterSpecByName(memberCluster.Name).MemberConfig
+            if memberConfigForCluster != nil {
+                memberConfig = append(memberConfig, memberConfigForCluster...)
+            }
+        }
+        for idx, hostname := range hostnames {
+            memberOptions := automationconfig.MemberOptions{}
+            if idx < len(memberConfig) { // There are member options configured in the spec
+                memberOptions.Votes = memberConfig[idx].Votes
+                memberOptions.Priority = memberConfig[idx].Priority
+                memberOptions.Tags = memberConfig[idx].Tags
+            } else {
+                // There are three cases in which we might not have memberOptions in the spec:
+                //  1. the user never specified member config in the spec
+                //  2. the user scaled down members, e.g. from 5 to 2, removing memberConfig elements at the same time
+                //  3. the user removed the whole clusterSpecItem from the list (removing the cluster entirely)
+                // For 2. and 3. those members should still be present in the existing automation config.
+                if replicaSetMember, ok := previousMembers[hostname]; ok {
+                    memberOptions.Votes = replicaSetMember.Votes
+                    if replicaSetMember.Priority != nil {
+                        memberOptions.Priority = ptr.To(fmt.Sprintf("%f", *replicaSetMember.Priority))
+                    }
+                    memberOptions.Tags = replicaSetMember.Tags
+
+                } else {
+                    // If the member does not exist in the previous automation config, we populate the member options with defaults
+                    memberOptions.Votes = ptr.To(1)
+                    memberOptions.Priority = ptr.To("1.0")
+                }
+            }
+            memberOptionsList = append(memberOptionsList, memberOptions)
+        }
+
+    }
+    return memberOptionsList
+}
+
 func (r *ReconcileAppDbReplicaSet) generateHeadlessHostnamesForMonitoring(opsManager *omv1.MongoDBOpsManager) []string {
     var hostnames []string
     // We want all clusters to generate stable process list in case of some clusters being down. Process list cannot change regardless of the cluster health.
     for _, memberCluster := range r.getAllMemberClusters() {
-        // TODO for now scaling is disabled - we create all desired processes
         members := scale.ReplicasThisReconciliation(scalers.GetAppDBScaler(opsManager, memberCluster.Name, r.getMemberClusterIndex(memberCluster.Name), r.memberClusters))
         if opsManager.Spec.AppDB.IsMultiCluster() {
            hostnames = append(hostnames, dns.GetMultiClusterHostnamesForMonitoring(opsManager.Spec.AppDB.GetName(), opsManager.GetNamespace(), memberCluster.Index, members)...)
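To make the behaviour of the new generateMemberOptions easier to follow, below is a standalone, simplified Go sketch; it is not taken from this patch, and the MemberOptions/ReplicaSetMember structs, the ptrTo helper, and the hostnames are hypothetical stand-ins for the operator's automationconfig types. It shows the per-member precedence the function applies: memberConfig from the AppDB spec first, then values carried over from the existing automation config, then defaults of one vote and priority "1.0".

// memberoptions_sketch.go - illustration of the member-options fallback order.
package main

import "fmt"

// Simplified stand-ins for the operator's automationconfig.MemberOptions and
// automationconfig.ReplicaSetMember types.
type MemberOptions struct {
    Votes    *int
    Priority *string
    Tags     map[string]string
}

type ReplicaSetMember struct {
    Votes    *int
    Priority *float32
    Tags     map[string]string
}

func ptrTo[T any](v T) *T { return &v }

// optionsFor mirrors the fallback chain used by generateMemberOptions for one hostname:
// spec memberConfig -> previous automation config member -> defaults.
func optionsFor(idx int, hostname string, specConfig []MemberOptions, previous map[string]ReplicaSetMember) MemberOptions {
    if idx < len(specConfig) {
        return specConfig[idx] // options explicitly configured in the AppDB spec win
    }
    if prev, ok := previous[hostname]; ok {
        // keep whatever the existing automation config already had for this member
        opts := MemberOptions{Votes: prev.Votes, Tags: prev.Tags}
        if prev.Priority != nil {
            opts.Priority = ptrTo(fmt.Sprintf("%f", *prev.Priority))
        }
        return opts
    }
    // brand-new member: defaults of one vote and priority "1.0"
    return MemberOptions{Votes: ptrTo(1), Priority: ptrTo("1.0")}
}

func main() {
    // hypothetical spec: member options only for the first member
    spec := []MemberOptions{{Votes: ptrTo(1), Priority: ptrTo("2.0")}}
    // hypothetical previous automation config: a member no longer covered by the spec
    previous := map[string]ReplicaSetMember{
        "appdb-1.appdb-svc.ns.svc.cluster.local": {Votes: ptrTo(0), Priority: ptrTo[float32](0)},
    }
    hosts := []string{
        "appdb-0.appdb-svc.ns.svc.cluster.local", // taken from the spec
        "appdb-1.appdb-svc.ns.svc.cluster.local", // carried over from the previous automation config
        "appdb-2.appdb-svc.ns.svc.cluster.local", // gets the defaults
    }
    for idx, host := range hosts {
        o := optionsFor(idx, host, spec, previous)
        fmt.Printf("%s -> votes=%d priority=%s\n", host, *o.Votes, *o.Priority)
    }
}

The float-to-string conversion mirrors the ptr.To(fmt.Sprintf("%f", ...)) step in the diff: the existing automation config stores priority as a number, while the member options carry it as a string.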