Commit 051ef5d: unit tests
ykakarap committed Apr 14, 2023
1 parent 4fe5d0c commit 051ef5d
Showing 9 changed files with 307 additions and 210 deletions.
@@ -180,7 +180,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
s := scope.New(cluster)

defer func() {
if err := r.reconcileConditions(s, cluster, reterr); err != nil {
if err := r.reconcileConditions(ctx, s, cluster, reterr); err != nil {
reterr = kerrors.NewAggregate([]error{reterr, errors.Wrap(err, "failed to reconcile cluster topology conditions")})
return
}
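The hunk above threads ctx into reconcileConditions, which runs in a defer so the TopologyReconciled condition is written even when reconciliation fails, and any error from the condition update is aggregated with the reconcile error rather than replacing it. A minimal sketch of that pattern, with doReconcile and updateConditions as hypothetical stand-ins for the reconciler's actual helpers:

package main

import (
	"context"

	kerrors "k8s.io/apimachinery/pkg/util/errors"
)

// Hypothetical stand-ins for the reconciler's main body and for
// reconcileConditions.
func doReconcile(ctx context.Context) error      { return nil }
func updateConditions(ctx context.Context) error { return nil }

// reconcile sketches the defer-and-aggregate pattern: the condition
// update always runs, and a failure there is merged into the original
// reconcile error via kerrors.NewAggregate instead of masking it.
func reconcile(ctx context.Context) (reterr error) {
	defer func() {
		if err := updateConditions(ctx); err != nil {
			reterr = kerrors.NewAggregate([]error{reterr, err})
		}
	}()
	return doReconcile(ctx)
}
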
16 changes: 13 additions & 3 deletions internal/controllers/topology/cluster/conditions.go
@@ -17,6 +17,7 @@ limitations under the License.
package cluster

import (
"context"
"fmt"
"strings"

@@ -28,8 +29,8 @@ import (
"sigs.k8s.io/cluster-api/util/conditions"
)

func (r *Reconciler) reconcileConditions(s *scope.Scope, cluster *clusterv1.Cluster, reconcileErr error) error {
return r.reconcileTopologyReconciledCondition(s, cluster, reconcileErr)
func (r *Reconciler) reconcileConditions(ctx context.Context, s *scope.Scope, cluster *clusterv1.Cluster, reconcileErr error) error {
return r.reconcileTopologyReconciledCondition(ctx, s, cluster, reconcileErr)
}

// reconcileTopologyReconciledCondition sets the TopologyReconciled condition on the cluster.
@@ -42,7 +43,7 @@ func (r *Reconciler) reconcileConditions(s *scope.Scope, cluster *clusterv1.Clus
// - For a managed topology cluster the version upgrade is propagated one component at a time.
// In such a case, since some of the component's spec would be adrift from the topology the
// topology cannot be considered fully reconciled.
func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluster *clusterv1.Cluster, reconcileErr error) error {
func (r *Reconciler) reconcileTopologyReconciledCondition(ctx context.Context, s *scope.Scope, cluster *clusterv1.Cluster, reconcileErr error) error {
// If an error occurred during reconciliation set the TopologyReconciled condition to false.
// Add the error message from the reconcile function to the message of the condition.
if reconcileErr != nil {
@@ -119,6 +120,10 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste
reason = clusterv1.TopologyReconciledMachineDeploymentsUpgradeDeferredReason
}

isAnyMachineDeploymentsUpgrading, err := s.Current.MachineDeployments.IsAnyUpgrading(ctx, r.Client)
if err != nil {
return errors.Wrap(err, "failed to check if any MachineDeployments are upgrading")
}
switch {
case s.UpgradeTracker.ControlPlane.IsProvisioning:
msgBuilder.WriteString(" Control plane is completing initial provisioning")
@@ -133,6 +138,11 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste
case s.UpgradeTracker.ControlPlane.IsScaling:
msgBuilder.WriteString(" Control plane is reconciling desired replicas")

case isAnyMachineDeploymentsUpgrading:
fmt.Fprintf(msgBuilder, " MachineDeployment(s) %s are upgrading",
computeMachineDeploymentNameList(s.UpgradeTracker.MachineDeployments.UpgradingNames()),
)

case s.Current.MachineDeployments.IsAnyRollingOut():
fmt.Fprintf(msgBuilder, " MachineDeployment(s) %s are rolling out",
computeMachineDeploymentNameList(s.UpgradeTracker.MachineDeployments.RolloutNames()),
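The new IsAnyUpgrading check takes a client because, judging from the test fixtures added later in this commit (Machines labeled with ClusterTopologyMachineDeploymentNameLabel whose version trails the MachineDeployment's spec version), a MachineDeployment counts as upgrading while any Machine it selects still runs a different version. A sketch of such a check, inferred from the tests and not the upstream helper itself:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// isUpgrading approximates a per-MachineDeployment "upgrading" check:
// list the Machines matched by the MachineDeployment's selector and
// report true if any still runs a version different from the
// MachineDeployment's desired one.
func isUpgrading(ctx context.Context, c client.Client, md *clusterv1.MachineDeployment) (bool, error) {
	selector, err := metav1.LabelSelectorAsMap(&md.Spec.Selector)
	if err != nil {
		return false, err
	}
	machines := &clusterv1.MachineList{}
	if err := c.List(ctx, machines,
		client.InNamespace(md.Namespace),
		client.MatchingLabels(selector),
	); err != nil {
		return false, err
	}
	for i := range machines.Items {
		v := machines.Items[i].Spec.Version
		if v != nil && md.Spec.Template.Spec.Version != nil && *v != *md.Spec.Template.Spec.Version {
			return true, nil
		}
	}
	return false, nil
}
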
108 changes: 43 additions & 65 deletions internal/controllers/topology/cluster/conditions_test.go
@@ -23,6 +23,9 @@ import (
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
@@ -32,11 +35,16 @@
)

func TestReconcileTopologyReconciledCondition(t *testing.T) {
g := NewWithT(t)
scheme := runtime.NewScheme()
g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed())

tests := []struct {
name string
reconcileErr error
s *scope.Scope
cluster *clusterv1.Cluster
machines []*clusterv1.Machine
wantConditionStatus corev1.ConditionStatus
wantConditionReason string
wantConditionMessage string
@@ -382,7 +390,7 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) {
wantConditionStatus: corev1.ConditionTrue,
},
{
name: "should set the condition to false is some machine deployments have not picked the new version because other machine deployments are rolling out (not all replicas ready)",
name: "should set the condition to false is some machine deployments have not picked the new version because other machine deployments are upgrading",
reconcileErr: nil,
cluster: &clusterv1.Cluster{},
s: &scope.Scope{
@@ -404,6 +412,11 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) {
Object: builder.MachineDeployment("ns1", "md0-abc123").
WithReplicas(2).
WithVersion("v1.22.0").
WithSelector(metav1.LabelSelector{
MatchLabels: map[string]string{
clusterv1.ClusterTopologyMachineDeploymentNameLabel: "md0",
},
}).
WithStatus(clusterv1.MachineDeploymentStatus{
// MD is not ready because we don't have 2 updated, ready and available replicas.
Replicas: int32(2),
@@ -418,67 +431,11 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) {
Object: builder.MachineDeployment("ns1", "md1-abc123").
WithReplicas(2).
WithVersion("v1.21.2").
WithStatus(clusterv1.MachineDeploymentStatus{
Replicas: int32(2),
UpdatedReplicas: int32(2),
ReadyReplicas: int32(2),
AvailableReplicas: int32(2),
UnavailableReplicas: int32(0),
WithSelector(metav1.LabelSelector{
MatchLabels: map[string]string{
clusterv1.ClusterTopologyMachineDeploymentNameLabel: "md1",
},
}).
Build(),
},
},
},
UpgradeTracker: func() *scope.UpgradeTracker {
ut := scope.NewUpgradeTracker()
ut.ControlPlane.PendingUpgrade = false
ut.MachineDeployments.MarkRollingOut("md0-abc123")
ut.MachineDeployments.MarkPendingUpgrade("md1-abc123")
return ut
}(),
HookResponseTracker: scope.NewHookResponseTracker(),
},
wantConditionStatus: corev1.ConditionFalse,
wantConditionReason: clusterv1.TopologyReconciledMachineDeploymentsUpgradePendingReason,
wantConditionMessage: "MachineDeployment(s) md1-abc123 upgrade to version v1.22.0 on hold. MachineDeployment(s) md0-abc123 are rolling out",
},
{
name: "should set the condition to false if some machine deployments have not picked the new version because other machine deployments are rolling out (unavailable replica)",
reconcileErr: nil,
cluster: &clusterv1.Cluster{},
s: &scope.Scope{
Blueprint: &scope.ClusterBlueprint{
Topology: &clusterv1.Topology{
Version: "v1.22.0",
},
},
Current: &scope.ClusterState{
Cluster: &clusterv1.Cluster{},
ControlPlane: &scope.ControlPlaneState{
Object: builder.ControlPlane("ns1", "controlplane1").
WithVersion("v1.22.0").
WithReplicas(3).
Build(),
},
MachineDeployments: scope.MachineDeploymentsStateMap{
"md0": &scope.MachineDeploymentState{
Object: builder.MachineDeployment("ns1", "md0-abc123").
WithReplicas(2).
WithVersion("v1.22.0").
WithStatus(clusterv1.MachineDeploymentStatus{
// MD is not ready because we still have an unavailable replica.
Replicas: int32(2),
UpdatedReplicas: int32(2),
ReadyReplicas: int32(2),
AvailableReplicas: int32(2),
UnavailableReplicas: int32(1),
}).
Build(),
},
"md1": &scope.MachineDeploymentState{
Object: builder.MachineDeployment("ns1", "md1-abc123").
WithReplicas(2).
WithVersion("v1.21.2").
WithStatus(clusterv1.MachineDeploymentStatus{
Replicas: int32(2),
UpdatedReplicas: int32(2),
@@ -493,15 +450,25 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) {
UpgradeTracker: func() *scope.UpgradeTracker {
ut := scope.NewUpgradeTracker()
ut.ControlPlane.PendingUpgrade = false
ut.MachineDeployments.MarkRollingOut("md0-abc123")
ut.MachineDeployments.MarkUpgrading("md0-abc123")
ut.MachineDeployments.MarkPendingUpgrade("md1-abc123")
return ut
}(),
HookResponseTracker: scope.NewHookResponseTracker(),
},
machines: []*clusterv1.Machine{
builder.Machine("ns1", "md0-machine0").
WithLabels(map[string]string{clusterv1.ClusterTopologyMachineDeploymentNameLabel: "md0"}).
WithVersion("v1.21.2"). // Machine's version does not match MachineDeployment's version
Build(),
builder.Machine("ns1", "md1-machine0").
WithLabels(map[string]string{clusterv1.ClusterTopologyMachineDeploymentNameLabel: "md1"}).
WithVersion("v1.21.2").
Build(),
},
wantConditionStatus: corev1.ConditionFalse,
wantConditionReason: clusterv1.TopologyReconciledMachineDeploymentsUpgradePendingReason,
wantConditionMessage: "MachineDeployment(s) md1-abc123 upgrade to version v1.22.0 on hold. MachineDeployment(s) md0-abc123 are rolling out",
wantConditionMessage: "MachineDeployment(s) md1-abc123 upgrade to version v1.22.0 on hold. MachineDeployment(s) md0-abc123 are upgrading",
},
{
name: "should set the condition to false if some machine deployments have not picked the new version because their upgrade has been deferred",
@@ -624,8 +591,19 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)

r := &Reconciler{}
err := r.reconcileTopologyReconciledCondition(tt.s, tt.cluster, tt.reconcileErr)
objs := []client.Object{}
if tt.s != nil && tt.s.Current != nil {
for _, mds := range tt.s.Current.MachineDeployments {
objs = append(objs, mds.Object)
}
}
for _, m := range tt.machines {
objs = append(objs, m)
}
fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()

r := &Reconciler{Client: fakeClient}
err := r.reconcileTopologyReconciledCondition(ctx, tt.s, tt.cluster, tt.reconcileErr)
if tt.wantErr {
g.Expect(err).To(HaveOccurred())
} else {
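The updated test seeds a fake client with every current MachineDeployment plus the per-case Machines, since the condition reconciler now reads Machines through the client. Extracted as a small helper for illustration only (not part of the commit):

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// newFakeClient mirrors the test setup above: register the Cluster API
// types on a fresh scheme and seed the fake client with the objects a
// test case expects to read back.
func newFakeClient(objs ...client.Object) (client.Client, error) {
	scheme := runtime.NewScheme()
	if err := clusterv1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(objs...).
		Build(), nil
}
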
26 changes: 16 additions & 10 deletions internal/controllers/topology/cluster/desired_state.go
@@ -523,18 +523,24 @@ func calculateRefDesiredAPIVersion(currentRef *corev1.ObjectReference, desiredRe

// computeMachineDeployments computes the desired state of the list of MachineDeployments.
func (r *Reconciler) computeMachineDeployments(ctx context.Context, s *scope.Scope, desiredControlPlaneState *scope.ControlPlaneState) (scope.MachineDeploymentsStateMap, error) {
// Mark all the machine deployments that are currently rolling out.
// This captured information will be used for
// - Building the TopologyReconciled condition.
// - Making upgrade decisions on machine deployments.
// Mark all the MachineDeployments that are currently upgrading.
// This captured information is used for:
// - Building the TopologyReconciled condition.
// - Making upgrade decisions on machine deployments.
upgradingMDs, err := s.Current.MachineDeployments.Upgrading(ctx, r.Client)
if err != nil {
return nil, errors.Wrap(err, "failed to list upgrading MachineDeployments")
}
s.UpgradeTracker.MachineDeployments.MarkRollingOut(upgradingMDs...)
s.UpgradeTracker.MachineDeployments.MarkUpgrading(upgradingMDs...)

// Mark all MachineDeployments that are currently rolling out.
// This captured information is used for:
// - Building the TopologyReconciled condition (when control plane upgrade is pending)
s.UpgradeTracker.MachineDeployments.MarkRollingOut(s.Current.MachineDeployments.RollingOut()...)

machineDeploymentsStateMap := make(scope.MachineDeploymentsStateMap)
for _, mdTopology := range s.Blueprint.Topology.Workers.MachineDeployments {
desiredMachineDeployment, err := r.computeMachineDeployment(ctx, s, desiredControlPlaneState, mdTopology)
desiredMachineDeployment, err := computeMachineDeployment(ctx, s, desiredControlPlaneState, mdTopology)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute MachineDepoyment for topology %q", mdTopology.Name)
}
Expand All @@ -546,7 +552,7 @@ func (r *Reconciler) computeMachineDeployments(ctx context.Context, s *scope.Sco
// computeMachineDeployment computes the desired state for a MachineDeploymentTopology.
// The generated machineDeployment object is calculated using the values from the machineDeploymentTopology and
// the machineDeployment class.
func (r *Reconciler) computeMachineDeployment(ctx context.Context, s *scope.Scope, desiredControlPlaneState *scope.ControlPlaneState, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) {
func computeMachineDeployment(_ context.Context, s *scope.Scope, desiredControlPlaneState *scope.ControlPlaneState, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) {
desiredMachineDeployment := &scope.MachineDeploymentState{}

// Gets the blueprint for the MachineDeployment class.
@@ -618,7 +624,7 @@ func (r *Reconciler) computeMachineDeployment(ctx context.Context, s *scope.Scop
// Add ClusterTopologyMachineDeploymentLabel to the generated InfrastructureMachine template
infraMachineTemplateLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
desiredMachineDeployment.InfrastructureMachineTemplate.SetLabels(infraMachineTemplateLabels)
version, err := r.computeMachineDeploymentVersion(ctx, s, machineDeploymentTopology, desiredControlPlaneState, currentMachineDeployment)
version, err := computeMachineDeploymentVersion(s, machineDeploymentTopology, desiredControlPlaneState, currentMachineDeployment)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute version for %s", machineDeploymentTopology.Name)
}
@@ -754,7 +760,7 @@ func (r *Reconciler) computeMachineDeployment(ctx context.Context, s *scope.Scop
// Nb: No MachineDeployment upgrades will be triggered while any MachineDeployment is in the middle
// of an upgrade. Even if the number of MachineDeployments that are being upgraded is less
// than the number of allowed concurrent upgrades.
func (r *Reconciler) computeMachineDeploymentVersion(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, desiredControlPlaneState *scope.ControlPlaneState, currentMDState *scope.MachineDeploymentState) (string, error) {
func computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, desiredControlPlaneState *scope.ControlPlaneState, currentMDState *scope.MachineDeploymentState) (string, error) {
desiredVersion := s.Blueprint.Topology.Version
// If creating a new machine deployment, we can pick up the desired version
// Note: We are not blocking the creation of new machine deployments when
@@ -847,7 +853,7 @@ func (r *Reconciler) computeMachineDeploymentVersion(ctx context.Context, s *sco

// Control plane and machine deployments are stable.
// Ready to pick up the topology version.
s.UpgradeTracker.MachineDeployments.MarkRollingOut(currentMDState.Object.Name)
s.UpgradeTracker.MachineDeployments.MarkUpgrading(currentMDState.Object.Name)
return desiredVersion, nil
}

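The comment on computeMachineDeploymentVersion spells out the gating rule: a MachineDeployment only picks up the topology version once the control plane is stable and no other MachineDeployment is mid-upgrade, even when the number of in-flight upgrades is below the allowed concurrency. A simplified sketch of that decision (the type and field names are stand-ins for the UpgradeTracker API, not the real one):

package main

// tracker is a toy stand-in for scope.UpgradeTracker.
type tracker struct {
	controlPlanePendingUpgrade bool
	controlPlaneUpgrading      bool
	controlPlaneScaling        bool
	mdsUpgrading               map[string]bool
}

// pickMDVersion holds a MachineDeployment at its current version until
// the control plane and all other MachineDeployments are stable; only
// then does it adopt the topology version and record itself as
// upgrading (the MarkUpgrading call in the hunk above).
func pickMDVersion(t *tracker, mdName, currentVersion, topologyVersion string) string {
	if currentVersion == topologyVersion {
		return currentVersion
	}
	if t.controlPlanePendingUpgrade || t.controlPlaneUpgrading || t.controlPlaneScaling {
		return currentVersion
	}
	for name, upgrading := range t.mdsUpgrading {
		// No new upgrade starts while any other MachineDeployment is
		// still upgrading, regardless of the allowed concurrency.
		if upgrading && name != mdName {
			return currentVersion
		}
	}
	t.mdsUpgrading[mdName] = true
	return topologyVersion
}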
