Skip to content
Draft

Ssa #302

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
86 changes: 70 additions & 16 deletions internal/controller/aggregates_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,20 +19,24 @@ package controller

import (
"context"
"encoding/json"
"errors"
"fmt"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
k8sacmetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
k8sclient "sigs.k8s.io/controller-runtime/pkg/client"

"github.com/gophercloud/gophercloud/v2"

kvmv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
apiv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/applyconfigurations/api/v1"
"github.com/cobaltcore-dev/openstack-hypervisor-operator/internal/openstack"
"github.com/cobaltcore-dev/openstack-hypervisor-operator/internal/utils"
)
Expand Down Expand Up @@ -76,17 +80,30 @@ func (ac *AggregatesController) Reconcile(ctx context.Context, req ctrl.Request)
// Apply aggregates to OpenStack and update status
aggregates, err := openstack.ApplyAggregates(ctx, ac.computeClient, hv.Name, desiredAggregateNames)
if err != nil {
// Set error condition with retry on conflict
condition := metav1.Condition{
Type: kvmv1.ConditionTypeAggregatesUpdated,
Status: metav1.ConditionFalse,
Reason: kvmv1.ConditionReasonFailed,
Message: fmt.Errorf("failed to apply aggregates: %w", err).Error(),
// Set error condition, preserving current aggregate ownership
statusCfg := apiv1.HypervisorStatus()
statusCfg.Conditions = utils.ConditionsFromStatus(hv.Status.Conditions)
utils.SetApplyConfigurationStatusCondition(&statusCfg.Conditions,
*k8sacmetav1.Condition().
WithType(kvmv1.ConditionTypeAggregatesUpdated).
WithStatus(metav1.ConditionFalse).
WithReason(kvmv1.ConditionReasonFailed).
WithMessage(fmt.Errorf("failed to apply aggregates: %w", err).Error()))
if len(hv.Status.Aggregates) > 0 {
aggCfgs := make([]apiv1.AggregateApplyConfiguration, len(hv.Status.Aggregates))
for i, agg := range hv.Status.Aggregates {
a := apiv1.Aggregate().WithName(agg.Name).WithUUID(agg.UUID)
if len(agg.Metadata) > 0 {
a.WithMetadata(agg.Metadata)
}
aggCfgs[i] = *a
}
statusCfg.Aggregates = aggCfgs
}

if err2 := utils.PatchHypervisorStatusWithRetry(ctx, ac.Client, hv.Name, AggregatesControllerName, func(h *kvmv1.Hypervisor) {
meta.SetStatusCondition(&h.Status.Conditions, condition)
}); err2 != nil {
if err2 := ac.Status().Apply(ctx,
apiv1.Hypervisor(hv.Name, "").WithStatus(statusCfg),
k8sclient.ForceOwnership, k8sclient.FieldOwner(AggregatesControllerName)); err2 != nil {
return ctrl.Result{}, errors.Join(err, err2)
}
return ctrl.Result{}, err
Expand All @@ -106,15 +123,52 @@ func (ac *AggregatesController) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, nil
}

// Patch status with retry on conflict
err := utils.PatchHypervisorStatusWithRetry(ctx, ac.Client, hv.Name, AggregatesControllerName, func(h *kvmv1.Hypervisor) {
if aggregatesChanged {
h.Status.Aggregates = newAggregates
statusCfg := apiv1.HypervisorStatus()
statusCfg.Conditions = utils.ConditionsFromStatus(hv.Status.Conditions)
utils.SetApplyConfigurationStatusCondition(&statusCfg.Conditions,
*k8sacmetav1.Condition().
WithType(desiredCondition.Type).
WithStatus(desiredCondition.Status).
WithReason(desiredCondition.Reason).
WithMessage(desiredCondition.Message))

if aggregatesChanged && len(newAggregates) > 0 {
aggCfgs := make([]apiv1.AggregateApplyConfiguration, len(newAggregates))
for i, agg := range newAggregates {
a := apiv1.Aggregate().WithName(agg.Name).WithUUID(agg.UUID)
if len(agg.Metadata) > 0 {
a.WithMetadata(agg.Metadata)
}
aggCfgs[i] = *a
}
statusCfg.Aggregates = aggCfgs
}

if err := ac.Status().Apply(ctx,
apiv1.Hypervisor(hv.Name, "").WithStatus(statusCfg),
k8sclient.ForceOwnership, k8sclient.FieldOwner(AggregatesControllerName)); err != nil {
return ctrl.Result{}, err
}
Comment on lines +126 to +151
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major | ⚡ Quick win

🧩 Analysis chain

🏁 Script executed:

cd /tmp && find . -name "aggregates_controller.go" -type f 2>/dev/null

Repository: cobaltcore-dev/openstack-hypervisor-operator

Length of output: 70


🏁 Script executed:

# Search for the aggregates_controller.go file
git ls-files | grep -i aggregates_controller

Repository: cobaltcore-dev/openstack-hypervisor-operator

Length of output: 183


🏁 Script executed:

# Let's get the file path and read the relevant sections
git ls-files | grep controller | head -20

Repository: cobaltcore-dev/openstack-hypervisor-operator

Length of output: 1117


🏁 Script executed:

wc -l internal/controller/aggregates_controller.go

Repository: cobaltcore-dev/openstack-hypervisor-operator

Length of output: 137


🏁 Script executed:

cat -n internal/controller/aggregates_controller.go

Repository: cobaltcore-dev/openstack-hypervisor-operator

Length of output: 12968


Aggregates may be inconsistently omitted from the apply when the condition alone changes.

When aggregatesChanged == false but conditionUnchanged == false (e.g., the handover phase transitions from Reason=TestAggregates to Reason=WaitingForTraits while aggregate names [zone, testAggregate] remain the same), the success path at lines 126–151 builds and applies a statusCfg with only conditions. The aggregates field is not set because the condition at line 135 (if aggregatesChanged && len(newAggregates) > 0) evaluates to false.

This creates an inconsistency with the error path (lines 92–102), which explicitly preserves hv.Status.Aggregates in the apply even when aggregates haven't changed. Under SSA + ForceOwnership, where this controller is the sole field manager of the aggregate entries, omitting the field from the apply tells the API server this manager no longer asserts it — which can prune the entries from status. For consistency and robustness, always populate statusCfg.Aggregates from the effective aggregate list (the new ones if changed, otherwise the current hv.Status.Aggregates) so ownership is maintained throughout reconciliation.

Additionally, the aggregate-building logic at lines 93–100 and 136–143 is duplicated; extracting it into a shared helper would improve maintainability. The Get() call at line 165 is also redundant in the merge-patch flow: line 168 can use the already-fetched hv object directly.

🤖 Prompt for AI Agents
Verify each finding against current code. Fix only still-valid issues, skip the
rest with a brief reason, keep changes minimal, and validate.

In `@internal/controller/aggregates_controller.go` around lines 126 - 151, The
success apply path omits Aggregates when aggregatesChanged==false causing
ownership loss; always set statusCfg.Aggregates to the effective list by
building aggregate apply configs from newAggregates when aggregatesChanged &&
len(newAggregates)>0, otherwise from hv.Status.Aggregates, then assign that
slice to statusCfg.Aggregates before calling ac.Status().Apply; extract the
duplicated construction logic into a helper (e.g., buildAggregateApplyConfigs
that returns []apiv1.AggregateApplyConfiguration given the input aggregate list
and uses apiv1.Aggregate().WithName(...).WithUUID(...).WithMetadata(...) as
needed) and replace both places that currently duplicate the loop with calls to
that helper; finally remove the redundant Get() in the merge-patch flow (the
already-fetched hv should be used instead of re-calling Get).


// The Aggregates field uses omitempty in the generated apply config, so an
// empty slice cannot be sent via SSA. Use a targeted merge patch to clear it.
if aggregatesChanged && len(newAggregates) == 0 {
patch, err := json.Marshal(map[string]any{
"status": map[string]any{
"aggregates": []kvmv1.Aggregate{},
},
})
if err != nil {
return ctrl.Result{}, err
}
meta.SetStatusCondition(&h.Status.Conditions, desiredCondition)
})
fresh := &kvmv1.Hypervisor{}
if err := ac.Get(ctx, k8sclient.ObjectKey{Name: hv.Name}, fresh); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, ac.Status().Patch(ctx, fresh, k8sclient.RawPatch(types.MergePatchType, patch))
}

return ctrl.Result{}, err
return ctrl.Result{}, nil
}

// determineDesiredState returns the desired aggregates and the corresponding condition
Expand Down
Loading
Loading