Commit 168e681

Merge pull request #1339 from fluxcd/backport-1338-to-release/v1.4.x
[release/v1.4.x] Fix status reporting for `RetryOnFailure` strategy
2 parents 310cd46 + fdce985 commit 168e681
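
The fix changes how a pending `RetryOnFailure` wait is surfaced on the object's status: instead of deleting the Reconciling condition and setting Ready=False with a generic `RetryAfterInterval` reason, the controller now keeps the real failure reason on Ready (for example `InstallFailed`) and marks Reconciling=True with reason `ProgressingWithRetry` and a "retrying after <interval>" message. A minimal sketch of how a client could tell a pending retry apart from a terminal failure, assuming the `fluxcd/pkg/apis/meta` and `fluxcd/pkg/runtime/conditions` helpers and the helm-controller v2 API; the `describeRetryState` helper is hypothetical and not part of this commit:

```go
package example

import (
	"fmt"

	"github.com/fluxcd/pkg/apis/meta"
	"github.com/fluxcd/pkg/runtime/conditions"

	v2 "github.com/fluxcd/helm-controller/api/v2"
)

// describeRetryState is a hypothetical helper showing how the new conditions
// read after this change: a pending RetryOnFailure wait is Reconciling=True
// with reason ProgressingWithRetry, while Ready keeps the underlying failure
// reason (e.g. InstallFailed).
func describeRetryState(obj *v2.HelmRelease) string {
	if conditions.IsTrue(obj, meta.ReconcilingCondition) &&
		conditions.GetReason(obj, meta.ReconcilingCondition) == meta.ProgressingWithRetryReason {
		// Waiting out the retry interval; the message carries "retrying after <interval>".
		return fmt.Sprintf("retry pending: %s", conditions.GetMessage(obj, meta.ReconcilingCondition))
	}
	if conditions.IsFalse(obj, meta.ReadyCondition) {
		// Not ready and not retrying: report the actual failure.
		return fmt.Sprintf("failed: %s: %s",
			conditions.GetReason(obj, meta.ReadyCondition),
			conditions.GetMessage(obj, meta.ReadyCondition))
	}
	return "ready or progressing"
}
```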

File tree: 2 files changed (+23, -19 lines)

internal/controller/helmrelease_controller_test.go

Lines changed: 12 additions & 9 deletions
@@ -692,12 +692,12 @@ func TestHelmReleaseReconciler_reconcileRelease(t *testing.T) {
     ObjectMeta: metav1.ObjectMeta{
         Name:       "release",
         Namespace:  "mock",
-        Generation: 2,
+        Generation: 1,
     },
     Spec: v2.HelmReleaseSpec{
         // Trigger a failure by setting an invalid storage namespace,
         // preventing the release from actually being installed.
-        // This allows us to just test the failure count reset, without
+        // This allows us to just test the RetryOnFailure strategy, without
        // having to facilitate a full install.
         StorageNamespace: "not-exist",
         Install: &v2.Install{
@@ -708,11 +708,8 @@ func TestHelmReleaseReconciler_reconcileRelease(t *testing.T) {
         },
     },
     Status: v2.HelmReleaseStatus{
-        HelmChart:               "mock/chart",
-        InstallFailures:         2,
-        UpgradeFailures:         3,
-        Failures:                5,
-        LastAttemptedGeneration: 2,
+        HelmChart:          "mock/chart",
+        ObservedGeneration: 1,
     },
 }

@@ -734,11 +731,17 @@ func TestHelmReleaseReconciler_reconcileRelease(t *testing.T) {
 g.Expect(res.RequeueAfter).To(BeNumerically("==", time.Minute))

 g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{
+    {
+        Type:    "Reconciling",
+        Status:  "True",
+        Reason:  "ProgressingWithRetry",
+        Message: "retrying after 1m0s",
+    },
     {
         Type:    "Ready",
         Status:  "False",
-        Reason:  "RetryAfterInterval",
-        Message: "Will retry after 1m0s",
+        Reason:  "InstallFailed",
+        Message: "Helm install failed for release mock/release with chart [email protected]: create: failed to create: namespaces \"not-exist\" not found",
     },
     {
         Type: "Released",

internal/reconcile/atomic_release.go

Lines changed: 11 additions & 10 deletions
@@ -195,7 +195,9 @@ func (r *AtomicRelease) Reconcile(ctx context.Context, req *Request) error {
 }

 // Determine the next action to run based on the current state.
-log.V(logger.DebugLevel).Info("determining next Helm action based on current state")
+log.V(logger.DebugLevel).Info(
+    fmt.Sprintf("determining next Helm action based on state: '%s' reason '%s'", state.Status, state.Reason),
+)
 if next, err = r.actionForState(ctx, req, state); err != nil {
     if errors.Is(err, ErrExceededMaxRetries) {
         conditions.MarkStalled(req.Object, "RetriesExceeded", "Failed to %s after %d attempt(s)",
@@ -211,6 +213,7 @@ func (r *AtomicRelease) Reconcile(ctx context.Context, req *Request) error {

 // If there is no next action, we are done.
 if next == nil {
+    log.V(logger.DebugLevel).Info("no further action to take, atomic release completed")
     conditions.Delete(req.Object, meta.ReconcilingCondition)

     // Always summarize; this ensures we restore transient errors
@@ -241,9 +244,7 @@ func (r *AtomicRelease) Reconcile(ctx context.Context, req *Request) error {
     )

     if retry := req.Object.GetActiveRetry(); retry != nil {
-        conditions.Delete(req.Object, meta.ReconcilingCondition)
-        conditions.MarkFalse(req.Object, meta.ReadyCondition, "RetryAfterInterval",
-            "Will retry after %s", retry.GetRetryInterval().String())
+        conditions.MarkReconciling(req.Object, meta.ProgressingWithRetryReason, "retrying after %s", retry.GetRetryInterval().String())
         return ErrRetryAfterInterval
     }

@@ -278,11 +279,13 @@ func (r *AtomicRelease) Reconcile(ctx context.Context, req *Request) error {
 // Run the action sub-reconciler.
 log.Info(fmt.Sprintf("running '%s' action with timeout of %s", next.Name(), timeoutForAction(next, req.Object).String()))
 if err = next.Reconcile(ctx, req); err != nil {
+    log.V(logger.DebugLevel).Info(
+        fmt.Sprintf("action reconciler %s of type %s returned error: %s", next.Name(), next.Type(), err),
+    )
+
     if retry := req.Object.GetActiveRetry(); retry != nil {
         log.Error(err, fmt.Sprintf("failed to run '%s' action", next.Name()))
-        conditions.Delete(req.Object, meta.ReconcilingCondition)
-        conditions.MarkFalse(req.Object, meta.ReadyCondition, "RetryAfterInterval",
-            "Will retry after %s", retry.GetRetryInterval().String())
+        conditions.MarkReconciling(req.Object, meta.ProgressingWithRetryReason, "retrying after %s", retry.GetRetryInterval().String())
         return ErrRetryAfterInterval
     }

@@ -299,9 +302,7 @@ func (r *AtomicRelease) Reconcile(ctx context.Context, req *Request) error {
     )

     if retry := req.Object.GetActiveRetry(); retry != nil {
-        conditions.Delete(req.Object, meta.ReconcilingCondition)
-        conditions.MarkFalse(req.Object, meta.ReadyCondition, "RetryAfterInterval",
-            "Will retry after %s", retry.GetRetryInterval().String())
+        conditions.MarkReconciling(req.Object, meta.ProgressingWithRetryReason, "retrying after %s", retry.GetRetryInterval().String())
         return ErrRetryAfterInterval
     }

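
The same retry branch now appears at three call sites in `Reconcile`. Purely as a sketch (not part of this commit), it could be consolidated into a small helper in the same package; `ErrRetryAfterInterval` is assumed to be the sentinel already defined there:

```go
package reconcile

import (
	"time"

	"github.com/fluxcd/pkg/apis/meta"
	"github.com/fluxcd/pkg/runtime/conditions"
)

// markRetryPending is a hypothetical helper consolidating the branch repeated
// above: it marks the object as reconciling with the ProgressingWithRetry
// reason and returns the sentinel error that makes AtomicRelease requeue
// after the retry interval.
func markRetryPending(obj conditions.Setter, interval time.Duration) error {
	conditions.MarkReconciling(obj, meta.ProgressingWithRetryReason,
		"retrying after %s", interval.String())
	return ErrRetryAfterInterval
}
```

Each call site would then reduce to `return markRetryPending(req.Object, retry.GetRetryInterval())`.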
