Skip to content

Commit 4472c38

Browse files
fix: Preserve user changes to deployment pod templates
Fixes OLM reverting user changes like kubectl rollout restart. OLM no longer stores pod template metadata, allowing user changes o annotations, labels, and other fields to persist. Generate-by: Cursor/Claude
1 parent 10c2841 commit 4472c38

File tree

3 files changed

+403
-0
lines changed

3 files changed

+403
-0
lines changed

internal/operator-controller/applier/boxcutter.go

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ import (
1212
"slices"
1313
"strings"
1414

15+
"github.com/go-logr/logr"
1516
"helm.sh/helm/v3/pkg/release"
1617
"helm.sh/helm/v3/pkg/storage/driver"
1718
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -153,8 +154,53 @@ func (r *SimpleRevisionGenerator) GenerateRevision(
153154
return r.buildClusterExtensionRevision(objs, ext, revisionAnnotations), nil
154155
}
155156

157+
// removePodTemplateMetadata removes annotations from a Deployment's pod template metadata.
158+
// This prevents OLM from claiming ownership of pod template annotations via Server-Side Apply,
159+
// while preserving labels (which are required for selector matching and chart functionality).
160+
func removePodTemplateMetadata(unstr *unstructured.Unstructured, l logr.Logger) {
161+
if unstr.GetKind() != "Deployment" || unstr.GroupVersionKind().Group != "apps" {
162+
return
163+
}
164+
165+
obj := unstr.Object
166+
spec, ok := obj["spec"].(map[string]any)
167+
if !ok {
168+
return
169+
}
170+
171+
template, ok := spec["template"].(map[string]any)
172+
if !ok {
173+
return
174+
}
175+
176+
templateMeta, ok := template["metadata"].(map[string]any)
177+
if !ok {
178+
return
179+
}
180+
181+
// Remove ONLY annotations from pod template metadata.
182+
// Keep labels because they are needed for:
183+
// 1. Deployment selector matching (API server validates this)
184+
// 2. Chart-provided labels that other resources may depend on
185+
// Removing annotations allows users to add custom annotations (like kubectl rollout restart)
186+
// without OLM overwriting them.
187+
if _, hasAnnotations := templateMeta["annotations"]; hasAnnotations {
188+
delete(templateMeta, "annotations")
189+
l.V(1).Info("removed pod template annotations from Deployment to preserve user changes",
190+
"deployment", unstr.GetName())
191+
}
192+
}
193+
156194
// sanitizedUnstructured takes an unstructured obj, removes status if present, and returns a sanitized copy containing only the allowed metadata entries set below.
157195
// If any unallowed entries are removed, a warning will be logged.
196+
//
197+
// For Deployment objects, this function removes pod template annotations (but keeps labels).
198+
// This prevents OLM from overwriting user-added annotations while preserving required labels.
199+
// Examples: "kubectl rollout restart", "kubectl annotate", or any custom annotations users add.
200+
// Labels are kept because: (1) deployment selector must match template labels, and
201+
// (2) chart-provided labels may be referenced by other resources.
202+
// This fixes the issue where user changes to pod template annotations would be undone by OLM.
203+
// See: https://github.com/operator-framework/operator-lifecycle-manager/issues/3392
158204
func sanitizedUnstructured(ctx context.Context, unstr *unstructured.Unstructured) {
159205
l := log.FromContext(ctx)
160206
obj := unstr.Object
@@ -193,6 +239,13 @@ func sanitizedUnstructured(ctx context.Context, unstr *unstructured.Unstructured
193239
l.Info("warning: extraneous values removed from manifest metadata", "allowed metadata", allowedMetadata)
194240
}
195241
obj["metadata"] = metadataSanitized
242+
243+
// For Deployment objects, remove pod template annotations (but keep labels).
244+
// This prevents OLM from overwriting user-added annotations.
245+
// Examples: kubectl rollout restart, kubectl annotate, custom annotations, etc.
246+
// Labels are preserved because the deployment selector must match template labels,
247+
// and chart-provided labels may be needed by other resources.
248+
removePodTemplateMetadata(unstr, l)
196249
}
197250

198251
func (r *SimpleRevisionGenerator) buildClusterExtensionRevision(
Lines changed: 80 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,80 @@
1+
Feature: Rollout Restart User Changes

# This test checks that OLMv1 does not undo user-added annotations on deployed resources.
# It uses "kubectl rollout restart" as an example, but the fix works for ANY pod template annotations.
#
# Background:
# - In OLMv0, user changes to pod template annotations would be undone by OLM.
# - In OLMv1, OLM should only manage the fields it owns, allowing user changes to stay.
#
# This test makes sure OLMv1 works correctly and does not have the OLMv0 problem.
# See: https://github.com/operator-framework/operator-lifecycle-manager/issues/3392

  Background:
    Given OLM is available
    And ClusterCatalog "test" serves bundles
    And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE}

  # WHAT THIS TEST DOES:
  # This test checks that the fix for issue #3392 works correctly.
  # The test uses "kubectl rollout restart" as an example, but the fix works for ANY annotations.
  #
  # THE PROBLEM (now fixed):
  # When users added annotations to pod templates, Boxcutter would undo the changes:
  # 1. User adds annotation (kubectl rollout restart, kubectl annotate, custom annotations)
  # 2. Boxcutter runs and removes the annotation
  # 3. User changes are lost
  #
  # THE FIX:
  # We changed the code in applier/boxcutter.go to NOT save pod template annotations.
  # This way, OLM won't overwrite user annotations when it runs.
  # Now ANY user annotations added to pod templates will stay in place.
  # (Labels are kept because they're needed for deployment selector matching.)
  #
  # UPSTREAM ISSUE: https://github.com/operator-framework/operator-lifecycle-manager/issues/3392
  @BoxcutterRuntime
  Scenario: User-initiated deployment changes persist after OLM reconciliation
    When ClusterExtension is applied
    """
    apiVersion: olm.operatorframework.io/v1
    kind: ClusterExtension
    metadata:
      name: ${NAME}
    spec:
      namespace: ${TEST_NAMESPACE}
      serviceAccount:
        name: olm-sa
      source:
        sourceType: Catalog
        catalog:
          packageName: test
          selector:
            matchLabels:
              "olm.operatorframework.io/metadata.name": test-catalog
    """
    Then ClusterExtension is rolled out
    And ClusterExtension is available
    And resource "deployment/test-operator" is installed
    And deployment "test-operator" is ready

    # User runs "kubectl rollout restart deployment/test-operator"
    # This adds a restart annotation to trigger a rolling restart of pods.
    # Note: This test uses restart as an example, but the fix works for ANY pod template annotations.
    # In OLMv0, OLM would undo this annotation and stop the rollout.
    # In OLMv1, the annotation should stay because kubectl owns it.
    When user performs rollout restart on "deployment/test-operator"

    # Wait for the rollout to finish - new pods created and running
    Then deployment "test-operator" rollout completes successfully
    And resource "deployment/test-operator" has restart annotation

    # Wait for OLM to run its checks (OLM checks every 10 seconds)
    # This is the important part: does OLM undo what the user did?
    And I wait for "30" seconds

    # After OLM runs, check that the rollout is STILL working
    # In OLMv0, this would fail because OLM undoes the annotation
    # and the new pods would be stopped and old pods would come back
    Then deployment "test-operator" rollout is still successful
    And resource "deployment/test-operator" has restart annotation
    And deployment "test-operator" has expected number of ready replicas

0 commit comments

Comments
 (0)