Commit 0cc5fcf

pierDipi authored and matzew committed
TODO: HPA config needs to be removed when scaling to 0
Signed-off-by: Pierangelo Di Pilato <[email protected]>
1 parent 96b29d8 commit 0cc5fcf

4 files changed (+52 / -5 lines)


olm-catalog/serverless-operator/manifests/serverless-operator.clusterserviceversion.yaml

Lines changed: 16 additions & 0 deletions
@@ -499,6 +499,22 @@ spec:
   rules:
     # These are needed to create the various different resources.
     # Upstream manifests
+    - apiGroups:
+        - eventing.knative.dev
+      resources:
+        - brokers
+      verbs:
+        - get
+        - list
+        - watch
+    - apiGroups:
+        - messaging.knative.dev
+      resources:
+        - inmemorychannels
+      verbs:
+        - get
+        - list
+        - watch
     - apiGroups:
         - ""
       resources:
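
The two new rules give the operator's service account read access to Brokers and InMemoryChannels, which the scaler's informers (see scale.go below) need for their get/list/watch calls. As a quick sanity check that the rules took effect, a SelfSubjectAccessReview can be issued with the operator's credentials; this is a minimal sketch, and the kubeconfig path is an assumption for illustration, not part of this commit:

package main

import (
	"context"
	"fmt"

	authv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig carrying the operator service account's token.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/operator-sa.kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Ask the API server whether "list brokers.eventing.knative.dev" is allowed
	// for the current identity, i.e. one of the verbs added by this commit.
	review := &authv1.SelfSubjectAccessReview{
		Spec: authv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authv1.ResourceAttributes{
				Group:    "eventing.knative.dev",
				Resource: "brokers",
				Verb:     "list",
			},
		},
	}
	resp, err := client.AuthorizationV1().SelfSubjectAccessReviews().
		Create(context.Background(), review, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("allowed:", resp.Status.Allowed) // expected: true once the CSV is applied
}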

openshift-knative-operator/pkg/eventing/extension.go

Lines changed: 2 additions & 0 deletions
@@ -143,6 +143,8 @@ func (e *extension) Reconcile(ctx context.Context, comp base.KComponent) error {
 		eventingistio.ScaleIstioController(requiredNs, ke, 1)
 	}
 
+	e.logger.Debugw("resource spec", zap.Any("resource", ke.Spec))
+
 	return monitoring.ReconcileMonitoringForEventing(ctx, e.kubeclient, ke)
 }
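
The new Debugw call logs the whole KnativeEventing spec through zap's sugared logger; zap.Any wraps an arbitrary value as a structured field, falling back to reflection-based encoding. A minimal standalone sketch of the same pattern (the spec type here is made up for illustration):

package main

import "go.uber.org/zap"

// fakeSpec stands in for ke.Spec; the real type is operatorv1beta1.KnativeEventingSpec.
type fakeSpec struct {
	Workloads []string
}

func main() {
	// Development config logs at debug level, so Debugw output is visible.
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()

	// Strongly typed fields like zap.Any can be mixed into SugaredLogger's
	// loosely typed key/value list, exactly as the commit does.
	logger.Sugar().Debugw("resource spec", zap.Any("resource", fakeSpec{Workloads: []string{"eventing-webhook"}}))
}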

openshift-knative-operator/pkg/eventing/scale.go

Lines changed: 18 additions & 5 deletions
@@ -22,8 +22,8 @@ import (
 	"knative.dev/operator/pkg/apis/operator/base"
 	operatorv1beta1 "knative.dev/operator/pkg/apis/operator/v1beta1"
 	knativeeventinginformer "knative.dev/operator/pkg/client/injection/informers/operator/v1beta1/knativeeventing"
-	kubeclient "knative.dev/pkg/client/injection/kube/client"
 	"knative.dev/pkg/controller"
+	"knative.dev/pkg/injection"
 	"knative.dev/pkg/logging"
 	"knative.dev/pkg/ptr"
 )
@@ -39,6 +39,8 @@ type coreScaler struct {
 	hasCRDsInstalled atomic.Bool
 	cancel           context.CancelFunc
 	factory          externalversions.SharedInformerFactory
+
+	logger *zap.Logger
 }
 
 type CoreScalerWrapper struct {
@@ -98,18 +100,22 @@ func newInternalScaler(ctx context.Context, resync cache.ResourceEventHandler) *
 
 	logger := logging.FromContext(ctx).With(zap.String("component", "scaler"))
 
+	apiExtensionClient, _ := apiextension.NewForConfig(injection.GetConfig(ctx))
+
 	s := &coreScaler{
 		BrokerLister: f.Eventing().V1().Brokers().Lister(),
 
 		InMemoryChannelLister: f.Messaging().V1().InMemoryChannels().Lister(),
 
-		apiExtensionClient: apiextension.New(kubeclient.Get(ctx).AppsV1().RESTClient()),
+		apiExtensionClient: apiExtensionClient,
 
 		cacheSynced:      sync.WaitGroup{},
 		hasCRDsInstalled: atomic.Bool{},
 
 		cancel:  cancel,
 		factory: f,
+
+		logger: logger.Desugar(),
 	}
 	_, _ = f.Eventing().V1().Brokers().Informer().AddEventHandler(resync)
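
Besides the added logging, this hunk fixes how the apiextensions client is built: the old code wrapped the kube client's AppsV1 REST client, whose base path points at the apps API group rather than apiextensions.k8s.io, so CRD lookups could not succeed; apiextension.NewForConfig builds a proper clientset from the injected rest config. A minimal sketch of the kind of CRD existence check the scaler performs, assuming a standalone kubeconfig and using one of the CRDs this scaler cares about:

package main

import (
	"context"
	"fmt"

	apiextension "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	// Same constructor the commit switches to: a clientset for apiextensions.k8s.io.
	client, err := apiextension.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	_, err = client.ApiextensionsV1().CustomResourceDefinitions().
		Get(context.Background(), "brokers.eventing.knative.dev", metav1.GetOptions{})
	switch {
	case apierrors.IsNotFound(err):
		fmt.Println("Broker CRD not installed yet")
	case err != nil:
		panic(err)
	default:
		fmt.Println("Broker CRD installed")
	}
}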

@@ -121,6 +127,7 @@ func newInternalScaler(ctx context.Context, resync cache.ResourceEventHandler) *
 	hasCRDsInstalled, err := s.verifyCRDsInstalled(ctx)
 	logger.Debugw("Waiting for CRDs to be installed", zap.Bool("hasCRDsInstalled", hasCRDsInstalled))
 	if err != nil {
+		logger.Debugw("Failed to wait for CRDs to be installed", zap.Error(err))
 		return false, nil
 	}
 	return hasCRDsInstalled, nil
@@ -152,6 +159,7 @@ func (s *coreScaler) scale(ke *operatorv1beta1.KnativeEventing) error {
 
 	hasMTChannelBrokers, err := s.hasMTChannelBrokers()
 	if err != nil {
+		s.logger.Warn("failed to verify if there are MT Channel Based Brokers", zap.Error(err))
 		return err
 	}
 	if hasMTChannelBrokers {
@@ -166,6 +174,7 @@ func (s *coreScaler) scale(ke *operatorv1beta1.KnativeEventing) error {
 
 	hasInMemoryChannels, err := s.hasInMemoryChannels()
 	if err != nil {
+		s.logger.Warn("failed to verify if there are in memory channels", zap.Error(err))
 		return err
 	}
 	if hasInMemoryChannels {
@@ -198,11 +207,11 @@ func (s *coreScaler) hasMTChannelBrokers() (bool, error) {
 }
 
 func (s *coreScaler) hasInMemoryChannels() (bool, error) {
-	eventTypes, err := s.InMemoryChannelLister.List(labels.Everything())
+	imcs, err := s.InMemoryChannelLister.List(labels.Everything())
 	if err != nil {
-		return false, fmt.Errorf("failed to list eventtypes: %w", err)
+		return false, fmt.Errorf("failed to list inmemorychannels: %w", err)
 	}
-	return len(eventTypes) > 0, nil
+	return len(imcs) > 0, nil
 }
 
 func (s *coreScaler) ensureAtLeastOneReplica(ke *operatorv1beta1.KnativeEventing, name string) {
@@ -211,6 +220,8 @@ func (s *coreScaler) ensureAtLeastOneReplica(ke *operatorv1beta1.KnativeEventing
 		replicas = ke.Spec.HighAvailability.Replicas
 	}
 
+	s.logger.Info("Scaling up component", zap.String("name", name), zap.Int32("replicas", *replicas))
+
 	for i, w := range ke.Spec.Workloads {
 		if w.Name == name {
 			if w.Replicas == nil {
@@ -227,6 +238,8 @@ func (s *coreScaler) ensureAtLeastOneReplica(ke *operatorv1beta1.KnativeEventing
 }
 
 func (s *coreScaler) scaleToZero(ke *operatorv1beta1.KnativeEventing, name string) {
+	s.logger.Info("Scaling down component", zap.String("name", name))
+
 	replicas := pointer.Int32(0)
 	for i, w := range ke.Spec.Workloads {
 		if w.Name == name {
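
Both ensureAtLeastOneReplica and scaleToZero follow the same workload-override pattern: find the entry in ke.Spec.Workloads whose Name matches and set its Replicas pointer. A condensed sketch of that pattern; the helper name and the append branch for a missing override are illustrative assumptions, not the literal code from this file:

import (
	"knative.dev/operator/pkg/apis/operator/base"
	operatorv1beta1 "knative.dev/operator/pkg/apis/operator/v1beta1"
	"knative.dev/pkg/ptr"
)

// setReplicas pins the replica count of a named workload on the
// KnativeEventing spec, adding an override entry if none exists yet.
func setReplicas(ke *operatorv1beta1.KnativeEventing, name string, replicas int32) {
	for i, w := range ke.Spec.Workloads {
		if w.Name == name {
			ke.Spec.Workloads[i].Replicas = ptr.Int32(replicas)
			return
		}
	}
	ke.Spec.Workloads = append(ke.Spec.Workloads, base.WorkloadOverride{
		Name:     name,
		Replicas: ptr.Int32(replicas),
	})
}

As the commit title notes, setting replicas to zero is not sufficient on its own: while an HPA configuration still targets the component, the autoscaler can scale it right back up, so removing that HPA override remains a TODO.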

templates/csv.yaml

Lines changed: 16 additions & 0 deletions
@@ -501,6 +501,22 @@ spec:
   rules:
     # These are needed to create the various different resources.
     # Upstream manifests
+    - apiGroups:
+        - eventing.knative.dev
+      resources:
+        - brokers
+      verbs:
+        - get
+        - list
+        - watch
+    - apiGroups:
+        - messaging.knative.dev
+      resources:
+        - inmemorychannels
+      verbs:
+        - get
+        - list
+        - watch
     - apiGroups:
         - ""
       resources:
