From b64eb54ca84295897417b9f10f56da4c10769409 Mon Sep 17 00:00:00 2001
From: Matthias Loibl
Date: Fri, 24 Oct 2025 16:09:08 +0200
Subject: [PATCH 1/4] feat: Optimize SLO performance with subquery-based calculations

Introduce subquery-based calculations that improve performance while
maintaining ~99% accuracy. This adds a new PerformanceOverAccuracy option
that, when enabled, uses Prometheus subqueries with a 5-minute step to
reduce query complexity.

Changes:
- Add PerformanceOverAccuracy field to ServiceLevelObjective spec
- Implement subquery expression handling in PromQL replacer
- Add LessAccurate field to track optimization state in SLO objects

This optimization trades a small amount of accuracy for significant
performance gains in resource-intensive SLO calculations.

Closes #1440
---
 .../v1alpha1/servicelevelobjective_types.go |   4 +
 slo/promql.go                               |   4 +
 slo/promql_test.go                          |  15 +
 slo/rules.go                                | 814 +++++++++++-------
 slo/rules_test.go                           |  83 +-
 slo/slo.go                                  |   2 +
 6 files changed, 596 insertions(+), 326 deletions(-)

diff --git a/kubernetes/api/v1alpha1/servicelevelobjective_types.go b/kubernetes/api/v1alpha1/servicelevelobjective_types.go
index fd17babd0..7c4e502a1 100644
--- a/kubernetes/api/v1alpha1/servicelevelobjective_types.go
+++ b/kubernetes/api/v1alpha1/servicelevelobjective_types.go
@@ -99,6 +99,10 @@ type ServiceLevelObjectiveSpec struct {
 	// be ignored by Prometheus instances.
 	// More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response
 	PartialResponseStrategy string `json:"partial_response_strategy,omitempty"`
+
+	// +optional
+	// +kubebuilder:default:=false
+	PerformanceOverAccuracy bool `json:"performance_over_accuracy,omitempty"`
 }
 
 // ServiceLevelIndicator defines the underlying indicator that is a Prometheus metric.
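For illustration, here is a standalone sketch (not part of the patch) of the
rewrite that the new *parser.SubqueryExpr case performs. It parses the same
placeholder expression that increaseSubqueryExpr() uses and rewrites the
subquery range and step with parser.Inspect instead of Pyrra's internal
objectiveReplacer; the 4-week window is only an example value.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Placeholder expression, same shape as increaseSubqueryExpr();
	// the [1s:2s] range and step are dummies that get replaced below.
	expr, err := parser.ParseExpr(`sum by (grouping) (sum_over_time(metric{matchers="total"}[1s:2s]))`)
	if err != nil {
		panic(err)
	}

	// Walk the AST and rewrite every subquery the way the replacer now does:
	// range = SLO window, step = fixed 5m resolution.
	parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
		if sq, ok := node.(*parser.SubqueryExpr); ok {
			sq.Range = 28 * 24 * time.Hour // e.g. a 4w SLO window
			sq.Step = 5 * time.Minute
		}
		return nil
	})

	fmt.Println(expr.String())
	// Prints: sum by (grouping) (sum_over_time(metric{matchers="total"}[4w:5m]))
}

With PerformanceOverAccuracy enabled, the generated rules therefore come in
pairs: a 5-minute increase recording rule (for example http_requests:increase5m)
and a window-length rule that aggregates it via sum_over_time(...[4w:5m])
instead of computing increase() over the full window, as the updated
rules_test.go expectations below show.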
diff --git a/slo/promql.go b/slo/promql.go index b54b1f75a..e0fc1734f 100644 --- a/slo/promql.go +++ b/slo/promql.go @@ -589,6 +589,10 @@ func (r objectiveReplacer) replace(node parser.Node) { n.Range = r.window } r.replace(n.VectorSelector) + case *parser.SubqueryExpr: + n.Range = r.window + n.Step = 5 * time.Minute + r.replace(n.Expr) case *parser.VectorSelector: if n.Name == "errorMetric" { n.Name = r.errorMetric diff --git a/slo/promql_test.go b/slo/promql_test.go index 4e59d7e8f..3e61a02cd 100644 --- a/slo/promql_test.go +++ b/slo/promql_test.go @@ -42,6 +42,11 @@ var ( }, } } + objectiveHTTPRatioGroupingLessAccurate = func() Objective { + o := objectiveHTTPRatio() + o.LessAccurate = true + return o + } objectiveHTTPRatioGrouping = func() Objective { o := objectiveHTTPRatio() o.Indicator.Ratio.Grouping = []string{"job", "handler"} @@ -98,6 +103,11 @@ var ( o.Indicator.Ratio.Grouping = []string{"job", "handler"} return o } + objectiveGRPCRatioGroupingLessAccuracy = func() Objective { + o := objectiveGRPCRatioGrouping() + o.LessAccurate = true + return o + } objectiveHTTPLatency = func() Objective { return Objective{ Labels: labels.FromStrings(labels.MetricName, "monitoring-http-latency"), @@ -168,6 +178,11 @@ var ( o.Indicator.Latency.Total.LabelMatchers = append(o.Indicator.Latency.Total.LabelMatchers, matcher) return o } + objectiveHTTPLatencyGroupingRegexLessAccuracy = func() Objective { + o := objectiveHTTPLatencyGroupingRegex() + o.LessAccurate = true + return o + } objectiveGRPCLatency = func() Objective { return Objective{ Labels: labels.FromStrings(labels.MetricName, "monitoring-grpc-latency"), diff --git a/slo/rules.go b/slo/rules.go index 85bc66dd3..12f74524c 100644 --- a/slo/rules.go +++ b/slo/rules.go @@ -619,67 +619,178 @@ func (o Objective) commonRuleAnnotations() map[string]string { return annotations } -func (o Objective) IncreaseRules() (monitoringv1.RuleGroup, error) { - sloName := o.Labels.Get(labels.MetricName) +func (o Objective) countExpr() (parser.Expr, error) { // Returns a new instance of Expr with this query each time called + return parser.ParseExpr(`sum by (grouping) (count_over_time(metric{matchers="total"}[1s]))`) +} - countExpr := func() (parser.Expr, error) { // Returns a new instance of Expr with this query each time called - return parser.ParseExpr(`sum by (grouping) (count_over_time(metric{matchers="total"}[1s]))`) - } +func (o Objective) sumExpr() (parser.Expr, error) { // Returns a new instance of Expr with this query each time called + return parser.ParseExpr(`sum by (grouping) (sum_over_time(metric{matchers="total"}[1s]))`) +} - sumExpr := func() (parser.Expr, error) { // Returns a new instance of Expr with this query each time called - return parser.ParseExpr(`sum by (grouping) (sum_over_time(metric{matchers="total"}[1s]))`) - } +func (o Objective) increaseExpr() (parser.Expr, error) { // Returns a new instance of Expr with this query each time called + return parser.ParseExpr(`sum by (grouping) (increase(metric{matchers="total"}[1s]))`) +} - increaseExpr := func() (parser.Expr, error) { // Returns a new instance of Expr with this query each time called - return parser.ParseExpr(`sum by (grouping) (increase(metric{matchers="total"}[1s]))`) - } +func (o Objective) increaseSubqueryExpr() (parser.Expr, error) { // Returns a new instance of Expr with this query each time called + return parser.ParseExpr(`sum by (grouping) (sum_over_time(metric{matchers="total"}[1s:2s]))`) +} - absentExpr := func() (parser.Expr, error) { - return 
parser.ParseExpr(`absent(metric{matchers="total"}) == 1`) - } +func (o Objective) absentExpr() (parser.Expr, error) { + return parser.ParseExpr(`absent(metric{matchers="total"}) == 1`) +} + +func (o Objective) IncreaseRules() (monitoringv1.RuleGroup, error) { + sloName := o.Labels.Get(labels.MetricName) var rules []monitoringv1.Rule switch o.IndicatorType() { + case Unknown: + return monitoringv1.RuleGroup{}, nil case Ratio: - ruleLabels := o.commonRuleLabels(sloName) - for _, m := range o.Indicator.Ratio.Total.LabelMatchers { - if m.Type == labels.MatchEqual && m.Name != labels.MetricName { - ruleLabels[m.Name] = m.Value - } + rulesRatio, err := o.increaseRulesRatio(sloName) + if err != nil { + return monitoringv1.RuleGroup{}, err } - - groupingMap := map[string]struct{}{} - for _, s := range o.Indicator.Ratio.Grouping { - groupingMap[s] = struct{}{} + rules = append(rules, rulesRatio...) + case Latency: + rulesLatency, err := o.increaseRuleLatency(sloName) + if err != nil { + return monitoringv1.RuleGroup{}, err } - for _, s := range groupingLabels( - o.Indicator.Ratio.Errors.LabelMatchers, - o.Indicator.Ratio.Total.LabelMatchers, - ) { - groupingMap[s] = struct{}{} + rules = append(rules, rulesLatency...) + case LatencyNative: + rulesLatencyNative, err := o.increaseRuleLatencyNative(sloName) + if err != nil { + return monitoringv1.RuleGroup{}, err } - for _, m := range o.Indicator.Ratio.Total.LabelMatchers { - if m.Type == labels.MatchRegexp || m.Type == labels.MatchNotRegexp { - groupingMap[m.Name] = struct{}{} - } + rules = append(rules, rulesLatencyNative...) + case BoolGauge: + rulesBoolGauge, err := o.increaseRuleBoolGauge(sloName) + if err != nil { + return monitoringv1.RuleGroup{}, err } - // Delete labels that are grouped, as their value is part of the recording rule anyway - for g := range groupingMap { - delete(ruleLabels, g) + rules = append(rules, rulesBoolGauge...) 
+ } + + day := 24 * time.Hour + + var interval model.Duration + window := time.Duration(o.Window) + + // TODO: Make this a function with an equation + if window < 7*day { + interval = model.Duration(30 * time.Second) + } else if window < 14*day { + interval = model.Duration(60 * time.Second) + } else if window < 21*day { + interval = model.Duration(90 * time.Second) + } else if window < 28*day { + interval = model.Duration(120 * time.Second) + } else if window < 35*day { + interval = model.Duration(150 * time.Second) + } else if window < 42*day { + interval = model.Duration(180 * time.Second) + } else if window < 49*day { + interval = model.Duration(210 * time.Second) + } else { // 8w + interval = model.Duration(240 * time.Second) + } + + return monitoringv1.RuleGroup{ + Name: sloName + "-increase", + Interval: monitoringDuration(interval.String()), + Rules: rules, + }, nil +} + +func (o Objective) increaseRulesRatio(sloName string) ([]monitoringv1.Rule, error) { + var rules []monitoringv1.Rule + + ruleLabels := o.commonRuleLabels(sloName) + for _, m := range o.Indicator.Ratio.Total.LabelMatchers { + if m.Type == labels.MatchEqual && m.Name != labels.MetricName { + ruleLabels[m.Name] = m.Value } + } - grouping := make([]string, 0, len(groupingMap)) - for s := range groupingMap { - grouping = append(grouping, s) + groupingMap := map[string]struct{}{} + for _, s := range o.Indicator.Ratio.Grouping { + groupingMap[s] = struct{}{} + } + for _, s := range groupingLabels( + o.Indicator.Ratio.Errors.LabelMatchers, + o.Indicator.Ratio.Total.LabelMatchers, + ) { + groupingMap[s] = struct{}{} + } + for _, m := range o.Indicator.Ratio.Total.LabelMatchers { + if m.Type == labels.MatchRegexp || m.Type == labels.MatchNotRegexp { + groupingMap[m.Name] = struct{}{} } - sort.Strings(grouping) + } + // Delete labels that are grouped, as their value is part of the recording rule anyway + for g := range groupingMap { + delete(ruleLabels, g) + } + + grouping := make([]string, 0, len(groupingMap)) + for s := range groupingMap { + grouping = append(grouping, s) + } + sort.Strings(grouping) + + expr, err := o.increaseExpr() + if err != nil { + return rules, err + } + + if o.LessAccurate { + objectiveReplacer{ + metric: o.Indicator.Ratio.Total.Name, + matchers: o.Indicator.Ratio.Total.LabelMatchers, + grouping: grouping, + window: 5 * time.Minute, + }.replace(expr) - expr, err := increaseExpr() + subExpr, err := o.increaseSubqueryExpr() if err != nil { - return monitoringv1.RuleGroup{}, err + return rules, err + } + + subqueryName := increaseName(o.Indicator.Ratio.Total.Name, model.Duration(5*time.Minute)) + subqueryLabelMatchers := make([]*labels.Matcher, 0, len(o.Indicator.Ratio.Total.LabelMatchers)) + for _, m := range o.Indicator.Ratio.Total.LabelMatchers { + value := m.Value + if m.Name == labels.MetricName { + value = subqueryName + } + subqueryLabelMatchers = append(subqueryLabelMatchers, &labels.Matcher{ + Type: m.Type, + Name: m.Name, + Value: value, + }) } + objectiveReplacer{ + metric: subqueryName, + matchers: subqueryLabelMatchers, + grouping: grouping, + window: time.Duration(o.Window), + }.replace(subExpr) + + rules = append(rules, + monitoringv1.Rule{ + Record: subqueryName, + Expr: intstr.FromString(expr.String()), + Labels: ruleLabels, + }, monitoringv1.Rule{ + Record: increaseName(o.Indicator.Ratio.Total.Name, o.Window), + Expr: intstr.FromString(subExpr.String()), + Labels: ruleLabels, + }, + ) + } else { objectiveReplacer{ metric: o.Indicator.Ratio.Total.Name, matchers: 
o.Indicator.Ratio.Total.LabelMatchers, @@ -692,24 +803,65 @@ func (o Objective) IncreaseRules() (monitoringv1.RuleGroup, error) { Expr: intstr.FromString(expr.String()), Labels: ruleLabels, }) + } - alertLabels := make(map[string]string, len(ruleLabels)+1) - for k, v := range ruleLabels { - alertLabels[k] = v + alertLabels := make(map[string]string, len(ruleLabels)+1) + for k, v := range ruleLabels { + alertLabels[k] = v + } + // Add severity label for alerts + alertLabels["severity"] = string(critical) + + // add the absent alert if configured + if o.Alerting.Absent { + expr, err = o.absentExpr() + if err != nil { + return rules, err } - // Add severity label for alerts - alertLabels["severity"] = string(critical) + + objectiveReplacer{ + metric: o.Indicator.Ratio.Total.Name, + matchers: o.Indicator.Ratio.Total.LabelMatchers, + }.replace(expr) + + rules = append(rules, monitoringv1.Rule{ + Alert: o.AlertNameAbsent(), + Expr: intstr.FromString(expr.String()), + For: monitoringDuration(o.AbsentDuration().String()), + Labels: alertLabels, + Annotations: o.commonRuleAnnotations(), + }) + } + + if o.Indicator.Ratio.Total.Name != o.Indicator.Ratio.Errors.Name { + expr, err := o.increaseExpr() + if err != nil { + return rules, err + } + + objectiveReplacer{ + metric: o.Indicator.Ratio.Errors.Name, + matchers: o.Indicator.Ratio.Errors.LabelMatchers, + grouping: grouping, + window: time.Duration(o.Window), + }.replace(expr) + + rules = append(rules, monitoringv1.Rule{ + Record: increaseName(o.Indicator.Ratio.Errors.Name, o.Window), + Expr: intstr.FromString(expr.String()), + Labels: ruleLabels, + }) // add the absent alert if configured if o.Alerting.Absent { - expr, err = absentExpr() + expr, err = o.absentExpr() if err != nil { - return monitoringv1.RuleGroup{}, err + return rules, err } objectiveReplacer{ - metric: o.Indicator.Ratio.Total.Name, - matchers: o.Indicator.Ratio.Total.LabelMatchers, + metric: o.Indicator.Ratio.Errors.Name, + matchers: o.Indicator.Ratio.Errors.LabelMatchers, }.replace(expr) rules = append(rules, monitoringv1.Rule{ @@ -720,89 +872,124 @@ func (o Objective) IncreaseRules() (monitoringv1.RuleGroup, error) { Annotations: o.commonRuleAnnotations(), }) } + } - if o.Indicator.Ratio.Total.Name != o.Indicator.Ratio.Errors.Name { - expr, err := increaseExpr() - if err != nil { - return monitoringv1.RuleGroup{}, err - } - - objectiveReplacer{ - metric: o.Indicator.Ratio.Errors.Name, - matchers: o.Indicator.Ratio.Errors.LabelMatchers, - grouping: grouping, - window: time.Duration(o.Window), - }.replace(expr) - - rules = append(rules, monitoringv1.Rule{ - Record: increaseName(o.Indicator.Ratio.Errors.Name, o.Window), - Expr: intstr.FromString(expr.String()), - Labels: ruleLabels, - }) + return rules, nil +} - // add the absent alert if configured - if o.Alerting.Absent { - expr, err = absentExpr() - if err != nil { - return monitoringv1.RuleGroup{}, err - } +func (o Objective) increaseRuleLatency(sloName string) ([]monitoringv1.Rule, error) { + var rules []monitoringv1.Rule - objectiveReplacer{ - metric: o.Indicator.Ratio.Errors.Name, - matchers: o.Indicator.Ratio.Errors.LabelMatchers, - }.replace(expr) - - rules = append(rules, monitoringv1.Rule{ - Alert: o.AlertNameAbsent(), - Expr: intstr.FromString(expr.String()), - For: monitoringDuration(o.AbsentDuration().String()), - Labels: alertLabels, - Annotations: o.commonRuleAnnotations(), - }) - } + ruleLabels := o.commonRuleLabels(sloName) + for _, m := range o.Indicator.Latency.Total.LabelMatchers { + if m.Type == 
labels.MatchEqual && m.Name != labels.MetricName { + ruleLabels[m.Name] = m.Value } - case Latency: - ruleLabels := o.commonRuleLabels(sloName) - for _, m := range o.Indicator.Latency.Total.LabelMatchers { - if m.Type == labels.MatchEqual && m.Name != labels.MetricName { - ruleLabels[m.Name] = m.Value - } + } + + groupingMap := map[string]struct{}{} + for _, s := range o.Indicator.Latency.Grouping { + groupingMap[s] = struct{}{} + } + for _, s := range groupingLabels( + o.Indicator.Latency.Success.LabelMatchers, + o.Indicator.Latency.Total.LabelMatchers, + ) { + groupingMap[s] = struct{}{} + } + for _, m := range o.Indicator.Latency.Total.LabelMatchers { + if m.Type == labels.MatchRegexp || m.Type == labels.MatchNotRegexp { + groupingMap[m.Name] = struct{}{} } + } + // Delete labels that are grouped, as their value is part of the recording rule anyway + for g := range groupingMap { + delete(ruleLabels, g) + } - groupingMap := map[string]struct{}{} - for _, s := range o.Indicator.Latency.Grouping { - groupingMap[s] = struct{}{} + grouping := make([]string, 0, len(groupingMap)) + for s := range groupingMap { + grouping = append(grouping, s) + } + sort.Strings(grouping) + + window := time.Duration(o.Window) + if o.LessAccurate { + window = 5 * time.Minute + } + + expr, err := o.increaseExpr() + if err != nil { + return rules, err + } + + objectiveReplacer{ + metric: o.Indicator.Latency.Total.Name, + matchers: o.Indicator.Latency.Total.LabelMatchers, + grouping: grouping, + window: window, + }.replace(expr) + + rules = append(rules, monitoringv1.Rule{ + Record: increaseName(o.Indicator.Latency.Total.Name, model.Duration(window)), + Expr: intstr.FromString(expr.String()), + Labels: ruleLabels, + }) + + expr, err = o.increaseExpr() + if err != nil { + return rules, err + } + + objectiveReplacer{ + metric: o.Indicator.Latency.Success.Name, + matchers: o.Indicator.Latency.Success.LabelMatchers, + grouping: grouping, + window: window, + }.replace(expr) + + var le string + for _, m := range o.Indicator.Latency.Success.LabelMatchers { + if m.Name == "le" { + le = m.Value + break } - for _, s := range groupingLabels( - o.Indicator.Latency.Success.LabelMatchers, - o.Indicator.Latency.Total.LabelMatchers, - ) { - groupingMap[s] = struct{}{} + } + ruleLabelsLe := map[string]string{"le": le} + for k, v := range ruleLabels { + ruleLabelsLe[k] = v + } + + rules = append(rules, monitoringv1.Rule{ + Record: increaseName(o.Indicator.Latency.Success.Name, model.Duration(window)), + Expr: intstr.FromString(expr.String()), + Labels: ruleLabelsLe, + }) + + if o.LessAccurate { + expr, err := o.increaseSubqueryExpr() + if err != nil { + return rules, err } + + subqueryMetricName := increaseName(o.Indicator.Latency.Total.Name, model.Duration(5*time.Minute)) + matchers := make([]*labels.Matcher, 0, len(o.Indicator.Latency.Total.LabelMatchers)) for _, m := range o.Indicator.Latency.Total.LabelMatchers { - if m.Type == labels.MatchRegexp || m.Type == labels.MatchNotRegexp { - groupingMap[m.Name] = struct{}{} + value := m.Value + if m.Name == labels.MetricName { + value = subqueryMetricName } - } - // Delete labels that are grouped, as their value is part of the recording rule anyway - for g := range groupingMap { - delete(ruleLabels, g) - } - - grouping := make([]string, 0, len(groupingMap)) - for s := range groupingMap { - grouping = append(grouping, s) - } - sort.Strings(grouping) - expr, err := increaseExpr() - if err != nil { - return monitoringv1.RuleGroup{}, err + matchers = append(matchers, &labels.Matcher{ + 
Type: m.Type, + Name: m.Name, + Value: value, + }) } objectiveReplacer{ - metric: o.Indicator.Latency.Total.Name, - matchers: o.Indicator.Latency.Total.LabelMatchers, + metric: subqueryMetricName, + matchers: matchers, grouping: grouping, window: time.Duration(o.Window), }.replace(expr) @@ -813,260 +1000,247 @@ func (o Objective) IncreaseRules() (monitoringv1.RuleGroup, error) { Labels: ruleLabels, }) - expr, err = increaseExpr() + expr, err = o.increaseSubqueryExpr() if err != nil { - return monitoringv1.RuleGroup{}, err + return rules, err + } + + subqueryMetricName = increaseName(o.Indicator.Latency.Success.Name, model.Duration(5*time.Minute)) + matchers = make([]*labels.Matcher, 0, len(o.Indicator.Latency.Success.LabelMatchers)) + for _, m := range o.Indicator.Latency.Success.LabelMatchers { + value := m.Value + if m.Name == labels.MetricName { + value = subqueryMetricName + } + + matchers = append(matchers, &labels.Matcher{ + Type: m.Type, + Name: m.Name, + Value: value, + }) } objectiveReplacer{ - metric: o.Indicator.Latency.Success.Name, - matchers: o.Indicator.Latency.Success.LabelMatchers, + metric: subqueryMetricName, + matchers: matchers, grouping: grouping, window: time.Duration(o.Window), }.replace(expr) - var le string - for _, m := range o.Indicator.Latency.Success.LabelMatchers { - if m.Name == "le" { - le = m.Value - break - } - } - ruleLabelsLe := map[string]string{"le": le} - for k, v := range ruleLabels { - ruleLabelsLe[k] = v - } - rules = append(rules, monitoringv1.Rule{ Record: increaseName(o.Indicator.Latency.Success.Name, o.Window), Expr: intstr.FromString(expr.String()), Labels: ruleLabelsLe, }) + } - // add the absent alert if configured - if o.Alerting.Absent { - expr, err = absentExpr() - if err != nil { - return monitoringv1.RuleGroup{}, err - } - - objectiveReplacer{ - metric: o.Indicator.Latency.Total.Name, - matchers: o.Indicator.Latency.Total.LabelMatchers, - }.replace(expr) - - alertLabels := make(map[string]string, len(ruleLabels)+1) - for k, v := range ruleLabels { - alertLabels[k] = v - } - // Add severity label for alerts - alertLabels["severity"] = string(critical) - - rules = append(rules, monitoringv1.Rule{ - Alert: o.AlertNameAbsent(), - Expr: intstr.FromString(expr.String()), - For: monitoringDuration(o.AbsentDuration().String()), - Labels: alertLabels, - Annotations: o.commonRuleAnnotations(), - }) - - expr, err = absentExpr() - if err != nil { - return monitoringv1.RuleGroup{}, err - } - - objectiveReplacer{ - metric: o.Indicator.Latency.Success.Name, - matchers: o.Indicator.Latency.Success.LabelMatchers, - }.replace(expr) - - alertLabelsLe := make(map[string]string, len(ruleLabelsLe)+1) - for k, v := range ruleLabelsLe { - alertLabelsLe[k] = v - } - // Add severity label for alerts - alertLabelsLe["severity"] = string(critical) - - rules = append(rules, monitoringv1.Rule{ - Alert: o.AlertNameAbsent(), - Expr: intstr.FromString(expr.String()), - For: monitoringDuration(o.AbsentDuration().String()), - Labels: alertLabelsLe, - Annotations: o.commonRuleAnnotations(), - }) - } - case LatencyNative: - ruleLabels := o.commonRuleLabels(sloName) - for _, m := range o.Indicator.LatencyNative.Total.LabelMatchers { - if m.Type == labels.MatchEqual && m.Name != labels.MetricName { - ruleLabels[m.Name] = m.Value - } - } - - expr, err := parser.ParseExpr(`histogram_count(sum by (grouping) (increase(metric{matchers="total"}[1s])))`) + // add the absent alert if configured + if o.Alerting.Absent { + expr, err = o.absentExpr() if err != nil { - return 
monitoringv1.RuleGroup{}, err + return rules, err } objectiveReplacer{ - metric: o.Indicator.LatencyNative.Total.Name, - matchers: slices.Clone(o.Indicator.LatencyNative.Total.LabelMatchers), - grouping: slices.Clone(o.Indicator.LatencyNative.Grouping), - window: time.Duration(o.Window), + metric: o.Indicator.Latency.Total.Name, + matchers: o.Indicator.Latency.Total.LabelMatchers, }.replace(expr) + alertLabels := make(map[string]string, len(ruleLabels)+1) + for k, v := range ruleLabels { + alertLabels[k] = v + } + // Add severity label for alerts + alertLabels["severity"] = string(critical) + rules = append(rules, monitoringv1.Rule{ - Record: increaseName(o.Indicator.LatencyNative.Total.Name, o.Window), - Expr: intstr.FromString(expr.String()), - Labels: ruleLabels, + Alert: o.AlertNameAbsent(), + Expr: intstr.FromString(expr.String()), + For: monitoringDuration(o.AbsentDuration().String()), + Labels: alertLabels, + Annotations: o.commonRuleAnnotations(), }) - expr, err = parser.ParseExpr(`histogram_fraction(0, 0.696969, sum by (grouping) (increase(metric{matchers="total"}[1s]))) * histogram_count(sum by (grouping) (increase(metric{matchers="total"}[1s])))`) + expr, err = o.absentExpr() if err != nil { - return monitoringv1.RuleGroup{}, err + return rules, err } - latencySeconds := time.Duration(o.Indicator.LatencyNative.Latency).Seconds() objectiveReplacer{ - metric: o.Indicator.LatencyNative.Total.Name, - matchers: slices.Clone(o.Indicator.LatencyNative.Total.LabelMatchers), - grouping: slices.Clone(o.Indicator.LatencyNative.Grouping), - window: time.Duration(o.Window), - target: latencySeconds, + metric: o.Indicator.Latency.Success.Name, + matchers: o.Indicator.Latency.Success.LabelMatchers, }.replace(expr) - ruleLabels = maps.Clone(ruleLabels) - ruleLabels["le"] = fmt.Sprintf("%g", latencySeconds) + alertLabelsLe := make(map[string]string, len(ruleLabelsLe)+1) + for k, v := range ruleLabelsLe { + alertLabelsLe[k] = v + } + // Add severity label for alerts + alertLabelsLe["severity"] = string(critical) rules = append(rules, monitoringv1.Rule{ - Record: increaseName(o.Indicator.LatencyNative.Total.Name, o.Window), - Expr: intstr.FromString(expr.String()), - Labels: ruleLabels, + Alert: o.AlertNameAbsent(), + Expr: intstr.FromString(expr.String()), + For: monitoringDuration(o.AbsentDuration().String()), + Labels: alertLabelsLe, + Annotations: o.commonRuleAnnotations(), }) - case BoolGauge: - ruleLabels := o.commonRuleLabels(sloName) - for _, m := range o.Indicator.BoolGauge.LabelMatchers { - if m.Type == labels.MatchEqual && m.Name != labels.MetricName { - ruleLabels[m.Name] = m.Value - } - } + } - groupingMap := map[string]struct{}{} - for _, s := range o.Indicator.BoolGauge.Grouping { - groupingMap[s] = struct{}{} - } - for _, s := range o.Indicator.BoolGauge.LabelMatchers { - groupingMap[s.Name] = struct{}{} - } - for _, m := range o.Indicator.BoolGauge.LabelMatchers { - if m.Type == labels.MatchRegexp || m.Type == labels.MatchNotRegexp { - groupingMap[m.Name] = struct{}{} - } - } - // Delete labels that are grouped, as their value is part of the recording rule anyway - for g := range groupingMap { - delete(ruleLabels, g) - } + return rules, nil +} - grouping := make([]string, 0, len(groupingMap)) - for s := range groupingMap { - grouping = append(grouping, s) +func (o Objective) increaseRuleLatencyNative(sloName string) ([]monitoringv1.Rule, error) { + var rules []monitoringv1.Rule + + ruleLabels := o.commonRuleLabels(sloName) + for _, m := range 
o.Indicator.LatencyNative.Total.LabelMatchers { + if m.Type == labels.MatchEqual && m.Name != labels.MetricName { + ruleLabels[m.Name] = m.Value } - sort.Strings(grouping) + } - count, err := countExpr() - if err != nil { - return monitoringv1.RuleGroup{}, err + expr, err := parser.ParseExpr(`histogram_count(sum by (grouping) (increase(metric{matchers="total"}[1s])))`) + if err != nil { + return rules, err + } + + objectiveReplacer{ + metric: o.Indicator.LatencyNative.Total.Name, + matchers: slices.Clone(o.Indicator.LatencyNative.Total.LabelMatchers), + grouping: slices.Clone(o.Indicator.LatencyNative.Grouping), + window: time.Duration(o.Window), + }.replace(expr) + + rules = append(rules, monitoringv1.Rule{ + Record: increaseName(o.Indicator.LatencyNative.Total.Name, o.Window), + Expr: intstr.FromString(expr.String()), + Labels: ruleLabels, + }) + + expr, err = parser.ParseExpr(`histogram_fraction(0, 0.696969, sum by (grouping) (increase(metric{matchers="total"}[1s]))) * histogram_count(sum by (grouping) (increase(metric{matchers="total"}[1s])))`) + if err != nil { + return rules, err + } + + latencySeconds := time.Duration(o.Indicator.LatencyNative.Latency).Seconds() + objectiveReplacer{ + metric: o.Indicator.LatencyNative.Total.Name, + matchers: slices.Clone(o.Indicator.LatencyNative.Total.LabelMatchers), + grouping: slices.Clone(o.Indicator.LatencyNative.Grouping), + window: time.Duration(o.Window), + target: latencySeconds, + }.replace(expr) + + ruleLabels = maps.Clone(ruleLabels) + ruleLabels["le"] = fmt.Sprintf("%g", latencySeconds) + + rules = append(rules, monitoringv1.Rule{ + Record: increaseName(o.Indicator.LatencyNative.Total.Name, o.Window), + Expr: intstr.FromString(expr.String()), + Labels: ruleLabels, + }) + + return rules, nil +} + +func (o Objective) increaseRuleBoolGauge(sloName string) ([]monitoringv1.Rule, error) { + var rules []monitoringv1.Rule + + ruleLabels := o.commonRuleLabels(sloName) + for _, m := range o.Indicator.BoolGauge.LabelMatchers { + if m.Type == labels.MatchEqual && m.Name != labels.MetricName { + ruleLabels[m.Name] = m.Value } + } - sum, err := sumExpr() - if err != nil { - return monitoringv1.RuleGroup{}, err + groupingMap := map[string]struct{}{} + for _, s := range o.Indicator.BoolGauge.Grouping { + groupingMap[s] = struct{}{} + } + for _, s := range o.Indicator.BoolGauge.LabelMatchers { + groupingMap[s.Name] = struct{}{} + } + for _, m := range o.Indicator.BoolGauge.LabelMatchers { + if m.Type == labels.MatchRegexp || m.Type == labels.MatchNotRegexp { + groupingMap[m.Name] = struct{}{} } + } + // Delete labels that are grouped, as their value is part of the recording rule anyway + for g := range groupingMap { + delete(ruleLabels, g) + } - objectiveReplacer{ - metric: o.Indicator.BoolGauge.Name, - matchers: o.Indicator.BoolGauge.LabelMatchers, - grouping: grouping, - window: time.Duration(o.Window), - }.replace(count) + grouping := make([]string, 0, len(groupingMap)) + for s := range groupingMap { + grouping = append(grouping, s) + } + sort.Strings(grouping) - objectiveReplacer{ - metric: o.Indicator.BoolGauge.Name, - matchers: o.Indicator.BoolGauge.LabelMatchers, - grouping: grouping, - window: time.Duration(o.Window), - }.replace(sum) + count, err := o.countExpr() + if err != nil { + return rules, err + } - rules = append(rules, monitoringv1.Rule{ - Record: countName(o.Indicator.BoolGauge.Name, o.Window), - Expr: intstr.FromString(count.String()), - Labels: ruleLabels, - }) + sum, err := o.sumExpr() + if err != nil { + return rules, err + } - 
rules = append(rules, monitoringv1.Rule{ - Record: sumName(o.Indicator.BoolGauge.Name, o.Window), - Expr: intstr.FromString(sum.String()), - Labels: ruleLabels, - }) + objectiveReplacer{ + metric: o.Indicator.BoolGauge.Name, + matchers: o.Indicator.BoolGauge.LabelMatchers, + grouping: grouping, + window: time.Duration(o.Window), + }.replace(count) - if o.Alerting.Absent { - expr, err := absentExpr() - if err != nil { - return monitoringv1.RuleGroup{}, err - } + objectiveReplacer{ + metric: o.Indicator.BoolGauge.Name, + matchers: o.Indicator.BoolGauge.LabelMatchers, + grouping: grouping, + window: time.Duration(o.Window), + }.replace(sum) - objectiveReplacer{ - metric: o.Indicator.BoolGauge.Name, - matchers: o.Indicator.BoolGauge.LabelMatchers, - }.replace(expr) + rules = append(rules, monitoringv1.Rule{ + Record: countName(o.Indicator.BoolGauge.Name, o.Window), + Expr: intstr.FromString(count.String()), + Labels: ruleLabels, + }) - alertLabels := make(map[string]string, len(ruleLabels)+1) - for k, v := range ruleLabels { - alertLabels[k] = v - } - // Add severity label for alerts - alertLabels["severity"] = string(critical) + rules = append(rules, monitoringv1.Rule{ + Record: sumName(o.Indicator.BoolGauge.Name, o.Window), + Expr: intstr.FromString(sum.String()), + Labels: ruleLabels, + }) - rules = append(rules, monitoringv1.Rule{ - Alert: o.AlertNameAbsent(), - Expr: intstr.FromString(expr.String()), - For: monitoringDuration(o.AbsentDuration().String()), - Labels: alertLabels, - Annotations: o.commonRuleAnnotations(), - }) + if o.Alerting.Absent { + expr, err := o.absentExpr() + if err != nil { + return rules, err } - } - day := 24 * time.Hour + objectiveReplacer{ + metric: o.Indicator.BoolGauge.Name, + matchers: o.Indicator.BoolGauge.LabelMatchers, + }.replace(expr) - var interval model.Duration - window := time.Duration(o.Window) + alertLabels := make(map[string]string, len(ruleLabels)+1) + for k, v := range ruleLabels { + alertLabels[k] = v + } + // Add severity label for alerts + alertLabels["severity"] = string(critical) - // TODO: Make this a function with an equation - if window < 7*day { - interval = model.Duration(30 * time.Second) - } else if window < 14*day { - interval = model.Duration(60 * time.Second) - } else if window < 21*day { - interval = model.Duration(90 * time.Second) - } else if window < 28*day { - interval = model.Duration(120 * time.Second) - } else if window < 35*day { - interval = model.Duration(150 * time.Second) - } else if window < 42*day { - interval = model.Duration(180 * time.Second) - } else if window < 49*day { - interval = model.Duration(210 * time.Second) - } else { // 8w - interval = model.Duration(240 * time.Second) + rules = append(rules, monitoringv1.Rule{ + Alert: o.AlertNameAbsent(), + Expr: intstr.FromString(expr.String()), + For: monitoringDuration(o.AbsentDuration().String()), + Labels: alertLabels, + Annotations: o.commonRuleAnnotations(), + }) } - return monitoringv1.RuleGroup{ - Name: sloName + "-increase", - Interval: monitoringDuration(interval.String()), - Rules: rules, - }, nil + return rules, nil } type severity string diff --git a/slo/rules_test.go b/slo/rules_test.go index fdfa494c1..d18ed0027 100644 --- a/slo/rules_test.go +++ b/slo/rules_test.go @@ -1284,6 +1284,27 @@ func TestObjective_IncreaseRules(t *testing.T) { Labels: map[string]string{"slo": "monitoring-http-errors", "severity": "critical"}, }}, }, + }, { + name: "http-ratio-less-accuracy", + slo: objectiveHTTPRatioGroupingLessAccurate(), + rules: monitoringv1.RuleGroup{ + 
Name: "monitoring-http-errors-increase", + Interval: monitoringDuration("2m30s"), + Rules: []monitoringv1.Rule{{ + Record: "http_requests:increase5m", + Expr: intstr.FromString(`sum by (code) (increase(http_requests_total{job="thanos-receive-default"}[5m]))`), + Labels: map[string]string{"job": "thanos-receive-default", "slo": "monitoring-http-errors"}, + }, { + Record: "http_requests:increase4w", + Expr: intstr.FromString(`sum by (code) (sum_over_time(http_requests:increase5m{job="thanos-receive-default"}[4w:5m]))`), + Labels: map[string]string{"job": "thanos-receive-default", "slo": "monitoring-http-errors"}, + }, { + Alert: "SLOMetricAbsent", + Expr: intstr.FromString(`absent(http_requests_total{job="thanos-receive-default"}) == 1`), + For: monitoringDuration("10m"), + Labels: map[string]string{"job": "thanos-receive-default", "slo": "monitoring-http-errors", "severity": "critical"}, + }}, + }, }, { name: "http-ratio-grouping-regex", slo: objectiveHTTPRatioGroupingRegex(), @@ -1335,6 +1356,27 @@ func TestObjective_IncreaseRules(t *testing.T) { Labels: map[string]string{"grpc_method": "Write", "grpc_service": "conprof.WritableProfileStore", "slo": "monitoring-grpc-errors", "severity": "critical"}, }}, }, + }, { + name: "grpc-errors-grouping-less-accuracy", + slo: objectiveGRPCRatioGroupingLessAccuracy(), + rules: monitoringv1.RuleGroup{ + Name: "monitoring-grpc-errors-increase", + Interval: monitoringDuration("2m30s"), + Rules: []monitoringv1.Rule{{ + Record: "grpc_server_handled:increase5m", + Expr: intstr.FromString(`sum by (grpc_code, handler, job) (increase(grpc_server_handled_total{grpc_method="Write",grpc_service="conprof.WritableProfileStore",job="api"}[5m]))`), + Labels: map[string]string{"grpc_method": "Write", "grpc_service": "conprof.WritableProfileStore", "slo": "monitoring-grpc-errors"}, + }, { + Record: "grpc_server_handled:increase4w", + Expr: intstr.FromString(`sum by (grpc_code, handler, job) (sum_over_time(grpc_server_handled:increase5m{grpc_method="Write",grpc_service="conprof.WritableProfileStore",job="api"}[4w:5m]))`), + Labels: map[string]string{"grpc_method": "Write", "grpc_service": "conprof.WritableProfileStore", "slo": "monitoring-grpc-errors"}, + }, { + Alert: "SLOMetricAbsent", + Expr: intstr.FromString(`absent(grpc_server_handled_total{grpc_method="Write",grpc_service="conprof.WritableProfileStore",job="api"}) == 1`), + For: monitoringDuration("3m"), + Labels: map[string]string{"grpc_method": "Write", "grpc_service": "conprof.WritableProfileStore", "slo": "monitoring-grpc-errors", "severity": "critical"}, + }}, + }, }, { name: "http-latency", slo: objectiveHTTPLatency(), @@ -1375,11 +1417,6 @@ func TestObjective_IncreaseRules(t *testing.T) { Record: "http_request_duration_seconds:increase4w", Expr: intstr.FromString(`histogram_fraction(0, 1, sum(increase(http_request_duration_seconds{code=~"2..",job="metrics-service-thanos-receive-default"}[4w]))) * histogram_count(sum(increase(http_request_duration_seconds{code=~"2..",job="metrics-service-thanos-receive-default"}[4w])))`), Labels: map[string]string{"job": "metrics-service-thanos-receive-default", "slo": "monitoring-http-latency", "le": "1"}, - // }, { - // Alert: "SLOMetricAbsent", - // Expr: intstr.FromString(`absent(http_request_duration_seconds{code=~"2..",job="metrics-service-thanos-receive-default"}) == 1`), - // For: monitoringDuration("2m"), - // Labels: map[string]string{"job": "metrics-service-thanos-receive-default", "slo": "monitoring-http-latency", "severity": "critical"}, }}, }, }, { @@ -1434,6 
+1471,40 @@ func TestObjective_IncreaseRules(t *testing.T) { Labels: map[string]string{"slo": "monitoring-http-latency", "le": "1", "severity": "critical"}, }}, }, + }, { + name: "http-latency-grouping-regex-less-accuracy", + slo: objectiveHTTPLatencyGroupingRegexLessAccuracy(), + rules: monitoringv1.RuleGroup{ + Name: "monitoring-http-latency-increase", + Interval: monitoringDuration("2m30s"), + Rules: []monitoringv1.Rule{{ + Record: "http_request_duration_seconds:increase5m", + Expr: intstr.FromString(`sum by (code, handler, job) (increase(http_request_duration_seconds_count{code=~"2..",handler=~"/api.*",job="metrics-service-thanos-receive-default"}[5m]))`), + Labels: map[string]string{"slo": "monitoring-http-latency"}, + }, { + Record: "http_request_duration_seconds:increase5m", + Expr: intstr.FromString(`sum by (code, handler, job) (increase(http_request_duration_seconds_bucket{code=~"2..",handler=~"/api.*",job="metrics-service-thanos-receive-default",le="1"}[5m]))`), + Labels: map[string]string{"slo": "monitoring-http-latency", "le": "1"}, + }, { + Record: "http_request_duration_seconds:increase4w", + Expr: intstr.FromString(`sum by (code, handler, job) (sum_over_time(http_request_duration_seconds:increase5m{code=~"2..",handler=~"/api.*",job="metrics-service-thanos-receive-default"}[4w:5m]))`), + Labels: map[string]string{"slo": "monitoring-http-latency"}, + }, { + Record: "http_request_duration_seconds:increase4w", + Expr: intstr.FromString(`sum by (code, handler, job) (sum_over_time(http_request_duration_seconds:increase5m{code=~"2..",handler=~"/api.*",job="metrics-service-thanos-receive-default",le="1"}[4w:5m]))`), + Labels: map[string]string{"slo": "monitoring-http-latency", "le": "1"}, + }, { + Alert: "SLOMetricAbsent", + Expr: intstr.FromString(`absent(http_request_duration_seconds_count{code=~"2..",handler=~"/api.*",job="metrics-service-thanos-receive-default"}) == 1`), + For: monitoringDuration("6m"), + Labels: map[string]string{"slo": "monitoring-http-latency", "severity": "critical"}, + }, { + Alert: "SLOMetricAbsent", + Expr: intstr.FromString(`absent(http_request_duration_seconds_bucket{code=~"2..",handler=~"/api.*",job="metrics-service-thanos-receive-default",le="1"}) == 1`), + For: monitoringDuration("6m"), + Labels: map[string]string{"slo": "monitoring-http-latency", "le": "1", "severity": "critical"}, + }}, + }, }, { name: "grpc-latency", slo: objectiveGRPCLatency(), @@ -1625,7 +1696,7 @@ func TestObjective_IncreaseRules(t *testing.T) { }, }} - require.Len(t, testcases, 17) + require.Len(t, testcases, 20) for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { diff --git a/slo/slo.go b/slo/slo.go index 045779936..5149438d2 100644 --- a/slo/slo.go +++ b/slo/slo.go @@ -24,6 +24,8 @@ type Objective struct { Window model.Duration Config string + LessAccurate bool + Alerting Alerting Indicator Indicator } From cc293b7deea8481cf150038d7c3f99da97e8d943 Mon Sep 17 00:00:00 2001 From: Matthias Loibl Date: Fri, 24 Oct 2025 16:33:10 +0200 Subject: [PATCH 2/4] fix: Rename internal field and align struct initialization - Rename LessAccurate to PerformanceOverAccuracy for consistency with the public API - Add proper field alignment in Objective struct initialization - Update all references throughout the codebase --- .../api/v1alpha1/servicelevelobjective_types.go | 16 +++++++++------- slo/promql_test.go | 6 +++--- slo/rules.go | 6 +++--- slo/slo.go | 2 +- 4 files changed, 16 insertions(+), 14 deletions(-) diff --git 
a/kubernetes/api/v1alpha1/servicelevelobjective_types.go b/kubernetes/api/v1alpha1/servicelevelobjective_types.go index 7c4e502a1..2dcdc662e 100644 --- a/kubernetes/api/v1alpha1/servicelevelobjective_types.go +++ b/kubernetes/api/v1alpha1/servicelevelobjective_types.go @@ -382,6 +382,7 @@ func (in *ServiceLevelObjective) Internal() (slo.Objective, error) { if err != nil { return slo.Objective{}, fmt.Errorf("failed to parse objective window: %w", err) } + var alerting slo.Alerting alerting.Disabled = false if in.Spec.Alerting.Disabled != nil { @@ -579,13 +580,14 @@ func (in *ServiceLevelObjective) Internal() (slo.Objective, error) { } return slo.Objective{ - Labels: ls, - Annotations: in.Annotations, - Description: in.Spec.Description, - Target: target / 100, - Window: window, - Config: string(config), - Alerting: alerting, + Labels: ls, + Annotations: in.Annotations, + Description: in.Spec.Description, + Target: target / 100, + Window: window, + PerformanceOverAccuracy: in.Spec.PerformanceOverAccuracy, + Config: string(config), + Alerting: alerting, Indicator: slo.Indicator{ Ratio: ratio, Latency: latency, diff --git a/slo/promql_test.go b/slo/promql_test.go index 3e61a02cd..e31767e78 100644 --- a/slo/promql_test.go +++ b/slo/promql_test.go @@ -44,7 +44,7 @@ var ( } objectiveHTTPRatioGroupingLessAccurate = func() Objective { o := objectiveHTTPRatio() - o.LessAccurate = true + o.PerformanceOverAccuracy = true return o } objectiveHTTPRatioGrouping = func() Objective { @@ -105,7 +105,7 @@ var ( } objectiveGRPCRatioGroupingLessAccuracy = func() Objective { o := objectiveGRPCRatioGrouping() - o.LessAccurate = true + o.PerformanceOverAccuracy = true return o } objectiveHTTPLatency = func() Objective { @@ -180,7 +180,7 @@ var ( } objectiveHTTPLatencyGroupingRegexLessAccuracy = func() Objective { o := objectiveHTTPLatencyGroupingRegex() - o.LessAccurate = true + o.PerformanceOverAccuracy = true return o } objectiveGRPCLatency = func() Objective { diff --git a/slo/rules.go b/slo/rules.go index 12f74524c..65626a5d8 100644 --- a/slo/rules.go +++ b/slo/rules.go @@ -745,7 +745,7 @@ func (o Objective) increaseRulesRatio(sloName string) ([]monitoringv1.Rule, erro return rules, err } - if o.LessAccurate { + if o.PerformanceOverAccuracy { objectiveReplacer{ metric: o.Indicator.Ratio.Total.Name, matchers: o.Indicator.Ratio.Total.LabelMatchers, @@ -914,7 +914,7 @@ func (o Objective) increaseRuleLatency(sloName string) ([]monitoringv1.Rule, err sort.Strings(grouping) window := time.Duration(o.Window) - if o.LessAccurate { + if o.PerformanceOverAccuracy { window = 5 * time.Minute } @@ -966,7 +966,7 @@ func (o Objective) increaseRuleLatency(sloName string) ([]monitoringv1.Rule, err Labels: ruleLabelsLe, }) - if o.LessAccurate { + if o.PerformanceOverAccuracy { expr, err := o.increaseSubqueryExpr() if err != nil { return rules, err diff --git a/slo/slo.go b/slo/slo.go index 5149438d2..f7496f8e2 100644 --- a/slo/slo.go +++ b/slo/slo.go @@ -24,7 +24,7 @@ type Objective struct { Window model.Duration Config string - LessAccurate bool + PerformanceOverAccuracy bool Alerting Alerting Indicator Indicator From bae5b586afc1f56364c0bc49945439e0522eb6bc Mon Sep 17 00:00:00 2001 From: Matthias Loibl Date: Sat, 25 Oct 2025 13:10:17 +0200 Subject: [PATCH 3/4] actions: Build container for this branch --- .github/workflows/container.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 1b85460f2..450932118 100644 --- a/.github/workflows/container.yaml 
+++ b/.github/workflows/container.yaml @@ -5,6 +5,7 @@ on: branches: - 'release-*' - 'main' + - 'improve-slo-performance-with-subqueries' concurrency: group: ${{ github.workflow }}-${{ github.event.number || github.ref }} From 01b5ac6cc3a7378f49eb257b967b211512f3831d Mon Sep 17 00:00:00 2001 From: Matthias Loibl Date: Tue, 4 Nov 2025 11:23:18 +0100 Subject: [PATCH 4/4] chore: Regenerate CRD for ServiceLevelObjective --- Makefile | 2 +- .../controller-gen/pyrra.dev_servicelevelobjectives.json | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 384cdefa1..af31e06a2 100644 --- a/Makefile +++ b/Makefile @@ -77,7 +77,7 @@ docker-push: # download controller-gen if necessary controller-gen: ifeq (, $(shell which controller-gen)) - go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0 + go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.19.0 CONTROLLER_GEN=$(GOBIN)/controller-gen else CONTROLLER_GEN=$(shell which controller-gen) diff --git a/jsonnet/controller-gen/pyrra.dev_servicelevelobjectives.json b/jsonnet/controller-gen/pyrra.dev_servicelevelobjectives.json index 3a9aa317d..09a4229f6 100644 --- a/jsonnet/controller-gen/pyrra.dev_servicelevelobjectives.json +++ b/jsonnet/controller-gen/pyrra.dev_servicelevelobjectives.json @@ -3,7 +3,7 @@ "kind": "CustomResourceDefinition", "metadata": { "annotations": { - "controller-gen.kubebuilder.io/version": "v0.18.0" + "controller-gen.kubebuilder.io/version": "v0.19.0" }, "name": "servicelevelobjectives.pyrra.dev" }, @@ -242,6 +242,10 @@ ], "type": "string" }, + "performance_over_accuracy": { + "default": false, + "type": "boolean" + }, "target": { "description": "Target is a string that's casted to a float64 between 0 - 100.\nIt represents the desired availability of the service in the given window.\nfloat64 are not supported: https://github.com/kubernetes-sigs/controller-tools/issues/245", "type": "string"