From f03f9f73650f73b386a6be40fd66b4a8b14b064d Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Mon, 24 Nov 2025 15:52:31 +0530 Subject: [PATCH 01/77] add feature gates Signed-off-by: Mayank Shah --- cmd/manager/main.go | 14 +++++++ go.mod | 3 ++ go.sum | 6 +++ pkg/features/features.go | 75 +++++++++++++++++++++++++++++++++++ pkg/features/features_test.go | 37 +++++++++++++++++ 5 files changed, 135 insertions(+) create mode 100644 pkg/features/features.go create mode 100644 pkg/features/features_test.go diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 30faf8aad4..e4670efa57 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -30,6 +30,7 @@ import ( "github.com/percona/percona-xtradb-cluster-operator/pkg/apis" pxcv1 "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" "github.com/percona/percona-xtradb-cluster-operator/pkg/controller" + "github.com/percona/percona-xtradb-cluster-operator/pkg/features" "github.com/percona/percona-xtradb-cluster-operator/pkg/k8s" "github.com/percona/percona-xtradb-cluster-operator/pkg/version" "github.com/percona/percona-xtradb-cluster-operator/pkg/webhook" @@ -158,6 +159,19 @@ func main() { ctx := k8s.StartStopSignalHandler(mgr.GetClient(), strings.Split(namespace, ",")) + fg := features.NewGate() + ctx = features.NewContextWithGate(ctx, fg) + if err := fg.Set(os.Getenv("PXCO_FEATURE_GATES")); err != nil { + setupLog.Error(err, "failed to set feature gates") + os.Exit(1) + } + setupLog.Info("Feature gates", + // These are set by the user + "PXCO_FEATURE_GATES", features.ShowAssigned(ctx), + // These are enabled, including features that are on by default + "enabled", features.ShowEnabled(ctx), + ) + if err := webhook.SetupWebhook(ctx, mgr); err != nil { setupLog.Error(err, "set up validation webhook") os.Exit(1) diff --git a/go.mod b/go.mod index a7fb890da7..dd82085ad8 100644 --- a/go.mod +++ b/go.mod @@ -29,10 +29,12 @@ require ( go.uber.org/zap v1.27.1 golang.org/x/sync v0.18.0 golang.org/x/sys 
v0.38.0 + gotest.tools v2.2.0+incompatible k8s.io/api v0.34.2 k8s.io/apiextensions-apiserver v0.34.2 k8s.io/apimachinery v0.34.2 k8s.io/client-go v0.34.2 + k8s.io/component-base v0.34.2 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d sigs.k8s.io/controller-runtime v0.22.4 @@ -45,6 +47,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect diff --git a/go.sum b/go.sum index e34dd63572..6a6108469c 100644 --- a/go.sum +++ b/go.sum @@ -20,6 +20,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/caarlos0/env v3.5.0+incompatible h1:Yy0UN8o9Wtr/jGHZDpCBLpNrzcFLLM2yixi/rBrKyJs= github.com/caarlos0/env v3.5.0+incompatible/go.mod h1:tdCsowwCzMLdkqRYDlHpZCp2UooDD3MspDBjZ2AD02Y= github.com/cert-manager/cert-manager v1.19.1 h1:Txh8L/nLWTDcb7ZnXuXbTe15BxQnLbLirXmbNk0fGgY= @@ -308,6 +310,8 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible 
h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= k8s.io/apiextensions-apiserver v0.34.2 h1:WStKftnGeoKP4AZRz/BaAAEJvYp4mlZGN0UCv+uvsqo= @@ -316,6 +320,8 @@ k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= +k8s.io/component-base v0.34.2 h1:HQRqK9x2sSAsd8+R4xxRirlTjowsg6fWCPwWYeSvogQ= +k8s.io/component-base v0.34.2/go.mod h1:9xw2FHJavUHBFpiGkZoKuYZ5pdtLKe97DEByaA+hHbM= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= diff --git a/pkg/features/features.go b/pkg/features/features.go new file mode 100644 index 0000000000..8c60884f20 --- /dev/null +++ b/pkg/features/features.go @@ -0,0 +1,75 @@ +package features + +import ( + "context" + "fmt" + "slices" + "strings" + + "k8s.io/component-base/featuregate" +) + +const ( + // BackupXtrabackup is a feature flag for the BackupXtrabackup feature + BackupXtrabackup = "BackupXtrabackup" +) + +// NewGate returns a new FeatureGate. +func NewGate() featuregate.MutableFeatureGate { + gate := featuregate.NewFeatureGate() + + if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{ + BackupXtrabackup: {Default: false, PreRelease: featuregate.Alpha}, + }); err != nil { + panic(err) + } + return gate +} + +type contextKey struct{} + +// Enabled indicates if a Feature is enabled in the Gate contained in ctx. 
It +// returns false when there is no Gate. +func Enabled(ctx context.Context, f featuregate.Feature) bool { + gate, ok := ctx.Value(contextKey{}).(featuregate.FeatureGate) + return ok && gate.Enabled(f) +} + +// NewContextWithGate returns a copy of ctx containing gate. Check it using [Enabled]. +func NewContextWithGate(ctx context.Context, gate featuregate.FeatureGate) context.Context { + return context.WithValue(ctx, contextKey{}, gate) +} + +// ShowEnabled returns all the features enabled in the Gate contained in ctx. +func ShowEnabled(ctx context.Context) string { + featuresEnabled := []string{} + if gate, ok := ctx.Value(contextKey{}).(interface { + featuregate.FeatureGate + GetAll() map[featuregate.Feature]featuregate.FeatureSpec + }); ok { + specs := gate.GetAll() + for feature := range specs { + // `gate.Enabled` first checks if the feature is enabled; + // then (if not explicitly set by the user), + // it checks if the feature is on/true by default + if gate.Enabled(feature) { + featuresEnabled = append(featuresEnabled, fmt.Sprintf("%s=true", feature)) + } + } + } + slices.Sort(featuresEnabled) + return strings.Join(featuresEnabled, ",") +} + +// ShowAssigned returns the features enabled or disabled by Set and SetFromMap +// in the Gate contained in ctx. 
+func ShowAssigned(ctx context.Context) string { + featuresAssigned := "" + if gate, ok := ctx.Value(contextKey{}).(interface { + featuregate.FeatureGate + String() string + }); ok { + featuresAssigned = gate.String() + } + return featuresAssigned +} diff --git a/pkg/features/features_test.go b/pkg/features/features_test.go new file mode 100644 index 0000000000..b57b8bb6d9 --- /dev/null +++ b/pkg/features/features_test.go @@ -0,0 +1,37 @@ +package features + +import ( + "context" + "testing" + + "gotest.tools/assert" +) + +func TestDefaults(t *testing.T) { + t.Parallel() + gate := NewGate() + + assert.Assert(t, false == gate.Enabled(BackupXtrabackup)) +} + +func TestStringFormat(t *testing.T) { + t.Parallel() + gate := NewGate() + + assert.NilError(t, gate.Set("")) + assert.NilError(t, gate.Set("BackupXtrabackup=true")) + assert.Assert(t, true == gate.Enabled(BackupXtrabackup)) + +} + +func TestContext(t *testing.T) { + t.Parallel() + gate := NewGate() + ctx := NewContextWithGate(context.Background(), gate) + + assert.Equal(t, ShowAssigned(ctx), "") + + assert.NilError(t, gate.Set("BackupXtrabackup=true")) + assert.Assert(t, Enabled(ctx, BackupXtrabackup)) + assert.Equal(t, ShowAssigned(ctx), "BackupXtrabackup=true") +} From 83ac65c73c13c9cc54e1527a4938e8af864605f8 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 25 Nov 2025 10:11:06 +0530 Subject: [PATCH 02/77] add boilerplate for xtrabackup sidecar server Signed-off-by: Mayank Shah --- cmd/xtrabackup-server-sidecar/main.go | 28 + go.mod | 4 +- go.sum | 10 + pkg/xtrabackup/api/app.pb.go | 926 ++++++++++++++++++++++++++ pkg/xtrabackup/api/app.proto | 84 +++ pkg/xtrabackup/api/app_grpc.pb.go | 197 ++++++ pkg/xtrabackup/api/gen.go | 3 + pkg/xtrabackup/server/app.go | 35 + 8 files changed, 1286 insertions(+), 1 deletion(-) create mode 100644 cmd/xtrabackup-server-sidecar/main.go create mode 100644 pkg/xtrabackup/api/app.pb.go create mode 100644 pkg/xtrabackup/api/app.proto create mode 100644 
pkg/xtrabackup/api/app_grpc.pb.go create mode 100644 pkg/xtrabackup/api/gen.go create mode 100644 pkg/xtrabackup/server/app.go diff --git a/cmd/xtrabackup-server-sidecar/main.go b/cmd/xtrabackup-server-sidecar/main.go new file mode 100644 index 0000000000..cc7122b4d3 --- /dev/null +++ b/cmd/xtrabackup-server-sidecar/main.go @@ -0,0 +1,28 @@ +package main + +import ( + "fmt" + "log" + "net" + + "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" + "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/server" + "google.golang.org/grpc" +) + +func main() { + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", server.DefaultPort)) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + defer lis.Close() + + var serverOptions []grpc.ServerOption + grpcServ := grpc.NewServer(serverOptions...) + api.RegisterXtrabackupServiceServer(grpcServ, server.New()) + + log.Printf("server listening at %v", lis.Addr()) + if err := grpcServ.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} diff --git a/go.mod b/go.mod index dd82085ad8..37bdd2aa5f 100644 --- a/go.mod +++ b/go.mod @@ -29,6 +29,8 @@ require ( go.uber.org/zap v1.27.1 golang.org/x/sync v0.18.0 golang.org/x/sys v0.38.0 + google.golang.org/grpc v1.75.1 + google.golang.org/protobuf v1.36.9 gotest.tools v2.2.0+incompatible k8s.io/api v0.34.2 k8s.io/apiextensions-apiserver v0.34.2 k8s.io/apimachinery v0.34.2 k8s.io/client-go v0.34.2 @@ -118,7 +120,7 @@ require ( golang.org/x/time v0.13.0 // indirect golang.org/x/tools v0.38.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect - google.golang.org/protobuf v1.36.9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 6a6108469c..2aee42a672 100644 --- a/go.sum +++ b/go.sum @@ -121,6 +121,8 @@ github.com/gogo/protobuf v1.3.2
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= @@ -240,6 +242,8 @@ go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgf go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -299,6 +303,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gonum.org/v1/gonum v0.16.0 
h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/xtrabackup/api/app.pb.go b/pkg/xtrabackup/api/app.pb.go new file mode 100644 index 0000000000..576659aa7a --- /dev/null +++ b/pkg/xtrabackup/api/app.pb.go @@ -0,0 +1,926 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc v6.33.1 +// source: app.proto + +package api + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type BackupStorageType int32 + +const ( + BackupStorageType_S3 BackupStorageType = 0 + BackupStorageType_AZURE BackupStorageType = 1 + BackupStorageType_GCS BackupStorageType = 2 +) + +// Enum value maps for BackupStorageType. 
+var ( + BackupStorageType_name = map[int32]string{ + 0: "S3", + 1: "AZURE", + 2: "GCS", + } + BackupStorageType_value = map[string]int32{ + "S3": 0, + "AZURE": 1, + "GCS": 2, + } +) + +func (x BackupStorageType) Enum() *BackupStorageType { + p := new(BackupStorageType) + *p = x + return p +} + +func (x BackupStorageType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (BackupStorageType) Descriptor() protoreflect.EnumDescriptor { + return file_app_proto_enumTypes[0].Descriptor() +} + +func (BackupStorageType) Type() protoreflect.EnumType { + return &file_app_proto_enumTypes[0] +} + +func (x BackupStorageType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use BackupStorageType.Descriptor instead. +func (BackupStorageType) EnumDescriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{0} +} + +type GetCurrentBackupConfigRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetCurrentBackupConfigRequest) Reset() { + *x = GetCurrentBackupConfigRequest{} + mi := &file_app_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetCurrentBackupConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCurrentBackupConfigRequest) ProtoMessage() {} + +func (x *GetCurrentBackupConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCurrentBackupConfigRequest.ProtoReflect.Descriptor instead. 
+func (*GetCurrentBackupConfigRequest) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{0} +} + +type CreateBackupRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + BackupName string `protobuf:"bytes,1,opt,name=backup_name,json=backupName,proto3" json:"backup_name,omitempty"` + BackupConfig *BackupConfig `protobuf:"bytes,2,opt,name=backup_config,json=backupConfig,proto3" json:"backup_config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateBackupRequest) Reset() { + *x = CreateBackupRequest{} + mi := &file_app_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateBackupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateBackupRequest) ProtoMessage() {} + +func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateBackupRequest.ProtoReflect.Descriptor instead. 
+func (*CreateBackupRequest) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateBackupRequest) GetBackupName() string { + if x != nil { + return x.BackupName + } + return "" +} + +func (x *CreateBackupRequest) GetBackupConfig() *BackupConfig { + if x != nil { + return x.BackupConfig + } + return nil +} + +type CreateBackupResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateBackupResponse) Reset() { + *x = CreateBackupResponse{} + mi := &file_app_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateBackupResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateBackupResponse) ProtoMessage() {} + +func (x *CreateBackupResponse) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateBackupResponse.ProtoReflect.Descriptor instead. 
+func (*CreateBackupResponse) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{2} +} + +type DeleteBackupRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + BackupName string `protobuf:"bytes,1,opt,name=backup_name,json=backupName,proto3" json:"backup_name,omitempty"` + BackupConfig *BackupConfig `protobuf:"bytes,2,opt,name=backup_config,json=backupConfig,proto3" json:"backup_config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteBackupRequest) Reset() { + *x = DeleteBackupRequest{} + mi := &file_app_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteBackupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteBackupRequest) ProtoMessage() {} + +func (x *DeleteBackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteBackupRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteBackupRequest) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{3} +} + +func (x *DeleteBackupRequest) GetBackupName() string { + if x != nil { + return x.BackupName + } + return "" +} + +func (x *DeleteBackupRequest) GetBackupConfig() *BackupConfig { + if x != nil { + return x.BackupConfig + } + return nil +} + +type DeleteBackupResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteBackupResponse) Reset() { + *x = DeleteBackupResponse{} + mi := &file_app_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteBackupResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteBackupResponse) ProtoMessage() {} + +func (x *DeleteBackupResponse) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteBackupResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteBackupResponse) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{4} +} + +type BackupConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + Destination string `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` + Type BackupStorageType `protobuf:"varint,2,opt,name=type,proto3,enum=api.BackupStorageType" json:"type,omitempty"` + VerifyTls bool `protobuf:"varint,3,opt,name=verify_tls,json=verifyTls,proto3" json:"verify_tls,omitempty"` + ContainerOptions *ContainerOptions `protobuf:"bytes,4,opt,name=container_options,json=containerOptions,proto3" json:"container_options,omitempty"` + S3 *S3Config `protobuf:"bytes,5,opt,name=s3,proto3,oneof" json:"s3,omitempty"` + Gcs *GCSConfig `protobuf:"bytes,6,opt,name=gcs,proto3,oneof" json:"gcs,omitempty"` + Azure *AzureConfig `protobuf:"bytes,7,opt,name=azure,proto3,oneof" json:"azure,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupConfig) Reset() { + *x = BackupConfig{} + mi := &file_app_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackupConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupConfig) ProtoMessage() {} + +func (x *BackupConfig) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupConfig.ProtoReflect.Descriptor instead. 
+func (*BackupConfig) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{5} +} + +func (x *BackupConfig) GetDestination() string { + if x != nil { + return x.Destination + } + return "" +} + +func (x *BackupConfig) GetType() BackupStorageType { + if x != nil { + return x.Type + } + return BackupStorageType_S3 +} + +func (x *BackupConfig) GetVerifyTls() bool { + if x != nil { + return x.VerifyTls + } + return false +} + +func (x *BackupConfig) GetContainerOptions() *ContainerOptions { + if x != nil { + return x.ContainerOptions + } + return nil +} + +func (x *BackupConfig) GetS3() *S3Config { + if x != nil { + return x.S3 + } + return nil +} + +func (x *BackupConfig) GetGcs() *GCSConfig { + if x != nil { + return x.Gcs + } + return nil +} + +func (x *BackupConfig) GetAzure() *AzureConfig { + if x != nil { + return x.Azure + } + return nil +} + +type S3Config struct { + state protoimpl.MessageState `protogen:"open.v1"` + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + Region string `protobuf:"bytes,2,opt,name=region,proto3" json:"region,omitempty"` + EndpointUrl string `protobuf:"bytes,3,opt,name=endpoint_url,json=endpointUrl,proto3" json:"endpoint_url,omitempty"` + AccessKey string `protobuf:"bytes,4,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"` + SecretKey string `protobuf:"bytes,5,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"` + StorageClass string `protobuf:"bytes,6,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *S3Config) Reset() { + *x = S3Config{} + mi := &file_app_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *S3Config) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S3Config) ProtoMessage() {} + +func (x *S3Config) ProtoReflect() 
protoreflect.Message { + mi := &file_app_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S3Config.ProtoReflect.Descriptor instead. +func (*S3Config) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{6} +} + +func (x *S3Config) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *S3Config) GetRegion() string { + if x != nil { + return x.Region + } + return "" +} + +func (x *S3Config) GetEndpointUrl() string { + if x != nil { + return x.EndpointUrl + } + return "" +} + +func (x *S3Config) GetAccessKey() string { + if x != nil { + return x.AccessKey + } + return "" +} + +func (x *S3Config) GetSecretKey() string { + if x != nil { + return x.SecretKey + } + return "" +} + +func (x *S3Config) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +type GCSConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + EndpointUrl string `protobuf:"bytes,2,opt,name=endpoint_url,json=endpointUrl,proto3" json:"endpoint_url,omitempty"` + StorageClass string `protobuf:"bytes,3,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + AccessKey string `protobuf:"bytes,4,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"` + SecretKey string `protobuf:"bytes,5,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GCSConfig) Reset() { + *x = GCSConfig{} + mi := &file_app_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GCSConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GCSConfig) 
ProtoMessage() {} + +func (x *GCSConfig) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GCSConfig.ProtoReflect.Descriptor instead. +func (*GCSConfig) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{7} +} + +func (x *GCSConfig) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *GCSConfig) GetEndpointUrl() string { + if x != nil { + return x.EndpointUrl + } + return "" +} + +func (x *GCSConfig) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *GCSConfig) GetAccessKey() string { + if x != nil { + return x.AccessKey + } + return "" +} + +func (x *GCSConfig) GetSecretKey() string { + if x != nil { + return x.SecretKey + } + return "" +} + +type AzureConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + ContainerName string `protobuf:"bytes,1,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` + EndpointUrl string `protobuf:"bytes,2,opt,name=endpoint_url,json=endpointUrl,proto3" json:"endpoint_url,omitempty"` + StorageClass string `protobuf:"bytes,3,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + StorageAccount string `protobuf:"bytes,4,opt,name=storage_account,json=storageAccount,proto3" json:"storage_account,omitempty"` + AccessKey string `protobuf:"bytes,5,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AzureConfig) Reset() { + *x = AzureConfig{} + mi := &file_app_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AzureConfig) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*AzureConfig) ProtoMessage() {} + +func (x *AzureConfig) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AzureConfig.ProtoReflect.Descriptor instead. +func (*AzureConfig) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{8} +} + +func (x *AzureConfig) GetContainerName() string { + if x != nil { + return x.ContainerName + } + return "" +} + +func (x *AzureConfig) GetEndpointUrl() string { + if x != nil { + return x.EndpointUrl + } + return "" +} + +func (x *AzureConfig) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *AzureConfig) GetStorageAccount() string { + if x != nil { + return x.StorageAccount + } + return "" +} + +func (x *AzureConfig) GetAccessKey() string { + if x != nil { + return x.AccessKey + } + return "" +} + +type EnvVar struct { + state protoimpl.MessageState `protogen:"open.v1"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EnvVar) Reset() { + *x = EnvVar{} + mi := &file_app_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EnvVar) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnvVar) ProtoMessage() {} + +func (x *EnvVar) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnvVar.ProtoReflect.Descriptor 
instead. +func (*EnvVar) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{9} +} + +func (x *EnvVar) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *EnvVar) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type BackupContainerArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Xtrabackup []string `protobuf:"bytes,1,rep,name=xtrabackup,proto3" json:"xtrabackup,omitempty"` + Xbcloud []string `protobuf:"bytes,2,rep,name=xbcloud,proto3" json:"xbcloud,omitempty"` + Xbstream []string `protobuf:"bytes,3,rep,name=xbstream,proto3" json:"xbstream,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupContainerArgs) Reset() { + *x = BackupContainerArgs{} + mi := &file_app_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackupContainerArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupContainerArgs) ProtoMessage() {} + +func (x *BackupContainerArgs) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupContainerArgs.ProtoReflect.Descriptor instead. 
+func (*BackupContainerArgs) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{10} +} + +func (x *BackupContainerArgs) GetXtrabackup() []string { + if x != nil { + return x.Xtrabackup + } + return nil +} + +func (x *BackupContainerArgs) GetXbcloud() []string { + if x != nil { + return x.Xbcloud + } + return nil +} + +func (x *BackupContainerArgs) GetXbstream() []string { + if x != nil { + return x.Xbstream + } + return nil +} + +type ContainerOptions struct { + state protoimpl.MessageState `protogen:"open.v1"` + Env []*EnvVar `protobuf:"bytes,1,rep,name=env,proto3" json:"env,omitempty"` + Args *BackupContainerArgs `protobuf:"bytes,2,opt,name=args,proto3" json:"args,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ContainerOptions) Reset() { + *x = ContainerOptions{} + mi := &file_app_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ContainerOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContainerOptions) ProtoMessage() {} + +func (x *ContainerOptions) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContainerOptions.ProtoReflect.Descriptor instead. 
+func (*ContainerOptions) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{11} +} + +func (x *ContainerOptions) GetEnv() []*EnvVar { + if x != nil { + return x.Env + } + return nil +} + +func (x *ContainerOptions) GetArgs() *BackupContainerArgs { + if x != nil { + return x.Args + } + return nil +} + +var File_app_proto protoreflect.FileDescriptor + +const file_app_proto_rawDesc = "" + + "\n" + + "\tapp.proto\x12\x03api\"\x1f\n" + + "\x1dGetCurrentBackupConfigRequest\"n\n" + + "\x13CreateBackupRequest\x12\x1f\n" + + "\vbackup_name\x18\x01 \x01(\tR\n" + + "backupName\x126\n" + + "\rbackup_config\x18\x02 \x01(\v2\x11.api.BackupConfigR\fbackupConfig\"\x16\n" + + "\x14CreateBackupResponse\"n\n" + + "\x13DeleteBackupRequest\x12\x1f\n" + + "\vbackup_name\x18\x01 \x01(\tR\n" + + "backupName\x126\n" + + "\rbackup_config\x18\x02 \x01(\v2\x11.api.BackupConfigR\fbackupConfig\"\x16\n" + + "\x14DeleteBackupResponse\"\xd0\x02\n" + + "\fBackupConfig\x12 \n" + + "\vdestination\x18\x01 \x01(\tR\vdestination\x12*\n" + + "\x04type\x18\x02 \x01(\x0e2\x16.api.BackupStorageTypeR\x04type\x12\x1d\n" + + "\n" + + "verify_tls\x18\x03 \x01(\bR\tverifyTls\x12B\n" + + "\x11container_options\x18\x04 \x01(\v2\x15.api.ContainerOptionsR\x10containerOptions\x12\"\n" + + "\x02s3\x18\x05 \x01(\v2\r.api.S3ConfigH\x00R\x02s3\x88\x01\x01\x12%\n" + + "\x03gcs\x18\x06 \x01(\v2\x0e.api.GCSConfigH\x01R\x03gcs\x88\x01\x01\x12+\n" + + "\x05azure\x18\a \x01(\v2\x10.api.AzureConfigH\x02R\x05azure\x88\x01\x01B\x05\n" + + "\x03_s3B\x06\n" + + "\x04_gcsB\b\n" + + "\x06_azure\"\xc0\x01\n" + + "\bS3Config\x12\x16\n" + + "\x06bucket\x18\x01 \x01(\tR\x06bucket\x12\x16\n" + + "\x06region\x18\x02 \x01(\tR\x06region\x12!\n" + + "\fendpoint_url\x18\x03 \x01(\tR\vendpointUrl\x12\x1d\n" + + "\n" + + "access_key\x18\x04 \x01(\tR\taccessKey\x12\x1d\n" + + "\n" + + "secret_key\x18\x05 \x01(\tR\tsecretKey\x12#\n" + + "\rstorage_class\x18\x06 \x01(\tR\fstorageClass\"\xa9\x01\n" + + 
"\tGCSConfig\x12\x16\n" + + "\x06bucket\x18\x01 \x01(\tR\x06bucket\x12!\n" + + "\fendpoint_url\x18\x02 \x01(\tR\vendpointUrl\x12#\n" + + "\rstorage_class\x18\x03 \x01(\tR\fstorageClass\x12\x1d\n" + + "\n" + + "access_key\x18\x04 \x01(\tR\taccessKey\x12\x1d\n" + + "\n" + + "secret_key\x18\x05 \x01(\tR\tsecretKey\"\xc4\x01\n" + + "\vAzureConfig\x12%\n" + + "\x0econtainer_name\x18\x01 \x01(\tR\rcontainerName\x12!\n" + + "\fendpoint_url\x18\x02 \x01(\tR\vendpointUrl\x12#\n" + + "\rstorage_class\x18\x03 \x01(\tR\fstorageClass\x12'\n" + + "\x0fstorage_account\x18\x04 \x01(\tR\x0estorageAccount\x12\x1d\n" + + "\n" + + "access_key\x18\x05 \x01(\tR\taccessKey\"0\n" + + "\x06EnvVar\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\"k\n" + + "\x13BackupContainerArgs\x12\x1e\n" + + "\n" + + "xtrabackup\x18\x01 \x03(\tR\n" + + "xtrabackup\x12\x18\n" + + "\axbcloud\x18\x02 \x03(\tR\axbcloud\x12\x1a\n" + + "\bxbstream\x18\x03 \x03(\tR\bxbstream\"_\n" + + "\x10ContainerOptions\x12\x1d\n" + + "\x03env\x18\x01 \x03(\v2\v.api.EnvVarR\x03env\x12,\n" + + "\x04args\x18\x02 \x01(\v2\x18.api.BackupContainerArgsR\x04args*/\n" + + "\x11BackupStorageType\x12\x06\n" + + "\x02S3\x10\x00\x12\t\n" + + "\x05AZURE\x10\x01\x12\a\n" + + "\x03GCS\x10\x022\xee\x01\n" + + "\x11XtrabackupService\x12O\n" + + "\x16GetCurrentBackupConfig\x12\".api.GetCurrentBackupConfigRequest\x1a\x11.api.BackupConfig\x12C\n" + + "\fCreateBackup\x12\x18.api.CreateBackupRequest\x1a\x19.api.CreateBackupResponse\x12C\n" + + "\fDeleteBackup\x12\x18.api.DeleteBackupRequest\x1a\x19.api.DeleteBackupResponseBGZEgithub.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/apib\x06proto3" + +var ( + file_app_proto_rawDescOnce sync.Once + file_app_proto_rawDescData []byte +) + +func file_app_proto_rawDescGZIP() []byte { + file_app_proto_rawDescOnce.Do(func() { + file_app_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_app_proto_rawDesc), 
len(file_app_proto_rawDesc))) + }) + return file_app_proto_rawDescData +} + +var file_app_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_app_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_app_proto_goTypes = []any{ + (BackupStorageType)(0), // 0: api.BackupStorageType + (*GetCurrentBackupConfigRequest)(nil), // 1: api.GetCurrentBackupConfigRequest + (*CreateBackupRequest)(nil), // 2: api.CreateBackupRequest + (*CreateBackupResponse)(nil), // 3: api.CreateBackupResponse + (*DeleteBackupRequest)(nil), // 4: api.DeleteBackupRequest + (*DeleteBackupResponse)(nil), // 5: api.DeleteBackupResponse + (*BackupConfig)(nil), // 6: api.BackupConfig + (*S3Config)(nil), // 7: api.S3Config + (*GCSConfig)(nil), // 8: api.GCSConfig + (*AzureConfig)(nil), // 9: api.AzureConfig + (*EnvVar)(nil), // 10: api.EnvVar + (*BackupContainerArgs)(nil), // 11: api.BackupContainerArgs + (*ContainerOptions)(nil), // 12: api.ContainerOptions +} +var file_app_proto_depIdxs = []int32{ + 6, // 0: api.CreateBackupRequest.backup_config:type_name -> api.BackupConfig + 6, // 1: api.DeleteBackupRequest.backup_config:type_name -> api.BackupConfig + 0, // 2: api.BackupConfig.type:type_name -> api.BackupStorageType + 12, // 3: api.BackupConfig.container_options:type_name -> api.ContainerOptions + 7, // 4: api.BackupConfig.s3:type_name -> api.S3Config + 8, // 5: api.BackupConfig.gcs:type_name -> api.GCSConfig + 9, // 6: api.BackupConfig.azure:type_name -> api.AzureConfig + 10, // 7: api.ContainerOptions.env:type_name -> api.EnvVar + 11, // 8: api.ContainerOptions.args:type_name -> api.BackupContainerArgs + 1, // 9: api.XtrabackupService.GetCurrentBackupConfig:input_type -> api.GetCurrentBackupConfigRequest + 2, // 10: api.XtrabackupService.CreateBackup:input_type -> api.CreateBackupRequest + 4, // 11: api.XtrabackupService.DeleteBackup:input_type -> api.DeleteBackupRequest + 6, // 12: api.XtrabackupService.GetCurrentBackupConfig:output_type -> api.BackupConfig + 3, // 13: 
api.XtrabackupService.CreateBackup:output_type -> api.CreateBackupResponse + 5, // 14: api.XtrabackupService.DeleteBackup:output_type -> api.DeleteBackupResponse + 12, // [12:15] is the sub-list for method output_type + 9, // [9:12] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_app_proto_init() } +func file_app_proto_init() { + if File_app_proto != nil { + return + } + file_app_proto_msgTypes[5].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_app_proto_rawDesc), len(file_app_proto_rawDesc)), + NumEnums: 1, + NumMessages: 12, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_app_proto_goTypes, + DependencyIndexes: file_app_proto_depIdxs, + EnumInfos: file_app_proto_enumTypes, + MessageInfos: file_app_proto_msgTypes, + }.Build() + File_app_proto = out.File + file_app_proto_goTypes = nil + file_app_proto_depIdxs = nil +} diff --git a/pkg/xtrabackup/api/app.proto b/pkg/xtrabackup/api/app.proto new file mode 100644 index 0000000000..ae6eac88f1 --- /dev/null +++ b/pkg/xtrabackup/api/app.proto @@ -0,0 +1,84 @@ +syntax = "proto3"; + +package api; + +option go_package = "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api"; + +service XtrabackupService { + rpc GetCurrentBackupConfig(GetCurrentBackupConfigRequest) returns (BackupConfig); + rpc CreateBackup(CreateBackupRequest) returns (CreateBackupResponse); + rpc DeleteBackup(DeleteBackupRequest) returns (DeleteBackupResponse); +} + +message GetCurrentBackupConfigRequest {} + +message CreateBackupRequest { + string backup_name = 1; + BackupConfig backup_config = 2; +} + +message CreateBackupResponse {} + +message DeleteBackupRequest { + string backup_name = 1; + 
BackupConfig backup_config = 2; +} + +message DeleteBackupResponse {} + +enum BackupStorageType { + S3 = 0; + AZURE = 1; + GCS = 2; +} + +message BackupConfig { + string destination = 1; + BackupStorageType type = 2; + bool verify_tls = 3; + ContainerOptions container_options = 4; + optional S3Config s3 = 5; + optional GCSConfig gcs = 6; + optional AzureConfig azure = 7; +} + +message S3Config { + string bucket = 1; + string region = 2; + string endpoint_url = 3; + string access_key = 4; + string secret_key = 5; + string storage_class = 6; +} + +message GCSConfig { + string bucket = 1; + string endpoint_url = 2; + string storage_class = 3; + string access_key = 4; + string secret_key = 5; +} + +message AzureConfig { + string container_name = 1; + string endpoint_url = 2; + string storage_class = 3; + string storage_account = 4; + string access_key = 5; +} + +message EnvVar { + string key = 1; + string value = 2; +} + +message BackupContainerArgs { + repeated string xtrabackup = 1; + repeated string xbcloud = 2; + repeated string xbstream = 3; +} + +message ContainerOptions { + repeated EnvVar env = 1; + BackupContainerArgs args = 2; +} \ No newline at end of file diff --git a/pkg/xtrabackup/api/app_grpc.pb.go b/pkg/xtrabackup/api/app_grpc.pb.go new file mode 100644 index 0000000000..a62ff850f0 --- /dev/null +++ b/pkg/xtrabackup/api/app_grpc.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v6.33.1 +// source: app.proto + +package api + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. 
+const _ = grpc.SupportPackageIsVersion9 + +const ( + XtrabackupService_GetCurrentBackupConfig_FullMethodName = "/api.XtrabackupService/GetCurrentBackupConfig" + XtrabackupService_CreateBackup_FullMethodName = "/api.XtrabackupService/CreateBackup" + XtrabackupService_DeleteBackup_FullMethodName = "/api.XtrabackupService/DeleteBackup" +) + +// XtrabackupServiceClient is the client API for XtrabackupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type XtrabackupServiceClient interface { + GetCurrentBackupConfig(ctx context.Context, in *GetCurrentBackupConfigRequest, opts ...grpc.CallOption) (*BackupConfig, error) + CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*CreateBackupResponse, error) + DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*DeleteBackupResponse, error) +} + +type xtrabackupServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewXtrabackupServiceClient(cc grpc.ClientConnInterface) XtrabackupServiceClient { + return &xtrabackupServiceClient{cc} +} + +func (c *xtrabackupServiceClient) GetCurrentBackupConfig(ctx context.Context, in *GetCurrentBackupConfigRequest, opts ...grpc.CallOption) (*BackupConfig, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(BackupConfig) + err := c.cc.Invoke(ctx, XtrabackupService_GetCurrentBackupConfig_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *xtrabackupServiceClient) CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*CreateBackupResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateBackupResponse) + err := c.cc.Invoke(ctx, XtrabackupService_CreateBackup_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *xtrabackupServiceClient) DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*DeleteBackupResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(DeleteBackupResponse) + err := c.cc.Invoke(ctx, XtrabackupService_DeleteBackup_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// XtrabackupServiceServer is the server API for XtrabackupService service. +// All implementations must embed UnimplementedXtrabackupServiceServer +// for forward compatibility. +type XtrabackupServiceServer interface { + GetCurrentBackupConfig(context.Context, *GetCurrentBackupConfigRequest) (*BackupConfig, error) + CreateBackup(context.Context, *CreateBackupRequest) (*CreateBackupResponse, error) + DeleteBackup(context.Context, *DeleteBackupRequest) (*DeleteBackupResponse, error) + mustEmbedUnimplementedXtrabackupServiceServer() +} + +// UnimplementedXtrabackupServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedXtrabackupServiceServer struct{} + +func (UnimplementedXtrabackupServiceServer) GetCurrentBackupConfig(context.Context, *GetCurrentBackupConfigRequest) (*BackupConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCurrentBackupConfig not implemented") +} +func (UnimplementedXtrabackupServiceServer) CreateBackup(context.Context, *CreateBackupRequest) (*CreateBackupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateBackup not implemented") +} +func (UnimplementedXtrabackupServiceServer) DeleteBackup(context.Context, *DeleteBackupRequest) (*DeleteBackupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteBackup not implemented") +} +func (UnimplementedXtrabackupServiceServer) mustEmbedUnimplementedXtrabackupServiceServer() {} +func (UnimplementedXtrabackupServiceServer) testEmbeddedByValue() {} + +// UnsafeXtrabackupServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to XtrabackupServiceServer will +// result in compilation errors. +type UnsafeXtrabackupServiceServer interface { + mustEmbedUnimplementedXtrabackupServiceServer() +} + +func RegisterXtrabackupServiceServer(s grpc.ServiceRegistrar, srv XtrabackupServiceServer) { + // If the following call pancis, it indicates UnimplementedXtrabackupServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&XtrabackupService_ServiceDesc, srv) +} + +func _XtrabackupService_GetCurrentBackupConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCurrentBackupConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(XtrabackupServiceServer).GetCurrentBackupConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: XtrabackupService_GetCurrentBackupConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(XtrabackupServiceServer).GetCurrentBackupConfig(ctx, req.(*GetCurrentBackupConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _XtrabackupService_CreateBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateBackupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(XtrabackupServiceServer).CreateBackup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: XtrabackupService_CreateBackup_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(XtrabackupServiceServer).CreateBackup(ctx, req.(*CreateBackupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _XtrabackupService_DeleteBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteBackupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(XtrabackupServiceServer).DeleteBackup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + 
FullMethod: XtrabackupService_DeleteBackup_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(XtrabackupServiceServer).DeleteBackup(ctx, req.(*DeleteBackupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// XtrabackupService_ServiceDesc is the grpc.ServiceDesc for XtrabackupService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var XtrabackupService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "api.XtrabackupService", + HandlerType: (*XtrabackupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetCurrentBackupConfig", + Handler: _XtrabackupService_GetCurrentBackupConfig_Handler, + }, + { + MethodName: "CreateBackup", + Handler: _XtrabackupService_CreateBackup_Handler, + }, + { + MethodName: "DeleteBackup", + Handler: _XtrabackupService_DeleteBackup_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "app.proto", +} diff --git a/pkg/xtrabackup/api/gen.go b/pkg/xtrabackup/api/gen.go new file mode 100644 index 0000000000..7a6cf9b254 --- /dev/null +++ b/pkg/xtrabackup/api/gen.go @@ -0,0 +1,3 @@ +package api + +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative app.proto diff --git a/pkg/xtrabackup/server/app.go b/pkg/xtrabackup/server/app.go new file mode 100644 index 0000000000..202a31d44b --- /dev/null +++ b/pkg/xtrabackup/server/app.go @@ -0,0 +1,35 @@ +package server + +import ( + "context" + + "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// DefaultPort is the default port for the app server. +const DefaultPort = 6450 + +type appServer struct { + api.UnimplementedXtrabackupServiceServer +} + +var _ api.XtrabackupServiceServer = (*appServer)(nil) + +// New returns a new app server. 
+func New() api.XtrabackupServiceServer { + return &appServer{} +} + +func (s *appServer) GetCurrentBackupConfig(ctx context.Context, req *api.GetCurrentBackupConfigRequest) (*api.BackupConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCurrentBackupConfig not implemented") +} + +func (s *appServer) CreateBackup(ctx context.Context, req *api.CreateBackupRequest) (*api.CreateBackupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateBackup not implemented") +} + +func (s *appServer) DeleteBackup(ctx context.Context, req *api.DeleteBackupRequest) (*api.DeleteBackupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteBackup not implemented") +} From 7b53a460b8cfb5d02d82f80d668b622fd11a8553 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 25 Nov 2025 11:36:08 +0530 Subject: [PATCH 03/77] add sidecar to node sts when feature enabled Signed-off-by: Mayank Shah --- cmd/xtrabackup-server-sidecar/main.go | 8 ++--- pkg/apis/pxc/v1/pxc_types.go | 1 + pkg/features/features.go | 2 +- pkg/pxc/app/statefulset/haproxy.go | 4 +++ pkg/pxc/app/statefulset/node.go | 43 +++++++++++++++++++++++++++ pkg/pxc/app/statefulset/proxysql.go | 4 +++ pkg/pxc/statefulset.go | 8 +++++ 7 files changed, 65 insertions(+), 5 deletions(-) diff --git a/cmd/xtrabackup-server-sidecar/main.go b/cmd/xtrabackup-server-sidecar/main.go index cc7122b4d3..475a60937b 100644 --- a/cmd/xtrabackup-server-sidecar/main.go +++ b/cmd/xtrabackup-server-sidecar/main.go @@ -5,13 +5,13 @@ import ( "log" "net" - "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" - "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/server" + xbsidecarapi "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" + xbsidecarserver "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/server" "google.golang.org/grpc" ) func main() { - lis, err := net.Listen("tcp", fmt.Sprintf(":%d", 
server.DefaultPort)) + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", xbsidecarserver.DefaultPort)) if err != nil { log.Fatalf("failed to listen: %v", err) } @@ -19,7 +19,7 @@ func main() { var serverOptions []grpc.ServerOption grpcServ := grpc.NewServer(serverOptions...) - api.RegisterXtrabackupServiceServer(grpcServ, server.New()) + xbsidecarapi.RegisterXtrabackupServiceServer(grpcServ, xbsidecarserver.New()) log.Printf("server listening at %v", lis.Addr()) if err := grpcServ.Serve(lis); err != nil { diff --git a/pkg/apis/pxc/v1/pxc_types.go b/pkg/apis/pxc/v1/pxc_types.go index bf9e0d1515..e20e254830 100644 --- a/pkg/apis/pxc/v1/pxc_types.go +++ b/pkg/apis/pxc/v1/pxc_types.go @@ -891,6 +891,7 @@ type App interface { SidecarContainers(spec *PodSpec, secrets string, cr *PerconaXtraDBCluster) ([]corev1.Container, error) PMMContainer(ctx context.Context, cl client.Client, spec *PMMSpec, secret *corev1.Secret, cr *PerconaXtraDBCluster) (*corev1.Container, error) LogCollectorContainer(spec *LogCollectorSpec, logPsecrets string, logRsecrets string, cr *PerconaXtraDBCluster) ([]corev1.Container, error) + XtrabackupContainer(ctx context.Context, cr *PerconaXtraDBCluster) (*corev1.Container, error) Volumes(podSpec *PodSpec, cr *PerconaXtraDBCluster, vg CustomVolumeGetter) (*Volume, error) Labels() map[string]string } diff --git a/pkg/features/features.go b/pkg/features/features.go index 8c60884f20..a2c0b49755 100644 --- a/pkg/features/features.go +++ b/pkg/features/features.go @@ -11,7 +11,7 @@ import ( const ( // BackupXtrabackup is a feature flag for the BackupXtrabackup feature - BackupXtrabackup = "BackupXtrabackup" + BackupXtrabackup featuregate.Feature = "BackupXtrabackup" ) // NewGate returns a new FeatureGate. 
diff --git a/pkg/pxc/app/statefulset/haproxy.go b/pkg/pxc/app/statefulset/haproxy.go index b6475ad12c..62b102ca25 100644 --- a/pkg/pxc/app/statefulset/haproxy.go +++ b/pkg/pxc/app/statefulset/haproxy.go @@ -204,6 +204,10 @@ func (c *HAProxy) AppContainer(spec *api.PodSpec, secrets string, cr *api.Percon return appc, nil } +func (c *HAProxy) XtrabackupContainer(ctx context.Context, cr *api.PerconaXtraDBCluster) (*corev1.Container, error) { + return nil, nil +} + func (c *HAProxy) SidecarContainers(spec *api.PodSpec, secrets string, cr *api.PerconaXtraDBCluster) ([]corev1.Container, error) { container := corev1.Container{ Name: "pxc-monit", diff --git a/pkg/pxc/app/statefulset/node.go b/pkg/pxc/app/statefulset/node.go index 089c69dfb1..ad1f440964 100644 --- a/pkg/pxc/app/statefulset/node.go +++ b/pkg/pxc/app/statefulset/node.go @@ -8,15 +8,18 @@ import ( "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" + "github.com/percona/percona-xtradb-cluster-operator/pkg/features" "github.com/percona/percona-xtradb-cluster-operator/pkg/naming" app "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app/config" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/users" + "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/server" ) const ( @@ -372,6 +375,46 @@ func (c *Node) LogCollectorContainer(spec *api.LogCollectorSpec, logPsecrets str return []corev1.Container{logProcContainer, logRotContainer}, nil } +func (c *Node) XtrabackupContainer(ctx context.Context, cr *api.PerconaXtraDBCluster) (*corev1.Container, error) { + if !features.Enabled(ctx, features.BackupXtrabackup) { + return nil, nil + } + container := 
&corev1.Container{ + Name: "xtrabackup", + Image: cr.Spec.Backup.Image, + ImagePullPolicy: cr.Spec.Backup.ImagePullPolicy, + Env: []corev1.EnvVar{ + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + }, + Command: []string{"/opt/percona/xtrabackup-server-sidecar"}, + Ports: []corev1.ContainerPort{ + { + Name: "grpc", + ContainerPort: server.DefaultPort, + }, + }, + // TODO: make this configurable from CR + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("512Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("512Mi"), + }, + }, + } + return container, nil +} + func (c *Node) PMMContainer(ctx context.Context, cl client.Client, spec *api.PMMSpec, secret *corev1.Secret, cr *api.PerconaXtraDBCluster) (*corev1.Container, error) { if cr.Spec.PMM == nil || !cr.Spec.PMM.Enabled { return nil, nil diff --git a/pkg/pxc/app/statefulset/proxysql.go b/pkg/pxc/app/statefulset/proxysql.go index 8eb7bb9fd5..b0db1b42fe 100644 --- a/pkg/pxc/app/statefulset/proxysql.go +++ b/pkg/pxc/app/statefulset/proxysql.go @@ -178,6 +178,10 @@ func (c *Proxy) AppContainer(spec *api.PodSpec, secrets string, cr *api.PerconaX return appc, nil } +func (c *Proxy) XtrabackupContainer(ctx context.Context, cr *api.PerconaXtraDBCluster) (*corev1.Container, error) { + return nil, nil +} + func (c *Proxy) SidecarContainers(spec *api.PodSpec, secrets string, cr *api.PerconaXtraDBCluster) ([]corev1.Container, error) { pxcMonit := corev1.Container{ Name: "pxc-monit", diff --git a/pkg/pxc/statefulset.go b/pkg/pxc/statefulset.go index 4b0169ab7e..87e4e38b00 100644 --- a/pkg/pxc/statefulset.go +++ b/pkg/pxc/statefulset.go @@ -64,6 +64,14 @@ func StatefulSet( return nil, errors.Wrap(err, "app container") } + xbC, 
err := sfs.XtrabackupContainer(ctx, cr) + if err != nil { + return nil, errors.Wrap(err, "xtrabackup container") + } + if xbC != nil { + pod.Containers = append(pod.Containers, *xbC) + } + pmmC, err := sfs.PMMContainer(ctx, cl, cr.Spec.PMM, secret, cr) if err != nil { log.Info(`"pmm container error"`, "secrets", cr.Spec.SecretsName, "internalSecrets", "internal-"+cr.Name, "error", err) From 22d1cb7f28e760fdb9f65819d75594da277a2496 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 25 Nov 2025 14:12:59 +0530 Subject: [PATCH 04/77] update dockerfile Signed-off-by: Mayank Shah --- build/Dockerfile | 6 ++++++ build/pxc-init-entrypoint.sh | 1 + pkg/pxc/app/statefulset/node.go | 2 +- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/build/Dockerfile b/build/Dockerfile index acec66a56c..f052b756f0 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -29,6 +29,11 @@ RUN GOOS=$GOOS GOARCH=${TARGETARCH} CGO_ENABLED=$CGO_ENABLED GO_LDFLAGS=$GO_LDFL go build -o build/_output/bin/pitr \ cmd/pitr/main.go \ && cp -r build/_output/bin/pitr /usr/local/bin/pitr + +RUN GOOS=$GOOS GOARCH=${TARGETARCH} CGO_ENABLED=$CGO_ENABLED GO_LDFLAGS=$GO_LDFLAGS \ + go build -o build/_output/bin/xtrabackup-server-sidecar \ + cmd/xtrabackup-server-sidecar/main.go \ + && cp -r build/_output/bin/xtrabackup-server-sidecar /usr/local/bin/xtrabackup-server-sidecar RUN GOOS=$GOOS GOARCH=${TARGETARCH} CGO_ENABLED=$CGO_ENABLED GO_LDFLAGS=$GO_LDFLAGS \ go build -ldflags "-w -s -X main.GitCommit=$GIT_COMMIT -X main.GitBranch=$GIT_BRANCH -X main.BuildTime=$BUILD_TIME" \ @@ -55,6 +60,7 @@ COPY --from=go_builder /usr/local/bin/percona-xtradb-cluster-operator /usr/local COPY --from=go_builder /usr/local/bin/peer-list /peer-list COPY --from=go_builder /usr/local/bin/pitr /pitr COPY --from=go_builder /usr/local/bin/mysql-state-monitor /mysql-state-monitor +COPY --from=go_builder /usr/local/bin/xtrabackup-server-sidecar /xtrabackup-server-sidecar COPY build/pxc-entrypoint.sh /pxc-entrypoint.sh COPY 
build/pxc-init-entrypoint.sh /pxc-init-entrypoint.sh COPY build/pitr-init-entrypoint.sh /pitr-init-entrypoint.sh diff --git a/build/pxc-init-entrypoint.sh b/build/pxc-init-entrypoint.sh index 70cdcdfb34..a6ea680643 100755 --- a/build/pxc-init-entrypoint.sh +++ b/build/pxc-init-entrypoint.sh @@ -8,6 +8,7 @@ install -o "$(id -u)" -g "$(id -g)" -m 0755 -D /pxc-configure-pxc.sh /var/lib/my install -o "$(id -u)" -g "$(id -g)" -m 0755 -D /liveness-check.sh /var/lib/mysql/liveness-check.sh install -o "$(id -u)" -g "$(id -g)" -m 0755 -D /readiness-check.sh /var/lib/mysql/readiness-check.sh install -o "$(id -u)" -g "$(id -g)" -m 0755 -D /peer-list /var/lib/mysql/peer-list +install -o "$(id -u)" -g "$(id -g)" -m 0755 -D /xtrabackup-server-sidecar /var/lib/mysql/xtrabackup-server-sidecar install -o "$(id -u)" -g "$(id -g)" -m 0755 -D /get-pxc-state /var/lib/mysql/get-pxc-state install -o "$(id -u)" -g "$(id -g)" -m 0755 -D /pmm-prerun.sh /var/lib/mysql/pmm-prerun.sh install -o "$(id -u)" -g "$(id -g)" -m 0755 -D /mysql-state-monitor /var/lib/mysql/mysql-state-monitor diff --git a/pkg/pxc/app/statefulset/node.go b/pkg/pxc/app/statefulset/node.go index ad1f440964..c325728f43 100644 --- a/pkg/pxc/app/statefulset/node.go +++ b/pkg/pxc/app/statefulset/node.go @@ -393,7 +393,7 @@ func (c *Node) XtrabackupContainer(ctx context.Context, cr *api.PerconaXtraDBClu }, }, }, - Command: []string{"/opt/percona/xtrabackup-server-sidecar"}, + Command: []string{"/var/lib/mysql/xtrabackup-server-sidecar"}, Ports: []corev1.ContainerPort{ { Name: "grpc", From 42754fb4043fb5ee91a8206edb4e6031833cc306 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 25 Nov 2025 14:13:12 +0530 Subject: [PATCH 05/77] typo Signed-off-by: Mayank Shah --- cmd/xtrabackup-server-sidecar/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/xtrabackup-server-sidecar/main.go b/cmd/xtrabackup-server-sidecar/main.go index 475a60937b..6b8335a505 100644 --- a/cmd/xtrabackup-server-sidecar/main.go 
+++ b/cmd/xtrabackup-server-sidecar/main.go @@ -1,4 +1,4 @@ -package xtrabackupserversidecar +package main import ( "fmt" From c0c71443d205256c2a21bf68f61703f4aa05b5c0 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 25 Nov 2025 14:13:20 +0530 Subject: [PATCH 06/77] fix base context Signed-off-by: Mayank Shah --- cmd/manager/main.go | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/cmd/manager/main.go b/cmd/manager/main.go index e4670efa57..a1bfabc7c1 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -90,6 +90,19 @@ func main() { os.Exit(1) } + fg := features.NewGate() + if err := fg.Set(os.Getenv("PXCO_FEATURE_GATES")); err != nil { + setupLog.Error(err, "failed to set feature gates") + os.Exit(1) + } + fgCtx := features.NewContextWithGate(context.Background(), fg) + setupLog.Info("Feature gates", + // These are set by the user + "PXCO_FEATURE_GATES", features.ShowAssigned(fgCtx), + // These are enabled, including features that are on by default + "enabled", features.ShowEnabled(fgCtx), + ) + options := ctrl.Options{ Scheme: scheme, Metrics: metricsServer.Options{ @@ -101,6 +114,9 @@ func main() { WebhookServer: ctrlWebhook.NewServer(ctrlWebhook.Options{ Port: 9443, }), + BaseContext: func() context.Context { + return features.NewContextWithGate(context.Background(), fg) + }, } err = configureGroupKindConcurrency(&options) @@ -159,19 +175,6 @@ func main() { ctx := k8s.StartStopSignalHandler(mgr.GetClient(), strings.Split(namespace, ",")) - fg := features.NewGate() - ctx = features.NewContextWithGate(ctx, fg) - if err := fg.Set(os.Getenv("PXCO_FEATURE_GATES")); err != nil { - setupLog.Error(err, "failed to set feature gates") - os.Exit(1) - } - setupLog.Info("Feature gates", - // These are set by the user - "PXCO_FEATURE_GATES", features.ShowAssigned(ctx), - // These are enabled, including features that are on by default - "enabled", features.ShowEnabled(ctx), - ) - if err := webhook.SetupWebhook(ctx, 
mgr); err != nil { setupLog.Error(err, "set up validation webhook") os.Exit(1) From 8a17a794f13f9b4fd0389247cebbc3bf35588df1 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 25 Nov 2025 14:17:29 +0530 Subject: [PATCH 07/77] refactoring Signed-off-by: Mayank Shah --- build/Dockerfile | 2 +- cmd/{xtrabackup-server-sidecar => xtrabackup/server}/main.go | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename cmd/{xtrabackup-server-sidecar => xtrabackup/server}/main.go (100%) diff --git a/build/Dockerfile b/build/Dockerfile index f052b756f0..4f03d1e360 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -32,7 +32,7 @@ RUN GOOS=$GOOS GOARCH=${TARGETARCH} CGO_ENABLED=$CGO_ENABLED GO_LDFLAGS=$GO_LDFL RUN GOOS=$GOOS GOARCH=${TARGETARCH} CGO_ENABLED=$CGO_ENABLED GO_LDFLAGS=$GO_LDFLAGS \ go build -o build/_output/bin/xtrabackup-server-sidecar \ - cmd/xtrabackup-server-sidecar/main.go \ + cmd/xtrabackup/server/main.go \ && cp -r build/_output/bin/xtrabackup-server-sidecar /usr/local/bin/xtrabackup-server-sidecar RUN GOOS=$GOOS GOARCH=${TARGETARCH} CGO_ENABLED=$CGO_ENABLED GO_LDFLAGS=$GO_LDFLAGS \ diff --git a/cmd/xtrabackup-server-sidecar/main.go b/cmd/xtrabackup/server/main.go similarity index 100% rename from cmd/xtrabackup-server-sidecar/main.go rename to cmd/xtrabackup/server/main.go From 51bd79f24fc0d921809941bc3bdd0eb26eb0f134 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 25 Nov 2025 16:03:40 +0530 Subject: [PATCH 08/77] add run-backup script Signed-off-by: Mayank Shah --- cmd/xtrabackup/run-backup/main.go | 182 ++++++++++++++++++++++++++++++ 1 file changed, 182 insertions(+) create mode 100644 cmd/xtrabackup/run-backup/main.go diff --git a/cmd/xtrabackup/run-backup/main.go b/cmd/xtrabackup/run-backup/main.go new file mode 100644 index 0000000000..701fcb908d --- /dev/null +++ b/cmd/xtrabackup/run-backup/main.go @@ -0,0 +1,182 @@ +package main + +import ( + "context" + "flag" + "fmt" + "log" + "os" + "strings" + + 
"github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" +) + +func main() { + req, err := parseFlags() + if err != nil { + log.Fatal("Failed to parse flags: %w", err) + } + + serverHost, ok := os.LookupEnv("HOST") + if !ok { + log.Fatalf("HOST environment variable is not set") + } + + conn, err := grpc.NewClient(serverHost, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + log.Fatal("Failed to connect to server: %w", err) + } + defer conn.Close() + + client := api.NewXtrabackupServiceClient(conn) + + _, err = client.CreateBackup(context.Background(), req) + if err != nil { + if status.Code(err) == codes.FailedPrecondition { + log.Fatal("Backup is already running") + } + log.Fatal("Failed to create backup: %w", err) + } + log.Println("Backup created successfully") +} + +func parseFlags() (*api.CreateBackupRequest, error) { + var ( + request = &api.CreateBackupRequest{ + BackupConfig: &api.BackupConfig{ + S3: &api.S3Config{}, + Gcs: &api.GCSConfig{}, + Azure: &api.AzureConfig{}, + ContainerOptions: &api.ContainerOptions{ + Env: []*api.EnvVar{}, + Args: &api.BackupContainerArgs{ + Xtrabackup: []string{}, + }, + }, + }, + } + backupType string + ) + + // Backup name + flag.StringVar(&request.BackupName, "backup-name", "", "Name of the backup") + + // BackupConfig fields + flag.StringVar(&request.BackupConfig.Destination, "destination", "", "Backup destination path") + flag.BoolVar(&request.BackupConfig.VerifyTls, "verify-tls", true, "Verify TLS certificates") + flag.StringVar(&backupType, "type", "", "Storage type: s3, azure, or gcs") + switch backupType { + case "s3": + request.BackupConfig.Type = api.BackupStorageType_S3 + case "azure": + request.BackupConfig.Type = api.BackupStorageType_AZURE + case "gcs": + request.BackupConfig.Type = api.BackupStorageType_GCS + default: + return 
nil, fmt.Errorf("invalid storage type: %s", backupType) + } + + // S3Config fields + flag.StringVar(&request.BackupConfig.S3.Bucket, "s3.bucket", "", "S3 bucket name") + flag.StringVar(&request.BackupConfig.S3.Region, "s3.region", "", "S3 region") + flag.StringVar(&request.BackupConfig.S3.EndpointUrl, "s3.endpoint", "", "S3 endpoint URL") + flag.StringVar(&request.BackupConfig.S3.AccessKey, "s3.access-key", "", "S3 access key") + flag.StringVar(&request.BackupConfig.S3.SecretKey, "s3.secret-key", "", "S3 secret key") + flag.StringVar(&request.BackupConfig.S3.StorageClass, "s3.storage-class", "", "S3 storage class") + + // GCSConfig fields + flag.StringVar(&request.BackupConfig.Gcs.Bucket, "gcs.bucket", "", "GCS bucket name") + flag.StringVar(&request.BackupConfig.Gcs.EndpointUrl, "gcs.endpoint", "", "GCS endpoint URL") + flag.StringVar(&request.BackupConfig.Gcs.StorageClass, "gcs.storage-class", "", "GCS storage class") + flag.StringVar(&request.BackupConfig.Gcs.AccessKey, "gcs.access-key", "", "GCS access key") + flag.StringVar(&request.BackupConfig.Gcs.SecretKey, "gcs.secret-key", "", "GCS secret key") + + // AzureConfig fields + flag.StringVar(&request.BackupConfig.Azure.ContainerName, "azure.container", "", "Azure container name") + flag.StringVar(&request.BackupConfig.Azure.EndpointUrl, "azure.endpoint", "", "Azure endpoint URL") + flag.StringVar(&request.BackupConfig.Azure.StorageClass, "azure.storage-class", "", "Azure storage class") + flag.StringVar(&request.BackupConfig.Azure.StorageAccount, "azure.storage-account", "", "Azure storage account") + flag.StringVar(&request.BackupConfig.Azure.AccessKey, "azure.access-key", "", "Azure access key") + + // ContainerOptions - environment variables (format: KEY=VALUE,KEY2=VALUE2) + var envVars string + flag.StringVar(&envVars, "env", "", "Environment variables as comma-separated KEY=VALUE pairs") + // ContainerOptions - xtrabackup args (comma-separated) + var xtrabackupArgs string + flag.StringVar(&xtrabackupArgs, 
"xtrabackup-args", "", "Xtrabackup arguments (comma-separated)") + var xbcloudArgs string + flag.StringVar(&xbcloudArgs, "xbcloud-args", "", "Xbcloud arguments (comma-separated)") + var xbstreamArgs string + flag.StringVar(&xbstreamArgs, "xbstream-args", "", "Xbstream arguments (comma-separated)") + + flag.Parse() + + // Parse ContainerOptions after flag parsing + if envVars != "" || xtrabackupArgs != "" || xbcloudArgs != "" || xbstreamArgs != "" { + request.BackupConfig.ContainerOptions = &api.ContainerOptions{} + + // Parse environment variables + if envVars != "" { + pairs := strings.Split(envVars, ",") + for _, pair := range pairs { + parts := strings.SplitN(strings.TrimSpace(pair), "=", 2) + if len(parts) == 2 { + request.BackupConfig.ContainerOptions.Env = append( + request.BackupConfig.ContainerOptions.Env, + &api.EnvVar{ + Key: strings.TrimSpace(parts[0]), + Value: strings.TrimSpace(parts[1]), + }, + ) + } + } + } + + // Parse container args + if xtrabackupArgs != "" || xbcloudArgs != "" || xbstreamArgs != "" { + request.BackupConfig.ContainerOptions.Args = &api.BackupContainerArgs{} + + if xtrabackupArgs != "" { + parts := strings.Split(xtrabackupArgs, ",") + request.BackupConfig.ContainerOptions.Args.Xtrabackup = make([]string, len(parts)) + for i, part := range parts { + request.BackupConfig.ContainerOptions.Args.Xtrabackup[i] = strings.TrimSpace(part) + } + } + + if xbcloudArgs != "" { + parts := strings.Split(xbcloudArgs, ",") + request.BackupConfig.ContainerOptions.Args.Xbcloud = make([]string, len(parts)) + for i, part := range parts { + request.BackupConfig.ContainerOptions.Args.Xbcloud[i] = strings.TrimSpace(part) + } + } + + if xbstreamArgs != "" { + parts := strings.Split(xbstreamArgs, ",") + request.BackupConfig.ContainerOptions.Args.Xbstream = make([]string, len(parts)) + for i, part := range parts { + request.BackupConfig.ContainerOptions.Args.Xbstream[i] = strings.TrimSpace(part) + } + } + } + } + + // Clean up empty nested configs + switch 
request.BackupConfig.Type { + case api.BackupStorageType_S3: + request.BackupConfig.Azure = nil + request.BackupConfig.Gcs = nil + case api.BackupStorageType_AZURE: + request.BackupConfig.S3 = nil + request.BackupConfig.Gcs = nil + case api.BackupStorageType_GCS: + request.BackupConfig.S3 = nil + request.BackupConfig.Azure = nil + } + return request, nil +} From 9291709a7da3cd9d8fb62e82266ef712b27e66d8 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 25 Nov 2025 16:05:43 +0530 Subject: [PATCH 09/77] update Dockerfile Signed-off-by: Mayank Shah --- build/Dockerfile | 6 ++++++ build/backup-init-entrypoint.sh | 1 + 2 files changed, 7 insertions(+) diff --git a/build/Dockerfile b/build/Dockerfile index 4f03d1e360..ba5fcb7c47 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -34,6 +34,11 @@ RUN GOOS=$GOOS GOARCH=${TARGETARCH} CGO_ENABLED=$CGO_ENABLED GO_LDFLAGS=$GO_LDFL go build -o build/_output/bin/xtrabackup-server-sidecar \ cmd/xtrabackup/server/main.go \ && cp -r build/_output/bin/xtrabackup-server-sidecar /usr/local/bin/xtrabackup-server-sidecar + +RUN GOOS=$GOOS GOARCH=${TARGETARCH} CGO_ENABLED=$CGO_ENABLED GO_LDFLAGS=$GO_LDFLAGS \ + go build -o build/_output/bin/xtrabackup-run-backup \ + cmd/xtrabackup/run-backup/main.go \ + && cp -r build/_output/bin/xtrabackup-run-backup /usr/local/bin/xtrabackup-run-backup RUN GOOS=$GOOS GOARCH=${TARGETARCH} CGO_ENABLED=$CGO_ENABLED GO_LDFLAGS=$GO_LDFLAGS \ go build -ldflags "-w -s -X main.GitCommit=$GIT_COMMIT -X main.GitBranch=$GIT_BRANCH -X main.BuildTime=$BUILD_TIME" \ @@ -61,6 +66,7 @@ COPY --from=go_builder /usr/local/bin/peer-list /peer-list COPY --from=go_builder /usr/local/bin/pitr /pitr COPY --from=go_builder /usr/local/bin/mysql-state-monitor /mysql-state-monitor COPY --from=go_builder /usr/local/bin/xtrabackup-server-sidecar /xtrabackup-server-sidecar +COPY --from=go_builder /usr/local/bin/xtrabackup-run-backup /xtrabackup-run-backup COPY build/pxc-entrypoint.sh /pxc-entrypoint.sh COPY 
build/pxc-init-entrypoint.sh /pxc-init-entrypoint.sh COPY build/pitr-init-entrypoint.sh /pitr-init-entrypoint.sh diff --git a/build/backup-init-entrypoint.sh b/build/backup-init-entrypoint.sh index 84e414a828..e002b2af21 100755 --- a/build/backup-init-entrypoint.sh +++ b/build/backup-init-entrypoint.sh @@ -4,6 +4,7 @@ set -o errexit set -o xtrace install -o "$(id -u)" -g "$(id -g)" -m 0755 -D /peer-list /opt/percona/peer-list +install -o "$(id -u)" -g "$(id -g)" -m 0755 -D /xtrabackup-run-backup /opt/percona/xtrabackup-run-backup mkdir -p /opt/percona/backup/lib/pxc install -o "$(id -u)" -g "$(id -g)" -m 0755 -D /backup/lib/pxc/* /opt/percona/backup/lib/pxc/ From 6342784d1dbb3745dd19bfd2fad3acc5c40e481d Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 25 Nov 2025 16:43:04 +0530 Subject: [PATCH 10/77] add volume mount Signed-off-by: Mayank Shah --- pkg/pxc/app/statefulset/node.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/pxc/app/statefulset/node.go b/pkg/pxc/app/statefulset/node.go index c325728f43..f2f163f642 100644 --- a/pkg/pxc/app/statefulset/node.go +++ b/pkg/pxc/app/statefulset/node.go @@ -400,6 +400,12 @@ func (c *Node) XtrabackupContainer(ctx context.Context, cr *api.PerconaXtraDBClu ContainerPort: server.DefaultPort, }, }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: app.DataVolumeName, + MountPath: "/var/lib/mysql", + }, + }, // TODO: make this configurable from CR Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ From af8bdf11caa9dec5ee9770f51ef476aff1b063d6 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 25 Nov 2025 17:28:20 +0530 Subject: [PATCH 11/77] update run-backup Signed-off-by: Mayank Shah --- cmd/xtrabackup/run-backup/main.go | 149 +++--------------------------- 1 file changed, 14 insertions(+), 135 deletions(-) diff --git a/cmd/xtrabackup/run-backup/main.go b/cmd/xtrabackup/run-backup/main.go index 701fcb908d..1c7d5b3c37 100644 --- a/cmd/xtrabackup/run-backup/main.go +++ 
b/cmd/xtrabackup/run-backup/main.go @@ -2,11 +2,10 @@ package main import ( "context" + "encoding/json" "flag" - "fmt" "log" "os" - "strings" "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" "google.golang.org/grpc" @@ -16,9 +15,10 @@ import ( ) func main() { - req, err := parseFlags() - if err != nil { - log.Fatal("Failed to parse flags: %w", err) + + req := getRequestObject() + if req == nil { + log.Fatal("Failed to get request object") } serverHost, ok := os.LookupEnv("HOST") @@ -44,139 +44,18 @@ func main() { log.Println("Backup created successfully") } -func parseFlags() (*api.CreateBackupRequest, error) { - var ( - request = &api.CreateBackupRequest{ - BackupConfig: &api.BackupConfig{ - S3: &api.S3Config{}, - Gcs: &api.GCSConfig{}, - Azure: &api.AzureConfig{}, - ContainerOptions: &api.ContainerOptions{ - Env: []*api.EnvVar{}, - Args: &api.BackupContainerArgs{ - Xtrabackup: []string{}, - }, - }, - }, - } - backupType string - ) - - // Backup name - flag.StringVar(&request.BackupName, "backup-name", "", "Name of the backup") - - // BackupConfig fields - flag.StringVar(&request.BackupConfig.Destination, "destination", "", "Backup destination path") - flag.BoolVar(&request.BackupConfig.VerifyTls, "verify-tls", true, "Verify TLS certificates") - flag.StringVar(&backupType, "type", "", "Storage type: s3, azure, or gcs") - switch backupType { - case "s3": - request.BackupConfig.Type = api.BackupStorageType_S3 - case "azure": - request.BackupConfig.Type = api.BackupStorageType_AZURE - case "gcs": - request.BackupConfig.Type = api.BackupStorageType_GCS - default: - return nil, fmt.Errorf("invalid storage type: %s", backupType) - } - - // S3Config fields - flag.StringVar(&request.BackupConfig.S3.Bucket, "s3.bucket", "", "S3 bucket name") - flag.StringVar(&request.BackupConfig.S3.Region, "s3.region", "", "S3 region") - flag.StringVar(&request.BackupConfig.S3.EndpointUrl, "s3.endpoint", "", "S3 endpoint URL") - 
flag.StringVar(&request.BackupConfig.S3.AccessKey, "s3.access-key", "", "S3 access key") - flag.StringVar(&request.BackupConfig.S3.SecretKey, "s3.secret-key", "", "S3 secret key") - flag.StringVar(&request.BackupConfig.S3.StorageClass, "s3.storage-class", "", "S3 storage class") - - // GCSConfig fields - flag.StringVar(&request.BackupConfig.Gcs.Bucket, "gcs.bucket", "", "GCS bucket name") - flag.StringVar(&request.BackupConfig.Gcs.EndpointUrl, "gcs.endpoint", "", "GCS endpoint URL") - flag.StringVar(&request.BackupConfig.Gcs.StorageClass, "gcs.storage-class", "", "GCS storage class") - flag.StringVar(&request.BackupConfig.Gcs.AccessKey, "gcs.access-key", "", "GCS access key") - flag.StringVar(&request.BackupConfig.Gcs.SecretKey, "gcs.secret-key", "", "GCS secret key") - - // AzureConfig fields - flag.StringVar(&request.BackupConfig.Azure.ContainerName, "azure.container", "", "Azure container name") - flag.StringVar(&request.BackupConfig.Azure.EndpointUrl, "azure.endpoint", "", "Azure endpoint URL") - flag.StringVar(&request.BackupConfig.Azure.StorageClass, "azure.storage-class", "", "Azure storage class") - flag.StringVar(&request.BackupConfig.Azure.StorageAccount, "azure.storage-account", "", "Azure storage account") - flag.StringVar(&request.BackupConfig.Azure.AccessKey, "azure.access-key", "", "Azure access key") - - // ContainerOptions - environment variables (format: KEY=VALUE,KEY2=VALUE2) - var envVars string - flag.StringVar(&envVars, "env", "", "Environment variables as comma-separated KEY=VALUE pairs") - // ContainerOptions - xtrabackup args (comma-separated) - var xtrabackupArgs string - flag.StringVar(&xtrabackupArgs, "xtrabackup-args", "", "Xtrabackup arguments (comma-separated)") - var xbcloudArgs string - flag.StringVar(&xbcloudArgs, "xbcloud-args", "", "Xbcloud arguments (comma-separated)") - var xbstreamArgs string - flag.StringVar(&xbstreamArgs, "xbstream-args", "", "Xbstream arguments (comma-separated)") - +func getRequestObject() 
*api.CreateBackupRequest { + var rawB64Json string + flag.StringVar(&rawB64Json, "request-json", "", "Request JSON in base64 encoded string") flag.Parse() - // Parse ContainerOptions after flag parsing - if envVars != "" || xtrabackupArgs != "" || xbcloudArgs != "" || xbstreamArgs != "" { - request.BackupConfig.ContainerOptions = &api.ContainerOptions{} - - // Parse environment variables - if envVars != "" { - pairs := strings.Split(envVars, ",") - for _, pair := range pairs { - parts := strings.SplitN(strings.TrimSpace(pair), "=", 2) - if len(parts) == 2 { - request.BackupConfig.ContainerOptions.Env = append( - request.BackupConfig.ContainerOptions.Env, - &api.EnvVar{ - Key: strings.TrimSpace(parts[0]), - Value: strings.TrimSpace(parts[1]), - }, - ) - } - } - } - - // Parse container args - if xtrabackupArgs != "" || xbcloudArgs != "" || xbstreamArgs != "" { - request.BackupConfig.ContainerOptions.Args = &api.BackupContainerArgs{} - - if xtrabackupArgs != "" { - parts := strings.Split(xtrabackupArgs, ",") - request.BackupConfig.ContainerOptions.Args.Xtrabackup = make([]string, len(parts)) - for i, part := range parts { - request.BackupConfig.ContainerOptions.Args.Xtrabackup[i] = strings.TrimSpace(part) - } - } - - if xbcloudArgs != "" { - parts := strings.Split(xbcloudArgs, ",") - request.BackupConfig.ContainerOptions.Args.Xbcloud = make([]string, len(parts)) - for i, part := range parts { - request.BackupConfig.ContainerOptions.Args.Xbcloud[i] = strings.TrimSpace(part) - } - } - - if xbstreamArgs != "" { - parts := strings.Split(xbstreamArgs, ",") - request.BackupConfig.ContainerOptions.Args.Xbstream = make([]string, len(parts)) - for i, part := range parts { - request.BackupConfig.ContainerOptions.Args.Xbstream[i] = strings.TrimSpace(part) - } - } - } + if rawB64Json == "" { + log.Fatal("Backup config is required") } - // Clean up empty nested configs - switch request.BackupConfig.Type { - case api.BackupStorageType_S3: - request.BackupConfig.Azure = nil - 
request.BackupConfig.Gcs = nil - case api.BackupStorageType_AZURE: - request.BackupConfig.S3 = nil - request.BackupConfig.Gcs = nil - case api.BackupStorageType_GCS: - request.BackupConfig.S3 = nil - request.BackupConfig.Azure = nil + req := &api.CreateBackupRequest{} + if err := json.Unmarshal([]byte(rawB64Json), req); err != nil { + log.Fatal("Failed to unmarshal request JSON: %w", err) } - return request, nil + return req } From c8df8dd29facf72cf023635d3e45497600de493d Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 25 Nov 2025 19:25:11 +0530 Subject: [PATCH 12/77] use server stream Signed-off-by: Mayank Shah --- cmd/xtrabackup/run-backup/main.go | 57 ++++++++++++++++++++++------- pkg/xtrabackup/api/app.pb.go | 6 ++-- pkg/xtrabackup/api/app.proto | 2 +- pkg/xtrabackup/api/app_grpc.pb.go | 60 ++++++++++++++++--------------- pkg/xtrabackup/server/app.go | 8 +++-- 5 files changed, 87 insertions(+), 46 deletions(-) diff --git a/cmd/xtrabackup/run-backup/main.go b/cmd/xtrabackup/run-backup/main.go index 1c7d5b3c37..2aad0aabc1 100644 --- a/cmd/xtrabackup/run-backup/main.go +++ b/cmd/xtrabackup/run-backup/main.go @@ -2,8 +2,7 @@ package main import ( "context" - "encoding/json" - "flag" + "io" "log" "os" @@ -34,28 +33,62 @@ func main() { client := api.NewXtrabackupServiceClient(conn) - _, err = client.CreateBackup(context.Background(), req) + stream, err := client.CreateBackup(context.Background(), req) if err != nil { if status.Code(err) == codes.FailedPrecondition { log.Fatal("Backup is already running") } log.Fatal("Failed to create backup: %w", err) } + for { + _, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + log.Fatal("Failed to receive response: %w", err) + } + } log.Println("Backup created successfully") } func getRequestObject() *api.CreateBackupRequest { - var rawB64Json string - flag.StringVar(&rawB64Json, "request-json", "", "Request JSON in base64 encoded string") - flag.Parse() - - if rawB64Json == "" { - log.Fatal("Backup 
config is required") + req := &api.CreateBackupRequest{ + BackupConfig: &api.BackupConfig{}, } - req := &api.CreateBackupRequest{} - if err := json.Unmarshal([]byte(rawB64Json), req); err != nil { - log.Fatal("Failed to unmarshal request JSON: %w", err) + req.BackupName = os.Getenv("BACKUP_NAME") + storageType := os.Getenv("STORAGE_TYPE") + switch storageType { + case "s3": + req.BackupConfig.Type = api.BackupStorageType_S3 + setS3Config(req) + case "azure": + req.BackupConfig.Type = api.BackupStorageType_AZURE + setAzureConfig(req) + default: + log.Fatalf("Invalid storage type: %s", storageType) } return req } + +func setS3Config(req *api.CreateBackupRequest) { + req.BackupConfig.S3 = &api.S3Config{ + Bucket: os.Getenv("S3_BUCKET"), + Region: os.Getenv("DEFAULT_REGION"), + EndpointUrl: os.Getenv("ENDPOINT"), + AccessKey: os.Getenv("ACCESS_KEY_ID"), + SecretKey: os.Getenv("SECRET_ACCESS_KEY"), + StorageClass: os.Getenv("S3_STORAGE_CLASS"), + } +} + +func setAzureConfig(req *api.CreateBackupRequest) { + req.BackupConfig.Azure = &api.AzureConfig{ + ContainerName: os.Getenv("AZURE_CONTAINER_NAME"), + EndpointUrl: os.Getenv("AZURE_ENDPOINT"), + StorageClass: os.Getenv("AZURE_STORAGE_CLASS"), + StorageAccount: os.Getenv("AZURE_STORAGE_ACCOUNT"), + AccessKey: os.Getenv("AZURE_ACCESS_KEY"), + } +} diff --git a/pkg/xtrabackup/api/app.pb.go b/pkg/xtrabackup/api/app.pb.go index 576659aa7a..be3b7c5012 100644 --- a/pkg/xtrabackup/api/app.pb.go +++ b/pkg/xtrabackup/api/app.pb.go @@ -841,10 +841,10 @@ const file_app_proto_rawDesc = "" + "\x11BackupStorageType\x12\x06\n" + "\x02S3\x10\x00\x12\t\n" + "\x05AZURE\x10\x01\x12\a\n" + - "\x03GCS\x10\x022\xee\x01\n" + + "\x03GCS\x10\x022\xf0\x01\n" + "\x11XtrabackupService\x12O\n" + - "\x16GetCurrentBackupConfig\x12\".api.GetCurrentBackupConfigRequest\x1a\x11.api.BackupConfig\x12C\n" + - "\fCreateBackup\x12\x18.api.CreateBackupRequest\x1a\x19.api.CreateBackupResponse\x12C\n" + + 
"\x16GetCurrentBackupConfig\x12\".api.GetCurrentBackupConfigRequest\x1a\x11.api.BackupConfig\x12E\n" + + "\fCreateBackup\x12\x18.api.CreateBackupRequest\x1a\x19.api.CreateBackupResponse0\x01\x12C\n" + "\fDeleteBackup\x12\x18.api.DeleteBackupRequest\x1a\x19.api.DeleteBackupResponseBGZEgithub.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/apib\x06proto3" var ( diff --git a/pkg/xtrabackup/api/app.proto b/pkg/xtrabackup/api/app.proto index ae6eac88f1..35d4f571e8 100644 --- a/pkg/xtrabackup/api/app.proto +++ b/pkg/xtrabackup/api/app.proto @@ -6,7 +6,7 @@ option go_package = "github.com/percona/percona-xtradb-cluster-operator/pkg/xtra service XtrabackupService { rpc GetCurrentBackupConfig(GetCurrentBackupConfigRequest) returns (BackupConfig); - rpc CreateBackup(CreateBackupRequest) returns (CreateBackupResponse); + rpc CreateBackup(CreateBackupRequest) returns (stream CreateBackupResponse); rpc DeleteBackup(DeleteBackupRequest) returns (DeleteBackupResponse); } diff --git a/pkg/xtrabackup/api/app_grpc.pb.go b/pkg/xtrabackup/api/app_grpc.pb.go index a62ff850f0..e097007523 100644 --- a/pkg/xtrabackup/api/app_grpc.pb.go +++ b/pkg/xtrabackup/api/app_grpc.pb.go @@ -29,7 +29,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
type XtrabackupServiceClient interface { GetCurrentBackupConfig(ctx context.Context, in *GetCurrentBackupConfigRequest, opts ...grpc.CallOption) (*BackupConfig, error) - CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*CreateBackupResponse, error) + CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CreateBackupResponse], error) DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*DeleteBackupResponse, error) } @@ -51,16 +51,25 @@ func (c *xtrabackupServiceClient) GetCurrentBackupConfig(ctx context.Context, in return out, nil } -func (c *xtrabackupServiceClient) CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*CreateBackupResponse, error) { +func (c *xtrabackupServiceClient) CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CreateBackupResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(CreateBackupResponse) - err := c.cc.Invoke(ctx, XtrabackupService_CreateBackup_FullMethodName, in, out, cOpts...) + stream, err := c.cc.NewStream(ctx, &XtrabackupService_ServiceDesc.Streams[0], XtrabackupService_CreateBackup_FullMethodName, cOpts...) if err != nil { return nil, err } - return out, nil + x := &grpc.GenericClientStream[CreateBackupRequest, CreateBackupResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil } +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type XtrabackupService_CreateBackupClient = grpc.ServerStreamingClient[CreateBackupResponse] + func (c *xtrabackupServiceClient) DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*DeleteBackupResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DeleteBackupResponse) @@ -76,7 +85,7 @@ func (c *xtrabackupServiceClient) DeleteBackup(ctx context.Context, in *DeleteBa // for forward compatibility. type XtrabackupServiceServer interface { GetCurrentBackupConfig(context.Context, *GetCurrentBackupConfigRequest) (*BackupConfig, error) - CreateBackup(context.Context, *CreateBackupRequest) (*CreateBackupResponse, error) + CreateBackup(*CreateBackupRequest, grpc.ServerStreamingServer[CreateBackupResponse]) error DeleteBackup(context.Context, *DeleteBackupRequest) (*DeleteBackupResponse, error) mustEmbedUnimplementedXtrabackupServiceServer() } @@ -91,8 +100,8 @@ type UnimplementedXtrabackupServiceServer struct{} func (UnimplementedXtrabackupServiceServer) GetCurrentBackupConfig(context.Context, *GetCurrentBackupConfigRequest) (*BackupConfig, error) { return nil, status.Errorf(codes.Unimplemented, "method GetCurrentBackupConfig not implemented") } -func (UnimplementedXtrabackupServiceServer) CreateBackup(context.Context, *CreateBackupRequest) (*CreateBackupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateBackup not implemented") +func (UnimplementedXtrabackupServiceServer) CreateBackup(*CreateBackupRequest, grpc.ServerStreamingServer[CreateBackupResponse]) error { + return status.Errorf(codes.Unimplemented, "method CreateBackup not implemented") } func (UnimplementedXtrabackupServiceServer) DeleteBackup(context.Context, *DeleteBackupRequest) (*DeleteBackupResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteBackup not implemented") @@ -136,24 +145,17 @@ func _XtrabackupService_GetCurrentBackupConfig_Handler(srv interface{}, ctx cont return 
interceptor(ctx, in, info, handler) } -func _XtrabackupService_CreateBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateBackupRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(XtrabackupServiceServer).CreateBackup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: XtrabackupService_CreateBackup_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(XtrabackupServiceServer).CreateBackup(ctx, req.(*CreateBackupRequest)) +func _XtrabackupService_CreateBackup_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(CreateBackupRequest) + if err := stream.RecvMsg(m); err != nil { + return err } - return interceptor(ctx, in, info, handler) + return srv.(XtrabackupServiceServer).CreateBackup(m, &grpc.GenericServerStream[CreateBackupRequest, CreateBackupResponse]{ServerStream: stream}) } +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type XtrabackupService_CreateBackupServer = grpc.ServerStreamingServer[CreateBackupResponse] + func _XtrabackupService_DeleteBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteBackupRequest) if err := dec(in); err != nil { @@ -183,15 +185,17 @@ var XtrabackupService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetCurrentBackupConfig", Handler: _XtrabackupService_GetCurrentBackupConfig_Handler, }, - { - MethodName: "CreateBackup", - Handler: _XtrabackupService_CreateBackup_Handler, - }, { MethodName: "DeleteBackup", Handler: _XtrabackupService_DeleteBackup_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "CreateBackup", + Handler: _XtrabackupService_CreateBackup_Handler, + ServerStreams: true, + }, + }, Metadata: "app.proto", } diff --git a/pkg/xtrabackup/server/app.go b/pkg/xtrabackup/server/app.go index 202a31d44b..ef0c7570e9 100644 --- a/pkg/xtrabackup/server/app.go +++ b/pkg/xtrabackup/server/app.go @@ -2,6 +2,7 @@ package server import ( "context" + "time" "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" "google.golang.org/grpc/codes" @@ -26,8 +27,11 @@ func (s *appServer) GetCurrentBackupConfig(ctx context.Context, req *api.GetCurr return nil, status.Errorf(codes.Unimplemented, "method GetCurrentBackupConfig not implemented") } -func (s *appServer) CreateBackup(ctx context.Context, req *api.CreateBackupRequest) (*api.CreateBackupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateBackup not implemented") +func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.XtrabackupService_CreateBackupServer) error { + // do some work, then send message + time.Sleep(120 * time.Second) + stream.Send(&api.CreateBackupResponse{}) + return nil } func (s *appServer) DeleteBackup(ctx context.Context, req *api.DeleteBackupRequest) 
(*api.DeleteBackupResponse, error) { From 26bab21762e7a7f5c83e1879bcc77982b3da0dfa Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 25 Nov 2025 19:26:10 +0530 Subject: [PATCH 13/77] implement backup job Signed-off-by: Mayank Shah --- pkg/controller/pxcbackup/controller.go | 21 +++-- pkg/pxc/backup/job.go | 111 ++++++++++++++++++++----- 2 files changed, 104 insertions(+), 28 deletions(-) diff --git a/pkg/controller/pxcbackup/controller.go b/pkg/controller/pxcbackup/controller.go index 172d76b013..c6d3f0293c 100644 --- a/pkg/controller/pxcbackup/controller.go +++ b/pkg/controller/pxcbackup/controller.go @@ -27,6 +27,7 @@ import ( "github.com/percona/percona-xtradb-cluster-operator/clientcmd" api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" + "github.com/percona/percona-xtradb-cluster-operator/pkg/features" "github.com/percona/percona-xtradb-cluster-operator/pkg/k8s" "github.com/percona/percona-xtradb-cluster-operator/pkg/naming" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app/binlogcollector" @@ -290,9 +291,17 @@ func (r *ReconcilePerconaXtraDBClusterBackup) createBackupJob( if err != nil { return nil, errors.Wrap(err, "failed to get initImage") } - job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, initImage) - if err != nil { - return nil, errors.Wrap(err, "can't create job spec") + + if features.Enabled(ctx, features.BackupXtrabackup) { + job.Spec, err = bcp.JobSpecXtrabackup(cr.Spec, cluster, job, initImage) + if err != nil { + return nil, errors.Wrap(err, "can't create job spec for xtrabackup") + } + } else { + job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, initImage) + if err != nil { + return nil, errors.Wrap(err, "can't create job spec") + } } switch storage.Type { @@ -319,7 +328,7 @@ func (r *ReconcilePerconaXtraDBClusterBackup) createBackupJob( return nil, errors.Wrap(err, "get backup pvc") } - err := backup.SetStoragePVC(&job.Spec, cr, pvc.Name) + err := backup.SetStoragePVC(ctx, &job.Spec, cr, pvc.Name) if 
err != nil { return nil, errors.Wrap(err, "set storage FS") } @@ -329,7 +338,7 @@ func (r *ReconcilePerconaXtraDBClusterBackup) createBackupJob( } cr.Status.Destination.SetS3Destination(storage.S3.Bucket, cr.Spec.PXCCluster+"-"+cr.CreationTimestamp.Time.Format("2006-01-02-15:04:05")+"-full") - err := backup.SetStorageS3(&job.Spec, cr) + err := backup.SetStorageS3(ctx, &job.Spec, cr) if err != nil { return nil, errors.Wrap(err, "set storage FS") } @@ -339,7 +348,7 @@ func (r *ReconcilePerconaXtraDBClusterBackup) createBackupJob( } cr.Status.Destination.SetAzureDestination(storage.Azure.ContainerPath, cr.Spec.PXCCluster+"-"+cr.CreationTimestamp.Time.Format("2006-01-02-15:04:05")+"-full") - err := backup.SetStorageAzure(&job.Spec, cr) + err := backup.SetStorageAzure(ctx, &job.Spec, cr) if err != nil { return nil, errors.Wrap(err, "set storage FS for Azure") } diff --git a/pkg/pxc/backup/job.go b/pkg/pxc/backup/job.go index 5ca33b6c9b..af41e7df4b 100644 --- a/pkg/pxc/backup/job.go +++ b/pkg/pxc/backup/job.go @@ -1,6 +1,7 @@ package backup import ( + "context" "path" "strconv" @@ -10,6 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" + "github.com/percona/percona-xtradb-cluster-operator/pkg/features" "github.com/percona/percona-xtradb-cluster-operator/pkg/naming" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" @@ -36,6 +38,65 @@ func (*Backup) Job(cr *api.PerconaXtraDBClusterBackup, cluster *api.PerconaXtraD } } +func (bcp *Backup) JobSpecXtrabackup(spec api.PXCBackupSpec, cluster *api.PerconaXtraDBCluster, job *batchv1.Job, initImage string) (batchv1.JobSpec, error) { + var volumeMounts []corev1.VolumeMount + var volumes []corev1.Volume + volumes = append(volumes, + corev1.Volume{ + Name: app.BinVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + ) + + 
volumeMounts = append(volumeMounts, + corev1.VolumeMount{ + Name: app.BinVolumeName, + MountPath: app.BinVolumeMountPath, + }, + ) + + storage := cluster.Spec.Backup.Storages[spec.StorageName] + var initContainers []corev1.Container + initContainers = append(initContainers, statefulset.BackupInitContainer(cluster, initImage, storage.ContainerSecurityContext)) + + container := corev1.Container{ + Name: "xtrabackup", + Image: bcp.image, + SecurityContext: storage.ContainerSecurityContext, + ImagePullPolicy: bcp.imagePullPolicy, + Command: []string{"/opt/percona/xtrabackup-run-backup"}, + Resources: storage.Resources, + VolumeMounts: volumeMounts, + } + return batchv1.JobSpec{ + ActiveDeadlineSeconds: spec.ActiveDeadlineSeconds, + Selector: &metav1.LabelSelector{ + MatchLabels: job.Labels, + }, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + container, + }, + Volumes: volumes, + InitContainers: initContainers, + SecurityContext: storage.PodSecurityContext, + ImagePullSecrets: bcp.imagePullSecrets, + ServiceAccountName: cluster.Spec.Backup.ServiceAccountName, + Affinity: storage.Affinity, + TopologySpreadConstraints: pxc.PodTopologySpreadConstraints(storage.TopologySpreadConstraints, job.Labels), + Tolerations: storage.Tolerations, + NodeSelector: storage.NodeSelector, + SchedulerName: storage.SchedulerName, + PriorityClassName: storage.PriorityClassName, + RuntimeClassName: storage.RuntimeClassName, + }, + }, + }, nil +} + func (bcp *Backup) JobSpec(spec api.PXCBackupSpec, cluster *api.PerconaXtraDBCluster, job *batchv1.Job, initImage string) (batchv1.JobSpec, error) { manualSelector := true backoffLimit := int32(10) @@ -201,7 +262,7 @@ func appendStorageSecret(job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBacku return nil } -func SetStoragePVC(job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBackup, volName string) error { +func SetStoragePVC(ctx context.Context, job *batchv1.JobSpec, cr 
*api.PerconaXtraDBClusterBackup, volName string) error { pvc := corev1.Volume{ Name: "xtrabackup", } @@ -224,15 +285,17 @@ func SetStoragePVC(job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBackup, vol pvc, }...) - err := appendStorageSecret(job, cr) - if err != nil { - return errors.Wrap(err, "failed to append storage secret") + if !features.Enabled(ctx, features.BackupXtrabackup) { + err := appendStorageSecret(job, cr) + if err != nil { + return errors.Wrap(err, "failed to append storage secret") + } } return nil } -func SetStorageAzure(job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBackup) error { +func SetStorageAzure(ctx context.Context, job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBackup) error { if cr.Status.Azure == nil { return errors.New("azure storage is not specified in backup status") } @@ -276,16 +339,18 @@ func SetStorageAzure(job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBackup) e } job.Template.Spec.Containers[0].Env = append(job.Template.Spec.Containers[0].Env, storageAccount, accessKey, containerName, endpoint, storageClass, backupPath) - // add SSL volumes - err := appendStorageSecret(job, cr) - if err != nil { - return errors.Wrap(err, "failed to append storage secrets") + if !features.Enabled(ctx, features.BackupXtrabackup) { + // add SSL volumes + err := appendStorageSecret(job, cr) + if err != nil { + return errors.Wrap(err, "failed to append storage secrets") + } } return nil } -func SetStorageS3(job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBackup) error { +func SetStorageS3(ctx context.Context, job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBackup) error { if cr.Status.S3 == nil { return errors.New("s3 storage is not specified in backup status") } @@ -340,19 +405,21 @@ func SetStorageS3(job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBackup) erro } job.Template.Spec.Containers[0].Env = append(job.Template.Spec.Containers[0].Env, bucketEnv, bucketPathEnv) - // add SSL volumes - err := appendStorageSecret(job, cr) - 
if err != nil { - return errors.Wrap(err, "failed to append storage secrets") - } + if !features.Enabled(ctx, features.BackupXtrabackup) { + // add SSL volumes + err := appendStorageSecret(job, cr) + if err != nil { + return errors.Wrap(err, "failed to append storage secrets") + } - // add ca bundle (this is used by the aws-cli to verify the connection to S3) - if sel := s3.CABundle; sel != nil { - appendCABundleSecretVolume( - &job.Template.Spec.Volumes, - &job.Template.Spec.Containers[0].VolumeMounts, - sel, - ) + // add ca bundle (this is used by the aws-cli to verify the connection to S3) + if sel := s3.CABundle; sel != nil { + appendCABundleSecretVolume( + &job.Template.Spec.Volumes, + &job.Template.Spec.Containers[0].VolumeMounts, + sel, + ) + } } return nil From 3957c44913ecff83227d97507119640aeef8569d Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Wed, 26 Nov 2025 14:27:17 +0530 Subject: [PATCH 14/77] refactor getPrimaryPod func Signed-off-by: Mayank Shah --- pkg/controller/pxc/replication.go | 18 +---- pkg/controller/pxc/upgrade.go | 112 +---------------------------- pkg/controller/pxc/version.go | 2 +- pkg/k8s/cluster.go | 113 ++++++++++++++++++++++++++++++ pkg/k8s/utils.go | 12 ++++ 5 files changed, 132 insertions(+), 125 deletions(-) diff --git a/pkg/controller/pxc/replication.go b/pkg/controller/pxc/replication.go index 487b62cb94..78affdb2d3 100644 --- a/pkg/controller/pxc/replication.go +++ b/pkg/controller/pxc/replication.go @@ -16,6 +16,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" + "github.com/percona/percona-xtradb-cluster-operator/pkg/k8s" "github.com/percona/percona-xtradb-cluster-operator/pkg/naming" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app/statefulset" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/queries" @@ -120,12 +121,12 @@ func (r *ReconcilePerconaXtraDBCluster) reconcileReplication(ctx context.Context // connect 
to failed/pending pods podList := make([]corev1.Pod, 0) for _, pod := range listRaw.Items { - if isPodReady(pod) { + if k8s.IsPodReady(pod) { podList = append(podList, pod) } } - primary, err := r.getPrimaryPod(ctx, cr) + primary, err := k8s.GetPrimaryPod(ctx, r.client, cr) if err != nil { return errors.Wrap(err, "get primary pxc pod") } @@ -568,19 +569,6 @@ func NewExposedPXCService(svcName string, cr *api.PerconaXtraDBCluster) *corev1. return svc } -// isPodReady returns a boolean reflecting if a pod is in a "ready" state -func isPodReady(pod corev1.Pod) bool { - for _, condition := range pod.Status.Conditions { - if condition.Status != corev1.ConditionTrue { - continue - } - if condition.Type == corev1.PodReady { - return true - } - } - return false -} - func currentReplicaConfig(name string, status *api.ReplicationStatus) api.ReplicationChannelConfig { res := api.ReplicationChannelConfig{} if status == nil { diff --git a/pkg/controller/pxc/upgrade.go b/pkg/controller/pxc/upgrade.go index f5f59b83d0..917df8fd71 100644 --- a/pkg/controller/pxc/upgrade.go +++ b/pkg/controller/pxc/upgrade.go @@ -4,7 +4,6 @@ import ( "context" "crypto/md5" "encoding/json" - stdErrors "errors" "fmt" "sort" "strconv" @@ -26,14 +25,11 @@ import ( "github.com/percona/percona-xtradb-cluster-operator/pkg/k8s" "github.com/percona/percona-xtradb-cluster-operator/pkg/naming" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc" - "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app/statefulset" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/queries" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/users" "github.com/percona/percona-xtradb-cluster-operator/pkg/util" ) -var NoProxyDetectedError = errors.New("can't detect enabled proxy, please enable HAProxy or ProxySQL") - func (r *ReconcilePerconaXtraDBCluster) updatePod( ctx context.Context, sfs api.StatefulApp, @@ -249,7 +245,7 @@ func (r *ReconcilePerconaXtraDBCluster) smartUpdate(ctx 
context.Context, sfs api return nil } - primary, err := r.getPrimaryPod(ctx, cr) + primary, err := k8s.GetPrimaryPod(ctx, r.client, cr) if err != nil { return errors.Wrap(err, "get primary pod") } @@ -342,7 +338,7 @@ func (r *ReconcilePerconaXtraDBCluster) waitHostgroups(ctx context.Context, cr * return nil } - database, err := r.connectProxy(cr) + database, err := k8s.GetProxyConnection(cr, r.client) if err != nil { return errors.Wrap(err, "failed to get proxySQL db") } @@ -371,7 +367,7 @@ func (r *ReconcilePerconaXtraDBCluster) waitUntilOnline(ctx context.Context, cr return nil } - database, err := r.connectProxy(cr) + database, err := k8s.GetProxyConnection(cr, r.client) if err != nil { return errors.Wrap(err, "failed to get proxySQL db") } @@ -431,108 +427,6 @@ func retry(in, limit time.Duration, f func() (bool, error)) error { } } -// connectProxy returns a new connection through the proxy (ProxySQL or HAProxy) -func (r *ReconcilePerconaXtraDBCluster) connectProxy(cr *api.PerconaXtraDBCluster) (queries.Database, error) { - var database queries.Database - var user, host string - var port, proxySize int32 - - if cr.ProxySQLEnabled() { - user = users.ProxyAdmin - host = fmt.Sprintf("%s-proxysql-unready.%s", cr.ObjectMeta.Name, cr.Namespace) - proxySize = cr.Spec.ProxySQL.Size - port = 6032 - } else if cr.HAProxyEnabled() { - user = users.Monitor - host = fmt.Sprintf("%s-haproxy.%s", cr.Name, cr.Namespace) - proxySize = cr.Spec.HAProxy.Size - - hasKey, err := cr.ConfigHasKey("mysqld", "proxy_protocol_networks") - if err != nil { - return database, errors.Wrap(err, "check if config has proxy_protocol_networks key") - } - - port = 3306 - if hasKey && cr.CompareVersionWith("1.6.0") >= 0 { - port = 33062 - } - } else { - return database, NoProxyDetectedError - } - - secrets := cr.Spec.SecretsName - if cr.CompareVersionWith("1.6.0") >= 0 { - secrets = "internal-" + cr.Name - } - - for i := 0; ; i++ { - db, err := queries.New(r.client, cr.Namespace, secrets, user, host, 
port, cr.Spec.PXC.ReadinessProbes.TimeoutSeconds) - if err != nil && i < int(proxySize) { - time.Sleep(time.Second) - } else if err != nil && i == int(proxySize) { - return database, err - } else { - database = db - break - } - } - - return database, nil -} - -func (r *ReconcilePerconaXtraDBCluster) getPrimaryPod(ctx context.Context, cr *api.PerconaXtraDBCluster) (string, error) { - conn, err := r.connectProxy(cr) - if err != nil { - if errors.Is(err, NoProxyDetectedError) && cr.Spec.PXC.Size == 1 { - firstReadyPod := func() (string, error) { - sts := statefulset.NewNode(cr) - - podList := new(corev1.PodList) - if err := r.client.List(ctx, podList, &client.ListOptions{ - Namespace: cr.Namespace, - LabelSelector: labels.SelectorFromSet(sts.Labels()), - }); err != nil { - return "", errors.Wrap(err, "get pod list") - } - - readyPods := make([]corev1.Pod, 0) - for _, pod := range podList.Items { - if isPodReady(pod) { - readyPods = append(readyPods, pod) - } - } - if len(readyPods) == 0 { - return "", errors.New("no ready pxc pods") - } - if len(readyPods) != int(cr.Spec.PXC.Size) { - return "", errors.New("waiting for pxc resize") - } - - return readyPods[0].Status.PodIP, nil - } - host, rerr := firstReadyPod() - if rerr == nil { - return host, nil - } - - err = stdErrors.Join(rerr, err) - } - return "", errors.Wrap(err, "failed to get proxy connection") - } - defer conn.Close() - - if cr.HAProxyEnabled() { - host, err := conn.Hostname() - if err != nil { - return "", err - } - - return host, nil - } - - return conn.PrimaryHost() -} - func (r *ReconcilePerconaXtraDBCluster) waitPXCSynced(cr *api.PerconaXtraDBCluster, host string, waitLimit int) error { secrets := cr.Spec.SecretsName port := int32(3306) diff --git a/pkg/controller/pxc/version.go b/pkg/controller/pxc/version.go index d0cc741ca0..f34aee5eeb 100644 --- a/pkg/controller/pxc/version.go +++ b/pkg/controller/pxc/version.go @@ -452,7 +452,7 @@ func (r *ReconcilePerconaXtraDBCluster) mysqlVersion(ctx 
context.Context, cr *ap } for _, pod := range list.Items { - if !isPodReady(pod) { + if !k8s.IsPodReady(pod) { continue } diff --git a/pkg/k8s/cluster.go b/pkg/k8s/cluster.go index ee9b869d23..5edef7883d 100644 --- a/pkg/k8s/cluster.go +++ b/pkg/k8s/cluster.go @@ -2,9 +2,12 @@ package k8s import ( "context" + "fmt" "strings" "time" + stdErrors "errors" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" @@ -13,6 +16,8 @@ import ( api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app/statefulset" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/queries" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/users" ) func PauseCluster(ctx context.Context, cl client.Client, cr *api.PerconaXtraDBCluster) (bool, error) { @@ -142,6 +147,114 @@ func PauseClusterWithWait(ctx context.Context, cl client.Client, cr *api.Percona return nil } +var NoProxyDetectedError = errors.New("can't detect enabled proxy, please enable HAProxy or ProxySQL") + +// GetPrimaryPod returns the IP/host of the primary pod for the given cluster +func GetPrimaryPod( + ctx context.Context, + cl client.Client, + cr *api.PerconaXtraDBCluster) (string, error) { + conn, err := GetProxyConnection(cr, cl) + if err != nil { + if errors.Is(err, NoProxyDetectedError) && cr.Spec.PXC.Size == 1 { + firstReadyPod := func() (string, error) { + sts := statefulset.NewNode(cr) + + podList := new(corev1.PodList) + if err := cl.List(ctx, podList, &client.ListOptions{ + Namespace: cr.Namespace, + LabelSelector: labels.SelectorFromSet(sts.Labels()), + }); err != nil { + return "", errors.Wrap(err, "get pod list") + } + + readyPods := make([]corev1.Pod, 0) + for _, pod := range podList.Items { + if IsPodReady(pod) { + readyPods = append(readyPods, pod) + } + } + if len(readyPods) == 0 { + return "", errors.New("no ready 
pxc pods") + } + if len(readyPods) != int(cr.Spec.PXC.Size) { + return "", errors.New("waiting for pxc resize") + } + + return readyPods[0].Status.PodIP, nil + } + host, rerr := firstReadyPod() + if rerr == nil { + return host, nil + } + + err = stdErrors.Join(rerr, err) + } + return "", errors.Wrap(err, "failed to get proxy connection") + } + defer conn.Close() + + if cr.HAProxyEnabled() { + host, err := conn.Hostname() + if err != nil { + return "", err + } + + return host, nil + } + + return conn.PrimaryHost() +} + +// GetProxyConnection returns a new connection through the proxy (ProxySQL or HAProxy) +func GetProxyConnection(cr *api.PerconaXtraDBCluster, cl client.Client) (queries.Database, error) { + var database queries.Database + var user, host string + var port, proxySize int32 + + if cr.ProxySQLEnabled() { + user = users.ProxyAdmin + host = fmt.Sprintf("%s-proxysql-unready.%s", cr.ObjectMeta.Name, cr.Namespace) + proxySize = cr.Spec.ProxySQL.Size + port = 6032 + } else if cr.HAProxyEnabled() { + user = users.Monitor + host = fmt.Sprintf("%s-haproxy.%s", cr.Name, cr.Namespace) + proxySize = cr.Spec.HAProxy.Size + + hasKey, err := cr.ConfigHasKey("mysqld", "proxy_protocol_networks") + if err != nil { + return database, errors.Wrap(err, "check if config has proxy_protocol_networks key") + } + + port = 3306 + if hasKey && cr.CompareVersionWith("1.6.0") >= 0 { + port = 33062 + } + } else { + return database, NoProxyDetectedError + } + + secrets := cr.Spec.SecretsName + if cr.CompareVersionWith("1.6.0") >= 0 { + secrets = "internal-" + cr.Name + } + + for i := 0; ; i++ { + db, err := queries.New(cl, cr.Namespace, secrets, user, host, port, cr.Spec.PXC.ReadinessProbes.TimeoutSeconds) + if err != nil && i < int(proxySize) { + time.Sleep(time.Second) + } else if err != nil && i == int(proxySize) { + return database, err + } else { + database = db + break + } + } + + return database, nil +} + func waitForPodsShutdown(ctx context.Context, cl client.Client, ls 
map[string]string, namespace string, gracePeriodSec int64) error { for i := int64(0); i < waitLimitSec+gracePeriodSec; i++ { pods := corev1.PodList{} diff --git a/pkg/k8s/utils.go b/pkg/k8s/utils.go index 8a4dd6d90d..050a1deab1 100644 --- a/pkg/k8s/utils.go +++ b/pkg/k8s/utils.go @@ -56,6 +56,18 @@ func GetInitImage(ctx context.Context, cr *api.PerconaXtraDBCluster, cli client. return imageName, nil } +func IsPodReady(pod corev1.Pod) bool { + for _, condition := range pod.Status.Conditions { + if condition.Status != corev1.ConditionTrue { + continue + } + if condition.Type == corev1.PodReady { + return true + } + } + return false +} + func operatorImageName(operatorPod *corev1.Pod) (string, error) { for _, c := range operatorPod.Spec.Containers { if c.Name == "percona-xtradb-cluster-operator" { From c64dd5cfe9ca55b13c6d6eeff73991e92e1d1b71 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Wed, 26 Nov 2025 14:34:39 +0530 Subject: [PATCH 15/77] add HOST env var to backup job Signed-off-by: Mayank Shah --- cmd/xtrabackup/run-backup/main.go | 23 ++++++++++-------- pkg/controller/pxcbackup/controller.go | 2 +- pkg/controller/pxcbackup/deadline_test.go | 10 ++++---- pkg/pxc/backup/backup.go | 5 +++- pkg/pxc/backup/job.go | 29 ++++++++++++++++++++++- 5 files changed, 51 insertions(+), 18 deletions(-) diff --git a/cmd/xtrabackup/run-backup/main.go b/cmd/xtrabackup/run-backup/main.go index 2aad0aabc1..ce7c2f747c 100644 --- a/cmd/xtrabackup/run-backup/main.go +++ b/cmd/xtrabackup/run-backup/main.go @@ -2,11 +2,14 @@ package main import ( "context" + "fmt" "io" "log" "os" "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" + xbscapi "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" + xbscserver "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/server" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" @@ -25,13 +28,13 @@ func main() { log.Fatalf("HOST environment 
variable is not set") } - conn, err := grpc.NewClient(serverHost, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(fmt.Sprintf("%s:%d", serverHost, xbscserver.DefaultPort), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatal("Failed to connect to server: %w", err) } defer conn.Close() - client := api.NewXtrabackupServiceClient(conn) + client := xbscapi.NewXtrabackupServiceClient(conn) stream, err := client.CreateBackup(context.Background(), req) if err != nil { @@ -52,8 +55,8 @@ func main() { log.Println("Backup created successfully") } -func getRequestObject() *api.CreateBackupRequest { - req := &api.CreateBackupRequest{ +func getRequestObject() *xbscapi.CreateBackupRequest { + req := &xbscapi.CreateBackupRequest{ BackupConfig: &api.BackupConfig{}, } @@ -61,10 +64,10 @@ func getRequestObject() *api.CreateBackupRequest { storageType := os.Getenv("STORAGE_TYPE") switch storageType { case "s3": - req.BackupConfig.Type = api.BackupStorageType_S3 + req.BackupConfig.Type = xbscapi.BackupStorageType_S3 setS3Config(req) case "azure": - req.BackupConfig.Type = api.BackupStorageType_AZURE + req.BackupConfig.Type = xbscapi.BackupStorageType_AZURE setAzureConfig(req) default: log.Fatalf("Invalid storage type: %s", storageType) @@ -72,8 +75,8 @@ func getRequestObject() *api.CreateBackupRequest { return req } -func setS3Config(req *api.CreateBackupRequest) { - req.BackupConfig.S3 = &api.S3Config{ +func setS3Config(req *xbscapi.CreateBackupRequest) { + req.BackupConfig.S3 = &xbscapi.S3Config{ Bucket: os.Getenv("S3_BUCKET"), Region: os.Getenv("DEFAULT_REGION"), EndpointUrl: os.Getenv("ENDPOINT"), @@ -83,8 +86,8 @@ func setS3Config(req *api.CreateBackupRequest) { } } -func setAzureConfig(req *api.CreateBackupRequest) { - req.BackupConfig.Azure = &api.AzureConfig{ +func setAzureConfig(req *xbscapi.CreateBackupRequest) { + req.BackupConfig.Azure = &xbscapi.AzureConfig{ ContainerName: 
os.Getenv("AZURE_CONTAINER_NAME"), EndpointUrl: os.Getenv("AZURE_ENDPOINT"), StorageClass: os.Getenv("AZURE_STORAGE_CLASS"), diff --git a/pkg/controller/pxcbackup/controller.go b/pkg/controller/pxcbackup/controller.go index c6d3f0293c..6355e39b95 100644 --- a/pkg/controller/pxcbackup/controller.go +++ b/pkg/controller/pxcbackup/controller.go @@ -285,7 +285,7 @@ func (r *ReconcilePerconaXtraDBClusterBackup) createBackupJob( ) (*batchv1.Job, error) { log := logf.FromContext(ctx) - bcp := backup.New(cluster) + bcp := backup.New(cluster, r.client) job := bcp.Job(cr, cluster) initImage, err := k8s.GetInitImage(ctx, cluster, r.client) if err != nil { diff --git a/pkg/controller/pxcbackup/deadline_test.go b/pkg/controller/pxcbackup/deadline_test.go index 9d5f29c1e9..19c876afea 100644 --- a/pkg/controller/pxcbackup/deadline_test.go +++ b/pkg/controller/pxcbackup/deadline_test.go @@ -106,7 +106,7 @@ var _ = Describe("Suspended deadline", func() { cr, err := readDefaultBackup("backup1", "test") Expect(err).ToNot(HaveOccurred()) - bcp := backup.New(cluster) + bcp := backup.New(cluster, buildFakeClient()) job := bcp.Job(cr, cluster) job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, "") @@ -134,7 +134,7 @@ var _ = Describe("Suspended deadline", func() { cr, err := readDefaultBackup("backup1", "test") Expect(err).ToNot(HaveOccurred()) - bcp := backup.New(cluster) + bcp := backup.New(cluster, buildFakeClient()) job := bcp.Job(cr, cluster) job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, "") @@ -162,7 +162,7 @@ var _ = Describe("Suspended deadline", func() { cr, err := readDefaultBackup("backup1", "test") Expect(err).ToNot(HaveOccurred()) - bcp := backup.New(cluster) + bcp := backup.New(cluster, buildFakeClient()) job := bcp.Job(cr, cluster) job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, "") @@ -190,7 +190,7 @@ var _ = Describe("Suspended deadline", func() { cr, err := readDefaultBackup("backup1", "test") Expect(err).ToNot(HaveOccurred()) - bcp := backup.New(cluster) + bcp 
:= backup.New(cluster, buildFakeClient()) job := bcp.Job(cr, cluster) job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, "") @@ -226,7 +226,7 @@ var _ = Describe("Suspended deadline", func() { cr, err := readDefaultBackup("backup1", "test") Expect(err).ToNot(HaveOccurred()) - bcp := backup.New(cluster) + bcp := backup.New(cluster, buildFakeClient()) job := bcp.Job(cr, cluster) job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, "") diff --git a/pkg/pxc/backup/backup.go b/pkg/pxc/backup/backup.go index 65bff1666d..b07527da05 100644 --- a/pkg/pxc/backup/backup.go +++ b/pkg/pxc/backup/backup.go @@ -4,6 +4,7 @@ import ( corev1 "k8s.io/api/core/v1" api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) type Backup struct { @@ -13,9 +14,10 @@ type Backup struct { imagePullSecrets []corev1.LocalObjectReference imagePullPolicy corev1.PullPolicy serviceAccountName string + k8sClient client.Client } -func New(cr *api.PerconaXtraDBCluster) *Backup { +func New(cr *api.PerconaXtraDBCluster, cl client.Client) *Backup { return &Backup{ cluster: cr.Name, namespace: cr.Namespace, @@ -23,5 +25,6 @@ func New(cr *api.PerconaXtraDBCluster) *Backup { imagePullSecrets: cr.Spec.Backup.ImagePullSecrets, imagePullPolicy: cr.Spec.Backup.ImagePullPolicy, serviceAccountName: cr.Spec.Backup.ServiceAccountName, + k8sClient: cl, } } diff --git a/pkg/pxc/backup/job.go b/pkg/pxc/backup/job.go index af41e7df4b..fc326e2336 100644 --- a/pkg/pxc/backup/job.go +++ b/pkg/pxc/backup/job.go @@ -2,6 +2,7 @@ package backup import ( "context" + "fmt" "path" "strconv" @@ -12,6 +13,7 @@ import ( api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" "github.com/percona/percona-xtradb-cluster-operator/pkg/features" + "github.com/percona/percona-xtradb-cluster-operator/pkg/k8s" "github.com/percona/percona-xtradb-cluster-operator/pkg/naming" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc" 
"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" @@ -38,7 +40,12 @@ func (*Backup) Job(cr *api.PerconaXtraDBClusterBackup, cluster *api.PerconaXtraD } } -func (bcp *Backup) JobSpecXtrabackup(spec api.PXCBackupSpec, cluster *api.PerconaXtraDBCluster, job *batchv1.Job, initImage string) (batchv1.JobSpec, error) { +func (bcp *Backup) JobSpecXtrabackup( + spec api.PXCBackupSpec, + cluster *api.PerconaXtraDBCluster, + job *batchv1.Job, + initImage string, +) (batchv1.JobSpec, error) { var volumeMounts []corev1.VolumeMount var volumes []corev1.Volume volumes = append(volumes, @@ -61,6 +68,11 @@ func (bcp *Backup) JobSpecXtrabackup(spec api.PXCBackupSpec, cluster *api.Percon var initContainers []corev1.Container initContainers = append(initContainers, statefulset.BackupInitContainer(cluster, initImage, storage.ContainerSecurityContext)) + envs, err := bcp.xtrabackupJobEnvVars(cluster) + if err != nil { + return batchv1.JobSpec{}, fmt.Errorf("failed to get xtrabackup job env vars: %w", err) + } + container := corev1.Container{ Name: "xtrabackup", Image: bcp.image, @@ -69,6 +81,7 @@ func (bcp *Backup) JobSpecXtrabackup(spec api.PXCBackupSpec, cluster *api.Percon Command: []string{"/opt/percona/xtrabackup-run-backup"}, Resources: storage.Resources, VolumeMounts: volumeMounts, + Env: envs, } return batchv1.JobSpec{ ActiveDeadlineSeconds: spec.ActiveDeadlineSeconds, @@ -97,6 +110,20 @@ func (bcp *Backup) JobSpecXtrabackup(spec api.PXCBackupSpec, cluster *api.Percon }, nil } +func (bcp *Backup) xtrabackupJobEnvVars(cluster *api.PerconaXtraDBCluster) ([]corev1.EnvVar, error) { + primary, err := k8s.GetPrimaryPod(context.Background(), bcp.k8sClient, cluster) + if err != nil { + return nil, errors.Wrap(err, "failed to get primary pod") + } + envs := []corev1.EnvVar{ + { + Name: "HOST", + Value: primary, + }, + } + return envs, nil +} + func (bcp *Backup) JobSpec(spec api.PXCBackupSpec, cluster *api.PerconaXtraDBCluster, job *batchv1.Job, initImage string) 
(batchv1.JobSpec, error) { manualSelector := true backoffLimit := int32(10) From 354993f6d124d9f63cd2436b83ec96be55bd945f Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Wed, 26 Nov 2025 15:00:21 +0530 Subject: [PATCH 16/77] add logging Signed-off-by: Mayank Shah --- cmd/xtrabackup/run-backup/main.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/xtrabackup/run-backup/main.go b/cmd/xtrabackup/run-backup/main.go index ce7c2f747c..e70ac9b77f 100644 --- a/cmd/xtrabackup/run-backup/main.go +++ b/cmd/xtrabackup/run-backup/main.go @@ -28,12 +28,15 @@ func main() { log.Fatalf("HOST environment variable is not set") } - conn, err := grpc.NewClient(fmt.Sprintf("%s:%d", serverHost, xbscserver.DefaultPort), grpc.WithTransportCredentials(insecure.NewCredentials())) + connUrl := fmt.Sprintf("%s:%d", serverHost, xbscserver.DefaultPort) + conn, err := grpc.NewClient(connUrl, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { - log.Fatal("Failed to connect to server: %w", err) + log.Fatalf("Failed to connect to server: %v", err) } defer conn.Close() + log.Printf("Created connection to server at %s", connUrl) + client := xbscapi.NewXtrabackupServiceClient(conn) stream, err := client.CreateBackup(context.Background(), req) From 37b7e6a660bc8b4558642a745a8216805c1c7eb8 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Wed, 26 Nov 2025 15:00:31 +0530 Subject: [PATCH 17/77] fix job Signed-off-by: Mayank Shah --- pkg/pxc/backup/job.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/pxc/backup/job.go b/pkg/pxc/backup/job.go index fc326e2336..89b9deaf41 100644 --- a/pkg/pxc/backup/job.go +++ b/pkg/pxc/backup/job.go @@ -89,10 +89,16 @@ func (bcp *Backup) JobSpecXtrabackup( MatchLabels: job.Labels, }, Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: job.Labels, + Annotations: storage.Annotations, + }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ container, }, + + RestartPolicy: 
corev1.RestartPolicyNever, Volumes: volumes, InitContainers: initContainers, SecurityContext: storage.PodSecurityContext, From 1fa919913082f7bd2aba86b4be3e65a13a41ed52 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Wed, 26 Nov 2025 15:37:34 +0530 Subject: [PATCH 18/77] fix pod dns Signed-off-by: Mayank Shah --- pkg/k8s/cluster.go | 25 ++++++++++++++++++++++++- pkg/pxc/backup/job.go | 17 ++++++++++++----- 2 files changed, 36 insertions(+), 6 deletions(-) diff --git a/pkg/k8s/cluster.go b/pkg/k8s/cluster.go index 5edef7883d..8555967340 100644 --- a/pkg/k8s/cluster.go +++ b/pkg/k8s/cluster.go @@ -147,9 +147,32 @@ func PauseClusterWithWait(ctx context.Context, cl client.Client, cr *api.Percona return nil } +func GetPrimaryPodDNSName(ctx context.Context, cl client.Client, cr *api.PerconaXtraDBCluster) (string, error) { + primary, err := GetPrimaryPod(ctx, cl, cr) + if err != nil { + return "", errors.Wrap(err, "get primary pod") + } + pxcSet := statefulset.NewNode(cr) + podList := corev1.PodList{} + if err := cl.List(ctx, &podList, &client.ListOptions{ + Namespace: cr.Namespace, + LabelSelector: labels.SelectorFromSet(pxcSet.Labels()), + }); err != nil { + return "", errors.Wrap(err, "get pod list") + } + pxcSts := pxcSet.StatefulSet() + for _, pod := range podList.Items { + if pod.Status.PodIP == primary || pod.Name == primary { + primary = fmt.Sprintf("%s.%s.%s", pod.Name, pxcSts.GetName(), pxcSts.GetNamespace()) + break + } + } + return primary, nil +} + var NoProxyDetectedError = errors.New("can't detect enabled proxy, please enable HAProxy or ProxySQL") -// GetPrimaryPod returns the IP/host of the primary pod for the given cluster +// GetPrimaryPod returns the primary pod func GetPrimaryPod( ctx context.Context, cl client.Client, diff --git a/pkg/pxc/backup/job.go b/pkg/pxc/backup/job.go index 89b9deaf41..7645a82f8b 100644 --- a/pkg/pxc/backup/job.go +++ b/pkg/pxc/backup/job.go @@ -68,7 +68,7 @@ func (bcp *Backup) JobSpecXtrabackup( var initContainers 
[]corev1.Container initContainers = append(initContainers, statefulset.BackupInitContainer(cluster, initImage, storage.ContainerSecurityContext)) - envs, err := bcp.xtrabackupJobEnvVars(cluster) + envs, err := bcp.xtrabackupJobEnvVars(cluster, storage) if err != nil { return batchv1.JobSpec{}, fmt.Errorf("failed to get xtrabackup job env vars: %w", err) } @@ -83,8 +83,11 @@ func (bcp *Backup) JobSpecXtrabackup( VolumeMounts: volumeMounts, Env: envs, } + + manualSelector := true return batchv1.JobSpec{ ActiveDeadlineSeconds: spec.ActiveDeadlineSeconds, + ManualSelector: &manualSelector, Selector: &metav1.LabelSelector{ MatchLabels: job.Labels, }, @@ -116,15 +119,19 @@ func (bcp *Backup) JobSpecXtrabackup( }, nil } -func (bcp *Backup) xtrabackupJobEnvVars(cluster *api.PerconaXtraDBCluster) ([]corev1.EnvVar, error) { - primary, err := k8s.GetPrimaryPod(context.Background(), bcp.k8sClient, cluster) +func (bcp *Backup) xtrabackupJobEnvVars(cluster *api.PerconaXtraDBCluster, storage *api.BackupStorageSpec) ([]corev1.EnvVar, error) { + host, err := k8s.GetPrimaryPodDNSName(context.Background(), bcp.k8sClient, cluster) if err != nil { - return nil, errors.Wrap(err, "failed to get primary pod") + return nil, errors.Wrap(err, "failed to get primary pod host") } envs := []corev1.EnvVar{ { Name: "HOST", - Value: primary, + Value: host, + }, + { + Name: "STORAGE_TYPE", + Value: string(storage.Type), }, } return envs, nil From 4a8863bbdb22563d1c279c14f1dfbab88df58454 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Wed, 26 Nov 2025 17:07:36 +0530 Subject: [PATCH 19/77] implement CreateBackup handler Signed-off-by: Mayank Shah --- cmd/xtrabackup/run-backup/main.go | 21 ++ cmd/xtrabackup/server/main.go | 7 +- pkg/apis/pxc/v1/pxc_backup_types.go | 5 + pkg/controller/pxcbackup/controller.go | 10 +- pkg/pxc/app/app.go | 1 + pkg/pxc/app/statefulset/node.go | 10 + pkg/pxc/backup/job.go | 5 + pkg/pxc/backup/storage/options.go | 25 +++ pkg/pxc/statefulset.go | 10 + 
pkg/xtrabackup/server/app.go | 28 ++- pkg/xtrabackup/server/create.go | 294 +++++++++++++++++++++++++ pkg/xtrabackup/server/status.go | 40 ++++ pkg/xtrabackup/xtrabackup.go | 75 +++++++ 13 files changed, 519 insertions(+), 12 deletions(-) create mode 100644 pkg/xtrabackup/server/create.go create mode 100644 pkg/xtrabackup/server/status.go create mode 100644 pkg/xtrabackup/xtrabackup.go diff --git a/cmd/xtrabackup/run-backup/main.go b/cmd/xtrabackup/run-backup/main.go index e70ac9b77f..dba7feb3c0 100644 --- a/cmd/xtrabackup/run-backup/main.go +++ b/cmd/xtrabackup/run-backup/main.go @@ -2,6 +2,7 @@ package main import ( "context" + "encoding/json" "fmt" "io" "log" @@ -46,6 +47,8 @@ func main() { } log.Fatal("Failed to create backup: %w", err) } + + log.Println("Backup requested") for { _, err := stream.Recv() if err == io.EOF { @@ -63,7 +66,19 @@ func getRequestObject() *xbscapi.CreateBackupRequest { BackupConfig: &api.BackupConfig{}, } + containerOptions := &xbscapi.ContainerOptions{} + if opts := os.Getenv("CONTAINER_OPTIONS"); opts != "" { + err := json.Unmarshal([]byte(opts), containerOptions) + if err != nil { + log.Fatalf("Failed to unmarshal container options: %v", err) + } + } + req.BackupName = os.Getenv("BACKUP_NAME") + req.BackupConfig.Destination = os.Getenv("BACKUP_DEST") + req.BackupConfig.VerifyTls = os.Getenv("VERIFY_TLS") == "true" + req.BackupConfig.ContainerOptions = containerOptions + storageType := os.Getenv("STORAGE_TYPE") switch storageType { case "s3": @@ -75,6 +90,12 @@ func getRequestObject() *xbscapi.CreateBackupRequest { default: log.Fatalf("Invalid storage type: %s", storageType) } + + reqJson, err := json.Marshal(req) + if err != nil { + log.Fatalf("Failed to marshal request: %v", err) + } + log.Printf("Request=%s", string(reqJson)) return req } diff --git a/cmd/xtrabackup/server/main.go b/cmd/xtrabackup/server/main.go index 6b8335a505..f750dbd0fc 100644 --- a/cmd/xtrabackup/server/main.go +++ b/cmd/xtrabackup/server/main.go @@ -19,7 
+19,12 @@ func main() { var serverOptions []grpc.ServerOption grpcServ := grpc.NewServer(serverOptions...) - xbsidecarapi.RegisterXtrabackupServiceServer(grpcServ, xbsidecarserver.New()) + + app, err := xbsidecarserver.New() + if err != nil { + log.Fatalf("failed to create server: %v", err) + } + xbsidecarapi.RegisterXtrabackupServiceServer(grpcServ, app) log.Printf("server listening at %v", lis.Addr()) if err := grpcServ.Serve(lis); err != nil { diff --git a/pkg/apis/pxc/v1/pxc_backup_types.go b/pkg/apis/pxc/v1/pxc_backup_types.go index 67b25917c1..936a05131a 100644 --- a/pkg/apis/pxc/v1/pxc_backup_types.go +++ b/pkg/apis/pxc/v1/pxc_backup_types.go @@ -111,6 +111,11 @@ func (dest *PXCBackupDestination) StorageTypePrefix() string { return "" } +func (dest *PXCBackupDestination) PathWithoutBucket() string { + _, prefix := dest.BucketAndPrefix() + return path.Join(prefix, dest.BackupName()) +} + func (dest *PXCBackupDestination) BucketAndPrefix() (string, string) { d := strings.TrimPrefix(dest.String(), dest.StorageTypePrefix()) bucket, left, _ := strings.Cut(d, "/") diff --git a/pkg/controller/pxcbackup/controller.go b/pkg/controller/pxcbackup/controller.go index 6355e39b95..663389d2b2 100644 --- a/pkg/controller/pxcbackup/controller.go +++ b/pkg/controller/pxcbackup/controller.go @@ -292,7 +292,8 @@ func (r *ReconcilePerconaXtraDBClusterBackup) createBackupJob( return nil, errors.Wrap(err, "failed to get initImage") } - if features.Enabled(ctx, features.BackupXtrabackup) { + xtrabackupEnabled := features.Enabled(ctx, features.BackupXtrabackup) + if xtrabackupEnabled { job.Spec, err = bcp.JobSpecXtrabackup(cr.Spec, cluster, job, initImage) if err != nil { return nil, errors.Wrap(err, "can't create job spec for xtrabackup") @@ -354,6 +355,13 @@ func (r *ReconcilePerconaXtraDBClusterBackup) createBackupJob( } } + if xtrabackupEnabled { + job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "BACKUP_DEST", + 
Value: cr.Status.Destination.PathWithoutBucket(), + }) + } + // Set PerconaXtraDBClusterBackup instance as the owner and controller if err := k8s.SetControllerReference(cr, job, r.scheme); err != nil { return nil, errors.Wrap(err, "job/setControllerReference") diff --git a/pkg/pxc/app/app.go b/pkg/pxc/app/app.go index 7c7e03e540..c59ba82b1a 100644 --- a/pkg/pxc/app/app.go +++ b/pkg/pxc/app/app.go @@ -8,4 +8,5 @@ const ( const ( BinVolumeMountPath = "/opt/percona" + BackupLogDir = "/var/log/xtrabackup" ) diff --git a/pkg/pxc/app/statefulset/node.go b/pkg/pxc/app/statefulset/node.go index f2f163f642..3257e1b9d6 100644 --- a/pkg/pxc/app/statefulset/node.go +++ b/pkg/pxc/app/statefulset/node.go @@ -392,6 +392,12 @@ func (c *Node) XtrabackupContainer(ctx context.Context, cr *api.PerconaXtraDBClu }, }, }, + { + Name: "XTRABACKUP_USER_PASS", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: app.SecretKeySelector(cr.Spec.SecretsName, users.Xtrabackup), + }, + }, }, Command: []string{"/var/lib/mysql/xtrabackup-server-sidecar"}, Ports: []corev1.ContainerPort{ @@ -405,6 +411,10 @@ func (c *Node) XtrabackupContainer(ctx context.Context, cr *api.PerconaXtraDBClu Name: app.DataVolumeName, MountPath: "/var/lib/mysql", }, + { + Name: "backup-logs", + MountPath: app.BackupLogDir, + }, }, // TODO: make this configurable from CR Resources: corev1.ResourceRequirements{ diff --git a/pkg/pxc/backup/job.go b/pkg/pxc/backup/job.go index 7645a82f8b..ac8aaeff01 100644 --- a/pkg/pxc/backup/job.go +++ b/pkg/pxc/backup/job.go @@ -10,6 +10,7 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" "github.com/percona/percona-xtradb-cluster-operator/pkg/features" @@ -133,6 +134,10 @@ func (bcp *Backup) xtrabackupJobEnvVars(cluster *api.PerconaXtraDBCluster, stora Name: "STORAGE_TYPE", Value: string(storage.Type), }, + { + Name: "VERIFY_TLS", + 
Value: fmt.Sprintf("%t", ptr.Deref(storage.VerifyTLS, true)), + }, } return envs, nil } diff --git a/pkg/pxc/backup/storage/options.go b/pkg/pxc/backup/storage/options.go index 9f3c24da13..d1c3706af8 100644 --- a/pkg/pxc/backup/storage/options.go +++ b/pkg/pxc/backup/storage/options.go @@ -4,6 +4,7 @@ import ( "context" api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" + xbscapi "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -41,6 +42,30 @@ func GetOptionsFromBackup(ctx context.Context, cl client.Client, cluster *api.Pe } } +func GetOptionsFromBackupConfig(cfg *xbscapi.BackupConfig) (Options, error) { + switch cfg.Type { + case xbscapi.BackupStorageType_S3: + return &S3Options{ + Endpoint: cfg.S3.EndpointUrl, + AccessKeyID: cfg.S3.AccessKey, + SecretAccessKey: cfg.S3.SecretKey, + BucketName: cfg.S3.Bucket, + Region: cfg.S3.Region, + VerifyTLS: cfg.VerifyTls, + }, nil + case xbscapi.BackupStorageType_AZURE: + return &AzureOptions{ + StorageAccount: cfg.Azure.StorageAccount, + AccessKey: cfg.Azure.AccessKey, + Endpoint: cfg.Azure.EndpointUrl, + Container: cfg.Azure.ContainerName, + }, nil + default: + return nil, errors.Errorf("unknown storage type %s", cfg.Type) + } + +} + func getAzureOptions( ctx context.Context, cl client.Client, diff --git a/pkg/pxc/statefulset.go b/pkg/pxc/statefulset.go index 87e4e38b00..dc86ef3ef5 100644 --- a/pkg/pxc/statefulset.go +++ b/pkg/pxc/statefulset.go @@ -13,6 +13,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" + "github.com/percona/percona-xtradb-cluster-operator/pkg/features" "github.com/percona/percona-xtradb-cluster-operator/pkg/naming" ) @@ -59,6 +60,15 @@ func StatefulSet( pod.Volumes = sfsVolume.Volumes } + if features.Enabled(ctx, features.BackupXtrabackup) { + pod.Volumes = append(pod.Volumes, corev1.Volume{ 
+ Name: "backup-logs", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }) + } + appC, err := sfs.AppContainer(podSpec, secrets, cr, pod.Volumes) if err != nil { return nil, errors.Wrap(err, "app container") diff --git a/pkg/xtrabackup/server/app.go b/pkg/xtrabackup/server/app.go index ef0c7570e9..390fa3764c 100644 --- a/pkg/xtrabackup/server/app.go +++ b/pkg/xtrabackup/server/app.go @@ -2,8 +2,9 @@ package server import ( "context" - "time" + "os" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/backup/storage" "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -14,26 +15,33 @@ const DefaultPort = 6450 type appServer struct { api.UnimplementedXtrabackupServiceServer + + backupStatus backupStatus + namespace string + newStorageFunc storage.NewClientFunc + deleteBackupFunc func(ctx context.Context, cfg *api.BackupConfig, backupName string) error } var _ api.XtrabackupServiceServer = (*appServer)(nil) // New returns a new app server. 
-func New() api.XtrabackupServiceServer { - return &appServer{} +func New() (api.XtrabackupServiceServer, error) { + namespace, ok := os.LookupEnv("POD_NAMESPACE") + if !ok || namespace == "" { + return nil, status.Errorf(codes.InvalidArgument, "POD_NAMESPACE environment variable is not set") + } + return &appServer{ + namespace: namespace, + backupStatus: backupStatus{}, + newStorageFunc: storage.NewClient, + deleteBackupFunc: deleteBackup, + }, nil } func (s *appServer) GetCurrentBackupConfig(ctx context.Context, req *api.GetCurrentBackupConfigRequest) (*api.BackupConfig, error) { return nil, status.Errorf(codes.Unimplemented, "method GetCurrentBackupConfig not implemented") } -func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.XtrabackupService_CreateBackupServer) error { - // do some work, then send message - time.Sleep(120 * time.Second) - stream.Send(&api.CreateBackupResponse{}) - return nil -} - func (s *appServer) DeleteBackup(ctx context.Context, req *api.DeleteBackupRequest) (*api.DeleteBackupResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteBackup not implemented") } diff --git a/pkg/xtrabackup/server/create.go b/pkg/xtrabackup/server/create.go new file mode 100644 index 0000000000..3a02738caf --- /dev/null +++ b/pkg/xtrabackup/server/create.go @@ -0,0 +1,294 @@ +package server + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/backup/storage" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/users" + xb "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup" + "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + logf "sigs.k8s.io/controller-runtime/pkg/log" 
+) + +func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.XtrabackupService_CreateBackupServer) error { + log := logf.Log.WithName("xtrabackup-server").WithName("CreateBackup") + + if !s.backupStatus.tryRunBackup() { + log.Info("backup is already running") + return status.Errorf(codes.FailedPrecondition, "backup is already running") + } + defer s.backupStatus.doneBackup() + + log = log.WithValues("namespace", s.namespace, "name", req.BackupName) + + s.backupStatus.setBackupConfig(*req.BackupConfig) + defer s.backupStatus.removeBackupConfig() + + ctx := stream.Context() + + log.Info("Checking if backup exists") + exists, err := s.backupExists(ctx, req.BackupConfig) + if err != nil { + return errors.Wrap(err, "check if backup exists") + } + if exists { + log.Info("Backup already exists, deleting") + if err := s.deleteBackupFunc(ctx, req.BackupConfig, req.BackupName); err != nil { + log.Error(err, "failed to delete backup") + return errors.Wrap(err, "delete backup") + } + } + + backupUser := users.Xtrabackup + backupPass, err := getUserPassword() + if err != nil { + log.Error(err, "failed to get backup user password") + return errors.Wrap(err, "get backup user password") + } + + g, gCtx := errgroup.WithContext(ctx) + + xtrabackup := exec.CommandContext(gCtx, "xtrabackup", xtrabackupArgs(string(backupUser), backupPass, req.BackupConfig)...) 
+ xtrabackup.Env = envs(req.BackupConfig) + + xbOut, err := xtrabackup.StdoutPipe() + if err != nil { + log.Error(err, "xtrabackup stdout pipe failed") + return errors.Wrap(err, "xtrabackup stdout pipe failed") + } + defer xbOut.Close() //nolint:errcheck + + xbErr, err := xtrabackup.StderrPipe() + if err != nil { + log.Error(err, "xtrabackup stderr pipe failed") + return errors.Wrap(err, "xtrabackup stderr pipe failed") + } + defer xbErr.Close() //nolint:errcheck + + backupLog, err := os.Create(filepath.Join(app.BackupLogDir, req.BackupName+".log")) + if err != nil { + log.Error(err, "failed to create log file") + return errors.Wrap(err, "failed to create log file") + } + defer backupLog.Close() //nolint:errcheck + logWriter := io.MultiWriter(backupLog, os.Stderr) + + xbcloud := exec.CommandContext(gCtx, "xbcloud", xb.XBCloudArgs(xb.XBCloudActionPut, req.BackupConfig)...) + xbcloud.Env = envs(req.BackupConfig) + xbcloud.Stdin = xbOut + + xbcloudErr, err := xbcloud.StderrPipe() + if err != nil { + log.Error(err, "xbcloud stderr pipe failed") + return errors.Wrap(err, "xbcloud stderr pipe failed") + } + defer xbcloudErr.Close() //nolint:errcheck + + log.Info( + "Backup starting", + "destination", req.BackupConfig.Destination, + "storage", req.BackupConfig.Type, + "xtrabackupCmd", sanitizeCmd(xtrabackup), + "xbcloudCmd", sanitizeCmd(xbcloud), + ) + + g.Go(func() error { + if err := xbcloud.Start(); err != nil { + log.Error(err, "failed to start xbcloud") + return err + } + + if _, err := io.Copy(logWriter, xbcloudErr); err != nil { + log.Error(err, "failed to copy xbcloud stderr") + return err + } + + if err := xbcloud.Wait(); err != nil { + log.Error(err, "failed waiting for xbcloud to finish") + return err + } + return nil + }) + + g.Go(func() error { + if err := xtrabackup.Start(); err != nil { + log.Error(err, "failed to start xtrabackup command") + return err + } + + if _, err := io.Copy(logWriter, xbErr); err != nil { + log.Error(err, "failed to copy xtrabackup 
stderr") + return err + } + + if err := xtrabackup.Wait(); err != nil { + log.Error(err, "failed to wait for xtrabackup to finish") + return err + } + return nil + }) + + if err := g.Wait(); err != nil { + log.Error(err, "backup failed") + return errors.Wrap(err, "backup failed") + } + if err := s.checkBackupMD5Size(ctx, req.BackupConfig); err != nil { + log.Error(err, "check backup md5 file size") + return errors.Wrap(err, "check backup md5 file size") + } + log.Info("Backup finished successfully", "destination", req.BackupConfig.Destination, "storage", req.BackupConfig.Type) + + return nil +} + +func (s *appServer) checkBackupMD5Size(ctx context.Context, cfg *api.BackupConfig) error { + // xbcloud doesn't create md5 file for azure + if cfg.Type == api.BackupStorageType_AZURE { + return nil + } + + opts, err := storage.GetOptionsFromBackupConfig(cfg) + if err != nil { + return errors.Wrap(err, "get options from backup config") + } + storageClient, err := s.newStorageFunc(ctx, opts) + if err != nil { + return errors.Wrap(err, "new storage") + } + r, err := storageClient.GetObject(ctx, cfg.Destination+".md5") + if err != nil { + return errors.Wrap(err, "get object") + } + defer r.Close() //nolint:errcheck + data, err := io.ReadAll(r) + if err != nil { + return errors.Wrap(err, "read all") + } + + // Q: what value we should use here? 
+ // size of the `demand-backup` test md5 file is 4575 + minSize := 3000 + if len(data) < minSize { + return errors.Errorf("backup was finished unsuccessful: small md5 size: %d: expected to be >= %d", len(data), minSize) + } + return nil +} + +func getUserPassword() (string, error) { + password, ok := os.LookupEnv("XTRABACKUP_USER_PASS") + if !ok { + return "", errors.New("XTRABACKUP_USER_PASS environment variable is not set") + } + return password, nil +} + +func (s *appServer) backupExists(ctx context.Context, cfg *api.BackupConfig) (bool, error) { + opts, err := storage.GetOptionsFromBackupConfig(cfg) + if err != nil { + return false, errors.Wrap(err, "get options from backup config") + } + storage, err := s.newStorageFunc(ctx, opts) + if err != nil { + return false, errors.Wrap(err, "new storage") + } + objects, err := storage.ListObjects(ctx, cfg.Destination+"/") + if err != nil { + return false, errors.Wrap(err, "list objects") + } + if len(objects) == 0 { + return false, nil + } + return true, nil +} + +func deleteBackup(ctx context.Context, cfg *api.BackupConfig, backupName string) error { + log := logf.Log.WithName("deleteBackup") + + logWriter := io.Writer(os.Stderr) + if backupName != "" { + backupLog, err := os.OpenFile( + filepath.Join(app.BackupLogDir, backupName+".log"), + os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o666) + if err != nil { + return errors.Wrap(err, "failed to open log file") + } + defer backupLog.Close() //nolint:errcheck + logWriter = io.MultiWriter(backupLog, os.Stderr) + } + xbcloud := exec.CommandContext(ctx, "xbcloud", xb.XBCloudArgs(xb.XBCloudActionDelete, cfg)...) 
+ xbcloud.Env = envs(cfg) + xbcloudErr, err := xbcloud.StderrPipe() + if err != nil { + return errors.Wrap(err, "xbcloud stderr pipe failed") + } + defer xbcloudErr.Close() //nolint:errcheck + log.Info( + "Deleting Backup", + "destination", cfg.Destination, + "storage", cfg.Type, + "xbcloudCmd", sanitizeCmd(xbcloud), + ) + if err := xbcloud.Start(); err != nil { + return errors.Wrap(err, "failed to start xbcloud") + } + + if _, err := io.Copy(logWriter, xbcloudErr); err != nil { + return errors.Wrap(err, "failed to copy xbcloud stderr") + } + + if err := xbcloud.Wait(); err != nil { + return errors.Wrap(err, "failed waiting for xbcloud to finish") + } + return nil + +} + +func envs(cfg *api.BackupConfig) []string { + envs := os.Environ() + if cfg.ContainerOptions != nil { + for _, env := range cfg.ContainerOptions.Env { + envs = append(envs, fmt.Sprintf("%s=%s", env.Key, env.Value)) + } + } + return envs +} + +func sanitizeCmd(cmd *exec.Cmd) string { + sensitiveFlags := regexp.MustCompile("--password=(.*)|--.*-access-key=(.*)|--.*secret-key=(.*)") + c := []string{cmd.Path} + + for _, arg := range cmd.Args[1:] { + c = append(c, sensitiveFlags.ReplaceAllString(arg, "")) + } + + return strings.Join(c, " ") +} + +func xtrabackupArgs(user, pass string, conf *api.BackupConfig) []string { + args := []string{ + "--backup", + "--stream=xbstream", + "--safe-slave-backup", + "--slave-info", + "--target-dir=/backup/", + fmt.Sprintf("--user=%s", user), + fmt.Sprintf("--password=%s", pass), + } + if conf != nil && conf.ContainerOptions != nil && conf.ContainerOptions.Args != nil { + args = append(args, conf.ContainerOptions.Args.Xtrabackup...) 
+ } + return args +} diff --git a/pkg/xtrabackup/server/status.go b/pkg/xtrabackup/server/status.go new file mode 100644 index 0000000000..42129e3f65 --- /dev/null +++ b/pkg/xtrabackup/server/status.go @@ -0,0 +1,40 @@ +package server + +import ( + "sync" + "sync/atomic" + + "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" +) + +type backupStatus struct { + isRunning atomic.Bool + currentBackupConf *api.BackupConfig + mu sync.Mutex +} + +func (s *backupStatus) tryRunBackup() bool { + return s.isRunning.CompareAndSwap(false, true) +} + +func (s *backupStatus) doneBackup() { + s.isRunning.Store(false) +} + +func (s *backupStatus) setBackupConfig(conf api.BackupConfig) { + s.mu.Lock() + s.currentBackupConf = &conf + s.mu.Unlock() +} + +func (s *backupStatus) removeBackupConfig() { + s.mu.Lock() + s.currentBackupConf = nil + s.mu.Unlock() +} + +func (s *backupStatus) getBackupConfig() *api.BackupConfig { + s.mu.Lock() + defer s.mu.Unlock() + return s.currentBackupConf +} diff --git a/pkg/xtrabackup/xtrabackup.go b/pkg/xtrabackup/xtrabackup.go new file mode 100644 index 0000000000..9386e02329 --- /dev/null +++ b/pkg/xtrabackup/xtrabackup.go @@ -0,0 +1,75 @@ +package xtrabackup + +import ( + "fmt" + + "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" +) + +type XBCloudAction string + +const ( + XBCloudActionPut XBCloudAction = "put" + XBCloudActionDelete XBCloudAction = "delete" +) + +func XBCloudArgs(action XBCloudAction, conf *api.BackupConfig) []string { + args := []string{string(action), "--parallel=10", "--curl-retriable-errors=7"} + + if !conf.VerifyTls { + args = append(args, "--insecure") + } + + if conf.ContainerOptions != nil && conf.ContainerOptions.Args != nil { + args = append(args, conf.ContainerOptions.Args.Xbcloud...) 
+ } + + switch conf.Type { + case api.BackupStorageType_GCS: + args = append( + args, + []string{ + "--md5", + "--storage=google", + fmt.Sprintf("--google-bucket=%s", conf.Gcs.Bucket), + fmt.Sprintf("--google-access-key=%s", conf.Gcs.AccessKey), + fmt.Sprintf("--google-secret-key=%s", conf.Gcs.SecretKey), + }..., + ) + if len(conf.Gcs.EndpointUrl) > 0 { + args = append(args, fmt.Sprintf("--google-endpoint=%s", conf.Gcs.EndpointUrl)) + } + case api.BackupStorageType_S3: + args = append( + args, + []string{ + "--md5", + "--storage=s3", + fmt.Sprintf("--s3-bucket=%s", conf.S3.Bucket), + fmt.Sprintf("--s3-region=%s", conf.S3.Region), + fmt.Sprintf("--s3-access-key=%s", conf.S3.AccessKey), + fmt.Sprintf("--s3-secret-key=%s", conf.S3.SecretKey), + }..., + ) + if len(conf.S3.EndpointUrl) > 0 { + args = append(args, fmt.Sprintf("--s3-endpoint=%s", conf.S3.EndpointUrl)) + } + case api.BackupStorageType_AZURE: + args = append( + args, + []string{ + "--storage=azure", + fmt.Sprintf("--azure-storage-account=%s", conf.Azure.StorageAccount), + fmt.Sprintf("--azure-container-name=%s", conf.Azure.ContainerName), + fmt.Sprintf("--azure-access-key=%s", conf.Azure.AccessKey), + }..., + ) + if len(conf.Azure.EndpointUrl) > 0 { + args = append(args, fmt.Sprintf("--azure-endpoint=%s", conf.Azure.EndpointUrl)) + } + } + + args = append(args, conf.Destination) + + return args +} From 0d3451246d188d9a135abb5c6a77f9761ae4bb18 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Thu, 27 Nov 2025 20:32:01 +0530 Subject: [PATCH 20/77] add socket path Signed-off-by: Mayank Shah --- pkg/pxc/app/statefulset/node.go | 4 ++++ pkg/xtrabackup/server/create.go | 1 + 2 files changed, 5 insertions(+) diff --git a/pkg/pxc/app/statefulset/node.go b/pkg/pxc/app/statefulset/node.go index 3257e1b9d6..0e1e2f6700 100644 --- a/pkg/pxc/app/statefulset/node.go +++ b/pkg/pxc/app/statefulset/node.go @@ -415,6 +415,10 @@ func (c *Node) XtrabackupContainer(ctx context.Context, cr *api.PerconaXtraDBClu Name: 
"backup-logs", MountPath: app.BackupLogDir, }, + { + Name: "tmp", + MountPath: "/tmp", + }, }, // TODO: make this configurable from CR Resources: corev1.ResourceRequirements{ diff --git a/pkg/xtrabackup/server/create.go b/pkg/xtrabackup/server/create.go index 3a02738caf..07900bbd23 100644 --- a/pkg/xtrabackup/server/create.go +++ b/pkg/xtrabackup/server/create.go @@ -284,6 +284,7 @@ func xtrabackupArgs(user, pass string, conf *api.BackupConfig) []string { "--safe-slave-backup", "--slave-info", "--target-dir=/backup/", + "--socket=/tmp/mysql.sock", fmt.Sprintf("--user=%s", user), fmt.Sprintf("--password=%s", pass), } From 7a9520139d9b65ebe931d98f9aaa5ae2af1557e5 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 28 Nov 2025 10:20:34 +0530 Subject: [PATCH 21/77] refactoring Signed-off-by: Mayank Shah --- pkg/controller/pxcbackup/controller.go | 24 +++-- pkg/pxc/backup/backup.go | 2 - pkg/pxc/backup/job.go | 104 --------------------- pkg/xtrabackup/api/cmd.go | 124 +++++++++++++++++++++++++ pkg/xtrabackup/job.go | 115 +++++++++++++++++++++++ pkg/xtrabackup/server/create.go | 42 +-------- pkg/xtrabackup/xtrabackup.go | 75 --------------- 7 files changed, 258 insertions(+), 228 deletions(-) create mode 100644 pkg/xtrabackup/api/cmd.go create mode 100644 pkg/xtrabackup/job.go delete mode 100644 pkg/xtrabackup/xtrabackup.go diff --git a/pkg/controller/pxcbackup/controller.go b/pkg/controller/pxcbackup/controller.go index 663389d2b2..ecb36612c6 100644 --- a/pkg/controller/pxcbackup/controller.go +++ b/pkg/controller/pxcbackup/controller.go @@ -2,6 +2,7 @@ package pxcbackup import ( "context" + "fmt" "os" "reflect" "strconv" @@ -34,6 +35,7 @@ import ( "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/backup" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/backup/storage" "github.com/percona/percona-xtradb-cluster-operator/pkg/version" + "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup" ) // Add creates a new 
PerconaXtraDBClusterBackup Controller and adds it to the Manager. The Manager will set fields on the Controller @@ -293,16 +295,20 @@ func (r *ReconcilePerconaXtraDBClusterBackup) createBackupJob( } xtrabackupEnabled := features.Enabled(ctx, features.BackupXtrabackup) - if xtrabackupEnabled { - job.Spec, err = bcp.JobSpecXtrabackup(cr.Spec, cluster, job, initImage) - if err != nil { - return nil, errors.Wrap(err, "can't create job spec for xtrabackup") - } - } else { - job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, initImage) - if err != nil { - return nil, errors.Wrap(err, "can't create job spec") + getJobSpec := func() (batchv1.JobSpec, error) { + if xtrabackupEnabled { + srcNode, err := k8s.GetPrimaryPodDNSName(ctx, r.client, cluster) + if err != nil { + return batchv1.JobSpec{}, errors.Wrap(err, "failed to get primary pod dns name") + } + return xtrabackup.JobSpec(&cr.Spec, cluster, job, initImage, srcNode) } + return bcp.JobSpec(cr.Spec, cluster, job, initImage) + } + + job.Spec, err = getJobSpec() + if err != nil { + return nil, fmt.Errorf("failed to get job spec: %w (xtrabackup enabled: %t)", err, xtrabackupEnabled) } switch storage.Type { diff --git a/pkg/pxc/backup/backup.go b/pkg/pxc/backup/backup.go index b07527da05..a40ede95ba 100644 --- a/pkg/pxc/backup/backup.go +++ b/pkg/pxc/backup/backup.go @@ -14,7 +14,6 @@ type Backup struct { imagePullSecrets []corev1.LocalObjectReference imagePullPolicy corev1.PullPolicy serviceAccountName string - k8sClient client.Client } func New(cr *api.PerconaXtraDBCluster, cl client.Client) *Backup { @@ -25,6 +24,5 @@ func New(cr *api.PerconaXtraDBCluster, cl client.Client) *Backup { imagePullSecrets: cr.Spec.Backup.ImagePullSecrets, imagePullPolicy: cr.Spec.Backup.ImagePullPolicy, serviceAccountName: cr.Spec.Backup.ServiceAccountName, - k8sClient: cl, } } diff --git a/pkg/pxc/backup/job.go b/pkg/pxc/backup/job.go index ac8aaeff01..78fe7764ba 100644 --- a/pkg/pxc/backup/job.go +++ b/pkg/pxc/backup/job.go @@ -2,7 +2,6 
@@ package backup import ( "context" - "fmt" "path" "strconv" @@ -10,11 +9,9 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" "github.com/percona/percona-xtradb-cluster-operator/pkg/features" - "github.com/percona/percona-xtradb-cluster-operator/pkg/k8s" "github.com/percona/percona-xtradb-cluster-operator/pkg/naming" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" @@ -41,107 +38,6 @@ func (*Backup) Job(cr *api.PerconaXtraDBClusterBackup, cluster *api.PerconaXtraD } } -func (bcp *Backup) JobSpecXtrabackup( - spec api.PXCBackupSpec, - cluster *api.PerconaXtraDBCluster, - job *batchv1.Job, - initImage string, -) (batchv1.JobSpec, error) { - var volumeMounts []corev1.VolumeMount - var volumes []corev1.Volume - volumes = append(volumes, - corev1.Volume{ - Name: app.BinVolumeName, - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, - ) - - volumeMounts = append(volumeMounts, - corev1.VolumeMount{ - Name: app.BinVolumeName, - MountPath: app.BinVolumeMountPath, - }, - ) - - storage := cluster.Spec.Backup.Storages[spec.StorageName] - var initContainers []corev1.Container - initContainers = append(initContainers, statefulset.BackupInitContainer(cluster, initImage, storage.ContainerSecurityContext)) - - envs, err := bcp.xtrabackupJobEnvVars(cluster, storage) - if err != nil { - return batchv1.JobSpec{}, fmt.Errorf("failed to get xtrabackup job env vars: %w", err) - } - - container := corev1.Container{ - Name: "xtrabackup", - Image: bcp.image, - SecurityContext: storage.ContainerSecurityContext, - ImagePullPolicy: bcp.imagePullPolicy, - Command: []string{"/opt/percona/xtrabackup-run-backup"}, - Resources: storage.Resources, - VolumeMounts: volumeMounts, - Env: envs, - } - - manualSelector := true - 
return batchv1.JobSpec{ - ActiveDeadlineSeconds: spec.ActiveDeadlineSeconds, - ManualSelector: &manualSelector, - Selector: &metav1.LabelSelector{ - MatchLabels: job.Labels, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: job.Labels, - Annotations: storage.Annotations, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - container, - }, - - RestartPolicy: corev1.RestartPolicyNever, - Volumes: volumes, - InitContainers: initContainers, - SecurityContext: storage.PodSecurityContext, - ImagePullSecrets: bcp.imagePullSecrets, - ServiceAccountName: cluster.Spec.Backup.ServiceAccountName, - Affinity: storage.Affinity, - TopologySpreadConstraints: pxc.PodTopologySpreadConstraints(storage.TopologySpreadConstraints, job.Labels), - Tolerations: storage.Tolerations, - NodeSelector: storage.NodeSelector, - SchedulerName: storage.SchedulerName, - PriorityClassName: storage.PriorityClassName, - RuntimeClassName: storage.RuntimeClassName, - }, - }, - }, nil -} - -func (bcp *Backup) xtrabackupJobEnvVars(cluster *api.PerconaXtraDBCluster, storage *api.BackupStorageSpec) ([]corev1.EnvVar, error) { - host, err := k8s.GetPrimaryPodDNSName(context.Background(), bcp.k8sClient, cluster) - if err != nil { - return nil, errors.Wrap(err, "failed to get primary pod host") - } - envs := []corev1.EnvVar{ - { - Name: "HOST", - Value: host, - }, - { - Name: "STORAGE_TYPE", - Value: string(storage.Type), - }, - { - Name: "VERIFY_TLS", - Value: fmt.Sprintf("%t", ptr.Deref(storage.VerifyTLS, true)), - }, - } - return envs, nil -} - func (bcp *Backup) JobSpec(spec api.PXCBackupSpec, cluster *api.PerconaXtraDBCluster, job *batchv1.Job, initImage string) (batchv1.JobSpec, error) { manualSelector := true backoffLimit := int32(10) diff --git a/pkg/xtrabackup/api/cmd.go b/pkg/xtrabackup/api/cmd.go new file mode 100644 index 0000000000..9bf836f4bd --- /dev/null +++ b/pkg/xtrabackup/api/cmd.go @@ -0,0 +1,124 @@ +package api + +import ( + "context" + "fmt" + 
"io" + "os" + "os/exec" +) + +const ( + xtrabackupCmd = "xtrabackup" + xbcloudCmd = "xbcloud" +) + +type XBCloudAction string + +const ( + XBCloudActionPut XBCloudAction = "put" + XBCloudActionDelete XBCloudAction = "delete" +) + +// NewXtrabackupCmd creates a new xtrabackup command +func (cfg *BackupConfig) NewXtrabackupCmd(ctx context.Context, user, password string) *exec.Cmd { + cmd := exec.CommandContext(ctx, xtrabackupCmd, cfg.xtrabackupArgs(user, password)...) + cmd.Env = cfg.envs() + return cmd +} + +// NewXbcloudCmd creates a new xbcloud command +func (cfg *BackupConfig) NewXbcloudCmd(ctx context.Context, action XBCloudAction, in io.Reader) *exec.Cmd { + cmd := exec.CommandContext(ctx, xbcloudCmd, cfg.xbcloudArgs(action)...) + cmd.Env = cfg.envs() + cmd.Stdin = in + return cmd +} + +func (cfg *BackupConfig) xbcloudArgs(action XBCloudAction) []string { + args := []string{string(action), "--parallel=10", "--curl-retriable-errors=7"} + + if !cfg.VerifyTls { + args = append(args, "--insecure") + } + + if cfg.ContainerOptions != nil && cfg.ContainerOptions.Args != nil { + args = append(args, cfg.ContainerOptions.Args.Xbcloud...) 
+ } + + switch cfg.Type { + case BackupStorageType_GCS: + args = append( + args, + []string{ + "--md5", + "--storage=google", + fmt.Sprintf("--google-bucket=%s", cfg.Gcs.Bucket), + fmt.Sprintf("--google-access-key=%s", cfg.Gcs.AccessKey), + fmt.Sprintf("--google-secret-key=%s", cfg.Gcs.SecretKey), + }..., + ) + if len(cfg.Gcs.EndpointUrl) > 0 { + args = append(args, fmt.Sprintf("--google-endpoint=%s", cfg.Gcs.EndpointUrl)) + } + case BackupStorageType_S3: + args = append( + args, + []string{ + "--md5", + "--storage=s3", + fmt.Sprintf("--s3-bucket=%s", cfg.S3.Bucket), + fmt.Sprintf("--s3-region=%s", cfg.S3.Region), + fmt.Sprintf("--s3-access-key=%s", cfg.S3.AccessKey), + fmt.Sprintf("--s3-secret-key=%s", cfg.S3.SecretKey), + }..., + ) + if len(cfg.S3.EndpointUrl) > 0 { + args = append(args, fmt.Sprintf("--s3-endpoint=%s", cfg.S3.EndpointUrl)) + } + case BackupStorageType_AZURE: + args = append( + args, + []string{ + "--storage=azure", + fmt.Sprintf("--azure-storage-account=%s", cfg.Azure.StorageAccount), + fmt.Sprintf("--azure-container-name=%s", cfg.Azure.ContainerName), + fmt.Sprintf("--azure-access-key=%s", cfg.Azure.AccessKey), + }..., + ) + if len(cfg.Azure.EndpointUrl) > 0 { + args = append(args, fmt.Sprintf("--azure-endpoint=%s", cfg.Azure.EndpointUrl)) + } + } + + args = append(args, cfg.Destination) + + return args +} + +func (cfg *BackupConfig) xtrabackupArgs(user, pass string) []string { + args := []string{ + "--backup", + "--stream=xbstream", + "--safe-slave-backup", + "--slave-info", + "--target-dir=/backup/", + "--socket=/tmp/mysql.sock", + fmt.Sprintf("--user=%s", user), + fmt.Sprintf("--password=%s", pass), + } + if cfg != nil && cfg.ContainerOptions != nil && cfg.ContainerOptions.Args != nil { + args = append(args, cfg.ContainerOptions.Args.Xtrabackup...) 
+ } + return args +} + +func (cfg *BackupConfig) envs() []string { + envs := os.Environ() + if cfg.ContainerOptions != nil { + for _, env := range cfg.ContainerOptions.Env { + envs = append(envs, fmt.Sprintf("%s=%s", env.Key, env.Value)) + } + } + return envs +} diff --git a/pkg/xtrabackup/job.go b/pkg/xtrabackup/job.go new file mode 100644 index 0000000000..0be1bb35e6 --- /dev/null +++ b/pkg/xtrabackup/job.go @@ -0,0 +1,115 @@ +package xtrabackup + +import ( + "fmt" + + pxcv1 "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app/statefulset" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +func JobSpec( + spec *pxcv1.PXCBackupSpec, + cluster *pxcv1.PerconaXtraDBCluster, + job *batchv1.Job, + initImage string, + primaryPodHost string, +) (batchv1.JobSpec, error) { + var volumeMounts []corev1.VolumeMount + var volumes []corev1.Volume + volumes = append(volumes, + corev1.Volume{ + Name: app.BinVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + ) + + volumeMounts = append(volumeMounts, + corev1.VolumeMount{ + Name: app.BinVolumeName, + MountPath: app.BinVolumeMountPath, + }, + ) + + storage := cluster.Spec.Backup.Storages[spec.StorageName] + var initContainers []corev1.Container + initContainers = append(initContainers, statefulset.BackupInitContainer(cluster, initImage, storage.ContainerSecurityContext)) + + envs, err := xtrabackupJobEnvVars(storage, primaryPodHost) + if err != nil { + return batchv1.JobSpec{}, fmt.Errorf("failed to get xtrabackup job env vars: %w", err) + } + + container := corev1.Container{ + Name: "xtrabackup", + Image: cluster.Spec.Backup.Image, + SecurityContext: storage.ContainerSecurityContext, + 
ImagePullPolicy: cluster.Spec.Backup.ImagePullPolicy, + Command: []string{"/opt/percona/xtrabackup-run-backup"}, + Resources: storage.Resources, + VolumeMounts: volumeMounts, + Env: envs, + } + + manualSelector := true + return batchv1.JobSpec{ + ActiveDeadlineSeconds: spec.ActiveDeadlineSeconds, + ManualSelector: &manualSelector, + Selector: &metav1.LabelSelector{ + MatchLabels: job.Labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: job.Labels, + Annotations: storage.Annotations, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + container, + }, + + RestartPolicy: corev1.RestartPolicyNever, + Volumes: volumes, + InitContainers: initContainers, + SecurityContext: storage.PodSecurityContext, + ImagePullSecrets: cluster.Spec.Backup.ImagePullSecrets, + ServiceAccountName: cluster.Spec.Backup.ServiceAccountName, + Affinity: storage.Affinity, + TopologySpreadConstraints: pxc.PodTopologySpreadConstraints(storage.TopologySpreadConstraints, job.Labels), + Tolerations: storage.Tolerations, + NodeSelector: storage.NodeSelector, + SchedulerName: storage.SchedulerName, + PriorityClassName: storage.PriorityClassName, + RuntimeClassName: storage.RuntimeClassName, + }, + }, + }, nil +} + +func xtrabackupJobEnvVars( + storage *pxcv1.BackupStorageSpec, + primaryPodHost string, +) ([]corev1.EnvVar, error) { + envs := []corev1.EnvVar{ + { + Name: "HOST", + Value: primaryPodHost, + }, + { + Name: "STORAGE_TYPE", + Value: string(storage.Type), + }, + { + Name: "VERIFY_TLS", + Value: fmt.Sprintf("%t", ptr.Deref(storage.VerifyTLS, true)), + }, + } + return envs, nil +} diff --git a/pkg/xtrabackup/server/create.go b/pkg/xtrabackup/server/create.go index 07900bbd23..8da067ff5b 100644 --- a/pkg/xtrabackup/server/create.go +++ b/pkg/xtrabackup/server/create.go @@ -2,7 +2,6 @@ package server import ( "context" - "fmt" "io" "os" "os/exec" @@ -13,7 +12,6 @@ import ( "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" 
"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/backup/storage" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/users" - xb "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup" "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -61,9 +59,7 @@ func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.Xtraba g, gCtx := errgroup.WithContext(ctx) - xtrabackup := exec.CommandContext(gCtx, "xtrabackup", xtrabackupArgs(string(backupUser), backupPass, req.BackupConfig)...) - xtrabackup.Env = envs(req.BackupConfig) - + xtrabackup := req.BackupConfig.NewXtrabackupCmd(gCtx, backupUser, backupPass) xbOut, err := xtrabackup.StdoutPipe() if err != nil { log.Error(err, "xtrabackup stdout pipe failed") @@ -86,10 +82,7 @@ func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.Xtraba defer backupLog.Close() //nolint:errcheck logWriter := io.MultiWriter(backupLog, os.Stderr) - xbcloud := exec.CommandContext(gCtx, "xbcloud", xb.XBCloudArgs(xb.XBCloudActionPut, req.BackupConfig)...) - xbcloud.Env = envs(req.BackupConfig) - xbcloud.Stdin = xbOut - + xbcloud := req.BackupConfig.NewXbcloudCmd(gCtx, api.XBCloudActionPut, xbOut) xbcloudErr, err := xbcloud.StderrPipe() if err != nil { log.Error(err, "xbcloud stderr pipe failed") @@ -228,8 +221,8 @@ func deleteBackup(ctx context.Context, cfg *api.BackupConfig, backupName string) defer backupLog.Close() //nolint:errcheck logWriter = io.MultiWriter(backupLog, os.Stderr) } - xbcloud := exec.CommandContext(ctx, "xbcloud", xb.XBCloudArgs(xb.XBCloudActionDelete, cfg)...) 
- xbcloud.Env = envs(cfg) + + xbcloud := cfg.NewXbcloudCmd(ctx, api.XBCloudActionDelete, nil) xbcloudErr, err := xbcloud.StderrPipe() if err != nil { return errors.Wrap(err, "xbcloud stderr pipe failed") @@ -256,16 +249,6 @@ func deleteBackup(ctx context.Context, cfg *api.BackupConfig, backupName string) } -func envs(cfg *api.BackupConfig) []string { - envs := os.Environ() - if cfg.ContainerOptions != nil { - for _, env := range cfg.ContainerOptions.Env { - envs = append(envs, fmt.Sprintf("%s=%s", env.Key, env.Value)) - } - } - return envs -} - func sanitizeCmd(cmd *exec.Cmd) string { sensitiveFlags := regexp.MustCompile("--password=(.*)|--.*-access-key=(.*)|--.*secret-key=(.*)") c := []string{cmd.Path} @@ -276,20 +259,3 @@ func sanitizeCmd(cmd *exec.Cmd) string { return strings.Join(c, " ") } - -func xtrabackupArgs(user, pass string, conf *api.BackupConfig) []string { - args := []string{ - "--backup", - "--stream=xbstream", - "--safe-slave-backup", - "--slave-info", - "--target-dir=/backup/", - "--socket=/tmp/mysql.sock", - fmt.Sprintf("--user=%s", user), - fmt.Sprintf("--password=%s", pass), - } - if conf != nil && conf.ContainerOptions != nil && conf.ContainerOptions.Args != nil { - args = append(args, conf.ContainerOptions.Args.Xtrabackup...) 
- } - return args -} diff --git a/pkg/xtrabackup/xtrabackup.go b/pkg/xtrabackup/xtrabackup.go deleted file mode 100644 index 9386e02329..0000000000 --- a/pkg/xtrabackup/xtrabackup.go +++ /dev/null @@ -1,75 +0,0 @@ -package xtrabackup - -import ( - "fmt" - - "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" -) - -type XBCloudAction string - -const ( - XBCloudActionPut XBCloudAction = "put" - XBCloudActionDelete XBCloudAction = "delete" -) - -func XBCloudArgs(action XBCloudAction, conf *api.BackupConfig) []string { - args := []string{string(action), "--parallel=10", "--curl-retriable-errors=7"} - - if !conf.VerifyTls { - args = append(args, "--insecure") - } - - if conf.ContainerOptions != nil && conf.ContainerOptions.Args != nil { - args = append(args, conf.ContainerOptions.Args.Xbcloud...) - } - - switch conf.Type { - case api.BackupStorageType_GCS: - args = append( - args, - []string{ - "--md5", - "--storage=google", - fmt.Sprintf("--google-bucket=%s", conf.Gcs.Bucket), - fmt.Sprintf("--google-access-key=%s", conf.Gcs.AccessKey), - fmt.Sprintf("--google-secret-key=%s", conf.Gcs.SecretKey), - }..., - ) - if len(conf.Gcs.EndpointUrl) > 0 { - args = append(args, fmt.Sprintf("--google-endpoint=%s", conf.Gcs.EndpointUrl)) - } - case api.BackupStorageType_S3: - args = append( - args, - []string{ - "--md5", - "--storage=s3", - fmt.Sprintf("--s3-bucket=%s", conf.S3.Bucket), - fmt.Sprintf("--s3-region=%s", conf.S3.Region), - fmt.Sprintf("--s3-access-key=%s", conf.S3.AccessKey), - fmt.Sprintf("--s3-secret-key=%s", conf.S3.SecretKey), - }..., - ) - if len(conf.S3.EndpointUrl) > 0 { - args = append(args, fmt.Sprintf("--s3-endpoint=%s", conf.S3.EndpointUrl)) - } - case api.BackupStorageType_AZURE: - args = append( - args, - []string{ - "--storage=azure", - fmt.Sprintf("--azure-storage-account=%s", conf.Azure.StorageAccount), - fmt.Sprintf("--azure-container-name=%s", conf.Azure.ContainerName), - fmt.Sprintf("--azure-access-key=%s", conf.Azure.AccessKey), 
- }..., - ) - if len(conf.Azure.EndpointUrl) > 0 { - args = append(args, fmt.Sprintf("--azure-endpoint=%s", conf.Azure.EndpointUrl)) - } - } - - args = append(args, conf.Destination) - - return args -} From 2162bf800fe8fcca844650dd1e706a0aeb47f52b Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 28 Nov 2025 10:47:33 +0530 Subject: [PATCH 22/77] implement log streaming Signed-off-by: Mayank Shah --- cmd/xtrabackup/run-backup/main.go | 26 +++- pkg/xtrabackup/api/app.pb.go | 238 +++++++++++++++++++++--------- pkg/xtrabackup/api/app.proto | 9 ++ pkg/xtrabackup/api/app_grpc.pb.go | 41 +++++ pkg/xtrabackup/server/app.go | 2 + pkg/xtrabackup/server/logs.go | 36 +++++ 6 files changed, 281 insertions(+), 71 deletions(-) create mode 100644 pkg/xtrabackup/server/logs.go diff --git a/cmd/xtrabackup/run-backup/main.go b/cmd/xtrabackup/run-backup/main.go index dba7feb3c0..57de2eed73 100644 --- a/cmd/xtrabackup/run-backup/main.go +++ b/cmd/xtrabackup/run-backup/main.go @@ -7,6 +7,8 @@ import ( "io" "log" "os" + "os/signal" + "syscall" "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" xbscapi "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" @@ -38,9 +40,13 @@ func main() { log.Printf("Created connection to server at %s", connUrl) + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() client := xbscapi.NewXtrabackupServiceClient(conn) - stream, err := client.CreateBackup(context.Background(), req) + defer printLogs(ctx, req.BackupName, client) + + stream, err := client.CreateBackup(ctx, req) if err != nil { if status.Code(err) == codes.FailedPrecondition { log.Fatal("Backup is already running") @@ -61,6 +67,24 @@ func main() { log.Println("Backup created successfully") } +func printLogs(ctx context.Context, backupName string, client xbscapi.XtrabackupServiceClient) { + stream, err := client.GetLogs(ctx, &xbscapi.GetLogsRequest{ + BackupName: backupName, + }) + if err != nil { + 
log.Fatal("Failed to get logs: %w", err) + } + for { + chunk, err := stream.Recv() + if err == io.EOF { + break + } else if err != nil { + log.Fatal("Failed to receive log chunk: %w", err) + } + fmt.Fprint(os.Stdout, chunk.Log) + } +} + func getRequestObject() *xbscapi.CreateBackupRequest { req := &xbscapi.CreateBackupRequest{ BackupConfig: &api.BackupConfig{}, diff --git a/pkg/xtrabackup/api/app.pb.go b/pkg/xtrabackup/api/app.pb.go index be3b7c5012..32c4889d7d 100644 --- a/pkg/xtrabackup/api/app.pb.go +++ b/pkg/xtrabackup/api/app.pb.go @@ -70,6 +70,94 @@ func (BackupStorageType) EnumDescriptor() ([]byte, []int) { return file_app_proto_rawDescGZIP(), []int{0} } +type GetLogsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + BackupName string `protobuf:"bytes,1,opt,name=backup_name,json=backupName,proto3" json:"backup_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetLogsRequest) Reset() { + *x = GetLogsRequest{} + mi := &file_app_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetLogsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetLogsRequest) ProtoMessage() {} + +func (x *GetLogsRequest) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetLogsRequest.ProtoReflect.Descriptor instead. 
+func (*GetLogsRequest) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{0} +} + +func (x *GetLogsRequest) GetBackupName() string { + if x != nil { + return x.BackupName + } + return "" +} + +type LogChunk struct { + state protoimpl.MessageState `protogen:"open.v1"` + Log string `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LogChunk) Reset() { + *x = LogChunk{} + mi := &file_app_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LogChunk) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogChunk) ProtoMessage() {} + +func (x *LogChunk) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogChunk.ProtoReflect.Descriptor instead. 
+func (*LogChunk) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{1} +} + +func (x *LogChunk) GetLog() string { + if x != nil { + return x.Log + } + return "" +} + type GetCurrentBackupConfigRequest struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields @@ -78,7 +166,7 @@ type GetCurrentBackupConfigRequest struct { func (x *GetCurrentBackupConfigRequest) Reset() { *x = GetCurrentBackupConfigRequest{} - mi := &file_app_proto_msgTypes[0] + mi := &file_app_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -90,7 +178,7 @@ func (x *GetCurrentBackupConfigRequest) String() string { func (*GetCurrentBackupConfigRequest) ProtoMessage() {} func (x *GetCurrentBackupConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_app_proto_msgTypes[0] + mi := &file_app_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -103,7 +191,7 @@ func (x *GetCurrentBackupConfigRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCurrentBackupConfigRequest.ProtoReflect.Descriptor instead. 
func (*GetCurrentBackupConfigRequest) Descriptor() ([]byte, []int) { - return file_app_proto_rawDescGZIP(), []int{0} + return file_app_proto_rawDescGZIP(), []int{2} } type CreateBackupRequest struct { @@ -116,7 +204,7 @@ type CreateBackupRequest struct { func (x *CreateBackupRequest) Reset() { *x = CreateBackupRequest{} - mi := &file_app_proto_msgTypes[1] + mi := &file_app_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -128,7 +216,7 @@ func (x *CreateBackupRequest) String() string { func (*CreateBackupRequest) ProtoMessage() {} func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_app_proto_msgTypes[1] + mi := &file_app_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -141,7 +229,7 @@ func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateBackupRequest.ProtoReflect.Descriptor instead. func (*CreateBackupRequest) Descriptor() ([]byte, []int) { - return file_app_proto_rawDescGZIP(), []int{1} + return file_app_proto_rawDescGZIP(), []int{3} } func (x *CreateBackupRequest) GetBackupName() string { @@ -166,7 +254,7 @@ type CreateBackupResponse struct { func (x *CreateBackupResponse) Reset() { *x = CreateBackupResponse{} - mi := &file_app_proto_msgTypes[2] + mi := &file_app_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -178,7 +266,7 @@ func (x *CreateBackupResponse) String() string { func (*CreateBackupResponse) ProtoMessage() {} func (x *CreateBackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_app_proto_msgTypes[2] + mi := &file_app_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -191,7 +279,7 @@ func (x *CreateBackupResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateBackupResponse.ProtoReflect.Descriptor 
instead. func (*CreateBackupResponse) Descriptor() ([]byte, []int) { - return file_app_proto_rawDescGZIP(), []int{2} + return file_app_proto_rawDescGZIP(), []int{4} } type DeleteBackupRequest struct { @@ -204,7 +292,7 @@ type DeleteBackupRequest struct { func (x *DeleteBackupRequest) Reset() { *x = DeleteBackupRequest{} - mi := &file_app_proto_msgTypes[3] + mi := &file_app_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -216,7 +304,7 @@ func (x *DeleteBackupRequest) String() string { func (*DeleteBackupRequest) ProtoMessage() {} func (x *DeleteBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_app_proto_msgTypes[3] + mi := &file_app_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -229,7 +317,7 @@ func (x *DeleteBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteBackupRequest.ProtoReflect.Descriptor instead. func (*DeleteBackupRequest) Descriptor() ([]byte, []int) { - return file_app_proto_rawDescGZIP(), []int{3} + return file_app_proto_rawDescGZIP(), []int{5} } func (x *DeleteBackupRequest) GetBackupName() string { @@ -254,7 +342,7 @@ type DeleteBackupResponse struct { func (x *DeleteBackupResponse) Reset() { *x = DeleteBackupResponse{} - mi := &file_app_proto_msgTypes[4] + mi := &file_app_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -266,7 +354,7 @@ func (x *DeleteBackupResponse) String() string { func (*DeleteBackupResponse) ProtoMessage() {} func (x *DeleteBackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_app_proto_msgTypes[4] + mi := &file_app_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -279,7 +367,7 @@ func (x *DeleteBackupResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteBackupResponse.ProtoReflect.Descriptor 
instead. func (*DeleteBackupResponse) Descriptor() ([]byte, []int) { - return file_app_proto_rawDescGZIP(), []int{4} + return file_app_proto_rawDescGZIP(), []int{6} } type BackupConfig struct { @@ -297,7 +385,7 @@ type BackupConfig struct { func (x *BackupConfig) Reset() { *x = BackupConfig{} - mi := &file_app_proto_msgTypes[5] + mi := &file_app_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -309,7 +397,7 @@ func (x *BackupConfig) String() string { func (*BackupConfig) ProtoMessage() {} func (x *BackupConfig) ProtoReflect() protoreflect.Message { - mi := &file_app_proto_msgTypes[5] + mi := &file_app_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -322,7 +410,7 @@ func (x *BackupConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupConfig.ProtoReflect.Descriptor instead. func (*BackupConfig) Descriptor() ([]byte, []int) { - return file_app_proto_rawDescGZIP(), []int{5} + return file_app_proto_rawDescGZIP(), []int{7} } func (x *BackupConfig) GetDestination() string { @@ -388,7 +476,7 @@ type S3Config struct { func (x *S3Config) Reset() { *x = S3Config{} - mi := &file_app_proto_msgTypes[6] + mi := &file_app_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -400,7 +488,7 @@ func (x *S3Config) String() string { func (*S3Config) ProtoMessage() {} func (x *S3Config) ProtoReflect() protoreflect.Message { - mi := &file_app_proto_msgTypes[6] + mi := &file_app_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -413,7 +501,7 @@ func (x *S3Config) ProtoReflect() protoreflect.Message { // Deprecated: Use S3Config.ProtoReflect.Descriptor instead. 
func (*S3Config) Descriptor() ([]byte, []int) { - return file_app_proto_rawDescGZIP(), []int{6} + return file_app_proto_rawDescGZIP(), []int{8} } func (x *S3Config) GetBucket() string { @@ -471,7 +559,7 @@ type GCSConfig struct { func (x *GCSConfig) Reset() { *x = GCSConfig{} - mi := &file_app_proto_msgTypes[7] + mi := &file_app_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -483,7 +571,7 @@ func (x *GCSConfig) String() string { func (*GCSConfig) ProtoMessage() {} func (x *GCSConfig) ProtoReflect() protoreflect.Message { - mi := &file_app_proto_msgTypes[7] + mi := &file_app_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -496,7 +584,7 @@ func (x *GCSConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use GCSConfig.ProtoReflect.Descriptor instead. func (*GCSConfig) Descriptor() ([]byte, []int) { - return file_app_proto_rawDescGZIP(), []int{7} + return file_app_proto_rawDescGZIP(), []int{9} } func (x *GCSConfig) GetBucket() string { @@ -547,7 +635,7 @@ type AzureConfig struct { func (x *AzureConfig) Reset() { *x = AzureConfig{} - mi := &file_app_proto_msgTypes[8] + mi := &file_app_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -559,7 +647,7 @@ func (x *AzureConfig) String() string { func (*AzureConfig) ProtoMessage() {} func (x *AzureConfig) ProtoReflect() protoreflect.Message { - mi := &file_app_proto_msgTypes[8] + mi := &file_app_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -572,7 +660,7 @@ func (x *AzureConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use AzureConfig.ProtoReflect.Descriptor instead. 
func (*AzureConfig) Descriptor() ([]byte, []int) { - return file_app_proto_rawDescGZIP(), []int{8} + return file_app_proto_rawDescGZIP(), []int{10} } func (x *AzureConfig) GetContainerName() string { @@ -620,7 +708,7 @@ type EnvVar struct { func (x *EnvVar) Reset() { *x = EnvVar{} - mi := &file_app_proto_msgTypes[9] + mi := &file_app_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -632,7 +720,7 @@ func (x *EnvVar) String() string { func (*EnvVar) ProtoMessage() {} func (x *EnvVar) ProtoReflect() protoreflect.Message { - mi := &file_app_proto_msgTypes[9] + mi := &file_app_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -645,7 +733,7 @@ func (x *EnvVar) ProtoReflect() protoreflect.Message { // Deprecated: Use EnvVar.ProtoReflect.Descriptor instead. func (*EnvVar) Descriptor() ([]byte, []int) { - return file_app_proto_rawDescGZIP(), []int{9} + return file_app_proto_rawDescGZIP(), []int{11} } func (x *EnvVar) GetKey() string { @@ -673,7 +761,7 @@ type BackupContainerArgs struct { func (x *BackupContainerArgs) Reset() { *x = BackupContainerArgs{} - mi := &file_app_proto_msgTypes[10] + mi := &file_app_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -685,7 +773,7 @@ func (x *BackupContainerArgs) String() string { func (*BackupContainerArgs) ProtoMessage() {} func (x *BackupContainerArgs) ProtoReflect() protoreflect.Message { - mi := &file_app_proto_msgTypes[10] + mi := &file_app_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -698,7 +786,7 @@ func (x *BackupContainerArgs) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupContainerArgs.ProtoReflect.Descriptor instead. 
func (*BackupContainerArgs) Descriptor() ([]byte, []int) { - return file_app_proto_rawDescGZIP(), []int{10} + return file_app_proto_rawDescGZIP(), []int{12} } func (x *BackupContainerArgs) GetXtrabackup() []string { @@ -732,7 +820,7 @@ type ContainerOptions struct { func (x *ContainerOptions) Reset() { *x = ContainerOptions{} - mi := &file_app_proto_msgTypes[11] + mi := &file_app_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -744,7 +832,7 @@ func (x *ContainerOptions) String() string { func (*ContainerOptions) ProtoMessage() {} func (x *ContainerOptions) ProtoReflect() protoreflect.Message { - mi := &file_app_proto_msgTypes[11] + mi := &file_app_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -757,7 +845,7 @@ func (x *ContainerOptions) ProtoReflect() protoreflect.Message { // Deprecated: Use ContainerOptions.ProtoReflect.Descriptor instead. func (*ContainerOptions) Descriptor() ([]byte, []int) { - return file_app_proto_rawDescGZIP(), []int{11} + return file_app_proto_rawDescGZIP(), []int{13} } func (x *ContainerOptions) GetEnv() []*EnvVar { @@ -778,7 +866,12 @@ var File_app_proto protoreflect.FileDescriptor const file_app_proto_rawDesc = "" + "\n" + - "\tapp.proto\x12\x03api\"\x1f\n" + + "\tapp.proto\x12\x03api\"1\n" + + "\x0eGetLogsRequest\x12\x1f\n" + + "\vbackup_name\x18\x01 \x01(\tR\n" + + "backupName\"\x1c\n" + + "\bLogChunk\x12\x10\n" + + "\x03log\x18\x01 \x01(\tR\x03log\"\x1f\n" + "\x1dGetCurrentBackupConfigRequest\"n\n" + "\x13CreateBackupRequest\x12\x1f\n" + "\vbackup_name\x18\x01 \x01(\tR\n" + @@ -841,11 +934,12 @@ const file_app_proto_rawDesc = "" + "\x11BackupStorageType\x12\x06\n" + "\x02S3\x10\x00\x12\t\n" + "\x05AZURE\x10\x01\x12\a\n" + - "\x03GCS\x10\x022\xf0\x01\n" + + "\x03GCS\x10\x022\xa1\x02\n" + "\x11XtrabackupService\x12O\n" + 
"\x16GetCurrentBackupConfig\x12\".api.GetCurrentBackupConfigRequest\x1a\x11.api.BackupConfig\x12E\n" + "\fCreateBackup\x12\x18.api.CreateBackupRequest\x1a\x19.api.CreateBackupResponse0\x01\x12C\n" + - "\fDeleteBackup\x12\x18.api.DeleteBackupRequest\x1a\x19.api.DeleteBackupResponseBGZEgithub.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/apib\x06proto3" + "\fDeleteBackup\x12\x18.api.DeleteBackupRequest\x1a\x19.api.DeleteBackupResponse\x12/\n" + + "\aGetLogs\x12\x13.api.GetLogsRequest\x1a\r.api.LogChunk0\x01BGZEgithub.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/apib\x06proto3" var ( file_app_proto_rawDescOnce sync.Once @@ -860,40 +954,44 @@ func file_app_proto_rawDescGZIP() []byte { } var file_app_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_app_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_app_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_app_proto_goTypes = []any{ (BackupStorageType)(0), // 0: api.BackupStorageType - (*GetCurrentBackupConfigRequest)(nil), // 1: api.GetCurrentBackupConfigRequest - (*CreateBackupRequest)(nil), // 2: api.CreateBackupRequest - (*CreateBackupResponse)(nil), // 3: api.CreateBackupResponse - (*DeleteBackupRequest)(nil), // 4: api.DeleteBackupRequest - (*DeleteBackupResponse)(nil), // 5: api.DeleteBackupResponse - (*BackupConfig)(nil), // 6: api.BackupConfig - (*S3Config)(nil), // 7: api.S3Config - (*GCSConfig)(nil), // 8: api.GCSConfig - (*AzureConfig)(nil), // 9: api.AzureConfig - (*EnvVar)(nil), // 10: api.EnvVar - (*BackupContainerArgs)(nil), // 11: api.BackupContainerArgs - (*ContainerOptions)(nil), // 12: api.ContainerOptions + (*GetLogsRequest)(nil), // 1: api.GetLogsRequest + (*LogChunk)(nil), // 2: api.LogChunk + (*GetCurrentBackupConfigRequest)(nil), // 3: api.GetCurrentBackupConfigRequest + (*CreateBackupRequest)(nil), // 4: api.CreateBackupRequest + (*CreateBackupResponse)(nil), // 5: api.CreateBackupResponse + (*DeleteBackupRequest)(nil), // 6: 
api.DeleteBackupRequest + (*DeleteBackupResponse)(nil), // 7: api.DeleteBackupResponse + (*BackupConfig)(nil), // 8: api.BackupConfig + (*S3Config)(nil), // 9: api.S3Config + (*GCSConfig)(nil), // 10: api.GCSConfig + (*AzureConfig)(nil), // 11: api.AzureConfig + (*EnvVar)(nil), // 12: api.EnvVar + (*BackupContainerArgs)(nil), // 13: api.BackupContainerArgs + (*ContainerOptions)(nil), // 14: api.ContainerOptions } var file_app_proto_depIdxs = []int32{ - 6, // 0: api.CreateBackupRequest.backup_config:type_name -> api.BackupConfig - 6, // 1: api.DeleteBackupRequest.backup_config:type_name -> api.BackupConfig + 8, // 0: api.CreateBackupRequest.backup_config:type_name -> api.BackupConfig + 8, // 1: api.DeleteBackupRequest.backup_config:type_name -> api.BackupConfig 0, // 2: api.BackupConfig.type:type_name -> api.BackupStorageType - 12, // 3: api.BackupConfig.container_options:type_name -> api.ContainerOptions - 7, // 4: api.BackupConfig.s3:type_name -> api.S3Config - 8, // 5: api.BackupConfig.gcs:type_name -> api.GCSConfig - 9, // 6: api.BackupConfig.azure:type_name -> api.AzureConfig - 10, // 7: api.ContainerOptions.env:type_name -> api.EnvVar - 11, // 8: api.ContainerOptions.args:type_name -> api.BackupContainerArgs - 1, // 9: api.XtrabackupService.GetCurrentBackupConfig:input_type -> api.GetCurrentBackupConfigRequest - 2, // 10: api.XtrabackupService.CreateBackup:input_type -> api.CreateBackupRequest - 4, // 11: api.XtrabackupService.DeleteBackup:input_type -> api.DeleteBackupRequest - 6, // 12: api.XtrabackupService.GetCurrentBackupConfig:output_type -> api.BackupConfig - 3, // 13: api.XtrabackupService.CreateBackup:output_type -> api.CreateBackupResponse - 5, // 14: api.XtrabackupService.DeleteBackup:output_type -> api.DeleteBackupResponse - 12, // [12:15] is the sub-list for method output_type - 9, // [9:12] is the sub-list for method input_type + 14, // 3: api.BackupConfig.container_options:type_name -> api.ContainerOptions + 9, // 4: 
api.BackupConfig.s3:type_name -> api.S3Config + 10, // 5: api.BackupConfig.gcs:type_name -> api.GCSConfig + 11, // 6: api.BackupConfig.azure:type_name -> api.AzureConfig + 12, // 7: api.ContainerOptions.env:type_name -> api.EnvVar + 13, // 8: api.ContainerOptions.args:type_name -> api.BackupContainerArgs + 3, // 9: api.XtrabackupService.GetCurrentBackupConfig:input_type -> api.GetCurrentBackupConfigRequest + 4, // 10: api.XtrabackupService.CreateBackup:input_type -> api.CreateBackupRequest + 6, // 11: api.XtrabackupService.DeleteBackup:input_type -> api.DeleteBackupRequest + 1, // 12: api.XtrabackupService.GetLogs:input_type -> api.GetLogsRequest + 8, // 13: api.XtrabackupService.GetCurrentBackupConfig:output_type -> api.BackupConfig + 5, // 14: api.XtrabackupService.CreateBackup:output_type -> api.CreateBackupResponse + 7, // 15: api.XtrabackupService.DeleteBackup:output_type -> api.DeleteBackupResponse + 2, // 16: api.XtrabackupService.GetLogs:output_type -> api.LogChunk + 13, // [13:17] is the sub-list for method output_type + 9, // [9:13] is the sub-list for method input_type 9, // [9:9] is the sub-list for extension type_name 9, // [9:9] is the sub-list for extension extendee 0, // [0:9] is the sub-list for field type_name @@ -904,14 +1002,14 @@ func file_app_proto_init() { if File_app_proto != nil { return } - file_app_proto_msgTypes[5].OneofWrappers = []any{} + file_app_proto_msgTypes[7].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_app_proto_rawDesc), len(file_app_proto_rawDesc)), NumEnums: 1, - NumMessages: 12, + NumMessages: 14, NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/xtrabackup/api/app.proto b/pkg/xtrabackup/api/app.proto index 35d4f571e8..91329a6d7a 100644 --- a/pkg/xtrabackup/api/app.proto +++ b/pkg/xtrabackup/api/app.proto @@ -8,6 +8,15 @@ service XtrabackupService { rpc 
GetCurrentBackupConfig(GetCurrentBackupConfigRequest) returns (BackupConfig); rpc CreateBackup(CreateBackupRequest) returns (stream CreateBackupResponse); rpc DeleteBackup(DeleteBackupRequest) returns (DeleteBackupResponse); + rpc GetLogs(GetLogsRequest) returns (stream LogChunk); +} + +message GetLogsRequest { + string backup_name = 1; +} + +message LogChunk { + string log = 1; } message GetCurrentBackupConfigRequest {} diff --git a/pkg/xtrabackup/api/app_grpc.pb.go b/pkg/xtrabackup/api/app_grpc.pb.go index e097007523..8f17b9917d 100644 --- a/pkg/xtrabackup/api/app_grpc.pb.go +++ b/pkg/xtrabackup/api/app_grpc.pb.go @@ -22,6 +22,7 @@ const ( XtrabackupService_GetCurrentBackupConfig_FullMethodName = "/api.XtrabackupService/GetCurrentBackupConfig" XtrabackupService_CreateBackup_FullMethodName = "/api.XtrabackupService/CreateBackup" XtrabackupService_DeleteBackup_FullMethodName = "/api.XtrabackupService/DeleteBackup" + XtrabackupService_GetLogs_FullMethodName = "/api.XtrabackupService/GetLogs" ) // XtrabackupServiceClient is the client API for XtrabackupService service. 
@@ -31,6 +32,7 @@ type XtrabackupServiceClient interface { GetCurrentBackupConfig(ctx context.Context, in *GetCurrentBackupConfigRequest, opts ...grpc.CallOption) (*BackupConfig, error) CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CreateBackupResponse], error) DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*DeleteBackupResponse, error) + GetLogs(ctx context.Context, in *GetLogsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[LogChunk], error) } type xtrabackupServiceClient struct { @@ -80,6 +82,25 @@ func (c *xtrabackupServiceClient) DeleteBackup(ctx context.Context, in *DeleteBa return out, nil } +func (c *xtrabackupServiceClient) GetLogs(ctx context.Context, in *GetLogsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[LogChunk], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &XtrabackupService_ServiceDesc.Streams[1], XtrabackupService_GetLogs_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[GetLogsRequest, LogChunk]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type XtrabackupService_GetLogsClient = grpc.ServerStreamingClient[LogChunk] + // XtrabackupServiceServer is the server API for XtrabackupService service. // All implementations must embed UnimplementedXtrabackupServiceServer // for forward compatibility. 
@@ -87,6 +108,7 @@ type XtrabackupServiceServer interface { GetCurrentBackupConfig(context.Context, *GetCurrentBackupConfigRequest) (*BackupConfig, error) CreateBackup(*CreateBackupRequest, grpc.ServerStreamingServer[CreateBackupResponse]) error DeleteBackup(context.Context, *DeleteBackupRequest) (*DeleteBackupResponse, error) + GetLogs(*GetLogsRequest, grpc.ServerStreamingServer[LogChunk]) error mustEmbedUnimplementedXtrabackupServiceServer() } @@ -106,6 +128,9 @@ func (UnimplementedXtrabackupServiceServer) CreateBackup(*CreateBackupRequest, g func (UnimplementedXtrabackupServiceServer) DeleteBackup(context.Context, *DeleteBackupRequest) (*DeleteBackupResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteBackup not implemented") } +func (UnimplementedXtrabackupServiceServer) GetLogs(*GetLogsRequest, grpc.ServerStreamingServer[LogChunk]) error { + return status.Errorf(codes.Unimplemented, "method GetLogs not implemented") +} func (UnimplementedXtrabackupServiceServer) mustEmbedUnimplementedXtrabackupServiceServer() {} func (UnimplementedXtrabackupServiceServer) testEmbeddedByValue() {} @@ -174,6 +199,17 @@ func _XtrabackupService_DeleteBackup_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _XtrabackupService_GetLogs_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetLogsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(XtrabackupServiceServer).GetLogs(m, &grpc.GenericServerStream[GetLogsRequest, LogChunk]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type XtrabackupService_GetLogsServer = grpc.ServerStreamingServer[LogChunk] + // XtrabackupService_ServiceDesc is the grpc.ServiceDesc for XtrabackupService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -196,6 +232,11 @@ var XtrabackupService_ServiceDesc = grpc.ServiceDesc{ Handler: _XtrabackupService_CreateBackup_Handler, ServerStreams: true, }, + { + StreamName: "GetLogs", + Handler: _XtrabackupService_GetLogs_Handler, + ServerStreams: true, + }, }, Metadata: "app.proto", } diff --git a/pkg/xtrabackup/server/app.go b/pkg/xtrabackup/server/app.go index 390fa3764c..93823fd7b0 100644 --- a/pkg/xtrabackup/server/app.go +++ b/pkg/xtrabackup/server/app.go @@ -39,9 +39,11 @@ func New() (api.XtrabackupServiceServer, error) { } func (s *appServer) GetCurrentBackupConfig(ctx context.Context, req *api.GetCurrentBackupConfigRequest) (*api.BackupConfig, error) { + // TODO return nil, status.Errorf(codes.Unimplemented, "method GetCurrentBackupConfig not implemented") } func (s *appServer) DeleteBackup(ctx context.Context, req *api.DeleteBackupRequest) (*api.DeleteBackupResponse, error) { + // TODO return nil, status.Errorf(codes.Unimplemented, "method DeleteBackup not implemented") } diff --git a/pkg/xtrabackup/server/logs.go b/pkg/xtrabackup/server/logs.go new file mode 100644 index 0000000000..1b70185fe1 --- /dev/null +++ b/pkg/xtrabackup/server/logs.go @@ -0,0 +1,36 @@ +package server + +import ( + "bufio" + "os" + "path/filepath" + + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" + "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" + "github.com/pkg/errors" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +func (s *appServer) GetLogs(req *api.GetLogsRequest, stream api.XtrabackupService_GetLogsServer) error { + log := logf.Log.WithName("xtrabackup-server").WithName("GetLogs") + + log.Info("Getting logs", "backup_name", req.BackupName) + + logFile, err := os.Open(filepath.Join(app.BackupLogDir, req.BackupName+".log")) + if err != nil { + return errors.Wrap(err, "failed to open log file") + } + defer 
logFile.Close() + + buf := bufio.NewScanner(logFile) + for buf.Scan() { + stream.Send(&api.LogChunk{ + Log: buf.Text(), + }) + } + + if err := buf.Err(); err != nil { + return errors.Wrap(err, "failed to read log file") + } + return nil +} From 5f921cc9562bad7e4f97fd07e2b3c0cfe4115916 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 28 Nov 2025 11:09:28 +0530 Subject: [PATCH 23/77] add log Signed-off-by: Mayank Shah --- cmd/xtrabackup/run-backup/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/xtrabackup/run-backup/main.go b/cmd/xtrabackup/run-backup/main.go index 57de2eed73..f7a60baaa4 100644 --- a/cmd/xtrabackup/run-backup/main.go +++ b/cmd/xtrabackup/run-backup/main.go @@ -68,6 +68,7 @@ func main() { } func printLogs(ctx context.Context, backupName string, client xbscapi.XtrabackupServiceClient) { + log.Println("Requesting logs for backup", backupName) stream, err := client.GetLogs(ctx, &xbscapi.GetLogsRequest{ BackupName: backupName, }) From 4a5236ec29a19a64a1c214c176419cc7724f869b Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 28 Nov 2025 11:20:15 +0530 Subject: [PATCH 24/77] Update pkg/controller/pxc/upgrade.go Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- pkg/controller/pxc/upgrade.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/controller/pxc/upgrade.go b/pkg/controller/pxc/upgrade.go index fd7c8d5ae6..28da349f07 100644 --- a/pkg/controller/pxc/upgrade.go +++ b/pkg/controller/pxc/upgrade.go @@ -349,8 +349,8 @@ func (r *ReconcilePerconaXtraDBCluster) waitHostgroups( if !cr.Spec.ProxySQLEnabled() { return nil } - log := logf.FromContext(ctx) - + log := logf.FromContext(ctx) + database, err := k8s.GetProxyConnection(cr, r.client) if err != nil { return errors.Wrap(err, "connect to proxy") From d9e738c0a1efc2dbb957efa177498604d6fba077 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 28 Nov 2025 11:21:40 +0530 Subject: [PATCH 25/77] fix errors 
Signed-off-by: Mayank Shah --- pkg/controller/pxcbackup/controller.go | 2 +- pkg/controller/pxcbackup/deadline_test.go | 10 +++++----- pkg/pxc/backup/backup.go | 3 +-- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/pkg/controller/pxcbackup/controller.go b/pkg/controller/pxcbackup/controller.go index 4457c41eaf..522efa4d9a 100644 --- a/pkg/controller/pxcbackup/controller.go +++ b/pkg/controller/pxcbackup/controller.go @@ -287,7 +287,7 @@ func (r *ReconcilePerconaXtraDBClusterBackup) createBackupJob( ) (*batchv1.Job, error) { log := logf.FromContext(ctx) - bcp := backup.New(cluster, r.client) + bcp := backup.New(cluster) job := bcp.Job(cr, cluster) initImage, err := k8s.GetInitImage(ctx, cluster, r.client) if err != nil { diff --git a/pkg/controller/pxcbackup/deadline_test.go b/pkg/controller/pxcbackup/deadline_test.go index a0603f5693..6a922fb071 100644 --- a/pkg/controller/pxcbackup/deadline_test.go +++ b/pkg/controller/pxcbackup/deadline_test.go @@ -198,7 +198,7 @@ var _ = Describe("Suspended deadline", func() { cr, err := readDefaultBackup("backup1", "test") Expect(err).ToNot(HaveOccurred()) - bcp := backup.New(cluster, buildFakeClient()) + bcp := backup.New(cluster) job := bcp.Job(cr, cluster) job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, "") @@ -226,7 +226,7 @@ var _ = Describe("Suspended deadline", func() { cr, err := readDefaultBackup("backup1", "test") Expect(err).ToNot(HaveOccurred()) - bcp := backup.New(cluster, buildFakeClient()) + bcp := backup.New(cluster) job := bcp.Job(cr, cluster) job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, "") @@ -254,7 +254,7 @@ var _ = Describe("Suspended deadline", func() { cr, err := readDefaultBackup("backup1", "test") Expect(err).ToNot(HaveOccurred()) - bcp := backup.New(cluster, buildFakeClient()) + bcp := backup.New(cluster) job := bcp.Job(cr, cluster) job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, "") @@ -282,7 +282,7 @@ var _ = Describe("Suspended deadline", func() { cr, err := 
readDefaultBackup("backup1", "test") Expect(err).ToNot(HaveOccurred()) - bcp := backup.New(cluster, buildFakeClient()) + bcp := backup.New(cluster) job := bcp.Job(cr, cluster) job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, "") @@ -318,7 +318,7 @@ var _ = Describe("Suspended deadline", func() { cr, err := readDefaultBackup("backup1", "test") Expect(err).ToNot(HaveOccurred()) - bcp := backup.New(cluster, buildFakeClient()) + bcp := backup.New(cluster) job := bcp.Job(cr, cluster) job.Spec, err = bcp.JobSpec(cr.Spec, cluster, job, "") diff --git a/pkg/pxc/backup/backup.go b/pkg/pxc/backup/backup.go index a40ede95ba..65bff1666d 100644 --- a/pkg/pxc/backup/backup.go +++ b/pkg/pxc/backup/backup.go @@ -4,7 +4,6 @@ import ( corev1 "k8s.io/api/core/v1" api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" - "sigs.k8s.io/controller-runtime/pkg/client" ) type Backup struct { @@ -16,7 +15,7 @@ type Backup struct { serviceAccountName string } -func New(cr *api.PerconaXtraDBCluster, cl client.Client) *Backup { +func New(cr *api.PerconaXtraDBCluster) *Backup { return &Backup{ cluster: cr.Name, namespace: cr.Namespace, From 275550aecd416c0b603379378803f0902229c0a7 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 28 Nov 2025 11:35:06 +0530 Subject: [PATCH 26/77] linting Signed-off-by: Mayank Shah --- cmd/xtrabackup/run-backup/main.go | 2 +- pkg/xtrabackup/server/create.go | 2 +- pkg/xtrabackup/server/logs.go | 7 +++++-- pkg/xtrabackup/server/status.go | 10 ++-------- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/cmd/xtrabackup/run-backup/main.go b/cmd/xtrabackup/run-backup/main.go index f7a60baaa4..10c762b6db 100644 --- a/cmd/xtrabackup/run-backup/main.go +++ b/cmd/xtrabackup/run-backup/main.go @@ -120,7 +120,7 @@ func getRequestObject() *xbscapi.CreateBackupRequest { if err != nil { log.Fatalf("Failed to marshal request: %v", err) } - log.Printf("Request=", string(reqJson)) + log.Printf("Request=%s", string(reqJson)) return req } diff 
--git a/pkg/xtrabackup/server/create.go b/pkg/xtrabackup/server/create.go index 8da067ff5b..5c1656c4fa 100644 --- a/pkg/xtrabackup/server/create.go +++ b/pkg/xtrabackup/server/create.go @@ -32,7 +32,7 @@ func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.Xtraba log = log.WithValues("namespace", s.namespace, "name", req.BackupName) - s.backupStatus.setBackupConfig(*req.BackupConfig) + s.backupStatus.setBackupConfig(req.BackupConfig) defer s.backupStatus.removeBackupConfig() ctx := stream.Context() diff --git a/pkg/xtrabackup/server/logs.go b/pkg/xtrabackup/server/logs.go index 1b70185fe1..db8c5e5049 100644 --- a/pkg/xtrabackup/server/logs.go +++ b/pkg/xtrabackup/server/logs.go @@ -2,6 +2,7 @@ package server import ( "bufio" + "fmt" "os" "path/filepath" @@ -24,9 +25,11 @@ func (s *appServer) GetLogs(req *api.GetLogsRequest, stream api.XtrabackupServic buf := bufio.NewScanner(logFile) for buf.Scan() { - stream.Send(&api.LogChunk{ + if err := stream.Send(&api.LogChunk{ Log: buf.Text(), - }) + }); err != nil { + return fmt.Errorf("error streaming log chunk: %w", err) + } } if err := buf.Err(); err != nil { diff --git a/pkg/xtrabackup/server/status.go b/pkg/xtrabackup/server/status.go index 42129e3f65..05d411be4c 100644 --- a/pkg/xtrabackup/server/status.go +++ b/pkg/xtrabackup/server/status.go @@ -21,9 +21,9 @@ func (s *backupStatus) doneBackup() { s.isRunning.Store(false) } -func (s *backupStatus) setBackupConfig(conf api.BackupConfig) { +func (s *backupStatus) setBackupConfig(conf *api.BackupConfig) { s.mu.Lock() - s.currentBackupConf = &conf + s.currentBackupConf = conf s.mu.Unlock() } @@ -32,9 +32,3 @@ func (s *backupStatus) removeBackupConfig() { s.currentBackupConf = nil s.mu.Unlock() } - -func (s *backupStatus) getBackupConfig() *api.BackupConfig { - s.mu.Lock() - defer s.mu.Unlock() - return s.currentBackupConf -} From 2f9b072305cf669f6a9c22177fd723ec35af13b2 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 28 Nov 2025 11:59:49 +0530 
Subject: [PATCH 27/77] add unit test for JobSpec Signed-off-by: Mayank Shah --- pkg/xtrabackup/job_test.go | 217 +++++++++++++++++++++++++++++++++++++ 1 file changed, 217 insertions(+) create mode 100644 pkg/xtrabackup/job_test.go diff --git a/pkg/xtrabackup/job_test.go b/pkg/xtrabackup/job_test.go new file mode 100644 index 0000000000..6bf50e7876 --- /dev/null +++ b/pkg/xtrabackup/job_test.go @@ -0,0 +1,217 @@ +package xtrabackup + +import ( + "testing" + + pxcv1 "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" + "github.com/stretchr/testify/assert" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +func TestJobSpec(t *testing.T) { + activeDeadlineSeconds := int64(3600) + storageName := "s3-storage" + verifyTLS := true + initImage := "percona/percona-xtradb-cluster-operator:init-image" + primaryPodHost := "cluster-pxc-0.cluster-pxc" + backupImage := "percona/percona-xtradb-cluster-operator:backup-image" + serviceAccountName := "backup-service-account" + schedulerName := "custom-scheduler" + priorityClassName := "high-priority" + runtimeClassName := "gvisor" + + spec := &pxcv1.PXCBackupSpec{ + StorageName: storageName, + ActiveDeadlineSeconds: &activeDeadlineSeconds, + } + + cluster := &pxcv1.PerconaXtraDBCluster{ + Spec: pxcv1.PerconaXtraDBClusterSpec{ + Backup: &pxcv1.PXCScheduledBackup{ + Image: backupImage, + ImagePullPolicy: corev1.PullIfNotPresent, + ImagePullSecrets: []corev1.LocalObjectReference{ + {Name: "backup-registry-secret"}, + }, + ServiceAccountName: serviceAccountName, + Storages: map[string]*pxcv1.BackupStorageSpec{ + storageName: { + Type: pxcv1.BackupStorageS3, + S3: &pxcv1.BackupStorageS3Spec{ + Bucket: "test-bucket", + Region: "us-west-2", + CredentialsSecret: "s3-credentials", + }, + VerifyTLS: &verifyTLS, + Resources: 
corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("512Mi"), + }, + }, + ContainerSecurityContext: &corev1.SecurityContext{ + RunAsUser: ptr.To(int64(1000)), + RunAsGroup: ptr.To(int64(1000)), + Privileged: ptr.To(false), + }, + PodSecurityContext: &corev1.PodSecurityContext{ + RunAsUser: ptr.To(int64(1000)), + RunAsGroup: ptr.To(int64(1000)), + FSGroup: ptr.To(int64(1000)), + }, + Annotations: map[string]string{ + "backup.annotation/key": "value", + }, + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "kubernetes.io/arch", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"amd64"}, + }, + }, + }, + }, + }, + }, + }, + Tolerations: []corev1.Toleration{ + { + Key: "backup", + Operator: corev1.TolerationOpEqual, + Value: "true", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + NodeSelector: map[string]string{ + "backup-node": "true", + }, + SchedulerName: schedulerName, + PriorityClassName: priorityClassName, + RuntimeClassName: &runtimeClassName, + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: "kubernetes.io/hostname", + WhenUnsatisfiable: corev1.DoNotSchedule, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "backup", + }, + }, + }, + }, + }, + }, + }, + InitContainer: pxcv1.InitContainerSpec{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("64Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: 
resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + }, + }, + }, + } + + jobLabels := map[string]string{ + "app": "percona-xtradb-cluster-backup", + "cluster": "test-cluster", + "backup-name": "test-backup", + "percona.com/backup-type": "manual", + } + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Labels: jobLabels, + }, + } + + jobSpec, err := JobSpec(spec, cluster, job, initImage, primaryPodHost) + assert.NoError(t, err) + + // Assert JobSpec fields + assert.NotNil(t, jobSpec.ManualSelector) + assert.True(t, *jobSpec.ManualSelector) + assert.Equal(t, &activeDeadlineSeconds, jobSpec.ActiveDeadlineSeconds) + assert.NotNil(t, jobSpec.Selector) + assert.Equal(t, jobLabels, jobSpec.Selector.MatchLabels) + + // Assert PodTemplateSpec + podTemplate := jobSpec.Template + assert.Equal(t, jobLabels, podTemplate.Labels) + assert.Equal(t, cluster.Spec.Backup.Storages[storageName].Annotations, podTemplate.Annotations) + + // Assert PodSpec + podSpec := podTemplate.Spec + assert.Equal(t, corev1.RestartPolicyNever, podSpec.RestartPolicy) + assert.Equal(t, cluster.Spec.Backup.ServiceAccountName, podSpec.ServiceAccountName) + assert.Equal(t, cluster.Spec.Backup.ImagePullSecrets, podSpec.ImagePullSecrets) + assert.Equal(t, cluster.Spec.Backup.Storages[storageName].PodSecurityContext, podSpec.SecurityContext) + assert.Equal(t, cluster.Spec.Backup.Storages[storageName].Affinity, podSpec.Affinity) + assert.Equal(t, cluster.Spec.Backup.Storages[storageName].Tolerations, podSpec.Tolerations) + assert.Equal(t, cluster.Spec.Backup.Storages[storageName].NodeSelector, podSpec.NodeSelector) + assert.Equal(t, schedulerName, podSpec.SchedulerName) + assert.Equal(t, priorityClassName, podSpec.PriorityClassName) + assert.Equal(t, &runtimeClassName, podSpec.RuntimeClassName) + assert.NotNil(t, podSpec.TopologySpreadConstraints) + assert.Len(t, podSpec.TopologySpreadConstraints, 1) + + // Assert Volumes + assert.Len(t, podSpec.Volumes, 1) + 
assert.Equal(t, app.BinVolumeName, podSpec.Volumes[0].Name) + assert.NotNil(t, podSpec.Volumes[0].EmptyDir) + + // Assert InitContainers + assert.Len(t, podSpec.InitContainers, 1) + initContainer := podSpec.InitContainers[0] + assert.Equal(t, "backup-init", initContainer.Name) + assert.Equal(t, initImage, initContainer.Image) + assert.Equal(t, cluster.Spec.Backup.ImagePullPolicy, initContainer.ImagePullPolicy) + assert.Equal(t, []string{"/backup-init-entrypoint.sh"}, initContainer.Command) + assert.Equal(t, cluster.Spec.Backup.Storages[storageName].ContainerSecurityContext, initContainer.SecurityContext) + assert.Len(t, initContainer.VolumeMounts, 1) + assert.Equal(t, app.BinVolumeName, initContainer.VolumeMounts[0].Name) + assert.Equal(t, app.BinVolumeMountPath, initContainer.VolumeMounts[0].MountPath) + + // Assert Containers + assert.Len(t, podSpec.Containers, 1) + container := podSpec.Containers[0] + assert.Equal(t, "xtrabackup", container.Name) + assert.Equal(t, backupImage, container.Image) + assert.Equal(t, cluster.Spec.Backup.ImagePullPolicy, container.ImagePullPolicy) + assert.Equal(t, []string{"/opt/percona/xtrabackup-run-backup"}, container.Command) + assert.Equal(t, cluster.Spec.Backup.Storages[storageName].Resources, container.Resources) + assert.Equal(t, cluster.Spec.Backup.Storages[storageName].ContainerSecurityContext, container.SecurityContext) + assert.Len(t, container.VolumeMounts, 1) + assert.Equal(t, app.BinVolumeName, container.VolumeMounts[0].Name) + assert.Equal(t, app.BinVolumeMountPath, container.VolumeMounts[0].MountPath) + + // Assert Environment Variables + assert.Len(t, container.Env, 3) + envMap := make(map[string]string) + for _, env := range container.Env { + envMap[env.Name] = env.Value + } + assert.Equal(t, primaryPodHost, envMap["HOST"]) + assert.Equal(t, string(pxcv1.BackupStorageS3), envMap["STORAGE_TYPE"]) + assert.Equal(t, "true", envMap["VERIFY_TLS"]) +} From a97a83294f518ca7c4525de8cb480d5281955677 Mon Sep 17 00:00:00 2001 
From: Mayank Shah Date: Fri, 28 Nov 2025 12:16:12 +0530 Subject: [PATCH 28/77] add unit tests for commands Signed-off-by: Mayank Shah --- pkg/xtrabackup/api/cmd_test.go | 309 +++++++++++++++++++++++++++++++++ 1 file changed, 309 insertions(+) create mode 100644 pkg/xtrabackup/api/cmd_test.go diff --git a/pkg/xtrabackup/api/cmd_test.go b/pkg/xtrabackup/api/cmd_test.go new file mode 100644 index 0000000000..9db3f1cda4 --- /dev/null +++ b/pkg/xtrabackup/api/cmd_test.go @@ -0,0 +1,309 @@ +package api + +import ( + context "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewXtrabackupCmd(t *testing.T) { + testCases := []struct { + backupConfig *BackupConfig + expectedArgs []string + }{ + { + backupConfig: &BackupConfig{ + Destination: "s3://bucket/backup", + Type: BackupStorageType_S3, + VerifyTls: true, + ContainerOptions: &ContainerOptions{ + Args: &BackupContainerArgs{ + Xtrabackup: []string{ + "--compress", + "--compress-threads=4", + "--parallel=4", + }, + }, + }, + }, + expectedArgs: []string{ + "xtrabackup", + "--backup", + "--stream=xbstream", + "--safe-slave-backup", + "--slave-info", + "--target-dir=/backup/", + "--socket=/tmp/mysql.sock", + "--user=root", + "--password=password123", + "--compress", + "--compress-threads=4", + "--parallel=4", + }, + }, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) { + cmd := tc.backupConfig.NewXtrabackupCmd(context.Background(), "root", "password123") + assert.Equal(t, tc.expectedArgs, cmd.Args) + }) + } +} + +func TestNewXbcloudCmd(t *testing.T) { + testCases := []struct { + name string + backupConfig *BackupConfig + action XBCloudAction + expectedArgs []string + expectedEnv []string // Custom env vars that should be present (in addition to os.Environ()) + }{ + { + name: "S3 storage with verify TLS and put action", + backupConfig: &BackupConfig{ + Destination: "s3://bucket/backup-name", + Type: BackupStorageType_S3, + VerifyTls: true, + 
S3: &S3Config{ + Bucket: "test-bucket", + Region: "us-west-2", + AccessKey: "access-key-123", + SecretKey: "secret-key-456", + EndpointUrl: "", + }, + }, + action: XBCloudActionPut, + expectedArgs: []string{ + "xbcloud", + "put", + "--parallel=10", + "--curl-retriable-errors=7", + "--md5", + "--storage=s3", + "--s3-bucket=test-bucket", + "--s3-region=us-west-2", + "--s3-access-key=access-key-123", + "--s3-secret-key=secret-key-456", + "s3://bucket/backup-name", + }, + expectedEnv: []string{}, + }, + { + name: "S3 storage without verify TLS and delete action", + backupConfig: &BackupConfig{ + Destination: "s3://bucket/backup-name", + Type: BackupStorageType_S3, + VerifyTls: false, + S3: &S3Config{ + Bucket: "test-bucket", + Region: "eu-central-1", + AccessKey: "access-key-789", + SecretKey: "secret-key-012", + EndpointUrl: "https://s3.custom.endpoint.com", + }, + }, + action: XBCloudActionDelete, + expectedArgs: []string{ + "xbcloud", + "delete", + "--parallel=10", + "--curl-retriable-errors=7", + "--insecure", + "--md5", + "--storage=s3", + "--s3-bucket=test-bucket", + "--s3-region=eu-central-1", + "--s3-access-key=access-key-789", + "--s3-secret-key=secret-key-012", + "--s3-endpoint=https://s3.custom.endpoint.com", + "s3://bucket/backup-name", + }, + expectedEnv: []string{}, + }, + { + name: "S3 storage with container options and xbcloud args", + backupConfig: &BackupConfig{ + Destination: "s3://bucket/backup-name", + Type: BackupStorageType_S3, + VerifyTls: true, + S3: &S3Config{ + Bucket: "test-bucket", + Region: "us-east-1", + AccessKey: "access-key", + SecretKey: "secret-key", + EndpointUrl: "", + }, + ContainerOptions: &ContainerOptions{ + Env: []*EnvVar{ + {Key: "AWS_PROFILE", Value: "production"}, + {Key: "CUSTOM_VAR", Value: "custom-value"}, + }, + Args: &BackupContainerArgs{ + Xbcloud: []string{ + "--xb-cloud-arg=some-vaule", + }, + }, + }, + }, + action: XBCloudActionPut, + expectedArgs: []string{ + "xbcloud", + "put", + "--parallel=10", + 
"--curl-retriable-errors=7", + "--xb-cloud-arg=some-vaule", + "--md5", + "--storage=s3", + "--s3-bucket=test-bucket", + "--s3-region=us-east-1", + "--s3-access-key=access-key", + "--s3-secret-key=secret-key", + "s3://bucket/backup-name", + }, + expectedEnv: []string{ + "AWS_PROFILE=production", + "CUSTOM_VAR=custom-value", + }, + }, + { + name: "Azure storage with verify TLS and put action", + backupConfig: &BackupConfig{ + Destination: "azure://container/backup-name", + Type: BackupStorageType_AZURE, + VerifyTls: true, + Azure: &AzureConfig{ + StorageAccount: "storage-account-123", + ContainerName: "backup-container", + AccessKey: "azure-access-key", + EndpointUrl: "", + }, + }, + action: XBCloudActionPut, + expectedArgs: []string{ + "xbcloud", + "put", + "--parallel=10", + "--curl-retriable-errors=7", + "--storage=azure", + "--azure-storage-account=storage-account-123", + "--azure-container-name=backup-container", + "--azure-access-key=azure-access-key", + "azure://container/backup-name", + }, + expectedEnv: []string{}, + }, + { + name: "Azure storage without verify TLS and delete action", + backupConfig: &BackupConfig{ + Destination: "azure://container/backup-name", + Type: BackupStorageType_AZURE, + VerifyTls: false, + Azure: &AzureConfig{ + StorageAccount: "storage-account-456", + ContainerName: "backup-container", + AccessKey: "azure-access-key-789", + EndpointUrl: "https://custom.azure.endpoint.net", + }, + }, + action: XBCloudActionDelete, + expectedArgs: []string{ + "xbcloud", + "delete", + "--parallel=10", + "--curl-retriable-errors=7", + "--insecure", + "--storage=azure", + "--azure-storage-account=storage-account-456", + "--azure-container-name=backup-container", + "--azure-access-key=azure-access-key-789", + "--azure-endpoint=https://custom.azure.endpoint.net", + "azure://container/backup-name", + }, + expectedEnv: []string{}, + }, + { + name: "Azure storage with container options and env vars", + backupConfig: &BackupConfig{ + Destination: 
"azure://container/backup-name", + Type: BackupStorageType_AZURE, + VerifyTls: true, + Azure: &AzureConfig{ + StorageAccount: "storage-account", + ContainerName: "backup-container", + AccessKey: "azure-access-key", + EndpointUrl: "https://storage.azure.net", + }, + ContainerOptions: &ContainerOptions{ + Env: []*EnvVar{ + {Key: "AZURE_STORAGE_CONNECTION_STRING", Value: "DefaultEndpointsProtocol=https"}, + {Key: "LOG_LEVEL", Value: "debug"}, + }, + Args: &BackupContainerArgs{ + Xbcloud: []string{ + "--xb-cloud-arg=some-vaule", + }, + }, + }, + }, + action: XBCloudActionPut, + expectedArgs: []string{ + "xbcloud", + "put", + "--parallel=10", + "--curl-retriable-errors=7", + "--xb-cloud-arg=some-vaule", + "--storage=azure", + "--azure-storage-account=storage-account", + "--azure-container-name=backup-container", + "--azure-access-key=azure-access-key", + "--azure-endpoint=https://storage.azure.net", + "azure://container/backup-name", + }, + expectedEnv: []string{ + "AZURE_STORAGE_CONNECTION_STRING=DefaultEndpointsProtocol=https", + "LOG_LEVEL=debug", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cmd := tc.backupConfig.NewXbcloudCmd(context.Background(), tc.action, nil) + + // Verify command arguments + assert.Equal(t, tc.expectedArgs, cmd.Args) + + // Verify environment variables + // The env will include os.Environ() plus custom env vars + envMap := make(map[string]string) + for _, env := range cmd.Env { + parts := splitEnv(env) + if len(parts) == 2 { + envMap[parts[0]] = parts[1] + } + } + + // Check that all expected custom env vars are present + for _, expectedEnv := range tc.expectedEnv { + parts := splitEnv(expectedEnv) + if len(parts) == 2 { + assert.Contains(t, cmd.Env, expectedEnv, "Expected env var %s not found", expectedEnv) + assert.Equal(t, parts[1], envMap[parts[0]], "Env var %s has wrong value", parts[0]) + } + } + }) + } +} + +// Helper function to split env var string into key and value +func splitEnv(env 
string) []string { + for i := 0; i < len(env); i++ { + if env[i] == '=' { + return []string{env[:i], env[i+1:]} + } + } + return []string{env} +} From 75f12e48de4594f7952367d37ba9ca9beaaa477c Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Sun, 30 Nov 2025 11:20:31 +0530 Subject: [PATCH 29/77] do not allow pvc backups Signed-off-by: Mayank Shah --- pkg/controller/pxcbackup/controller.go | 32 +++++++++++++++++--------- pkg/pxc/backup/job.go | 8 +++---- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/pkg/controller/pxcbackup/controller.go b/pkg/controller/pxcbackup/controller.go index 14552a1ebb..e40e6e2065 100644 --- a/pkg/controller/pxcbackup/controller.go +++ b/pkg/controller/pxcbackup/controller.go @@ -155,6 +155,27 @@ func (r *ReconcilePerconaXtraDBClusterBackup) Reconcile(ctx context.Context, req log = log.WithValues("cluster", cluster.Name) + storage, ok := cluster.Spec.Backup.Storages[cr.Spec.StorageName] + if !ok { + err := errors.Errorf("storage %s doesn't exist", cr.Spec.StorageName) + + if err := r.setFailedStatus(ctx, cr, err); err != nil { + return rr, errors.Wrap(err, "update status") + } + + return reconcile.Result{}, err + } + + // TODO: implement support + if storage.Type == api.BackupStorageFilesystem && features.Enabled(ctx, features.BackupXtrabackup) { + err := errors.New("pvc backup is not supported for xtrabackup mode") + + if err := r.setFailedStatus(ctx, cr, err); err != nil { + return rr, errors.Wrap(err, "update status") + } + return reconcile.Result{}, err + } + err = cluster.CheckNSetDefaults(r.serverVersion, log) if err != nil { err := errors.Wrap(err, "wrong PXC options") @@ -224,17 +245,6 @@ func (r *ReconcilePerconaXtraDBClusterBackup) Reconcile(ctx context.Context, req return rr, nil } - storage, ok := cluster.Spec.Backup.Storages[cr.Spec.StorageName] - if !ok { - err := errors.Errorf("storage %s doesn't exist", cr.Spec.StorageName) - - if err := r.setFailedStatus(ctx, cr, err); err != nil { - return rr, 
errors.Wrap(err, "update status") - } - - return reconcile.Result{}, err - } - log = log.WithValues("storage", cr.Spec.StorageName) log.V(1).Info("Check if parallel backups are allowed", "allowed", cluster.Spec.Backup.GetAllowParallel()) diff --git a/pkg/pxc/backup/job.go b/pkg/pxc/backup/job.go index 78fe7764ba..ff9a1b32ca 100644 --- a/pkg/pxc/backup/job.go +++ b/pkg/pxc/backup/job.go @@ -226,11 +226,9 @@ func SetStoragePVC(ctx context.Context, job *batchv1.JobSpec, cr *api.PerconaXtr pvc, }...) - if !features.Enabled(ctx, features.BackupXtrabackup) { - err := appendStorageSecret(job, cr) - if err != nil { - return errors.Wrap(err, "failed to append storage secret") - } + err := appendStorageSecret(job, cr) + if err != nil { + return errors.Wrap(err, "failed to append storage secret") } return nil From 9bc42ede0a9986870a2ec049cd80703a37508fb2 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Mon, 1 Dec 2025 13:52:14 +0530 Subject: [PATCH 30/77] allow specifying PXCO_FEATURE_GATES in tests Signed-off-by: Mayank Shah --- deploy/bundle.yaml | 2 ++ deploy/cw-bundle.yaml | 2 ++ deploy/operator.yaml | 2 ++ e2e-tests/functions | 2 ++ 4 files changed, 8 insertions(+) diff --git a/deploy/bundle.yaml b/deploy/bundle.yaml index 6d0e6ee75d..b5527d511e 100644 --- a/deploy/bundle.yaml +++ b/deploy/bundle.yaml @@ -12534,6 +12534,8 @@ spec: value: "10" - name: MAX_CONCURRENT_RECONCILES value: "1" + - name: PXCO_FEATURE_GATES + value: "" image: perconalab/percona-xtradb-cluster-operator:main imagePullPolicy: Always livenessProbe: diff --git a/deploy/cw-bundle.yaml b/deploy/cw-bundle.yaml index 7246088a7b..b310a3c8a0 100644 --- a/deploy/cw-bundle.yaml +++ b/deploy/cw-bundle.yaml @@ -12544,6 +12544,8 @@ spec: value: "10" - name: MAX_CONCURRENT_RECONCILES value: "1" + - name: PXCO_FEATURE_GATES + value: "" image: perconalab/percona-xtradb-cluster-operator:main imagePullPolicy: Always resources: diff --git a/deploy/operator.yaml b/deploy/operator.yaml index 68af7cf01e..244787f7b3 
100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -49,6 +49,8 @@ spec: value: "10" - name: MAX_CONCURRENT_RECONCILES value: "1" + - name: PXCO_FEATURE_GATES + value: "" image: perconalab/percona-xtradb-cluster-operator:main imagePullPolicy: Always livenessProbe: diff --git a/e2e-tests/functions b/e2e-tests/functions index 4ffad83347..70ab211057 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -346,6 +346,7 @@ deploy_operator() { | sed -e "s^failureThreshold: .*^failureThreshold: 10^" \ | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \ | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - \ + | yq eval "(select(.kind == \"Deployment\").spec.template.spec.containers[] | select(.name == \"percona-xtradb-cluster-operator\").env[] | select(.name == \"PXCO_FEATURE_GATES\").value) = \"${PXCO_FEATURE_GATES}\"" - \ | kubectl_bin apply -f - else apply_rbac rbac @@ -354,6 +355,7 @@ deploy_operator() { | sed -e "s^failureThreshold: .*^failureThreshold: 10^" \ | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \ | yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - \ + | yq eval "(select(.kind == \"Deployment\").spec.template.spec.containers[] | select(.name == \"percona-xtradb-cluster-operator\").env[] | select(.name == \"PXCO_FEATURE_GATES\").value) = \"${PXCO_FEATURE_GATES}\"" - \ | kubectl_bin apply -f - fi From 3b958f359bed958c166c76423b768bd3201435d8 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Mon, 1 Dec 
2025 15:57:19 +0530 Subject: [PATCH 31/77] add protoc to Makefile Signed-off-by: Mayank Shah --- Makefile | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/Makefile b/Makefile index 878de0a6c0..8c13d773fd 100644 --- a/Makefile +++ b/Makefile @@ -108,6 +108,24 @@ SWAGGER = $(shell pwd)/bin/swagger swagger: ## Download swagger locally if necessary. $(call go-get-tool,$(SWAGGER),github.com/go-swagger/go-swagger/cmd/swagger@latest) +PROTOC_VERSION = 33.1 +PROTOC = $(shell pwd)/bin/protoc +protoc: ## Download protoc locally if necessary. + os='linux'; \ + arch='x86_64'; \ + if [ "$(shell uname)" = "Darwin" ]; then \ + os='osx'; \ + fi; \ + if [ "$(shell uname -m)" = "arm64" ]; then \ + arch='aarch_64'; \ + fi; \ + curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-$${os}-$${arch}.zip"; \ + unzip -o protoc-${PROTOC_VERSION}-$${os}-$${arch}.zip -d protoc-${PROTOC_VERSION}-$${os}-$${arch}; \ + rm protoc-${PROTOC_VERSION}-$${os}-$${arch}.zip; \ + mv -f protoc-${PROTOC_VERSION}-$${os}-$${arch}/bin/protoc $(PROTOC); \ + rm -rf protoc-${PROTOC_VERSION}-$${os}-$${arch}; \ + $(call go install google.golang.org/protobuf/cmd/protoc-gen-go@latest) + # Prepare release include e2e-tests/release_versions CERT_MANAGER_VER := $(shell grep -Eo "cert-manager v.*" go.mod|grep -Eo "[0-9]+\.[0-9]+\.[0-9]+") From 8203b8409e77910bf11b045f2c8bc2832d37a094 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Mon, 1 Dec 2025 15:57:28 +0530 Subject: [PATCH 32/77] add --galera-info Signed-off-by: Mayank Shah --- pkg/xtrabackup/api/cmd.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/xtrabackup/api/cmd.go b/pkg/xtrabackup/api/cmd.go index 9bf836f4bd..32e0cf522d 100644 --- a/pkg/xtrabackup/api/cmd.go +++ b/pkg/xtrabackup/api/cmd.go @@ -102,6 +102,7 @@ func (cfg *BackupConfig) xtrabackupArgs(user, pass string) []string { "--stream=xbstream", "--safe-slave-backup", "--slave-info", + "--galera-info", 
"--target-dir=/backup/", "--socket=/tmp/mysql.sock", fmt.Sprintf("--user=%s", user), From 8f9dbca5c3b6695564b5c554807b896560d78ba0 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 09:59:41 +0530 Subject: [PATCH 33/77] fix pitr Signed-off-by: Mayank Shah --- cmd/pitr/recoverer/recoverer.go | 267 ++++++++++++++++++-------------- 1 file changed, 155 insertions(+), 112 deletions(-) diff --git a/cmd/pitr/recoverer/recoverer.go b/cmd/pitr/recoverer/recoverer.go index 9993250360..d988d2229f 100644 --- a/cmd/pitr/recoverer/recoverer.go +++ b/cmd/pitr/recoverer/recoverer.go @@ -3,6 +3,7 @@ package recoverer import ( "bytes" "context" + "fmt" "io" "log" "net/url" @@ -78,7 +79,7 @@ func (c Config) storages(ctx context.Context) (storage.Storage, storage.Storage, return nil, nil, errors.Wrap(err, "get bucket and prefix") } prefix = prefix[:len(prefix)-1] - defaultStorage, err = storage.NewS3(ctx, c.BackupStorageS3.Endpoint, c.BackupStorageS3.AccessKeyID, c.BackupStorageS3.AccessKey, bucket, prefix+".sst_info/", c.BackupStorageS3.Region, c.VerifyTLS, caBundle) + defaultStorage, err = storage.NewS3(ctx, c.BackupStorageS3.Endpoint, c.BackupStorageS3.AccessKeyID, c.BackupStorageS3.AccessKey, bucket, prefix, c.BackupStorageS3.Region, c.VerifyTLS, caBundle) if err != nil { return nil, nil, errors.Wrap(err, "new storage manager") } @@ -89,7 +90,7 @@ func (c Config) storages(ctx context.Context) (storage.Storage, storage.Storage, if err != nil { return nil, nil, errors.Wrap(err, "new azure storage") } - defaultStorage, err = storage.NewAzure(c.BackupStorageAzure.AccountName, c.BackupStorageAzure.AccountKey, c.BackupStorageAzure.Endpoint, c.BackupStorageAzure.ContainerName, c.BackupStorageAzure.BackupDest+".sst_info/", c.BackupStorageAzure.BlockSize, c.BackupStorageAzure.Concurrency) + defaultStorage, err = storage.NewAzure(c.BackupStorageAzure.AccountName, c.BackupStorageAzure.AccountKey, c.BackupStorageAzure.Endpoint, c.BackupStorageAzure.ContainerName, 
c.BackupStorageAzure.BackupDest, c.BackupStorageAzure.BlockSize, c.BackupStorageAzure.Concurrency) if err != nil { return nil, nil, errors.Wrap(err, "new azure storage") } @@ -231,45 +232,6 @@ func getBucketAndPrefix(bucketURL string) (bucket string, prefix string, err err return bucket, prefix, err } -func getStartGTIDSet(ctx context.Context, s storage.Storage) (string, error) { - sstInfo, err := s.ListObjects(ctx, "sst_info") - if err != nil { - return "", errors.Wrapf(err, "list objects") - } - if len(sstInfo) == 0 { - return "", errors.New("no info files in sst dir") - } - sort.Strings(sstInfo) - - sstInfoObj, err := s.GetObject(ctx, sstInfo[0]) - if err != nil { - return "", errors.Wrapf(err, "get object") - } - defer sstInfoObj.Close() - - s.SetPrefix(strings.TrimSuffix(s.GetPrefix(), ".sst_info/") + "/") - xtrabackupInfo, err := s.ListObjects(ctx, "xtrabackup_info") - if err != nil { - return "", errors.Wrapf(err, "list objects") - } - if len(xtrabackupInfo) == 0 { - return "", errors.New("no info files in backup") - } - sort.Strings(xtrabackupInfo) - - xtrabackupInfoObj, err := s.GetObject(ctx, xtrabackupInfo[0]) - if err != nil { - return "", errors.Wrapf(err, "get object") - } - - lastGTID, err := getLastBackupGTID(ctx, sstInfoObj, xtrabackupInfoObj) - if err != nil { - return "", errors.Wrap(err, "get last backup gtid") - } - - return lastGTID, nil -} - const ( Latest RecoverType = "latest" // recover to the latest existing binlog Date RecoverType = "date" // recover to exact date @@ -403,77 +365,6 @@ func (r *Recoverer) recover(ctx context.Context) (err error) { return nil } -func getLastBackupGTID(ctx context.Context, sstInfo, xtrabackupInfo io.Reader) (string, error) { - sstContent, err := getDecompressedContent(ctx, sstInfo, "sst_info") - if err != nil { - return "", errors.Wrap(err, "get sst_info content") - } - - xtrabackupContent, err := getDecompressedContent(ctx, xtrabackupInfo, "xtrabackup_info") - if err != nil { - return "", errors.Wrap(err, 
"get xtrabackup info content") - } - - sstGTIDset, err := getGTIDFromSSTInfo(sstContent) - if err != nil { - return "", err - } - currGTID := strings.Split(sstGTIDset, ":")[0] - - set, err := getSetFromXtrabackupInfo(currGTID, xtrabackupContent) - if err != nil { - return "", err - } - - return currGTID + ":" + set, nil -} - -func getSetFromXtrabackupInfo(gtid string, xtrabackupInfo []byte) (string, error) { - gtids, err := getGTIDFromXtrabackup(xtrabackupInfo) - if err != nil { - return "", errors.Wrap(err, "get gtid from xtrabackup info") - } - for _, v := range strings.Split(gtids, ",") { - valueSplitted := strings.Split(v, ":") - if valueSplitted[0] == gtid { - return valueSplitted[1], nil - } - } - return "", errors.New("can't find current gtid in xtrabackup file") -} - -func getGTIDFromXtrabackup(content []byte) (string, error) { - sep := []byte("GTID of the last") - startIndex := bytes.Index(content, sep) - if startIndex == -1 { - return "", errors.New("no gtid data in backup") - } - newOut := content[startIndex+len(sep):] - e := bytes.Index(newOut, []byte("'\n")) - if e == -1 { - return "", errors.New("can't find gtid data in backup") - } - - se := bytes.Index(newOut, []byte("'")) - set := newOut[se+1 : e] - - return string(set), nil -} - -func getGTIDFromSSTInfo(content []byte) (string, error) { - sep := []byte("galera-gtid=") - startIndex := bytes.Index(content, sep) - if startIndex == -1 { - return "", errors.New("no gtid data in backup") - } - newOut := content[startIndex+len(sep):] - e := bytes.Index(newOut, []byte("\n")) - if e == -1 { - return "", errors.New("can't find gtid data in backup") - } - return string(newOut[:e]), nil -} - func getDecompressedContent(ctx context.Context, infoObj io.Reader, filename string) ([]byte, error) { tmpDir := os.TempDir() @@ -592,3 +483,155 @@ func reverse(list []string) { list[i], list[opp] = list[opp], list[i] } } + +func getStartGTIDSet(ctx context.Context, s storage.Storage) (string, error) { + sstInfoAvailable 
:= true + sstInfo, err := s.ListObjects(ctx, ".sst_info/sst_info") + if err != nil { + return "", errors.Wrapf(err, "list sst_info objects objects") + } + if len(sstInfo) == 0 { + sstInfoAvailable = false + } + + var currGTID string + if sstInfoAvailable { + sort.Strings(sstInfo) + currGTID, err = getGTIDFromSSTInfo(ctx, sstInfo[0], s) + if err != nil { + return "", errors.Wrapf(err, "get gtid from sst_info") + } + } else { + currGTID, err = getGTIDFromXtrabackupBinlogInfo(ctx, s) + if err != nil { + return "", errors.Wrapf(err, "get gtid from xtrabackup_binlog_info") + } + } + + xbInfo, err := s.ListObjects(ctx, "xtrabackup_info") + if err != nil { + return "", errors.Wrapf(err, "list xtrabackup_info objects") + } + if len(xbInfo) == 0 { + return "", errors.New("no xtrabackup_info objects found") + } + sort.Strings(xbInfo) + xbInfoObj, err := s.GetObject(ctx, xbInfo[0]) + if err != nil { + return "", errors.Wrapf(err, "get xtrabackup_info object") + } + xbInfoContent, err := getDecompressedContent(ctx, xbInfoObj, "xtrabackup_info") + if err != nil { + return "", errors.Wrapf(err, "get decompressed content for xtrabackup_info") + } + + set, err := getSetFromXtrabackupInfo(currGTID, xbInfoContent) + if err != nil { + return "", errors.Wrapf(err, "get set from xtrabackup info") + } + return fmt.Sprintf("%s:%s", currGTID, set), nil +} + +func getGTIDFromSSTInfo( + ctx context.Context, + sstInfoFile string, + s storage.Storage) (string, error) { + sstInfoObj, err := s.GetObject(ctx, sstInfoFile) + if err != nil { + return "", errors.Wrapf(err, "get sst_info object") + } + sstContent, err := getDecompressedContent(ctx, sstInfoObj, "sst_info") + if err != nil { + return "", errors.Wrapf(err, "get decompressed content for sst_info") + } + + gtidSet, err := parseGTIDFromSSTInfoContent(sstContent) + if err != nil { + return "", errors.Wrapf(err, "parse gtid from sst_info content") + } + + return strings.Split(gtidSet, ":")[0], nil +} + +func 
parseGTIDFromSSTInfoContent(content []byte) (string, error) { + sep := []byte("galera-gtid=") + startIndex := bytes.Index(content, sep) + if startIndex == -1 { + return "", errors.New("no gtid data in backup") + } + newOut := content[startIndex+len(sep):] + e := bytes.Index(newOut, []byte("\n")) + if e == -1 { + return "", errors.New("can't find gtid data in backup") + } + return string(newOut[:e]), nil +} + +func getGTIDFromXtrabackupBinlogInfo(ctx context.Context, s storage.Storage) (string, error) { + xbBinlogInfo, err := s.ListObjects(ctx, "xtrabackup_binlog_info") + if err != nil { + return "", errors.Wrapf(err, "list xtrabackup_binlog_info objects") + } + if len(xbBinlogInfo) == 0 { + return "", errors.New("no xtrabackup_binlog_info objects found") + } + sort.Strings(xbBinlogInfo) + + xbBinlogInfoObj, err := s.GetObject(ctx, xbBinlogInfo[0]) + if err != nil { + return "", errors.Wrapf(err, "get xtrabackup_binlog_info object") + } + + xbBinlogInfoContent, err := getDecompressedContent(ctx, xbBinlogInfoObj, "xtrabackup_binlog_info") + if err != nil { + return "", errors.Wrapf(err, "get decompressed content for xtrabackup_binlog_info") + } + + gtidSet, err := parseGTIDFromXtrabackupBinlogInfoContent(xbBinlogInfoContent) + if err != nil { + return "", errors.Wrapf(err, "parse gtid from xtrabackup_binlog_info content") + } + + return strings.Split(gtidSet, ":")[0], nil +} + +func parseGTIDFromXtrabackupBinlogInfoContent(content []byte) (string, error) { + contentStr := string(content) + tokens := strings.Split(contentStr, "\t") + if len(tokens) != 3 { + return "", errors.New("incorrect number of tokens in xtrabackup_binlog_info content") + } + return tokens[2], nil +} + +func getSetFromXtrabackupInfo(gtid string, xtrabackupInfo []byte) (string, error) { + gtids, err := getGTIDFromXtrabackup(xtrabackupInfo) + if err != nil { + return "", errors.Wrap(err, "get gtid from xtrabackup info") + } + for _, v := range strings.Split(gtids, ",") { + valueSplitted := 
strings.Split(v, ":") + if valueSplitted[0] == gtid { + return valueSplitted[1], nil + } + } + return "", errors.New("can't find current gtid in xtrabackup file") +} + +func getGTIDFromXtrabackup(content []byte) (string, error) { + sep := []byte("GTID of the last") + startIndex := bytes.Index(content, sep) + if startIndex == -1 { + return "", errors.New("no gtid data in backup") + } + newOut := content[startIndex+len(sep):] + e := bytes.Index(newOut, []byte("'\n")) + if e == -1 { + return "", errors.New("can't find gtid data in backup") + } + + se := bytes.Index(newOut, []byte("'")) + set := newOut[se+1 : e] + + return string(set), nil +} From ef68b3a7e7a8534516d48933dd141356309575b0 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 13:10:55 +0530 Subject: [PATCH 34/77] refactor code Signed-off-by: Mayank Shah --- cmd/pitr/recoverer/recoverer.go | 79 ++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 35 deletions(-) diff --git a/cmd/pitr/recoverer/recoverer.go b/cmd/pitr/recoverer/recoverer.go index d988d2229f..39c037cfe4 100644 --- a/cmd/pitr/recoverer/recoverer.go +++ b/cmd/pitr/recoverer/recoverer.go @@ -365,7 +365,14 @@ func (r *Recoverer) recover(ctx context.Context) (err error) { return nil } +type testContextKey struct{} + func getDecompressedContent(ctx context.Context, infoObj io.Reader, filename string) ([]byte, error) { + // this is done to support unit tests + if val, ok := ctx.Value(testContextKey{}).(bool); ok && val { + return io.ReadAll(infoObj) + } + tmpDir := os.TempDir() cmd := exec.CommandContext(ctx, "xbstream", "-x", "--decompress") @@ -485,51 +492,62 @@ func reverse(list []string) { } func getStartGTIDSet(ctx context.Context, s storage.Storage) (string, error) { - sstInfoAvailable := true - sstInfo, err := s.ListObjects(ctx, ".sst_info/sst_info") + currGTID, err := getGTID(ctx, s) if err != nil { - return "", errors.Wrapf(err, "list sst_info objects objects") + return "", errors.Wrapf(err, "get gtid") } - if 
len(sstInfo) == 0 { - sstInfoAvailable = false + + xbInfoContent, err := getXtrabackupInfo(ctx, s) + if err != nil { + return "", errors.Wrapf(err, "get xtrabackup info") } - var currGTID string - if sstInfoAvailable { - sort.Strings(sstInfo) - currGTID, err = getGTIDFromSSTInfo(ctx, sstInfo[0], s) - if err != nil { - return "", errors.Wrapf(err, "get gtid from sst_info") - } - } else { - currGTID, err = getGTIDFromXtrabackupBinlogInfo(ctx, s) - if err != nil { - return "", errors.Wrapf(err, "get gtid from xtrabackup_binlog_info") - } + set, err := getSetFromXtrabackupInfo(currGTID, xbInfoContent) + if err != nil { + return "", errors.Wrapf(err, "get set from xtrabackup info") } + return fmt.Sprintf("%s:%s", currGTID, set), nil +} +func getXtrabackupInfo(ctx context.Context, s storage.Storage) ([]byte, error) { xbInfo, err := s.ListObjects(ctx, "xtrabackup_info") if err != nil { - return "", errors.Wrapf(err, "list xtrabackup_info objects") + return nil, errors.Wrapf(err, "list xtrabackup_info objects") } if len(xbInfo) == 0 { - return "", errors.New("no xtrabackup_info objects found") + return nil, errors.New("no xtrabackup_info objects found") } sort.Strings(xbInfo) xbInfoObj, err := s.GetObject(ctx, xbInfo[0]) if err != nil { - return "", errors.Wrapf(err, "get xtrabackup_info object") + return nil, errors.Wrapf(err, "get xtrabackup_info object") } xbInfoContent, err := getDecompressedContent(ctx, xbInfoObj, "xtrabackup_info") if err != nil { - return "", errors.Wrapf(err, "get decompressed content for xtrabackup_info") + return nil, errors.Wrapf(err, "get decompressed content for xtrabackup_info") } + return xbInfoContent, nil +} - set, err := getSetFromXtrabackupInfo(currGTID, xbInfoContent) +func getGTID(ctx context.Context, s storage.Storage) (string, error) { + sstInfo, err := s.ListObjects(ctx, ".sst_info/sst_info") if err != nil { - return "", errors.Wrapf(err, "get set from xtrabackup info") + return "", errors.Wrapf(err, "list sst_info objects objects") 
} - return fmt.Sprintf("%s:%s", currGTID, set), nil + if len(sstInfo) > 0 { + sort.Strings(sstInfo) + return getGTIDFromSSTInfo(ctx, sstInfo[0], s) + } + + xbBinlogInfo, err := s.ListObjects(ctx, "xtrabackup_binlog_info") + if err != nil { + return "", errors.Wrapf(err, "list xtrabackup_binlog_info objects") + } + if len(xbBinlogInfo) > 0 { + sort.Strings(xbBinlogInfo) + return getGTIDFromXtrabackupBinlogInfo(ctx, xbBinlogInfo[0], s) + } + return "", errors.New("no sst_info or xtrabackup_binlog_info objects found") } func getGTIDFromSSTInfo( @@ -567,17 +585,8 @@ func parseGTIDFromSSTInfoContent(content []byte) (string, error) { return string(newOut[:e]), nil } -func getGTIDFromXtrabackupBinlogInfo(ctx context.Context, s storage.Storage) (string, error) { - xbBinlogInfo, err := s.ListObjects(ctx, "xtrabackup_binlog_info") - if err != nil { - return "", errors.Wrapf(err, "list xtrabackup_binlog_info objects") - } - if len(xbBinlogInfo) == 0 { - return "", errors.New("no xtrabackup_binlog_info objects found") - } - sort.Strings(xbBinlogInfo) - - xbBinlogInfoObj, err := s.GetObject(ctx, xbBinlogInfo[0]) +func getGTIDFromXtrabackupBinlogInfo(ctx context.Context, xbBinlogInfoFile string, s storage.Storage) (string, error) { + xbBinlogInfoObj, err := s.GetObject(ctx, xbBinlogInfoFile) if err != nil { return "", errors.Wrapf(err, "get xtrabackup_binlog_info object") } From 2340e30dbde422f72cd709092c0fedeba3e115ec Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 13:11:12 +0530 Subject: [PATCH 35/77] add unit test for gtid Signed-off-by: Mayank Shah --- cmd/pitr/recoverer/recoverer_test.go | 74 +++++++++++++ pkg/pxc/backup/storage/gen.go | 3 + pkg/pxc/backup/storage/mock/storage.go | 148 +++++++++++++++++++++++++ 3 files changed, 225 insertions(+) create mode 100644 pkg/pxc/backup/storage/gen.go create mode 100644 pkg/pxc/backup/storage/mock/storage.go diff --git a/cmd/pitr/recoverer/recoverer_test.go b/cmd/pitr/recoverer/recoverer_test.go index 
26460a7f87..72ea5f4efe 100644 --- a/cmd/pitr/recoverer/recoverer_test.go +++ b/cmd/pitr/recoverer/recoverer_test.go @@ -1,7 +1,13 @@ package recoverer import ( + "bytes" + "context" + "io" "testing" + + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/backup/storage/mock" + "github.com/stretchr/testify/assert" ) func TestGetBucketAndPrefix(t *testing.T) { @@ -103,3 +109,71 @@ func TestGetExtendGTIDSet(t *testing.T) { }) } } + +func newStringReader(s string) io.Reader { + return io.NopCloser(bytes.NewReader([]byte(s))) +} + +func TestGetStartGTID(t *testing.T) { + ctx := context.WithValue(context.Background(), testContextKey{}, true) + testCases := []struct { + desc string + mockFn func(*mock.Storage) + expected string + wantErr bool + }{ + { + desc: "using sst_info", + mockFn: func(s *mock.Storage) { + s.On("ListObjects", ctx, ".sst_info/sst_info").Return([]string{".sst_info/sst_info"}, nil) + s.On("GetObject", ctx, ".sst_info/sst_info").Return(newStringReader("[sst]\ngalera-gtid=abc-xyz:1-10\n"), nil) + s.On("ListObjects", ctx, "xtrabackup_info").Return([]string{"xtrabackup_info.00000000000000000000"}, nil) + s.On("GetObject", ctx, "xtrabackup_info.00000000000000000000").Return(newStringReader("binlog_pos = filename 'binlog.000111', position '237', GTID of the last change 'abc-xyz:1-10'\n"), nil) + }, + expected: "abc-xyz:1-10", + }, + { + desc: "using xtrabackup_binlog_info", + mockFn: func(s *mock.Storage) { + s.On("ListObjects", ctx, ".sst_info/sst_info").Return([]string{}, nil) + s.On("ListObjects", ctx, "xtrabackup_binlog_info").Return([]string{"xtrabackup_binlog_info.00000000000000000000"}, nil) + s.On("GetObject", ctx, "xtrabackup_binlog_info.00000000000000000000").Return(newStringReader("binlog.0001\t197\tabc-xyz:1-10\n"), nil) + s.On("ListObjects", ctx, "xtrabackup_info").Return([]string{"xtrabackup_info.00000000000000000000"}, nil) + s.On("GetObject", ctx, "xtrabackup_info.00000000000000000000").Return(newStringReader("binlog_pos = filename 
'binlog.000111', position '237', GTID of the last change 'abc-xyz:1-10'\n"), nil) + }, + expected: "abc-xyz:1-10", + }, + { + desc: "no sst_info or xtrabackup_binlog_info objects found", + mockFn: func(s *mock.Storage) { + s.On("ListObjects", ctx, ".sst_info/sst_info").Return([]string{}, nil) + s.On("ListObjects", ctx, "xtrabackup_binlog_info").Return([]string{}, nil) + }, + expected: "", + wantErr: true, + }, + { + desc: "no gtid in xtrabackup_binlog_info", + mockFn: func(s *mock.Storage) { + s.On("ListObjects", ctx, ".sst_info/sst_info").Return([]string{}, nil) + s.On("ListObjects", ctx, "xtrabackup_binlog_info").Return([]string{"xtrabackup_binlog_info.00000000000000000000"}, nil) + s.On("GetObject", ctx, "xtrabackup_binlog_info.00000000000000000000").Return(newStringReader("binlog.0001\t197\n"), nil) + }, + expected: "", + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + mockStorage := mock.NewStorage(t) + tc.mockFn(mockStorage) + + got, err := getStartGTIDSet(ctx, mockStorage) + if err != nil && !tc.wantErr { + t.Errorf("getStartGTIDSet() error = %v, wantErr %v", err, tc.wantErr) + } + assert.Equal(t, tc.expected, got) + }) + } +} diff --git a/pkg/pxc/backup/storage/gen.go b/pkg/pxc/backup/storage/gen.go new file mode 100644 index 0000000000..48d377775c --- /dev/null +++ b/pkg/pxc/backup/storage/gen.go @@ -0,0 +1,3 @@ +package storage + +//go:generate go tool mockery --name=Storage --case=snake --output=./mock --outpkg=mock diff --git a/pkg/pxc/backup/storage/mock/storage.go b/pkg/pxc/backup/storage/mock/storage.go new file mode 100644 index 0000000000..bd2a48cab3 --- /dev/null +++ b/pkg/pxc/backup/storage/mock/storage.go @@ -0,0 +1,148 @@ +// Code generated by mockery v2.53.5. DO NOT EDIT. 
+ +package mock + +import ( + context "context" + io "io" + + mock "github.com/stretchr/testify/mock" +) + +// Storage is an autogenerated mock type for the Storage type +type Storage struct { + mock.Mock +} + +// DeleteObject provides a mock function with given fields: ctx, objectName +func (_m *Storage) DeleteObject(ctx context.Context, objectName string) error { + ret := _m.Called(ctx, objectName) + + if len(ret) == 0 { + panic("no return value specified for DeleteObject") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, objectName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetObject provides a mock function with given fields: ctx, objectName +func (_m *Storage) GetObject(ctx context.Context, objectName string) (io.ReadCloser, error) { + ret := _m.Called(ctx, objectName) + + if len(ret) == 0 { + panic("no return value specified for GetObject") + } + + var r0 io.ReadCloser + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (io.ReadCloser, error)); ok { + return rf(ctx, objectName) + } + if rf, ok := ret.Get(0).(func(context.Context, string) io.ReadCloser); ok { + r0 = rf(ctx, objectName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, objectName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPrefix provides a mock function with no fields +func (_m *Storage) GetPrefix() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetPrefix") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// ListObjects provides a mock function with given fields: ctx, prefix +func (_m *Storage) ListObjects(ctx context.Context, prefix string) ([]string, error) { + ret := _m.Called(ctx, prefix) + + if len(ret) == 0 { + panic("no 
return value specified for ListObjects") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]string, error)); ok { + return rf(ctx, prefix) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []string); ok { + r0 = rf(ctx, prefix) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, prefix) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PutObject provides a mock function with given fields: ctx, name, data, size +func (_m *Storage) PutObject(ctx context.Context, name string, data io.Reader, size int64) error { + ret := _m.Called(ctx, name, data, size) + + if len(ret) == 0 { + panic("no return value specified for PutObject") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, io.Reader, int64) error); ok { + r0 = rf(ctx, name, data, size) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetPrefix provides a mock function with given fields: prefix +func (_m *Storage) SetPrefix(prefix string) { + _m.Called(prefix) +} + +// NewStorage creates a new instance of Storage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewStorage(t interface { + mock.TestingT + Cleanup(func()) +}) *Storage { + mock := &Storage{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} From b3c3482bd66c8fe38fdcb2672603a870eceb1da9 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 13:11:18 +0530 Subject: [PATCH 36/77] update go.mod Signed-off-by: Mayank Shah --- go.mod | 22 ++++++++++++++++++++++ go.sum | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) diff --git a/go.mod b/go.mod index 0d44523194..dbaf8bbf75 100644 --- a/go.mod +++ b/go.mod @@ -51,6 +51,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chigopher/pathlib v0.19.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/go-restful/v3 v3.13.0 // indirect @@ -82,26 +83,45 @@ require ( github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/iancoleman/strcase v0.3.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jinzhu/copier v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/cpuid/v2 v2.2.11 // indirect github.com/klauspost/crc32 v1.3.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/minio/crc64nvme v1.1.0 // indirect github.com/minio/md5-simd v1.1.2 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/spdystream v0.5.0 // indirect 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/oklog/ulid v1.3.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/philhofer/fwd v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/procfs v0.17.0 // indirect github.com/rs/xid v1.6.0 // indirect + github.com/rs/zerolog v1.33.0 // indirect + github.com/sagikazarmark/locafero v0.7.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.12.0 // indirect + github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/cobra v1.10.1 // indirect github.com/spf13/pflag v1.0.10 // indirect + github.com/spf13/viper v1.20.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/tinylib/msgp v1.3.0 // indirect + github.com/vektra/mockery/v2 v2.53.5 // indirect github.com/x448/float16 v0.8.4 // indirect go.mongodb.org/mongo-driver v1.17.6 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect @@ -168,3 +188,5 @@ exclude ( go.mongodb.org/mongo-driver v1.5.0-beta1 go.mongodb.org/mongo-driver v1.5.0 ) + +tool github.com/vektra/mockery/v2 diff --git a/go.sum b/go.sum index 52f5053227..b1b55be455 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,10 @@ github.com/cert-manager/cert-manager v1.19.1 h1:Txh8L/nLWTDcb7ZnXuXbTe15BxQnLbLi github.com/cert-manager/cert-manager v1.19.1/go.mod h1:8Ps1VXCQRGKT8zNvLQlhDK1gFKWmYKdIPQFmvTS2JeA= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chigopher/pathlib v0.19.1 h1:RoLlUJc0CqBGwq239cilyhxPNLXTK+HXoASGyGznx5A= +github.com/chigopher/pathlib v0.19.1/go.mod h1:tzC1dZLW8o33UQpWkNkhvPwL5n4yyFRFm/jL1YGWFvY= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -42,6 +46,8 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/flosch/pongo2/v6 v6.0.0 h1:lsGru8IAzHgIAw6H2m4PCyleO58I40ow6apih0WprMU= github.com/flosch/pongo2/v6 v6.0.0/go.mod h1:CuDpFm47R0uGGE7z13/tTlt1Y6zdxvr2RLT5LJhsHEU= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= @@ -117,6 +123,7 @@ github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9L github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf 
v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= @@ -140,6 +147,14 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= +github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -161,6 +176,13 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= +github.com/mattn/go-colorable 
v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= github.com/minio/crc64nvme v1.1.0 h1:e/tAguZ+4cw32D+IO/8GSf5UVr9y+3eJcxZI2WOO/7Q= @@ -169,6 +191,10 @@ github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.97 h1:lqhREPyfgHTB/ciX8k2r8k0D93WaFqxbJX36UZq5occ= github.com/minio/minio-go/v7 v7.0.97/go.mod h1:re5VXuo0pwEtoNLsNuSr0RrLfT/MBtohwdaSmPPSRSk= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -187,6 +213,8 @@ github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns 
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM= github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -208,16 +236,35 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= +github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.12.0 
h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY= +github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -228,6 +275,8 @@ github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod 
h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/vektra/mockery/v2 v2.53.5 h1:iktAY68pNiMvLoHxKqlSNSv/1py0QF/17UGrrAMYDI8= +github.com/vektra/mockery/v2 v2.53.5/go.mod h1:hIFFb3CvzPdDJJiU7J4zLRblUMv7OuezWsHPmswriwo= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -281,6 +330,9 @@ golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= From 4007afd2e4414964b824eda2b97a128da4fe560b Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 16:34:43 +0530 Subject: [PATCH 37/77] fix recoverer Signed-off-by: Mayank Shah --- cmd/pitr/recoverer/recoverer.go | 1 - cmd/pitr/recoverer/recoverer_test.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/pitr/recoverer/recoverer.go b/cmd/pitr/recoverer/recoverer.go index 39c037cfe4..fb20e0dbea 
100644 --- a/cmd/pitr/recoverer/recoverer.go +++ b/cmd/pitr/recoverer/recoverer.go @@ -78,7 +78,6 @@ func (c Config) storages(ctx context.Context) (storage.Storage, storage.Storage, if err != nil { return nil, nil, errors.Wrap(err, "get bucket and prefix") } - prefix = prefix[:len(prefix)-1] defaultStorage, err = storage.NewS3(ctx, c.BackupStorageS3.Endpoint, c.BackupStorageS3.AccessKeyID, c.BackupStorageS3.AccessKey, bucket, prefix, c.BackupStorageS3.Region, c.VerifyTLS, caBundle) if err != nil { return nil, nil, errors.Wrap(err, "new storage manager") diff --git a/cmd/pitr/recoverer/recoverer_test.go b/cmd/pitr/recoverer/recoverer_test.go index 72ea5f4efe..a16c07bbdc 100644 --- a/cmd/pitr/recoverer/recoverer_test.go +++ b/cmd/pitr/recoverer/recoverer_test.go @@ -170,7 +170,7 @@ func TestGetStartGTID(t *testing.T) { tc.mockFn(mockStorage) got, err := getStartGTIDSet(ctx, mockStorage) - if err != nil && !tc.wantErr { + if (err != nil) != tc.wantErr { t.Errorf("getStartGTIDSet() error = %v, wantErr %v", err, tc.wantErr) } assert.Equal(t, tc.expected, got) From b23d3709c25e7b98ab8e3e916772c2c9510d9a4f Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 16:40:29 +0530 Subject: [PATCH 38/77] fix CI Signed-off-by: Mayank Shah --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 8c13d773fd..969f6d5859 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ all: build help: ## Display this help. @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) -generate: controller-gen ## Generate CRDs and RBAC files +generate: controller-gen protoc ## Generate CRDs and RBAC files go generate ./... $(CONTROLLER_GEN) crd:maxDescLen=0,allowDangerousTypes=true,generateEmbeddedObjectMeta=true rbac:roleName=$(NAME) webhook paths="./..." 
output:crd:artifacts:config=config/crd/bases ## Generate WebhookConfiguration, Role and CustomResourceDefinition objects. $(CONTROLLER_GEN) object paths="./..." ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. From bb875cc9b53fa12cf07b9b7a9423c8d7271703c4 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 16:43:30 +0530 Subject: [PATCH 39/77] remove --galera-info Signed-off-by: Mayank Shah --- pkg/xtrabackup/api/cmd.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/xtrabackup/api/cmd.go b/pkg/xtrabackup/api/cmd.go index 32e0cf522d..9bf836f4bd 100644 --- a/pkg/xtrabackup/api/cmd.go +++ b/pkg/xtrabackup/api/cmd.go @@ -102,7 +102,6 @@ func (cfg *BackupConfig) xtrabackupArgs(user, pass string) []string { "--stream=xbstream", "--safe-slave-backup", "--slave-info", - "--galera-info", "--target-dir=/backup/", "--socket=/tmp/mysql.sock", fmt.Sprintf("--user=%s", user), From dae2331e117d2bc43694a4a3b44426a7abe74d28 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 16:47:39 +0530 Subject: [PATCH 40/77] fix gen.go Signed-off-by: Mayank Shah --- pkg/xtrabackup/api/gen.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/xtrabackup/api/gen.go b/pkg/xtrabackup/api/gen.go index 7a6cf9b254..a002ab762d 100644 --- a/pkg/xtrabackup/api/gen.go +++ b/pkg/xtrabackup/api/gen.go @@ -1,3 +1,4 @@ package api -//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative app.proto +// Run `make protoc` to install protoc and protoc-gen-go +//go:generate ../../../bin/protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative app.proto From 0b1aadf1d624c9c780bad036374ae23ce9395460 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 16:57:53 +0530 Subject: [PATCH 41/77] fix make protoc Signed-off-by: Mayank Shah --- Makefile | 3 ++- pkg/xtrabackup/api/gen.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 969f6d5859..0815c498ff 100644 --- a/Makefile +++ b/Makefile @@ -110,6 +110,7 @@ swagger: ## Download swagger locally if necessary. PROTOC_VERSION = 33.1 PROTOC = $(shell pwd)/bin/protoc +PROTOC_GEN_GO = $(shell pwd)/bin/protoc-gen-go protoc: ## Download protoc locally if necessary. os='linux'; \ arch='x86_64'; \ @@ -124,7 +125,7 @@ protoc: ## Download protoc locally if necessary. rm protoc-${PROTOC_VERSION}-$${os}-$${arch}.zip; \ mv -f protoc-${PROTOC_VERSION}-$${os}-$${arch}/bin/protoc $(PROTOC); \ rm -rf protoc-${PROTOC_VERSION}-$${os}-$${arch}; \ - $(call go install google.golang.org/protobuf/cmd/protoc-gen-go@latest) + $(call go-get-tool,$(PROTOC_GEN_GO),google.golang.org/protobuf/cmd/protoc-gen-go@latest) # Prepare release include e2e-tests/release_versions diff --git a/pkg/xtrabackup/api/gen.go b/pkg/xtrabackup/api/gen.go index a002ab762d..ffd71bd078 100644 --- a/pkg/xtrabackup/api/gen.go +++ b/pkg/xtrabackup/api/gen.go @@ -1,4 +1,4 @@ package api // Run `make protoc` to install protoc and protoc-gen-go -//go:generate ../../../bin/protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative app.proto +//go:generate ../../../bin/protoc --plugin ../../../bin/protoc-gen-go --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative app.proto From c64cf71d1a8eb996c71aaa34ee0ddea1d5de6f65 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 17:36:29 +0530 Subject: [PATCH 42/77] fix make protoc Signed-off-by: Mayank Shah --- Makefile | 4 +++- pkg/xtrabackup/api/app_grpc.pb.go | 12 ++++++------ pkg/xtrabackup/api/gen.go | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 0815c498ff..76f1a43574 100644 --- a/Makefile +++ b/Makefile @@ -111,6 +111,7 @@ swagger: ## Download swagger locally if necessary. PROTOC_VERSION = 33.1 PROTOC = $(shell pwd)/bin/protoc PROTOC_GEN_GO = $(shell pwd)/bin/protoc-gen-go +PROTOC_GEN_GO_GRPC = $(shell pwd)/bin/protoc-gen-go-grpc protoc: ## Download protoc locally if necessary. os='linux'; \ arch='x86_64'; \ @@ -125,7 +126,8 @@ protoc: ## Download protoc locally if necessary. rm protoc-${PROTOC_VERSION}-$${os}-$${arch}.zip; \ mv -f protoc-${PROTOC_VERSION}-$${os}-$${arch}/bin/protoc $(PROTOC); \ rm -rf protoc-${PROTOC_VERSION}-$${os}-$${arch}; \ - $(call go-get-tool,$(PROTOC_GEN_GO),google.golang.org/protobuf/cmd/protoc-gen-go@latest) + $(call go-get-tool,$(PROTOC_GEN_GO),google.golang.org/protobuf/cmd/protoc-gen-go@latest); \ + $(call go-get-tool,$(PROTOC_GEN_GO_GRPC),google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest) # Prepare release include e2e-tests/release_versions diff --git a/pkg/xtrabackup/api/app_grpc.pb.go b/pkg/xtrabackup/api/app_grpc.pb.go index 8f17b9917d..d4ea0a5703 100644 --- a/pkg/xtrabackup/api/app_grpc.pb.go +++ b/pkg/xtrabackup/api/app_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.5.1 +// - protoc-gen-go-grpc v1.6.0 // - protoc v6.33.1 // source: app.proto @@ -120,16 +120,16 @@ type XtrabackupServiceServer interface { type UnimplementedXtrabackupServiceServer struct{} func (UnimplementedXtrabackupServiceServer) GetCurrentBackupConfig(context.Context, *GetCurrentBackupConfigRequest) (*BackupConfig, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetCurrentBackupConfig not implemented") + return nil, status.Error(codes.Unimplemented, "method GetCurrentBackupConfig not implemented") } func (UnimplementedXtrabackupServiceServer) CreateBackup(*CreateBackupRequest, grpc.ServerStreamingServer[CreateBackupResponse]) error { - return status.Errorf(codes.Unimplemented, "method CreateBackup not implemented") + return status.Error(codes.Unimplemented, "method CreateBackup not implemented") } func (UnimplementedXtrabackupServiceServer) DeleteBackup(context.Context, *DeleteBackupRequest) (*DeleteBackupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteBackup not implemented") + return nil, status.Error(codes.Unimplemented, "method DeleteBackup not implemented") } func (UnimplementedXtrabackupServiceServer) GetLogs(*GetLogsRequest, grpc.ServerStreamingServer[LogChunk]) error { - return status.Errorf(codes.Unimplemented, "method GetLogs not implemented") + return status.Error(codes.Unimplemented, "method GetLogs not implemented") } func (UnimplementedXtrabackupServiceServer) mustEmbedUnimplementedXtrabackupServiceServer() {} func (UnimplementedXtrabackupServiceServer) testEmbeddedByValue() {} @@ -142,7 +142,7 @@ type UnsafeXtrabackupServiceServer interface { } func RegisterXtrabackupServiceServer(s grpc.ServiceRegistrar, srv XtrabackupServiceServer) { - // If the following call pancis, it indicates UnimplementedXtrabackupServiceServer was + // If the following call panics, it indicates UnimplementedXtrabackupServiceServer was // embedded by pointer and is nil. 
This will cause panics if an // unimplemented method is ever invoked, so we test this at initialization // time to prevent it from happening at runtime later due to I/O. diff --git a/pkg/xtrabackup/api/gen.go b/pkg/xtrabackup/api/gen.go index ffd71bd078..c077c9356c 100644 --- a/pkg/xtrabackup/api/gen.go +++ b/pkg/xtrabackup/api/gen.go @@ -1,4 +1,4 @@ package api // Run `make protoc` to install protoc and protoc-gen-go -//go:generate ../../../bin/protoc --plugin ../../../bin/protoc-gen-go --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative app.proto +//go:generate ../../../bin/protoc --plugin=protoc-gen-go=../../../bin/protoc-gen-go --plugin=protoc-gen-go-grpc=../../../bin/protoc-gen-go-grpc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative app.proto From 0b103d9240aaa77b847cf01af6ad95fcc5b72939 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 21:00:56 +0530 Subject: [PATCH 43/77] add pitr-xtrabackup to run-pr Signed-off-by: Mayank Shah --- e2e-tests/pitr-xtrabackup/run | 6 ++++++ e2e-tests/run-pr.csv | 1 + 2 files changed, 7 insertions(+) create mode 100755 e2e-tests/pitr-xtrabackup/run diff --git a/e2e-tests/pitr-xtrabackup/run b/e2e-tests/pitr-xtrabackup/run new file mode 100755 index 0000000000..d12b58d06f --- /dev/null +++ b/e2e-tests/pitr-xtrabackup/run @@ -0,0 +1,6 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath $(dirname $0)) +PXCO_FEATURE_GATES="BackupXtrabackup=true" ${test_dir}/../pitr/run \ No newline at end of file diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index 59d3d08a83..0c3781bc05 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -19,6 +19,7 @@ monitoring-pmm3,8.0 one-pod,5.7 one-pod,8.0 pitr,8.0 +pitr-xtrabackup,8.0 pitr-gap-errors,8.0 proxy-protocol,8.0 proxy-switch,8.0 From 472935a96275e16731bd74f8c17b374e418e037d Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 21:06:52 +0530 Subject: [PATCH 
44/77] update cw-operator.yaml Signed-off-by: Mayank Shah --- deploy/cw-operator.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy/cw-operator.yaml b/deploy/cw-operator.yaml index 79ae95a2fc..37748366e8 100644 --- a/deploy/cw-operator.yaml +++ b/deploy/cw-operator.yaml @@ -46,6 +46,8 @@ spec: value: "10" - name: MAX_CONCURRENT_RECONCILES value: "1" + - name: PXCO_FEATURE_GATES + value: "" image: perconalab/percona-xtradb-cluster-operator:main imagePullPolicy: Always resources: From 06395ea0d0298cf63d33140e0db1977ba2b51ee4 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Tue, 2 Dec 2025 21:16:37 +0530 Subject: [PATCH 45/77] Update e2e-tests/pitr-xtrabackup/run Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- e2e-tests/pitr-xtrabackup/run | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e-tests/pitr-xtrabackup/run b/e2e-tests/pitr-xtrabackup/run index d12b58d06f..c3cf46c736 100755 --- a/e2e-tests/pitr-xtrabackup/run +++ b/e2e-tests/pitr-xtrabackup/run @@ -3,4 +3,4 @@ set -o errexit test_dir=$(realpath $(dirname $0)) -PXCO_FEATURE_GATES="BackupXtrabackup=true" ${test_dir}/../pitr/run \ No newline at end of file +PXCO_FEATURE_GATES="BackupXtrabackup=true" ${test_dir}/../pitr/run From b0836b984c0162fa6929f95a3c30bbd1f3ecab36 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Wed, 3 Dec 2025 14:47:40 +0530 Subject: [PATCH 46/77] support encryption Signed-off-by: Mayank Shah --- build/backup/recovery-cloud.sh | 33 +++++++++++++++++++------------ pkg/pxc/app/statefulset/node.go | 13 +++++++++++- pkg/pxc/backup/job.go | 4 ++-- pkg/pxc/backup/restore.go | 12 +++++------ pkg/pxc/backup/restore_test.go | 5 +++-- pkg/xtrabackup/api/cmd.go | 16 ++++++++++++--- pkg/xtrabackup/api/cmd_test.go | 2 +- pkg/xtrabackup/server/app.go | 35 +++++++++++++++++++++++++-------- pkg/xtrabackup/server/create.go | 2 +- 9 files changed, 85 insertions(+), 37 deletions(-) diff --git a/build/backup/recovery-cloud.sh 
b/build/backup/recovery-cloud.sh index 47a7d852a4..0889ece174 100644 --- a/build/backup/recovery-cloud.sh +++ b/build/backup/recovery-cloud.sh @@ -55,19 +55,25 @@ fi xbcloud get --parallel="$(grep -c processor /proc/cpuinfo)" ${XBCLOUD_ARGS} "$(destination)" | xbstream -x -C "${tmp}" --parallel="$(grep -c processor /proc/cpuinfo)" $XBSTREAM_EXTRA_ARGS set +o xtrace -transition_key=$(vault_get "$tmp/sst_info") -if [[ -n $transition_key && $transition_key != null ]]; then - MYSQL_VERSION=$(parse_ini 'mysql-version' "$tmp/sst_info") - if ! check_for_version "$MYSQL_VERSION" '5.7.29' \ - && [[ $MYSQL_VERSION != '5.7.28-31-57.2' ]]; then - - # shellcheck disable=SC2016 - transition_key='$transition_key' + +if [[ -f "${tmp}/sst_info" ]]; then + transition_key=$(vault_get "$tmp/sst_info") + if [[ -n $transition_key && $transition_key != null ]]; then + MYSQL_VERSION=$(parse_ini 'mysql-version' "$tmp/sst_info") + if ! check_for_version "$MYSQL_VERSION" '5.7.29' \ + && [[ $MYSQL_VERSION != '5.7.28-31-57.2' ]]; then + + # shellcheck disable=SC2016 + transition_key='$transition_key' + fi + + transition_option="--transition-key=$transition_key" + echo transition-key exists fi +fi - transition_option="--transition-key=$transition_key" +if [ -f "${keyring_vault}" ]; then master_key_options="--generate-new-master-key" - echo transition-key exists fi # Extract --defaults-file from XB_EXTRA_ARGS if present and place it as the first argument @@ -95,21 +101,22 @@ if ! 
check_for_version "$XTRABACKUP_VERSION" '8.0.0'; then fi echo "+ xtrabackup $DEFAULTS_FILE ${XB_USE_MEMORY+--use-memory=$XB_USE_MEMORY} --prepare $REMAINING_XB_ARGS --binlog-info=ON --rollback-prepared-trx \ +--keyring-vault-config=$keyring_vault \ --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin --target-dir=$tmp" # shellcheck disable=SC2086 xtrabackup $DEFAULTS_FILE ${XB_USE_MEMORY+--use-memory=$XB_USE_MEMORY} --prepare $REMAINING_XB_ARGS $transition_option --rollback-prepared-trx \ + --keyring-vault-config=$keyring_vault \ --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin "--target-dir=$tmp" echo "+ xtrabackup $DEFAULTS_FILE --defaults-group=mysqld --datadir=/datadir --move-back $REMAINING_XB_ARGS --binlog-info=ON \ --force-non-empty-directories $master_key_options \ ---keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf --early-plugin-load=keyring_vault.so \ +--keyring-vault-config=$keyring_vault --early-plugin-load=keyring_vault.so \ --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin --target-dir=$tmp" # shellcheck disable=SC2086 xtrabackup $DEFAULTS_FILE --defaults-group=mysqld --datadir=/datadir --move-back $REMAINING_XB_ARGS \ --force-non-empty-directories $transition_option $master_key_options \ - --keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf --early-plugin-load=keyring_vault.so \ - --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin "--target-dir=$tmp" + --keyring-vault-config=$keyring_vault --early-plugin-load=keyring_vault.so "--target-dir=$tmp" rm -rf "$tmp" diff --git a/pkg/pxc/app/statefulset/node.go b/pkg/pxc/app/statefulset/node.go index 491a7bf842..daac79bb14 100644 --- a/pkg/pxc/app/statefulset/node.go +++ b/pkg/pxc/app/statefulset/node.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "hash/fnv" + "path/filepath" "regexp" "strings" @@ -26,6 +27,8 @@ import ( const ( VaultSecretVolumeName = "vault-keyring-secret" + VaultSecretMountPath = "/etc/mysql/vault-keyring-secret" + VaultKeyringConfig = 
"keyring_vault.conf" ) type Node struct { @@ -146,7 +149,7 @@ func (c *Node) AppContainer(ctx context.Context, cl client.Client, spec *api.Pod }, { Name: VaultSecretVolumeName, - MountPath: "/etc/mysql/vault-keyring-secret", + MountPath: VaultSecretMountPath, }, }, Env: []corev1.EnvVar{ @@ -454,6 +457,10 @@ func (c *Node) XtrabackupContainer(ctx context.Context, cr *api.PerconaXtraDBClu SecretKeyRef: app.SecretKeySelector(cr.Spec.SecretsName, users.Xtrabackup), }, }, + { + Name: "VAULT_KEYRING_PATH", + Value: filepath.Join(VaultSecretMountPath, VaultKeyringConfig), + }, }, Command: []string{"/var/lib/mysql/xtrabackup-server-sidecar"}, Ports: []corev1.ContainerPort{ @@ -475,6 +482,10 @@ func (c *Node) XtrabackupContainer(ctx context.Context, cr *api.PerconaXtraDBClu Name: "tmp", MountPath: "/tmp", }, + { + Name: "vault-keyring-secret", + MountPath: VaultSecretMountPath, + }, }, // TODO: make this configurable from CR Resources: corev1.ResourceRequirements{ diff --git a/pkg/pxc/backup/job.go b/pkg/pxc/backup/job.go index bff09904f0..5ee88628cb 100644 --- a/pkg/pxc/backup/job.go +++ b/pkg/pxc/backup/job.go @@ -193,8 +193,8 @@ func appendStorageSecret(job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBacku MountPath: "/etc/mysql/ssl-internal", }, corev1.VolumeMount{ - Name: "vault-keyring-secret", - MountPath: "/etc/mysql/vault-keyring-secret", + Name: statefulset.VaultSecretVolumeName, + MountPath: statefulset.VaultSecretMountPath, }, ) job.Template.Spec.Volumes = append( diff --git a/pkg/pxc/backup/restore.go b/pkg/pxc/backup/restore.go index c574a7d484..f0f67c42db 100644 --- a/pkg/pxc/backup/restore.go +++ b/pkg/pxc/backup/restore.go @@ -115,8 +115,8 @@ func PVCRestorePod(cr *api.PerconaXtraDBClusterRestore, bcpStorageName, pvcName MountPath: "/etc/mysql/ssl-internal", }, { - Name: "vault-keyring-secret", - MountPath: "/etc/mysql/vault-keyring-secret", + Name: statefulset.VaultSecretVolumeName, + MountPath: statefulset.VaultSecretMountPath, }, } @@ -231,8 +231,8 @@ 
func RestoreJob(cr *api.PerconaXtraDBClusterRestore, bcp *api.PerconaXtraDBClust MountPath: "/datadir", }, { - Name: "vault-keyring-secret", - MountPath: "/etc/mysql/vault-keyring-secret", + Name: statefulset.VaultSecretVolumeName, + MountPath: statefulset.VaultSecretMountPath, }, } volumes := []corev1.Volume{ @@ -765,8 +765,8 @@ func PrepareJob( MountPath: "/etc/mysql/mysql-users-secret", }, { - Name: "vault-keyring-secret", - MountPath: "/etc/mysql/vault-keyring-secret", + Name: statefulset.VaultSecretVolumeName, + MountPath: statefulset.VaultSecretMountPath, }, { Name: "ssl", diff --git a/pkg/pxc/backup/restore_test.go b/pkg/pxc/backup/restore_test.go index 20dfd2da73..37d7ba0b4e 100644 --- a/pkg/pxc/backup/restore_test.go +++ b/pkg/pxc/backup/restore_test.go @@ -11,6 +11,7 @@ import ( "k8s.io/utils/ptr" pxcv1 "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app/statefulset" "github.com/percona/percona-xtradb-cluster-operator/pkg/test" "github.com/percona/percona-xtradb-cluster-operator/pkg/version" ) @@ -171,8 +172,8 @@ func TestPrepareJob(t *testing.T) { MountPath: "/etc/mysql/mysql-users-secret", }, { - Name: "vault-keyring-secret", - MountPath: "/etc/mysql/vault-keyring-secret", + Name: statefulset.VaultSecretVolumeName, + MountPath: statefulset.VaultSecretMountPath, }, { Name: "ssl", diff --git a/pkg/xtrabackup/api/cmd.go b/pkg/xtrabackup/api/cmd.go index 9bf836f4bd..e0baaf5f92 100644 --- a/pkg/xtrabackup/api/cmd.go +++ b/pkg/xtrabackup/api/cmd.go @@ -21,8 +21,12 @@ const ( ) // NewXtrabackupCmd creates a new xtrabackup command -func (cfg *BackupConfig) NewXtrabackupCmd(ctx context.Context, user, password string) *exec.Cmd { - cmd := exec.CommandContext(ctx, xtrabackupCmd, cfg.xtrabackupArgs(user, password)...) 
+func (cfg *BackupConfig) NewXtrabackupCmd( + ctx context.Context, + user, + password string, + withTablespaceEncryption bool) *exec.Cmd { + cmd := exec.CommandContext(ctx, xtrabackupCmd, cfg.xtrabackupArgs(user, password, withTablespaceEncryption)...) cmd.Env = cfg.envs() return cmd } @@ -96,7 +100,7 @@ func (cfg *BackupConfig) xbcloudArgs(action XBCloudAction) []string { return args } -func (cfg *BackupConfig) xtrabackupArgs(user, pass string) []string { +func (cfg *BackupConfig) xtrabackupArgs(user, pass string, withTablespaceEncryption bool) []string { args := []string{ "--backup", "--stream=xbstream", @@ -107,6 +111,12 @@ func (cfg *BackupConfig) xtrabackupArgs(user, pass string) []string { fmt.Sprintf("--user=%s", user), fmt.Sprintf("--password=%s", pass), } + if withTablespaceEncryption { + args = append(args, + "--generate-transition-key", + "--keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf", + ) + } if cfg != nil && cfg.ContainerOptions != nil && cfg.ContainerOptions.Args != nil { args = append(args, cfg.ContainerOptions.Args.Xtrabackup...) 
} diff --git a/pkg/xtrabackup/api/cmd_test.go b/pkg/xtrabackup/api/cmd_test.go index 9db3f1cda4..0176d536a3 100644 --- a/pkg/xtrabackup/api/cmd_test.go +++ b/pkg/xtrabackup/api/cmd_test.go @@ -47,7 +47,7 @@ func TestNewXtrabackupCmd(t *testing.T) { for i, tc := range testCases { t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) { - cmd := tc.backupConfig.NewXtrabackupCmd(context.Background(), "root", "password123") + cmd := tc.backupConfig.NewXtrabackupCmd(context.Background(), "root", "password123", false) assert.Equal(t, tc.expectedArgs, cmd.Args) }) } diff --git a/pkg/xtrabackup/server/app.go b/pkg/xtrabackup/server/app.go index 93823fd7b0..9b7d4c7adc 100644 --- a/pkg/xtrabackup/server/app.go +++ b/pkg/xtrabackup/server/app.go @@ -6,8 +6,11 @@ import ( "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/backup/storage" "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" + "github.com/pkg/errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + log "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) // DefaultPort is the default port for the app server. 
@@ -16,10 +19,11 @@ const DefaultPort = 6450 type appServer struct { api.UnimplementedXtrabackupServiceServer - backupStatus backupStatus - namespace string - newStorageFunc storage.NewClientFunc - deleteBackupFunc func(ctx context.Context, cfg *api.BackupConfig, backupName string) error + backupStatus backupStatus + namespace string + newStorageFunc storage.NewClientFunc + deleteBackupFunc func(ctx context.Context, cfg *api.BackupConfig, backupName string) error + tableSpaceEncryptionEnabled bool } var _ api.XtrabackupServiceServer = (*appServer)(nil) @@ -30,14 +34,25 @@ func New() (api.XtrabackupServiceServer, error) { if !ok || namespace == "" { return nil, status.Errorf(codes.InvalidArgument, "POD_NAMESPACE environment variable is not set") } + tableSpaceEncryptionEnabled := vaultKeyringFileExists() return &appServer{ - namespace: namespace, - backupStatus: backupStatus{}, - newStorageFunc: storage.NewClient, - deleteBackupFunc: deleteBackup, + namespace: namespace, + backupStatus: backupStatus{}, + newStorageFunc: storage.NewClient, + deleteBackupFunc: deleteBackup, + tableSpaceEncryptionEnabled: tableSpaceEncryptionEnabled, }, nil } +func vaultKeyringFileExists() bool { + vaultKeyringPath := os.Getenv("VAULT_KEYRING_PATH") + _, err := os.Stat(vaultKeyringPath) + if err != nil && !os.IsNotExist(err) { + panic(errors.Wrap(err, "failed to stat vault keyring file")) + } + return err == nil +} + func (s *appServer) GetCurrentBackupConfig(ctx context.Context, req *api.GetCurrentBackupConfigRequest) (*api.BackupConfig, error) { // TODO return nil, status.Errorf(codes.Unimplemented, "method GetCurrentBackupConfig not implemented") @@ -47,3 +62,7 @@ func (s *appServer) DeleteBackup(ctx context.Context, req *api.DeleteBackupReque // TODO return nil, status.Errorf(codes.Unimplemented, "method DeleteBackup not implemented") } + +func init() { + log.SetLogger(zap.New()) +} diff --git a/pkg/xtrabackup/server/create.go b/pkg/xtrabackup/server/create.go index 
5c1656c4fa..013add2d11 100644 --- a/pkg/xtrabackup/server/create.go +++ b/pkg/xtrabackup/server/create.go @@ -59,7 +59,7 @@ func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.Xtraba g, gCtx := errgroup.WithContext(ctx) - xtrabackup := req.BackupConfig.NewXtrabackupCmd(gCtx, backupUser, backupPass) + xtrabackup := req.BackupConfig.NewXtrabackupCmd(gCtx, backupUser, backupPass, s.tableSpaceEncryptionEnabled) xbOut, err := xtrabackup.StdoutPipe() if err != nil { log.Error(err, "xtrabackup stdout pipe failed") From 19260a3e041c52d7a58230ba93df067f6e2d1599 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Wed, 3 Dec 2025 14:51:11 +0530 Subject: [PATCH 47/77] fix test Signed-off-by: Mayank Shah --- pkg/xtrabackup/job_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/xtrabackup/job_test.go b/pkg/xtrabackup/job_test.go index 6bf50e7876..14a9866dbf 100644 --- a/pkg/xtrabackup/job_test.go +++ b/pkg/xtrabackup/job_test.go @@ -32,7 +32,7 @@ func TestJobSpec(t *testing.T) { cluster := &pxcv1.PerconaXtraDBCluster{ Spec: pxcv1.PerconaXtraDBClusterSpec{ - Backup: &pxcv1.PXCScheduledBackup{ + Backup: &pxcv1.BackupSpec{ Image: backupImage, ImagePullPolicy: corev1.PullIfNotPresent, ImagePullSecrets: []corev1.LocalObjectReference{ From 4adee1c93e7447dccf3d90912efa580aa9712ec3 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Wed, 3 Dec 2025 15:50:41 +0530 Subject: [PATCH 48/77] update demand-backup-encrypted-tls to work with xtrabackup Signed-off-by: Mayank Shah --- .../demand-backup-encrypted-with-tls/run | 76 ++++++++++--------- 1 file changed, 41 insertions(+), 35 deletions(-) diff --git a/e2e-tests/demand-backup-encrypted-with-tls/run b/e2e-tests/demand-backup-encrypted-with-tls/run index 87bf0a9d65..3f6379a5a3 100755 --- a/e2e-tests/demand-backup-encrypted-with-tls/run +++ b/e2e-tests/demand-backup-encrypted-with-tls/run @@ -26,12 +26,15 @@ main() { keyring_plugin_must_be_in_use "$cluster" table_must_be_encrypted "$cluster" "myApp" 
- run_backup "$cluster" "on-demand-backup-pvc" - run_recovery_check "$cluster" "on-demand-backup-pvc" - kubectl_bin delete -f "$test_dir/conf/restore-on-demand-backup-pvc.yaml" - check_pvc_md5 "on-demand-backup-pvc" - table_must_be_encrypted "$cluster" "myApp" - keyring_plugin_must_be_in_use "$cluster" + # todo: add support for pvc + if [[ ! "$PXCO_FEATURE_GATES" == *"BackupXtrabackup=true"* ]]; then + run_backup "$cluster" "on-demand-backup-pvc" + run_recovery_check "$cluster" "on-demand-backup-pvc" + kubectl_bin delete -f "$test_dir/conf/restore-on-demand-backup-pvc.yaml" + check_pvc_md5 "on-demand-backup-pvc" + table_must_be_encrypted "$cluster" "myApp" + keyring_plugin_must_be_in_use "$cluster" + fi if [ -z "$SKIP_REMOTE_BACKUPS" ]; then run_backup "$cluster" "on-demand-backup-aws-s3" @@ -41,37 +44,40 @@ main() { keyring_plugin_must_be_in_use "$cluster" fi - mountpt=$(kubectl_bin get -f "$conf_dir/vault-secret.yaml" -o json | grep -E -o "secret_mount_point = \w+" | awk -F "=[ ]*" '{print $2}') - transition_keys=$(kubectl_bin exec --namespace="$vault1" -it $vault1-0 -- sh -c " - VAULT_TOKEN=$token1 vault kv list -format=json $mountpt/backup/" \ - | jq_filter "$mountpt/backup/") - - vault2="vault-service-2-${RANDOM}" - start_vault $vault2 $protocol - token2=$(jq -r ".root_token" <"$tmp_dir/$vault2") - ip2="$protocol://$vault2.$vault2.svc.cluster.local" - - kubectl_bin run -i --tty vault-cp --image=perconalab/vault-cp:latest --restart=Never -- sh -c " - sed -i 's/token=cfg.old_token)/token=cfg.old_token, verify=False)/' /src/vault-cp.py \ - && sed -i 's/token=cfg.new_token)/token=cfg.new_token, verify=False)/' /src/vault-cp.py \ - && echo \" -old_url = '$ip1:8200' -old_token = '$token1' -new_url = '$ip2:8200' -new_token = '$token2' -secrets = [ $transition_keys ] -\" > /src/config.py - python3 /src/vault-cp.py - " - - run_recovery_check "$cluster" "on-demand-backup-pvc" - table_must_be_encrypted "$cluster" "myApp" - keyring_plugin_must_be_in_use "$cluster" - - if 
[ -z "$SKIP_REMOTE_BACKUPS" ]; then - run_recovery_check "$cluster" "on-demand-backup-aws-s3" + # in xtrabackup mode, we do not store our own transition keys, so this test is not applicable + if [[ ! "$PXCO_FEATURE_GATES" == *"BackupXtrabackup=true"* ]]; then + mountpt=$(kubectl_bin get -f "$conf_dir/vault-secret.yaml" -o json | grep -E -o "secret_mount_point = \w+" | awk -F "=[ ]*" '{print $2}') + transition_keys=$(kubectl_bin exec --namespace="$vault1" -it $vault1-0 -- sh -c " + VAULT_TOKEN=$token1 vault kv list -format=json $mountpt/backup/" \ + | jq_filter "$mountpt/backup/") + + vault2="vault-service-2-${RANDOM}" + start_vault $vault2 $protocol + token2=$(jq -r ".root_token" <"$tmp_dir/$vault2") + ip2="$protocol://$vault2.$vault2.svc.cluster.local" + + kubectl_bin run -i --tty vault-cp --image=perconalab/vault-cp:latest --restart=Never -- sh -c " + sed -i 's/token=cfg.old_token)/token=cfg.old_token, verify=False)/' /src/vault-cp.py \ + && sed -i 's/token=cfg.new_token)/token=cfg.new_token, verify=False)/' /src/vault-cp.py \ + && echo \" + old_url = '$ip1:8200' + old_token = '$token1' + new_url = '$ip2:8200' + new_token = '$token2' + secrets = [ $transition_keys ] + \" > /src/config.py + python3 /src/vault-cp.py + " + + run_recovery_check "$cluster" "on-demand-backup-pvc" table_must_be_encrypted "$cluster" "myApp" keyring_plugin_must_be_in_use "$cluster" + + if [ -z "$SKIP_REMOTE_BACKUPS" ]; then + run_recovery_check "$cluster" "on-demand-backup-aws-s3" + table_must_be_encrypted "$cluster" "myApp" + keyring_plugin_must_be_in_use "$cluster" + fi fi for i in $vault1 $vault2; do From 348b92abeaea04878b3e851e3ab008de42ad5944 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Wed, 3 Dec 2025 16:33:45 +0530 Subject: [PATCH 49/77] add demand-backup-encrypted-with-tls-xtrabackup Signed-off-by: Mayank Shah --- e2e-tests/demand-backup-encrypted-with-tls-xtrabackup/run | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100755 
e2e-tests/demand-backup-encrypted-with-tls-xtrabackup/run diff --git a/e2e-tests/demand-backup-encrypted-with-tls-xtrabackup/run b/e2e-tests/demand-backup-encrypted-with-tls-xtrabackup/run new file mode 100755 index 0000000000..0a1284269e --- /dev/null +++ b/e2e-tests/demand-backup-encrypted-with-tls-xtrabackup/run @@ -0,0 +1,6 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath $(dirname $0)) +PXCO_FEATURE_GATES="BackupXtrabackup=true" ${test_dir}/../demand-backup-encrypted-with-tls/run From 6a07c546de7fad5e37b9bc60e6eba90625800f2b Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Wed, 3 Dec 2025 18:33:31 +0530 Subject: [PATCH 50/77] fix restore scripts Signed-off-by: Mayank Shah --- build/backup/recovery-cloud.sh | 24 ++++++++++++----- pkg/controller/pxcrestore/controller.go | 12 ++++----- pkg/controller/pxcrestore/controller_test.go | 4 +-- pkg/controller/pxcrestore/restorer.go | 26 +++++++++--------- pkg/pxc/backup/restore.go | 28 +++++++++++++++++--- 5 files changed, 63 insertions(+), 31 deletions(-) diff --git a/build/backup/recovery-cloud.sh b/build/backup/recovery-cloud.sh index 0889ece174..20d1489e6d 100644 --- a/build/backup/recovery-cloud.sh +++ b/build/backup/recovery-cloud.sh @@ -100,23 +100,33 @@ if ! 
check_for_version "$XTRABACKUP_VERSION" '8.0.0'; then XB_EXTRA_ARGS="$XB_EXTRA_ARGS --binlog-info=ON" fi +DEFAULTS_GROUP="--defaults-group=mysqld" +EARLY_PLUGIN_LOAD="--early-plugin-load=keyring_vault.so" +if [[ "${XTRABACKUP_ENABLED}" == "true" ]]; then + # these must not be set for pxb + DEFAULTS_GROUP="" + DEFAULTS_FILE="" + # EARLY_PLUGIN_LOAD="" +fi + echo "+ xtrabackup $DEFAULTS_FILE ${XB_USE_MEMORY+--use-memory=$XB_USE_MEMORY} --prepare $REMAINING_XB_ARGS --binlog-info=ON --rollback-prepared-trx \ ---keyring-vault-config=$keyring_vault \ +--keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf \ --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin --target-dir=$tmp" # shellcheck disable=SC2086 xtrabackup $DEFAULTS_FILE ${XB_USE_MEMORY+--use-memory=$XB_USE_MEMORY} --prepare $REMAINING_XB_ARGS $transition_option --rollback-prepared-trx \ - --keyring-vault-config=$keyring_vault \ + --keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf \ --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin "--target-dir=$tmp" -echo "+ xtrabackup $DEFAULTS_FILE --defaults-group=mysqld --datadir=/datadir --move-back $REMAINING_XB_ARGS --binlog-info=ON \ +echo "+ xtrabackup $DEFAULTS_FILE $DEFAULTS_GROUP --datadir=/datadir --move-back $REMAINING_XB_ARGS --binlog-info=ON \ --force-non-empty-directories $master_key_options \ ---keyring-vault-config=$keyring_vault --early-plugin-load=keyring_vault.so \ +--keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf $EARLY_PLUGIN_LOAD \ --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin --target-dir=$tmp" # shellcheck disable=SC2086 -xtrabackup $DEFAULTS_FILE --defaults-group=mysqld --datadir=/datadir --move-back $REMAINING_XB_ARGS \ +xtrabackup $DEFAULTS_FILE $DEFAULTS_GROUP --datadir=/datadir --move-back $REMAINING_XB_ARGS \ --force-non-empty-directories $transition_option $master_key_options \ - --keyring-vault-config=$keyring_vault --early-plugin-load=keyring_vault.so 
"--target-dir=$tmp" + --keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf $EARLY_PLUGIN_LOAD \ + --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin "--target-dir=$tmp" -rm -rf "$tmp" +rm -rf "$tmp" \ No newline at end of file diff --git a/pkg/controller/pxcrestore/controller.go b/pkg/controller/pxcrestore/controller.go index dbb2fdec8a..8cdbd019b9 100644 --- a/pkg/controller/pxcrestore/controller.go +++ b/pkg/controller/pxcrestore/controller.go @@ -238,7 +238,7 @@ func (r *ReconcilePerconaXtraDBClusterRestore) reconcileStateStartCluster(ctx co } func validate(ctx context.Context, restorer Restorer, cr *api.PerconaXtraDBClusterRestore) error { - job, err := restorer.Job() + job, err := restorer.Job(ctx) if err != nil { return errors.Wrap(err, "failed to create restore job") } @@ -247,7 +247,7 @@ func validate(ctx context.Context, restorer Restorer, cr *api.PerconaXtraDBClust } if cr.Spec.PITR != nil { - job, err := restorer.PITRJob() + job, err := restorer.PITRJob(ctx) if err != nil { return errors.Wrap(err, "failed to create pitr restore job") } @@ -317,7 +317,7 @@ func (r *ReconcilePerconaXtraDBClusterRestore) reconcileStateRestore(ctx context RequeueAfter: time.Second * 5, } - restorerJob, err := restorer.Job() + restorerJob, err := restorer.Job(ctx) if err != nil { return rr, errors.Wrap(err, "failed to create restore job") } @@ -409,7 +409,7 @@ func (r *ReconcilePerconaXtraDBClusterRestore) reconcileStatePITR(ctx context.Co RequeueAfter: time.Second * 5, } - restorerJob, err := restorer.PITRJob() + restorerJob, err := restorer.PITRJob(ctx) if err != nil { return rr, errors.Wrap(err, "failed to create restore job") } @@ -468,12 +468,12 @@ func createRestoreJob(ctx context.Context, cl client.Client, restorer Restorer, return errors.Wrap(err, "failed to init restore") } - job, err := restorer.Job() + job, err := restorer.Job(ctx) if err != nil { return errors.Wrap(err, "failed to get restore job") } if pitr { - job, err = restorer.PITRJob() 
+ job, err = restorer.PITRJob(ctx) if err != nil { return errors.Wrap(err, "failed to create pitr restore job") } diff --git a/pkg/controller/pxcrestore/controller_test.go b/pkg/controller/pxcrestore/controller_test.go index b2cd342b68..dd33d84cc7 100644 --- a/pkg/controller/pxcrestore/controller_test.go +++ b/pkg/controller/pxcrestore/controller_test.go @@ -457,12 +457,12 @@ func TestOperatorRestart(t *testing.T) { if err != nil { t.Fatal(err) } - job, err := restorer.Job() + job, err := restorer.Job(ctx) if err != nil { t.Fatal(err) } if state == api.RestorePITR { - job, err = restorer.PITRJob() + job, err = restorer.PITRJob(ctx) if err != nil { t.Fatal(err) } diff --git a/pkg/controller/pxcrestore/restorer.go b/pkg/controller/pxcrestore/restorer.go index 5c35bf20cf..7adf56d675 100644 --- a/pkg/controller/pxcrestore/restorer.go +++ b/pkg/controller/pxcrestore/restorer.go @@ -27,8 +27,8 @@ var ( type Restorer interface { Init(ctx context.Context) error - Job() (*batchv1.Job, error) - PITRJob() (*batchv1.Job, error) + Job(context.Context) (*batchv1.Job, error) + PITRJob(context.Context) (*batchv1.Job, error) Finalize(ctx context.Context) error Validate(ctx context.Context) error ValidateJob(ctx context.Context, job *batchv1.Job) error @@ -40,12 +40,12 @@ func (s *s3) Init(context.Context) error { return nil } func (s *s3) Finalize(context.Context) error { return nil } -func (s *s3) Job() (*batchv1.Job, error) { - return backup.RestoreJob(s.cr, s.bcp, s.cluster, s.initImage, s.scheme, s.bcp.Status.Destination, false) +func (s *s3) Job(ctx context.Context) (*batchv1.Job, error) { + return backup.RestoreJob(ctx, s.cr, s.bcp, s.cluster, s.initImage, s.scheme, s.bcp.Status.Destination, false) } -func (s *s3) PITRJob() (*batchv1.Job, error) { - return backup.RestoreJob(s.cr, s.bcp, s.cluster, s.initImage, s.scheme, s.bcp.Status.Destination, true) +func (s *s3) PITRJob(ctx context.Context) (*batchv1.Job, error) { + return backup.RestoreJob(ctx, s.cr, s.bcp, s.cluster, 
s.initImage, s.scheme, s.bcp.Status.Destination, true) } func (s *s3) ValidateJob(ctx context.Context, job *batchv1.Job) error { @@ -118,11 +118,11 @@ func (s *pvc) Validate(ctx context.Context) error { } } -func (s *pvc) Job() (*batchv1.Job, error) { - return backup.RestoreJob(s.cr, s.bcp, s.cluster, s.initImage, s.scheme, "", false) +func (s *pvc) Job(ctx context.Context) (*batchv1.Job, error) { + return backup.RestoreJob(ctx, s.cr, s.bcp, s.cluster, s.initImage, s.scheme, "", false) } -func (s *pvc) PITRJob() (*batchv1.Job, error) { +func (s *pvc) PITRJob(ctx context.Context) (*batchv1.Job, error) { return nil, errors.New("pitr restore is not supported for pvc") } @@ -200,12 +200,12 @@ func (s *azure) Init(context.Context) error { return nil } func (s *azure) Finalize(context.Context) error { return nil } -func (s *azure) Job() (*batchv1.Job, error) { - return backup.RestoreJob(s.cr, s.bcp, s.cluster, s.initImage, s.scheme, s.bcp.Status.Destination, false) +func (s *azure) Job(ctx context.Context) (*batchv1.Job, error) { + return backup.RestoreJob(ctx, s.cr, s.bcp, s.cluster, s.initImage, s.scheme, s.bcp.Status.Destination, false) } -func (s *azure) PITRJob() (*batchv1.Job, error) { - return backup.RestoreJob(s.cr, s.bcp, s.cluster, s.initImage, s.scheme, s.bcp.Status.Destination, true) +func (s *azure) PITRJob(ctx context.Context) (*batchv1.Job, error) { + return backup.RestoreJob(ctx, s.cr, s.bcp, s.cluster, s.initImage, s.scheme, s.bcp.Status.Destination, true) } func (s *azure) Validate(ctx context.Context) error { diff --git a/pkg/pxc/backup/restore.go b/pkg/pxc/backup/restore.go index f0f67c42db..11976a4a65 100644 --- a/pkg/pxc/backup/restore.go +++ b/pkg/pxc/backup/restore.go @@ -1,6 +1,7 @@ package backup import ( + "context" "path" "strconv" "strings" @@ -16,6 +17,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" + 
"github.com/percona/percona-xtradb-cluster-operator/pkg/features" "github.com/percona/percona-xtradb-cluster-operator/pkg/naming" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" @@ -210,7 +212,14 @@ func appendCABundleSecretVolume( *volumeMounts = append(*volumeMounts, mnt) } -func RestoreJob(cr *api.PerconaXtraDBClusterRestore, bcp *api.PerconaXtraDBClusterBackup, cluster *api.PerconaXtraDBCluster, initImage string, scheme *runtime.Scheme, destination api.PXCBackupDestination, pitr bool) (*batchv1.Job, error) { +func RestoreJob( + ctx context.Context, + cr *api.PerconaXtraDBClusterRestore, + bcp *api.PerconaXtraDBClusterBackup, + cluster *api.PerconaXtraDBCluster, + initImage string, + scheme *runtime.Scheme, + destination api.PXCBackupDestination, pitr bool) (*batchv1.Job, error) { switch bcp.Status.GetStorageType(cluster) { case api.BackupStorageAzure: if bcp.Status.Azure == nil { @@ -321,7 +330,7 @@ func RestoreJob(cr *api.PerconaXtraDBClusterRestore, bcp *api.PerconaXtraDBClust } } - envs, err := restoreJobEnvs(bcp, cr, cluster, destination, pitr) + envs, err := restoreJobEnvs(ctx, bcp, cr, cluster, destination, pitr) if err != nil { return nil, errors.Wrap(err, "restore job envs") } @@ -401,7 +410,13 @@ func RestoreJob(cr *api.PerconaXtraDBClusterRestore, bcp *api.PerconaXtraDBClust return job, nil } -func restoreJobEnvs(bcp *api.PerconaXtraDBClusterBackup, cr *api.PerconaXtraDBClusterRestore, cluster *api.PerconaXtraDBCluster, destination api.PXCBackupDestination, pitr bool) ([]corev1.EnvVar, error) { +func restoreJobEnvs( + ctx context.Context, + bcp *api.PerconaXtraDBClusterBackup, + cr *api.PerconaXtraDBClusterRestore, + cluster *api.PerconaXtraDBCluster, + destination api.PXCBackupDestination, + pitr bool) ([]corev1.EnvVar, error) { if bcp.Status.GetStorageType(cluster) == api.BackupStorageFilesystem { return util.MergeEnvLists( []corev1.EnvVar{ @@ -481,6 +496,13 @@ func 
restoreJobEnvs(bcp *api.PerconaXtraDBClusterBackup, cr *api.PerconaXtraDBCl Value: strconv.FormatBool(verifyTLS), }) + if features.Enabled(ctx, features.BackupXtrabackup) { + envs = append(envs, corev1.EnvVar{ + Name: "XTRABACKUP_ENABLED", + Value: "true", + }) + } + switch bcp.Status.GetStorageType(cluster) { case api.BackupStorageAzure: azureEnvs, err := azureEnvs(cr, bcp, cluster, destination, pitr) From 015b1fd46c402592d6cf6b4c5193b515304944ff Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Wed, 3 Dec 2025 21:46:15 +0530 Subject: [PATCH 51/77] fix restore Signed-off-by: Mayank Shah --- build/backup/recovery-cloud.sh | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/build/backup/recovery-cloud.sh b/build/backup/recovery-cloud.sh index 20d1489e6d..500456537a 100644 --- a/build/backup/recovery-cloud.sh +++ b/build/backup/recovery-cloud.sh @@ -72,7 +72,7 @@ if [[ -f "${tmp}/sst_info" ]]; then fi fi -if [ -f "${keyring_vault}" ]; then +if [ -f "${tmp}/xtrabackup_keys" ]; then master_key_options="--generate-new-master-key" fi @@ -101,32 +101,34 @@ if ! check_for_version "$XTRABACKUP_VERSION" '8.0.0'; then fi DEFAULTS_GROUP="--defaults-group=mysqld" -EARLY_PLUGIN_LOAD="--early-plugin-load=keyring_vault.so" if [[ "${XTRABACKUP_ENABLED}" == "true" ]]; then # these must not be set for pxb DEFAULTS_GROUP="" DEFAULTS_FILE="" - # EARLY_PLUGIN_LOAD="" +fi + +# If backup-my.cnf does not contian plugin_load, then --prepare will fail if you pass the --keyring-vault-config option. 
+if [[ -n "$(parse_ini 'plugin_load' "${tmp}/backup-my.cnf")" ]]; then + KEYRING_VAULT_CONFIG="--keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf" fi echo "+ xtrabackup $DEFAULTS_FILE ${XB_USE_MEMORY+--use-memory=$XB_USE_MEMORY} --prepare $REMAINING_XB_ARGS --binlog-info=ON --rollback-prepared-trx \ ---keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf \ ---xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin --target-dir=$tmp" + $KEYRING_VAULT_CONFIG --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin --target-dir=$tmp" + # shellcheck disable=SC2086 xtrabackup $DEFAULTS_FILE ${XB_USE_MEMORY+--use-memory=$XB_USE_MEMORY} --prepare $REMAINING_XB_ARGS $transition_option --rollback-prepared-trx \ - --keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf \ - --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin "--target-dir=$tmp" + $KEYRING_VAULT_CONFIG --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin "--target-dir=$tmp" echo "+ xtrabackup $DEFAULTS_FILE $DEFAULTS_GROUP --datadir=/datadir --move-back $REMAINING_XB_ARGS --binlog-info=ON \ --force-non-empty-directories $master_key_options \ ---keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf $EARLY_PLUGIN_LOAD \ +--keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf --early-plugin-load=keyring_vault.so \ --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin --target-dir=$tmp" # shellcheck disable=SC2086 xtrabackup $DEFAULTS_FILE $DEFAULTS_GROUP --datadir=/datadir --move-back $REMAINING_XB_ARGS \ --force-non-empty-directories $transition_option $master_key_options \ - --keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf $EARLY_PLUGIN_LOAD \ + --keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf --early-plugin-load=keyring_vault.so \ --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin "--target-dir=$tmp" rm -rf "$tmp" \ No newline at end of file From 
44105205ce925d9182dcbb6a3ef7e47a97625029 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Thu, 4 Dec 2025 09:30:06 +0530 Subject: [PATCH 52/77] use zap logger Signed-off-by: Mayank Shah --- pkg/xtrabackup/server/app.go | 4 +++ pkg/xtrabackup/server/create.go | 43 ++++++++++++++++----------------- 2 files changed, 25 insertions(+), 22 deletions(-) diff --git a/pkg/xtrabackup/server/app.go b/pkg/xtrabackup/server/app.go index 9b7d4c7adc..d16d3760bb 100644 --- a/pkg/xtrabackup/server/app.go +++ b/pkg/xtrabackup/server/app.go @@ -4,6 +4,7 @@ import ( "context" "os" + "github.com/go-logr/logr" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/backup/storage" "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" "github.com/pkg/errors" @@ -23,6 +24,7 @@ type appServer struct { namespace string newStorageFunc storage.NewClientFunc deleteBackupFunc func(ctx context.Context, cfg *api.BackupConfig, backupName string) error + log logr.Logger tableSpaceEncryptionEnabled bool } @@ -35,11 +37,13 @@ func New() (api.XtrabackupServiceServer, error) { return nil, status.Errorf(codes.InvalidArgument, "POD_NAMESPACE environment variable is not set") } tableSpaceEncryptionEnabled := vaultKeyringFileExists() + logger := zap.New() return &appServer{ namespace: namespace, backupStatus: backupStatus{}, newStorageFunc: storage.NewClient, deleteBackupFunc: deleteBackup, + log: logger, tableSpaceEncryptionEnabled: tableSpaceEncryptionEnabled, }, nil } diff --git a/pkg/xtrabackup/server/create.go b/pkg/xtrabackup/server/create.go index 013add2d11..105631619b 100644 --- a/pkg/xtrabackup/server/create.go +++ b/pkg/xtrabackup/server/create.go @@ -22,30 +22,29 @@ import ( ) func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.XtrabackupService_CreateBackupServer) error { - log := logf.Log.WithName("xtrabackup-server").WithName("CreateBackup") - + logger := s.log.WithName("CreateBackup") if !s.backupStatus.tryRunBackup() { - log.Info("backup is 
already running") + logger.Info("backup is already running") return status.Errorf(codes.FailedPrecondition, "backup is already running") } defer s.backupStatus.doneBackup() - log = log.WithValues("namespace", s.namespace, "name", req.BackupName) + logger = logger.WithValues("namespace", s.namespace, "name", req.BackupName) s.backupStatus.setBackupConfig(req.BackupConfig) defer s.backupStatus.removeBackupConfig() ctx := stream.Context() - log.Info("Checking if backup exists") + logger.Info("Checking if backup exists") exists, err := s.backupExists(ctx, req.BackupConfig) if err != nil { return errors.Wrap(err, "check if backup exists") } if exists { - log.Info("Backup already exists, deleting") + logger.Info("Backup already exists, deleting") if err := s.deleteBackupFunc(ctx, req.BackupConfig, req.BackupName); err != nil { - log.Error(err, "failed to delete backup") + logger.Error(err, "failed to delete backup") return errors.Wrap(err, "delete backup") } } @@ -53,7 +52,7 @@ func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.Xtraba backupUser := users.Xtrabackup backupPass, err := getUserPassword() if err != nil { - log.Error(err, "failed to get backup user password") + logger.Error(err, "failed to get backup user password") return errors.Wrap(err, "get backup user password") } @@ -62,21 +61,21 @@ func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.Xtraba xtrabackup := req.BackupConfig.NewXtrabackupCmd(gCtx, backupUser, backupPass, s.tableSpaceEncryptionEnabled) xbOut, err := xtrabackup.StdoutPipe() if err != nil { - log.Error(err, "xtrabackup stdout pipe failed") + logger.Error(err, "xtrabackup stdout pipe failed") return errors.Wrap(err, "xtrabackup stdout pipe failed") } defer xbOut.Close() //nolint:errcheck xbErr, err := xtrabackup.StderrPipe() if err != nil { - log.Error(err, "xtrabackup stderr pipe failed") + logger.Error(err, "xtrabackup stderr pipe failed") return errors.Wrap(err, "xtrabackup stderr pipe failed") } 
defer xbErr.Close() //nolint:errcheck backupLog, err := os.Create(filepath.Join(app.BackupLogDir, req.BackupName+".log")) if err != nil { - log.Error(err, "failed to create log file") + logger.Error(err, "failed to create log file") return errors.Wrap(err, "failed to create log file") } defer backupLog.Close() //nolint:errcheck @@ -85,12 +84,12 @@ func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.Xtraba xbcloud := req.BackupConfig.NewXbcloudCmd(gCtx, api.XBCloudActionPut, xbOut) xbcloudErr, err := xbcloud.StderrPipe() if err != nil { - log.Error(err, "xbcloud stderr pipe failed") + logger.Error(err, "xbcloud stderr pipe failed") return errors.Wrap(err, "xbcloud stderr pipe failed") } defer xbcloudErr.Close() //nolint:errcheck - log.Info( + logger.Info( "Backup starting", "destination", req.BackupConfig.Destination, "storage", req.BackupConfig.Type, @@ -100,17 +99,17 @@ func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.Xtraba g.Go(func() error { if err := xbcloud.Start(); err != nil { - log.Error(err, "failed to start xbcloud") + logger.Error(err, "failed to start xbcloud") return err } if _, err := io.Copy(logWriter, xbcloudErr); err != nil { - log.Error(err, "failed to copy xbcloud stderr") + logger.Error(err, "failed to copy xbcloud stderr") return err } if err := xbcloud.Wait(); err != nil { - log.Error(err, "failed waiting for xbcloud to finish") + logger.Error(err, "failed waiting for xbcloud to finish") return err } return nil @@ -118,31 +117,31 @@ func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.Xtraba g.Go(func() error { if err := xtrabackup.Start(); err != nil { - log.Error(err, "failed to start xtrabackup command") + logger.Error(err, "failed to start xtrabackup command") return err } if _, err := io.Copy(logWriter, xbErr); err != nil { - log.Error(err, "failed to copy xtrabackup stderr") + logger.Error(err, "failed to copy xtrabackup stderr") return err } if err := 
xtrabackup.Wait(); err != nil { - log.Error(err, "failed to wait for xtrabackup to finish") + logger.Error(err, "failed to wait for xtrabackup to finish") return err } return nil }) if err := g.Wait(); err != nil { - log.Error(err, "backup failed") + logger.Error(err, "backup failed") return errors.Wrap(err, "backup failed") } if err := s.checkBackupMD5Size(ctx, req.BackupConfig); err != nil { - log.Error(err, "check backup md5 file size") + logger.Error(err, "check backup md5 file size") return errors.Wrap(err, "check backup md5 file size") } - log.Info("Backup finished successfully", "destination", req.BackupConfig.Destination, "storage", req.BackupConfig.Type) + logger.Info("Backup finished successfully", "destination", req.BackupConfig.Destination, "storage", req.BackupConfig.Type) return nil } From fe6a5b4a5972d387fd1584b3bb402542ee68343c Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Thu, 4 Dec 2025 09:48:30 +0530 Subject: [PATCH 53/77] fix mispell Signed-off-by: Mayank Shah --- build/backup/recovery-cloud.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/backup/recovery-cloud.sh b/build/backup/recovery-cloud.sh index 500456537a..0653ee866e 100644 --- a/build/backup/recovery-cloud.sh +++ b/build/backup/recovery-cloud.sh @@ -107,7 +107,7 @@ if [[ "${XTRABACKUP_ENABLED}" == "true" ]]; then DEFAULTS_FILE="" fi -# If backup-my.cnf does not contian plugin_load, then --prepare will fail if you pass the --keyring-vault-config option. +# If backup-my.cnf does not contain plugin_load, then --prepare will fail if you pass the --keyring-vault-config option. 
if [[ -n "$(parse_ini 'plugin_load' "${tmp}/backup-my.cnf")" ]]; then KEYRING_VAULT_CONFIG="--keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf" fi @@ -131,4 +131,4 @@ xtrabackup $DEFAULTS_FILE $DEFAULTS_GROUP --datadir=/datadir --move-back $REMAIN --keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf --early-plugin-load=keyring_vault.so \ --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin "--target-dir=$tmp" -rm -rf "$tmp" \ No newline at end of file +rm -rf "$tmp" From c37895e8a29b3e1a5092d5a4f2e4ad50675a8924 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Thu, 4 Dec 2025 15:51:29 +0530 Subject: [PATCH 54/77] fix script Signed-off-by: Mayank Shah --- build/backup/recovery-cloud.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/backup/recovery-cloud.sh b/build/backup/recovery-cloud.sh index 0653ee866e..2bdfdf4c65 100644 --- a/build/backup/recovery-cloud.sh +++ b/build/backup/recovery-cloud.sh @@ -108,7 +108,7 @@ if [[ "${XTRABACKUP_ENABLED}" == "true" ]]; then fi # If backup-my.cnf does not contain plugin_load, then --prepare will fail if you pass the --keyring-vault-config option. 
-if [[ -n "$(parse_ini 'plugin_load' "${tmp}/backup-my.cnf")" ]]; then +if [[ -n "$(parse_ini 'plugin_load' "${tmp}/backup-my.cnf")" ]] && [[ -z $transition_key ]]; then KEYRING_VAULT_CONFIG="--keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf" fi From 55724a03221aea7a3dbcb87b42e0645fe6040bb0 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Thu, 4 Dec 2025 18:28:07 +0530 Subject: [PATCH 55/77] add and fix tests Signed-off-by: Mayank Shah --- e2e-tests/demand-backup-cloud-xtrabackup/run | 6 ++ e2e-tests/demand-backup-cloud/run | 76 +++++-------------- e2e-tests/demand-backup-cloud/utils/pxb.sh | 51 +++++++++++++ e2e-tests/demand-backup-cloud/utils/sst.sh | 51 +++++++++++++ .../demand-backup-encrypted-with-tls/run | 10 +-- e2e-tests/functions | 8 ++ .../run | 6 ++ e2e-tests/run-pr.csv | 3 + pkg/controller/pxcbackup/controller.go | 2 +- pkg/pxc/backup/job.go | 37 ++++----- pkg/xtrabackup/api/cmd.go | 18 +++-- pkg/xtrabackup/api/cmd_test.go | 5 +- pkg/xtrabackup/job.go | 13 +++- pkg/xtrabackup/job_test.go | 6 +- pkg/xtrabackup/server/app.go | 9 +++ pkg/xtrabackup/server/create.go | 3 +- pkg/xtrabackup/server/version.go | 34 +++++++++ pkg/xtrabackup/server/version_test.go | 25 ++++++ 18 files changed, 266 insertions(+), 97 deletions(-) create mode 100755 e2e-tests/demand-backup-cloud-xtrabackup/run create mode 100644 e2e-tests/demand-backup-cloud/utils/pxb.sh create mode 100644 e2e-tests/demand-backup-cloud/utils/sst.sh create mode 100755 e2e-tests/restore-to-encrypted-cluster-xtrabackup/run create mode 100644 pkg/xtrabackup/server/version.go create mode 100644 pkg/xtrabackup/server/version_test.go diff --git a/e2e-tests/demand-backup-cloud-xtrabackup/run b/e2e-tests/demand-backup-cloud-xtrabackup/run new file mode 100755 index 0000000000..be001800d7 --- /dev/null +++ b/e2e-tests/demand-backup-cloud-xtrabackup/run @@ -0,0 +1,6 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath $(dirname $0)) +PXCO_FEATURE_GATES="BackupXtrabackup=true" 
${test_dir}/../demand-backup-cloud/run diff --git a/e2e-tests/demand-backup-cloud/run b/e2e-tests/demand-backup-cloud/run index a1618288d7..e09b5c39c1 100755 --- a/e2e-tests/demand-backup-cloud/run +++ b/e2e-tests/demand-backup-cloud/run @@ -5,6 +5,12 @@ set -o errexit test_dir=$(realpath $(dirname $0)) . ${test_dir}/../functions +if is_feature_gate_enabled "BackupXtrabackup"; then + . ${test_dir}/utils/pxb.sh +else + . ${test_dir}/utils/sst.sh +fi + set_debug get_container_options() { @@ -68,28 +74,11 @@ run_recovery_from_source() { compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-2.$cluster-pxc -uroot -proot_password" } -# If backup upload was started and failed for some reason the cloud storage should be cleaned up during second try -delete_backup_pod() { - local backup_name=$1 - - desc "Delete ${backup_name} pod during SST" - echo "Waiting for ${backup_name} pod to become Running" - sleep 1 - kubectl_bin wait --for=jsonpath='{.status.phase}'=Running pod --selector=percona.com/backup-job-name=xb-${backup_name} --timeout=120s - - backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') - - echo "Deleting pod/${backup_pod} during SST upload" - kubectl logs -f ${backup_pod} | while IFS= read -r line; do - if [[ $line =~ \.ibd\. ]]; then - kubectl delete pod --force ${backup_pod} - break - fi - done - -} - check_optins_in_restore_pod() { + if is_feature_gate_enabled "BackupXtrabackup"; then + return 0 + fi + local restore_name=$1 local cluster_name=$2 @@ -108,36 +97,6 @@ check_optins_in_restore_pod() { fi } -check_cloud_storage_cleanup() { - local backup_name=$1 - - desc "Check storage cleanup of ${backup_name}" - if [[ $(kubectl_bin get events --field-selector involvedObject.kind=Job,involvedObject.name=xb-${backup_name} | grep -c "Created pod") == '1' ]]; then - echo "There should be 2+ pods started by job. 
First backup finished too quick" - exit 1 - fi - local backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') - if [[ $IMAGE_PXC =~ 5\.7 ]]; then - # There are 2 deletes during backup: $backup_dir_sst_info & $backup_dir - deletes_num=$(kubectl_bin logs ${backup_pod} | grep -c 'Delete completed.') - if [[ ${deletes_num} -ge '2' ]]; then - echo "Bucket cleanup was successful" - else - echo "Something went wrong. Delete was performed for $deletes_num. Expected: 2." - kubectl_bin logs ${backup_pod} - exit 1 - fi - else - if kubectl_bin logs ${backup_pod} | grep 'Object deleted successfully before attempt 1. Exiting.'; then - echo "Something went wrong. Delete was not performed." - kubectl_bin logs ${backup_pod} - exit 1 - else - echo "Clenup was performed." - fi - fi -} - run_backup_with_delete() { local backup_name=$1 @@ -229,22 +188,27 @@ main() { backup_dest_gcp=$(kubectl_bin get pxc-backup "$backup_name_gcp" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 6-) backup_dest_azure=$(kubectl_bin get pxc-backup "$backup_name_azure" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 9-) + check_backup_file_name='.sst_info/sst_info.00000000000000000000' + if is_feature_gate_enabled "BackupXtrabackup"; then + check_backup_file_name='xtrabackup_binlog_info.00000000000000000000' + fi + desc "Check backup existence" setup_aws_credentials - check_backup_existence_aws "$backup_dest_aws" ".sst_info/sst_info.00000000000000000000" + check_backup_existence_aws "$backup_dest_aws" "$check_backup_file_name" setup_gcs_credentials check_backup_existence_gcs "${backup_dest_gcp}" setup_azure_credentials - check_backup_existence_azure "${backup_dest_azure}" ".sst_info/sst_info.00000000000000000000" + check_backup_existence_azure "${backup_dest_azure}" "$check_backup_file_name" kubectl_bin delete pxc-backup --all desc "Check backup deletion" - check_backup_deletion_aws 
"$backup_dest_aws" ".sst_info/sst_info.00000000000000000000" + check_backup_deletion_aws "$backup_dest_aws" "$check_backup_file_name" check_backup_deletion_gcs "${backup_dest_gcp}" - check_backup_deletion_azure "${backup_dest_azure}" ".sst_info/sst_info.00000000000000000000" + check_backup_deletion_azure "${backup_dest_azure}" "$check_backup_file_name" if [ "$EKS" = 1 ]; then backup_name_aws_iam="on-demand-backup-aws-s3-iam" @@ -252,7 +216,7 @@ main() { run_backup_with_delete "${backup_name_aws_iam}" desc "Check backup existence for $backup_name_aws_iam" backup_dest_aws_iam=$(kubectl_bin get pxc-backup "$backup_name_aws_iam" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 6-) - check_backup_existence_aws "${backup_dest_aws_iam}" ".sst_info/sst_info.00000000000000000000" + check_backup_existence_aws "${backup_dest_aws_iam}" "$check_backup_file_name" fi destroy $namespace diff --git a/e2e-tests/demand-backup-cloud/utils/pxb.sh b/e2e-tests/demand-backup-cloud/utils/pxb.sh new file mode 100644 index 0000000000..14c1c6d21a --- /dev/null +++ b/e2e-tests/demand-backup-cloud/utils/pxb.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +delete_backup_pod() { + local backup_name=$1 + + desc "Delete ${backup_name} pod during backup" + echo "Waiting for ${backup_name} pod to become Running" + sleep 1 + kubectl_bin wait --for=jsonpath='{.status.phase}'=Running pod --selector=percona.com/backup-job-name=xb-${backup_name} --timeout=120s + + backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') + + # sleep for 10 seconds so that an upload is started + sleep 10 + + echo "Deleting pod/${backup_pod} during backup" + kubectl logs -f ${backup_pod} | while IFS= read -r line; do + if [[ $line =~ 'Backup requested' ]]; then + kubectl delete pod --force ${backup_pod} + break + fi + done + +} + +check_cloud_storage_cleanup() { + local backup_name=$1 + + desc "Check storage cleanup of ${backup_name}" + if [[ 
$(kubectl_bin get events --field-selector involvedObject.kind=Job,involvedObject.name=xb-${backup_name} | grep -c "Created pod") == '1' ]]; then + echo "There should be 2+ pods started by job. First backup finished too quick" + exit 1 + fi + + local cluster_name=$(kubectl_bin get pxc-backup ${backup_name} -o jsonpath='{.spec.pxcCluster}') + if [[ -z $cluster_name ]]; then + echo "Cluster name is not set on backup ${backup_name}" + exit 1 + fi + + local pxc_pod="${cluster_name}-pxc-0" + logs_output=$(kubectl_bin logs ${pxc_pod} -c xtrabackup 2>&1) + if kubectl_bin logs ${pxc_pod} -c xtrabackup | grep 'Deleting Backup'; then + echo "Something went wrong. Delete was not performed." + kubectl_bin logs ${pxc_pod} -c xtrabackup + exit 1 + else + echo "Cleanup was performed." + fi + +} \ No newline at end of file diff --git a/e2e-tests/demand-backup-cloud/utils/sst.sh b/e2e-tests/demand-backup-cloud/utils/sst.sh new file mode 100644 index 0000000000..2c38f65d32 --- /dev/null +++ b/e2e-tests/demand-backup-cloud/utils/sst.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +delete_backup_pod() { + local backup_name=$1 + + desc "Delete ${backup_name} pod during SST" + echo "Waiting for ${backup_name} pod to become Running" + sleep 1 + kubectl_bin wait --for=jsonpath='{.status.phase}'=Running pod --selector=percona.com/backup-job-name=xb-${backup_name} --timeout=120s + + backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') + + echo "Deleting pod/${backup_pod} during SST upload" + kubectl logs -f ${backup_pod} | while IFS= read -r line; do + if [[ $line =~ \.ibd\. 
]]; then + kubectl delete pod --force ${backup_pod} + break + fi + done + +} + +check_cloud_storage_cleanup() { + local backup_name=$1 + + desc "Check storage cleanup of ${backup_name}" + if [[ $(kubectl_bin get events --field-selector involvedObject.kind=Job,involvedObject.name=xb-${backup_name} | grep -c "Created pod") == '1' ]]; then + echo "There should be 2+ pods started by job. First backup finished too quick" + exit 1 + fi + local backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') + if [[ $IMAGE_PXC =~ 5\.7 ]]; then + # There are 2 deletes during backup: $backup_dir_sst_info & $backup_dir + deletes_num=$(kubectl_bin logs ${backup_pod} | grep -c 'Delete completed.') + if [[ ${deletes_num} -ge '2' ]]; then + echo "Bucket cleanup was successful" + else + echo "Something went wrong. Delete was performed for $deletes_num. Expected: 2." + kubectl_bin logs ${backup_pod} + exit 1 + fi + else + if kubectl_bin logs ${backup_pod} | grep 'Object deleted successfully before attempt 1. Exiting.'; then + echo "Something went wrong. Delete was not performed." + kubectl_bin logs ${backup_pod} + exit 1 + else + echo "Cleanup was performed."
+ fi + fi +} \ No newline at end of file diff --git a/e2e-tests/demand-backup-encrypted-with-tls/run b/e2e-tests/demand-backup-encrypted-with-tls/run index 3f6379a5a3..1444622314 100755 --- a/e2e-tests/demand-backup-encrypted-with-tls/run +++ b/e2e-tests/demand-backup-encrypted-with-tls/run @@ -60,11 +60,11 @@ main() { sed -i 's/token=cfg.old_token)/token=cfg.old_token, verify=False)/' /src/vault-cp.py \ && sed -i 's/token=cfg.new_token)/token=cfg.new_token, verify=False)/' /src/vault-cp.py \ && echo \" - old_url = '$ip1:8200' - old_token = '$token1' - new_url = '$ip2:8200' - new_token = '$token2' - secrets = [ $transition_keys ] +old_url = '$ip1:8200' +old_token = '$token1' +new_url = '$ip2:8200' +new_token = '$token2' +secrets = [ $transition_keys ] \" > /src/config.py python3 /src/vault-cp.py " diff --git a/e2e-tests/functions b/e2e-tests/functions index a6c6d3034e..c9ff7b1d1d 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -2091,3 +2091,11 @@ get_pvc_name_for_backup() { echo "$pvc_name" } +is_feature_gate_enabled() { + local feature_gate=$1 + if [[ "$PXCO_FEATURE_GATES" == *"$feature_gate=true"* ]]; then + return 0 + else + return 1 + fi +} \ No newline at end of file diff --git a/e2e-tests/restore-to-encrypted-cluster-xtrabackup/run b/e2e-tests/restore-to-encrypted-cluster-xtrabackup/run new file mode 100755 index 0000000000..6586dad5ac --- /dev/null +++ b/e2e-tests/restore-to-encrypted-cluster-xtrabackup/run @@ -0,0 +1,6 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath $(dirname $0)) +PXCO_FEATURE_GATES="BackupXtrabackup=true" ${test_dir}/../restore-to-encrypted-cluster/run diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index 0c3781bc05..13c5cdd602 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -4,7 +4,9 @@ backup-storage-tls,8.0 cross-site,8.0 custom-users,8.0 demand-backup-cloud,8.0 +demand-backup-cloud-xtrabackup,8.0 demand-backup-encrypted-with-tls,8.0 +demand-backup-encrypted-with-tls-xtrabackup,8.0 
demand-backup,8.0 demand-backup-flow-control,8.0 demand-backup-parallel,8.0 @@ -29,6 +31,7 @@ pvc-resize,5.7 pvc-resize,8.0 recreate,8.0 restore-to-encrypted-cluster,8.0 +restore-to-encrypted-cluster-xtrabackup,8.0 scaling-proxysql,8.0 scaling,8.0 scheduled-backup,5.7 diff --git a/pkg/controller/pxcbackup/controller.go b/pkg/controller/pxcbackup/controller.go index 34b0cd4d5d..6a50723c62 100644 --- a/pkg/controller/pxcbackup/controller.go +++ b/pkg/controller/pxcbackup/controller.go @@ -330,7 +330,7 @@ func (r *ReconcilePerconaXtraDBClusterBackup) createBackupJob( if err != nil { return batchv1.JobSpec{}, errors.Wrap(err, "failed to get primary pod dns name") } - return xtrabackup.JobSpec(&cr.Spec, cluster, job, initImage, srcNode) + return xtrabackup.JobSpec(cr, cluster, job, initImage, srcNode) } return bcp.JobSpec(cr.Spec, cluster, job, initImage) } diff --git a/pkg/pxc/backup/job.go b/pkg/pxc/backup/job.go index 5ee88628cb..6109553664 100644 --- a/pkg/pxc/backup/job.go +++ b/pkg/pxc/backup/job.go @@ -11,7 +11,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" - "github.com/percona/percona-xtradb-cluster-operator/pkg/features" "github.com/percona/percona-xtradb-cluster-operator/pkg/naming" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" @@ -281,12 +280,10 @@ func SetStorageAzure(ctx context.Context, job *batchv1.JobSpec, cr *api.PerconaX } job.Template.Spec.Containers[0].Env = append(job.Template.Spec.Containers[0].Env, storageAccount, accessKey, containerName, endpoint, storageClass, backupPath) - if !features.Enabled(ctx, features.BackupXtrabackup) { - // add SSL volumes - err := appendStorageSecret(job, cr) - if err != nil { - return errors.Wrap(err, "failed to append storage secrets") - } + // add SSL volumes + err := appendStorageSecret(job, cr) + if err != nil { + return errors.Wrap(err, "failed to 
append storage secrets") } return nil @@ -347,21 +344,19 @@ func SetStorageS3(ctx context.Context, job *batchv1.JobSpec, cr *api.PerconaXtra } job.Template.Spec.Containers[0].Env = append(job.Template.Spec.Containers[0].Env, bucketEnv, bucketPathEnv) - if !features.Enabled(ctx, features.BackupXtrabackup) { - // add SSL volumes - err := appendStorageSecret(job, cr) - if err != nil { - return errors.Wrap(err, "failed to append storage secrets") - } + // add SSL volumes + err := appendStorageSecret(job, cr) + if err != nil { + return errors.Wrap(err, "failed to append storage secrets") + } - // add ca bundle (this is used by the aws-cli to verify the connection to S3) - if sel := s3.CABundle; sel != nil { - appendCABundleSecretVolume( - &job.Template.Spec.Volumes, - &job.Template.Spec.Containers[0].VolumeMounts, - sel, - ) - } + // add ca bundle (this is used by the aws-cli to verify the connection to S3) + if sel := s3.CABundle; sel != nil { + appendCABundleSecretVolume( + &job.Template.Spec.Volumes, + &job.Template.Spec.Containers[0].VolumeMounts, + sel, + ) } return nil diff --git a/pkg/xtrabackup/api/cmd.go b/pkg/xtrabackup/api/cmd.go index e0baaf5f92..2212709d6d 100644 --- a/pkg/xtrabackup/api/cmd.go +++ b/pkg/xtrabackup/api/cmd.go @@ -6,6 +6,8 @@ import ( "io" "os" "os/exec" + + goversion "github.com/hashicorp/go-version" ) const ( @@ -25,8 +27,9 @@ func (cfg *BackupConfig) NewXtrabackupCmd( ctx context.Context, user, password string, + mysqlVersion *goversion.Version, withTablespaceEncryption bool) *exec.Cmd { - cmd := exec.CommandContext(ctx, xtrabackupCmd, cfg.xtrabackupArgs(user, password, withTablespaceEncryption)...) + cmd := exec.CommandContext(ctx, xtrabackupCmd, cfg.xtrabackupArgs(user, password, mysqlVersion, withTablespaceEncryption)...) 
cmd.Env = cfg.envs() return cmd } @@ -100,7 +103,7 @@ func (cfg *BackupConfig) xbcloudArgs(action XBCloudAction) []string { return args } -func (cfg *BackupConfig) xtrabackupArgs(user, pass string, withTablespaceEncryption bool) []string { +func (cfg *BackupConfig) xtrabackupArgs(user, pass string, mysqlVersion *goversion.Version, withTablespaceEncryption bool) []string { args := []string{ "--backup", "--stream=xbstream", @@ -112,10 +115,13 @@ func (cfg *BackupConfig) xtrabackupArgs(user, pass string, withTablespaceEncrypt fmt.Sprintf("--password=%s", pass), } if withTablespaceEncryption { - args = append(args, - "--generate-transition-key", - "--keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf", - ) + args = append(args, "--generate-transition-key") + + vaultConfigFlag := "--keyring-vault-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf" + if mysqlVersion.Compare(goversion.Must(goversion.NewVersion("8.4.0"))) >= 0 { + vaultConfigFlag = "--component-keyring-config=/etc/mysql/vault-keyring-secret/keyring_vault.conf" + } + args = append(args, vaultConfigFlag) } if cfg != nil && cfg.ContainerOptions != nil && cfg.ContainerOptions.Args != nil { args = append(args, cfg.ContainerOptions.Args.Xtrabackup...) 
diff --git a/pkg/xtrabackup/api/cmd_test.go b/pkg/xtrabackup/api/cmd_test.go index 0176d536a3..1a0085d487 100644 --- a/pkg/xtrabackup/api/cmd_test.go +++ b/pkg/xtrabackup/api/cmd_test.go @@ -5,6 +5,7 @@ import ( "fmt" "testing" + goversion "github.com/hashicorp/go-version" "github.com/stretchr/testify/assert" ) @@ -47,7 +48,9 @@ func TestNewXtrabackupCmd(t *testing.T) { for i, tc := range testCases { t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) { - cmd := tc.backupConfig.NewXtrabackupCmd(context.Background(), "root", "password123", false) + cmd := tc.backupConfig.NewXtrabackupCmd( + context.Background(), + "root", "password123", goversion.Must(goversion.NewVersion("8.0.0")), false) assert.Equal(t, tc.expectedArgs, cmd.Args) }) } diff --git a/pkg/xtrabackup/job.go b/pkg/xtrabackup/job.go index 0be1bb35e6..0e16ff1855 100644 --- a/pkg/xtrabackup/job.go +++ b/pkg/xtrabackup/job.go @@ -14,7 +14,7 @@ import ( ) func JobSpec( - spec *pxcv1.PXCBackupSpec, + backup *pxcv1.PerconaXtraDBClusterBackup, cluster *pxcv1.PerconaXtraDBCluster, job *batchv1.Job, initImage string, @@ -37,12 +37,12 @@ func JobSpec( MountPath: app.BinVolumeMountPath, }, ) - - storage := cluster.Spec.Backup.Storages[spec.StorageName] + spec := backup.Spec + storage := cluster.Spec.Backup.Storages[backup.Spec.StorageName] var initContainers []corev1.Container initContainers = append(initContainers, statefulset.BackupInitContainer(cluster, initImage, storage.ContainerSecurityContext)) - envs, err := xtrabackupJobEnvVars(storage, primaryPodHost) + envs, err := xtrabackupJobEnvVars(backup, storage, primaryPodHost) if err != nil { return batchv1.JobSpec{}, fmt.Errorf("failed to get xtrabackup job env vars: %w", err) } @@ -94,6 +94,7 @@ func JobSpec( } func xtrabackupJobEnvVars( + backup *pxcv1.PerconaXtraDBClusterBackup, storage *pxcv1.BackupStorageSpec, primaryPodHost string, ) ([]corev1.EnvVar, error) { @@ -110,6 +111,10 @@ func xtrabackupJobEnvVars( Name: "VERIFY_TLS", Value: fmt.Sprintf("%t", 
ptr.Deref(storage.VerifyTLS, true)), }, + { + Name: "BACKUP_NAME", + Value: backup.Name, + }, } return envs, nil } diff --git a/pkg/xtrabackup/job_test.go b/pkg/xtrabackup/job_test.go index 14a9866dbf..0463088b8f 100644 --- a/pkg/xtrabackup/job_test.go +++ b/pkg/xtrabackup/job_test.go @@ -144,8 +144,10 @@ func TestJobSpec(t *testing.T) { Labels: jobLabels, }, } - - jobSpec, err := JobSpec(spec, cluster, job, initImage, primaryPodHost) + backup := &pxcv1.PerconaXtraDBClusterBackup{ + Spec: *spec, + } + jobSpec, err := JobSpec(backup, cluster, job, initImage, primaryPodHost) assert.NoError(t, err) // Assert JobSpec fields diff --git a/pkg/xtrabackup/server/app.go b/pkg/xtrabackup/server/app.go index d16d3760bb..d39f824fd5 100644 --- a/pkg/xtrabackup/server/app.go +++ b/pkg/xtrabackup/server/app.go @@ -5,6 +5,7 @@ import ( "os" "github.com/go-logr/logr" + goversion "github.com/hashicorp/go-version" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/backup/storage" "github.com/percona/percona-xtradb-cluster-operator/pkg/xtrabackup/api" "github.com/pkg/errors" @@ -25,6 +26,7 @@ type appServer struct { newStorageFunc storage.NewClientFunc deleteBackupFunc func(ctx context.Context, cfg *api.BackupConfig, backupName string) error log logr.Logger + mysqlVersion *goversion.Version tableSpaceEncryptionEnabled bool } @@ -37,6 +39,12 @@ func New() (api.XtrabackupServiceServer, error) { return nil, status.Errorf(codes.InvalidArgument, "POD_NAMESPACE environment variable is not set") } tableSpaceEncryptionEnabled := vaultKeyringFileExists() + + mysqlVer, err := getMySQLVersionFromXtrabackup() + if err != nil { + return nil, errors.Wrap(err, "failed to get MySQL version from XtraBackup") + } + logger := zap.New() return &appServer{ namespace: namespace, @@ -45,6 +53,7 @@ func New() (api.XtrabackupServiceServer, error) { deleteBackupFunc: deleteBackup, log: logger, tableSpaceEncryptionEnabled: tableSpaceEncryptionEnabled, + mysqlVersion: 
goversion.Must(goversion.NewVersion(mysqlVer)), }, nil } diff --git a/pkg/xtrabackup/server/create.go b/pkg/xtrabackup/server/create.go index 105631619b..fa3c811079 100644 --- a/pkg/xtrabackup/server/create.go +++ b/pkg/xtrabackup/server/create.go @@ -58,7 +58,8 @@ func (s *appServer) CreateBackup(req *api.CreateBackupRequest, stream api.Xtraba g, gCtx := errgroup.WithContext(ctx) - xtrabackup := req.BackupConfig.NewXtrabackupCmd(gCtx, backupUser, backupPass, s.tableSpaceEncryptionEnabled) + xtrabackup := req.BackupConfig.NewXtrabackupCmd( + gCtx, backupUser, backupPass, s.mysqlVersion, s.tableSpaceEncryptionEnabled) xbOut, err := xtrabackup.StdoutPipe() if err != nil { logger.Error(err, "xtrabackup stdout pipe failed") diff --git a/pkg/xtrabackup/server/version.go b/pkg/xtrabackup/server/version.go new file mode 100644 index 0000000000..21617d05a6 --- /dev/null +++ b/pkg/xtrabackup/server/version.go @@ -0,0 +1,34 @@ +package server + +import ( + "os/exec" + "regexp" +) + +func getMySQLVersionFromXtrabackup() (string, error) { + versionOut, err := runXtrabackupVersion() + if err != nil { + return "", err + } + return parseMySQLVersionFromVersionStr(versionOut), nil +} + +func runXtrabackupVersion() (string, error) { + cmd := exec.Command("xtrabackup", "--version") + output, err := cmd.Output() + if err != nil { + return "", err + } + return string(output), nil +} + +func parseMySQLVersionFromVersionStr(versionStr string) string { + // Regex matches "MySQL server X.Y.Z" and captures X.Y.Z + re := regexp.MustCompile(`MySQL server\s+([0-9]+\.[0-9]+\.[0-9]+)`) + matches := re.FindStringSubmatch(versionStr) + + if len(matches) > 1 { + return matches[1] + } + return "" +} diff --git a/pkg/xtrabackup/server/version_test.go b/pkg/xtrabackup/server/version_test.go new file mode 100644 index 0000000000..e7c3833c82 --- /dev/null +++ b/pkg/xtrabackup/server/version_test.go @@ -0,0 +1,25 @@ +package server + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + 
+func TestParseMySQLVersionFromVersionStr(t *testing.T) { + tests := []struct { + versionStr string + expected string + }{ + {versionStr: "xtrabackup version 8.4.0-12 based on MySQL server 8.4.0 Linux (x86_64) (revision id: c8a25ff9)", expected: "8.4.0"}, + {versionStr: "xtrabackup version 8.0.35-34 based on MySQL server 8.0.35 Linux (x86_64) (revision id: c8a25ff9)", expected: "8.0.35"}, + {versionStr: "xtrabackup version 5.7.40-xy based on MySQL server 5.7.40 Linux (x86_64) (revision id: 25cdf1e)", expected: "5.7.40"}, + } + + for _, tt := range tests { + t.Run(tt.versionStr, func(t *testing.T) { + actual := parseMySQLVersionFromVersionStr(tt.versionStr) + assert.Equal(t, tt.expected, actual) + }) + } +} From f87bc1224250010b0c5cc0a3945294c979826f5b Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Thu, 4 Dec 2025 19:03:16 +0530 Subject: [PATCH 56/77] fix version Signed-off-by: Mayank Shah --- pkg/xtrabackup/server/version.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/xtrabackup/server/version.go b/pkg/xtrabackup/server/version.go index 21617d05a6..2706bf3200 100644 --- a/pkg/xtrabackup/server/version.go +++ b/pkg/xtrabackup/server/version.go @@ -15,7 +15,7 @@ func getMySQLVersionFromXtrabackup() (string, error) { func runXtrabackupVersion() (string, error) { cmd := exec.Command("xtrabackup", "--version") - output, err := cmd.Output() + output, err := cmd.CombinedOutput() if err != nil { return "", err } @@ -23,7 +23,6 @@ func runXtrabackupVersion() (string, error) { } func parseMySQLVersionFromVersionStr(versionStr string) string { - // Regex matches "MySQL server X.Y.Z" and captures X.Y.Z re := regexp.MustCompile(`MySQL server\s+([0-9]+\.[0-9]+\.[0-9]+)`) matches := re.FindStringSubmatch(versionStr) From e16e157fac13206074081dc68529ebccd3a5c8fc Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Thu, 4 Dec 2025 21:04:25 +0530 Subject: [PATCH 57/77] increase sleep Signed-off-by: Mayank Shah --- e2e-tests/demand-backup-cloud/utils/pxb.sh 
| 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e-tests/demand-backup-cloud/utils/pxb.sh b/e2e-tests/demand-backup-cloud/utils/pxb.sh index 14c1c6d21a..1b5ffcff48 100644 --- a/e2e-tests/demand-backup-cloud/utils/pxb.sh +++ b/e2e-tests/demand-backup-cloud/utils/pxb.sh @@ -10,8 +10,8 @@ delete_backup_pod() { backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') - # sleep for 10 seconds so that an upload is started - sleep 10 + # sleep for 25 seconds so that an upload is started + sleep 25 echo "Deleting pod/${backup_pod} during backup" kubectl logs -f ${backup_pod} | while IFS= read -r line; do From 881940ae1d2a07b4ced6da23cf7ae788e88663fd Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 13:23:50 +0530 Subject: [PATCH 58/77] fix tests Signed-off-by: Mayank Shah --- ...p-azure-demand-backup-cloud-xtrabackup.yml | 112 +++++++++++++++ ...ckup-s3-demand-backup-cloud-xtrabackup.yml | 117 ++++++++++++++++ ..._xb-on-demand-backup-aws-s3-xtrabackup.yml | 132 ++++++++++++++++++ ...on-demand-backup-azure-blob-xtrabackup.yml | 129 +++++++++++++++++ e2e-tests/demand-backup-cloud/run | 21 +-- e2e-tests/demand-backup-cloud/utils/pxb.sh | 5 +- .../demand-backup-encrypted-with-tls/run | 6 +- e2e-tests/functions | 7 +- e2e-tests/restore-to-encrypted-cluster/run | 18 ++- 9 files changed, 526 insertions(+), 21 deletions(-) create mode 100644 e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-azure-demand-backup-cloud-xtrabackup.yml create mode 100644 e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-s3-demand-backup-cloud-xtrabackup.yml create mode 100644 e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-aws-s3-xtrabackup.yml create mode 100644 e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-azure-blob-xtrabackup.yml diff --git 
a/e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-azure-demand-backup-cloud-xtrabackup.yml b/e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-azure-demand-backup-cloud-xtrabackup.yml new file mode 100644 index 0000000000..a17de02514 --- /dev/null +++ b/e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-azure-demand-backup-cloud-xtrabackup.yml @@ -0,0 +1,112 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generation: 1 + labels: + app.kubernetes.io/instance: demand-backup-cloud + app.kubernetes.io/managed-by: percona-xtradb-cluster-operator + app.kubernetes.io/name: percona-xtradb-cluster + app.kubernetes.io/part-of: percona-xtradb-cluster + percona.com/restore-job-name: restore-job-on-demand-backup-azure-demand-backup-cloud + name: restore-job-on-demand-backup-azure-demand-backup-cloud + ownerReferences: + - controller: true + kind: PerconaXtraDBClusterRestore + name: on-demand-backup-azure +spec: + backoffLimit: 4 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + selector: + matchLabels: {} + suspend: false + template: + metadata: + labels: + app.kubernetes.io/instance: demand-backup-cloud + app.kubernetes.io/managed-by: percona-xtradb-cluster-operator + app.kubernetes.io/name: percona-xtradb-cluster + app.kubernetes.io/part-of: percona-xtradb-cluster + percona.com/restore-job-name: restore-job-on-demand-backup-azure-demand-backup-cloud + spec: + containers: + - command: + - /opt/percona/backup/recovery-cloud.sh + env: + - name: PXC_SERVICE + value: demand-backup-cloud-pxc + - name: PXC_USER + value: xtrabackup + - name: PXC_PASS + valueFrom: + secretKeyRef: + key: xtrabackup + name: my-cluster-secrets + - name: VERIFY_TLS + value: "true" + - name: XTRABACKUP_ENABLED + value: "true" + - name: AZURE_STORAGE_ACCOUNT + valueFrom: + secretKeyRef: + key: AZURE_STORAGE_ACCOUNT_NAME + name: azure-secret + - 
name: AZURE_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AZURE_STORAGE_ACCOUNT_KEY + name: azure-secret + - name: AZURE_ENDPOINT + - name: AZURE_STORAGE_CLASS + - name: XB_USE_MEMORY + value: 100MB + imagePullPolicy: Always + name: xtrabackup + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /datadir + name: datadir + - mountPath: /etc/mysql/vault-keyring-secret + name: vault-keyring-secret + - mountPath: /opt/percona + name: bin + dnsPolicy: ClusterFirst + initContainers: + - command: + - /backup-init-entrypoint.sh + imagePullPolicy: Always + name: backup-init + resources: + limits: + cpu: 50m + memory: 50M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /opt/percona + name: bin + restartPolicy: Never + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + supplementalGroups: + - 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir-demand-backup-cloud-pxc-0 + - name: vault-keyring-secret + secret: + defaultMode: 420 + optional: true + secretName: some-name-vault + - emptyDir: {} + name: bin diff --git a/e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-s3-demand-backup-cloud-xtrabackup.yml b/e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-s3-demand-backup-cloud-xtrabackup.yml new file mode 100644 index 0000000000..f8490d705f --- /dev/null +++ b/e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-s3-demand-backup-cloud-xtrabackup.yml @@ -0,0 +1,117 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generation: 1 + labels: + app.kubernetes.io/instance: demand-backup-cloud + app.kubernetes.io/managed-by: percona-xtradb-cluster-operator + app.kubernetes.io/name: percona-xtradb-cluster + app.kubernetes.io/part-of: 
percona-xtradb-cluster + percona.com/restore-job-name: restore-job-on-demand-backup-s3-demand-backup-cloud + name: restore-job-on-demand-backup-s3-demand-backup-cloud + ownerReferences: + - controller: true + kind: PerconaXtraDBClusterRestore + name: on-demand-backup-s3 +spec: + backoffLimit: 4 + completionMode: NonIndexed + completions: 1 + manualSelector: false + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + selector: + matchLabels: {} + suspend: false + template: + metadata: + labels: + app.kubernetes.io/instance: demand-backup-cloud + app.kubernetes.io/managed-by: percona-xtradb-cluster-operator + app.kubernetes.io/name: percona-xtradb-cluster + app.kubernetes.io/part-of: percona-xtradb-cluster + percona.com/restore-job-name: restore-job-on-demand-backup-s3-demand-backup-cloud + spec: + containers: + - command: + - /opt/percona/backup/recovery-cloud.sh + env: + - name: PXC_SERVICE + value: demand-backup-cloud-pxc + - name: PXC_USER + value: xtrabackup + - name: PXC_PASS + valueFrom: + secretKeyRef: + key: xtrabackup + name: my-cluster-secrets + - name: VERIFY_TLS + value: "true" + - name: XTRABACKUP_ENABLED + value: "true" + - name: ENDPOINT + - name: DEFAULT_REGION + value: us-east-1 + - name: ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: AWS_ACCESS_KEY_ID + name: aws-s3-secret + - name: SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AWS_SECRET_ACCESS_KEY + name: aws-s3-secret + - name: XBCLOUD_EXTRA_ARGS + value: --parallel=2 + - name: XBSTREAM_EXTRA_ARGS + value: --parallel=2 + - name: XB_USE_MEMORY + value: 100MB + imagePullPolicy: Always + name: xtrabackup + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /datadir + name: datadir + - mountPath: /etc/mysql/vault-keyring-secret + name: vault-keyring-secret + - mountPath: /opt/percona + name: bin + dnsPolicy: ClusterFirst + initContainers: + - command: + - /backup-init-entrypoint.sh + imagePullPolicy: Always 
+ name: backup-init + resources: + limits: + cpu: 50m + memory: 50M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /opt/percona + name: bin + restartPolicy: Never + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + supplementalGroups: + - 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir-demand-backup-cloud-pxc-0 + - name: vault-keyring-secret + secret: + defaultMode: 420 + optional: true + secretName: some-name-vault + - emptyDir: {} + name: bin diff --git a/e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-aws-s3-xtrabackup.yml b/e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-aws-s3-xtrabackup.yml new file mode 100644 index 0000000000..287be89d04 --- /dev/null +++ b/e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-aws-s3-xtrabackup.yml @@ -0,0 +1,132 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generation: 1 + labels: + app.kubernetes.io/instance: demand-backup-cloud + app.kubernetes.io/managed-by: percona-xtradb-cluster-operator + app.kubernetes.io/name: percona-xtradb-cluster + app.kubernetes.io/part-of: percona-xtradb-cluster + percona.com/backup-job-name: xb-on-demand-backup-aws-s3 + percona.com/backup-name: on-demand-backup-aws-s3 + percona.com/backup-type: xtrabackup + percona.com/cluster: demand-backup-cloud + name: xb-on-demand-backup-aws-s3 + ownerReferences: + - controller: true + kind: PerconaXtraDBClusterBackup + name: on-demand-backup-aws-s3 +spec: + activeDeadlineSeconds: 7200 + backoffLimit: 6 + completionMode: NonIndexed + completions: 1 + manualSelector: true + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + selector: + matchLabels: + app.kubernetes.io/instance: demand-backup-cloud + app.kubernetes.io/managed-by: percona-xtradb-cluster-operator + app.kubernetes.io/name: 
percona-xtradb-cluster + app.kubernetes.io/part-of: percona-xtradb-cluster + percona.com/backup-job-name: xb-on-demand-backup-aws-s3 + percona.com/backup-name: on-demand-backup-aws-s3 + percona.com/backup-type: xtrabackup + percona.com/cluster: demand-backup-cloud + suspend: false + template: + metadata: + labels: + app.kubernetes.io/instance: demand-backup-cloud + app.kubernetes.io/managed-by: percona-xtradb-cluster-operator + app.kubernetes.io/name: percona-xtradb-cluster + app.kubernetes.io/part-of: percona-xtradb-cluster + percona.com/backup-job-name: xb-on-demand-backup-aws-s3 + percona.com/backup-name: on-demand-backup-aws-s3 + percona.com/backup-type: xtrabackup + percona.com/cluster: demand-backup-cloud + spec: + containers: + - command: + - /opt/percona/xtrabackup-run-backup + env: + - name: HOST + value: demand-backup-cloud-pxc-0.demand-backup-cloud-pxc.namespace.svc.cluster.local + - name: STORAGE_TYPE + value: s3 + - name: VERIFY_TLS + value: "true" + - name: BACKUP_NAME + value: on-demand-backup-aws-s3 + - name: ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: AWS_ACCESS_KEY_ID + name: aws-s3-secret + - name: SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AWS_SECRET_ACCESS_KEY + name: aws-s3-secret + - name: DEFAULT_REGION + value: us-east-1 + - name: ENDPOINT + - name: S3_BUCKET + value: operator-testing + - name: BACKUP_DEST + value: demand-backup-cloud-2025-12-04-15:51:44-full + imagePullPolicy: Always + name: xtrabackup + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mysql/ssl + name: ssl + - mountPath: /etc/mysql/ssl-internal + name: ssl-internal + - mountPath: /etc/mysql/vault-keyring-secret + name: vault-keyring-secret + dnsPolicy: ClusterFirst + initContainers: + - command: + - /backup-init-entrypoint.sh + imagePullPolicy: Always + name: backup-init + resources: + limits: + cpu: 50m + memory: 50M + 
terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /opt/percona + name: bin + restartPolicy: Never + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + supplementalGroups: + - 1001 + terminationGracePeriodSeconds: 30 + volumes: + - emptyDir: {} + name: bin + - name: ssl + secret: + defaultMode: 420 + optional: true + secretName: demand-backup-cloud-ssl + - name: ssl-internal + secret: + defaultMode: 420 + optional: true + secretName: demand-backup-cloud-ssl-internal + - name: vault-keyring-secret + secret: + defaultMode: 420 + optional: true + secretName: some-name-vault diff --git a/e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-azure-blob-xtrabackup.yml b/e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-azure-blob-xtrabackup.yml new file mode 100644 index 0000000000..a738169e12 --- /dev/null +++ b/e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-azure-blob-xtrabackup.yml @@ -0,0 +1,129 @@ +apiVersion: batch/v1 +kind: Job +metadata: + generation: 1 + labels: + app.kubernetes.io/instance: demand-backup-cloud + app.kubernetes.io/managed-by: percona-xtradb-cluster-operator + app.kubernetes.io/name: percona-xtradb-cluster + app.kubernetes.io/part-of: percona-xtradb-cluster + percona.com/backup-job-name: xb-on-demand-backup-azure-blob + percona.com/backup-name: on-demand-backup-azure-blob + percona.com/backup-type: xtrabackup + percona.com/cluster: demand-backup-cloud + name: xb-on-demand-backup-azure-blob + ownerReferences: + - controller: true + kind: PerconaXtraDBClusterBackup + name: on-demand-backup-azure-blob +spec: + backoffLimit: 6 + completionMode: NonIndexed + completions: 1 + manualSelector: true + parallelism: 1 + podReplacementPolicy: TerminatingOrFailed + selector: + matchLabels: + app.kubernetes.io/instance: demand-backup-cloud + app.kubernetes.io/managed-by: percona-xtradb-cluster-operator + app.kubernetes.io/name: 
percona-xtradb-cluster + app.kubernetes.io/part-of: percona-xtradb-cluster + percona.com/backup-job-name: xb-on-demand-backup-azure-blob + percona.com/backup-name: on-demand-backup-azure-blob + percona.com/backup-type: xtrabackup + percona.com/cluster: demand-backup-cloud + suspend: false + template: + metadata: + labels: + app.kubernetes.io/instance: demand-backup-cloud + app.kubernetes.io/managed-by: percona-xtradb-cluster-operator + app.kubernetes.io/name: percona-xtradb-cluster + app.kubernetes.io/part-of: percona-xtradb-cluster + percona.com/backup-job-name: xb-on-demand-backup-azure-blob + percona.com/backup-name: on-demand-backup-azure-blob + percona.com/backup-type: xtrabackup + percona.com/cluster: demand-backup-cloud + spec: + containers: + - command: + - /opt/percona/xtrabackup-run-backup + env: + - name: HOST + value: demand-backup-cloud-pxc-0.demand-backup-cloud-pxc.namespace.svc.cluster.local + - name: STORAGE_TYPE + value: azure + - name: VERIFY_TLS + value: "true" + - name: BACKUP_NAME + value: on-demand-backup-azure-blob + - name: AZURE_STORAGE_ACCOUNT + valueFrom: + secretKeyRef: + key: AZURE_STORAGE_ACCOUNT_NAME + name: azure-secret + - name: AZURE_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AZURE_STORAGE_ACCOUNT_KEY + name: azure-secret + - name: AZURE_ENDPOINT + - name: AZURE_STORAGE_CLASS + value: Cool + - name: BACKUP_DEST + value: demand-backup-cloud-2025-12-04-16:25:39-full + imagePullPolicy: Always + name: xtrabackup + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mysql/ssl + name: ssl + - mountPath: /etc/mysql/ssl-internal + name: ssl-internal + - mountPath: /etc/mysql/vault-keyring-secret + name: vault-keyring-secret + dnsPolicy: ClusterFirst + initContainers: + - command: + - /backup-init-entrypoint.sh + imagePullPolicy: Always + name: backup-init + resources: + limits: + cpu: 50m + memory: 50M + 
terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /opt/percona + name: bin + restartPolicy: Never + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + supplementalGroups: + - 1001 + terminationGracePeriodSeconds: 30 + volumes: + - emptyDir: {} + name: bin + - name: ssl + secret: + defaultMode: 420 + optional: true + secretName: demand-backup-cloud-ssl + - name: ssl-internal + secret: + defaultMode: 420 + optional: true + secretName: demand-backup-cloud-ssl-internal + - name: vault-keyring-secret + secret: + defaultMode: 420 + optional: true + secretName: some-name-vault diff --git a/e2e-tests/demand-backup-cloud/run b/e2e-tests/demand-backup-cloud/run index e09b5c39c1..e02e75b0b4 100755 --- a/e2e-tests/demand-backup-cloud/run +++ b/e2e-tests/demand-backup-cloud/run @@ -156,10 +156,15 @@ main() { desc 'Run recovery from s3 source without s3:// prefix in destination' run_recovery_from_source 's3' "${backup_name_aws}" '' 'remove_prefix_from_destination' + postfix="" + if is_feature_gate_enabled "BackupXtrabackup"; then + postfix="-xtrabackup" + fi + desc 'Run recovery from s3 source without bucket option' run_recovery_from_source 's3' "${backup_name_aws}" 'remove_bucket_name' - compare_kubectl job.batch/xb-"${backup_name_aws}" - compare_kubectl job.batch/restore-job-on-demand-backup-s3-demand-backup-cloud + compare_kubectl job.batch/xb-"${backup_name_aws}" "${postfix}" + compare_kubectl job.batch/restore-job-on-demand-backup-s3-demand-backup-cloud "${postfix}" desc "Run backup ${backup_name_gcp} for $cluster cluster" run_backup_with_delete "${backup_name_gcp}" @@ -181,8 +186,8 @@ main() { desc 'Run recovery from azure source without container option' run_recovery_from_source 'azure' "${backup_name_azure}" 'remove_container_name' - compare_kubectl job.batch/xb-"${backup_name_azure}" - compare_kubectl job.batch/restore-job-on-demand-backup-azure-demand-backup-cloud + compare_kubectl 
job.batch/xb-"${backup_name_azure}" "${postfix}" + compare_kubectl job.batch/restore-job-on-demand-backup-azure-demand-backup-cloud "${postfix}" backup_dest_aws=$(kubectl_bin get pxc-backup "$backup_name_aws" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 6-) backup_dest_gcp=$(kubectl_bin get pxc-backup "$backup_name_gcp" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 6-) @@ -190,15 +195,15 @@ main() { check_backup_file_name='.sst_info/sst_info.00000000000000000000' if is_feature_gate_enabled "BackupXtrabackup"; then - check_backup_file_name='xtrabackup_binlog_info.00000000000000000000' + check_backup_file_name='/xtrabackup_binlog_info.00000000000000000000' fi - desc "Check backup existence" + # desc "Check backup existence" setup_aws_credentials check_backup_existence_aws "$backup_dest_aws" "$check_backup_file_name" setup_gcs_credentials - check_backup_existence_gcs "${backup_dest_gcp}" + check_backup_existence_gcs "${backup_dest_gcp}" "$check_backup_file_name" setup_azure_credentials check_backup_existence_azure "${backup_dest_azure}" "$check_backup_file_name" @@ -207,7 +212,7 @@ main() { desc "Check backup deletion" check_backup_deletion_aws "$backup_dest_aws" "$check_backup_file_name" - check_backup_deletion_gcs "${backup_dest_gcp}" + check_backup_deletion_gcs "${backup_dest_gcp}" "$check_backup_file_name" check_backup_deletion_azure "${backup_dest_azure}" "$check_backup_file_name" if [ "$EKS" = 1 ]; then diff --git a/e2e-tests/demand-backup-cloud/utils/pxb.sh b/e2e-tests/demand-backup-cloud/utils/pxb.sh index 1b5ffcff48..27c2e5e505 100644 --- a/e2e-tests/demand-backup-cloud/utils/pxb.sh +++ b/e2e-tests/demand-backup-cloud/utils/pxb.sh @@ -39,13 +39,12 @@ check_cloud_storage_cleanup() { fi local pxc_pod="${cluster_name}-pxc-0" - logs_output=$(kubectl_bin logs ${pxc_pod} -c xtrabackup 2>&1) if kubectl_bin logs ${pxc_pod} -c xtrabackup | grep 'Deleting Backup'; then + echo "Cleanup was performed." 
+	else
 		echo "Something went wrong. Delete was not performed."
 		kubectl_bin logs ${pxc_pod} -c xtrabackup
 		exit 1
-	else
-		echo "Cleanup was performed."
 	fi
 }
\ No newline at end of file
diff --git a/e2e-tests/demand-backup-encrypted-with-tls/run b/e2e-tests/demand-backup-encrypted-with-tls/run
index 1444622314..49faab0446 100755
--- a/e2e-tests/demand-backup-encrypted-with-tls/run
+++ b/e2e-tests/demand-backup-encrypted-with-tls/run
@@ -27,7 +27,7 @@ main() {
 	table_must_be_encrypted "$cluster" "myApp"
 
 	# todo: add support for pvc
-	if [[ ! "$PXCO_FEATURE_GATES" == *"BackupXtrabackup=true"* ]]; then
+	if ! is_feature_gate_enabled "BackupXtrabackup"; then
 		run_backup "$cluster" "on-demand-backup-pvc"
 		run_recovery_check "$cluster" "on-demand-backup-pvc"
 		kubectl_bin delete -f "$test_dir/conf/restore-on-demand-backup-pvc.yaml"
@@ -45,7 +45,7 @@ main() {
 	fi
 
 	# in xtrabackup mode, we do not store our own transition keys, so this test is not applicable
-	if [[ ! "$PXCO_FEATURE_GATES" == *"BackupXtrabackup=true"* ]]; then
+	if ! is_feature_gate_enabled "BackupXtrabackup"; then
 		mountpt=$(kubectl_bin get -f "$conf_dir/vault-secret.yaml" -o json | grep -E -o "secret_mount_point = \w+" | awk -F "=[ ]*" '{print $2}')
 		transition_keys=$(kubectl_bin exec --namespace="$vault1" -it $vault1-0 -- sh -c "
			VAULT_TOKEN=$token1 vault kv list -format=json $mountpt/backup/" \
@@ -65,7 +65,7 @@
 old_token = '$token1'
 new_url = '$ip2:8200'
 new_token = '$token2'
 secrets = [ $transition_keys ]
-	\" > /src/config.py
+\" > /src/config.py
 	python3 /src/vault-cp.py
 "
diff --git a/e2e-tests/functions b/e2e-tests/functions
index c9ff7b1d1d..c311fbabaf 100755
--- a/e2e-tests/functions
+++ b/e2e-tests/functions
@@ -504,6 +504,7 @@ compare_kubectl() {
 			del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) |
 			del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) |
 			del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) |
+			
del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) |
 			del(.metadata.selfLink) |
 			del(.metadata.deletionTimestamp) |
 			del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
@@ -1828,8 +1829,9 @@ function check_backup_existence_gcs() {
 	backup_dest_gcp=$1
 	storage_name="gcp-cs"
 	retry=0
+	key=${2:-'.sst_info/sst_info.00000000000000000000'}
 
-	gcs_path="gs://${backup_dest_gcp}.sst_info/sst_info.00000000000000000000"
+	gcs_path="gs://${backup_dest_gcp}${key}"
 
 	until gsutil ls "$gcs_path" >/dev/null 2>&1; do
 		if [ $retry -ge 30 ]; then
@@ -1892,7 +1894,8 @@ function check_backup_deletion_gcs() {
 	backup_dest_gcp=$1
 	storage_name="gcp-cs"
 	retry=0
-	gcs_path="gs://${backup_dest_gcp}.sst_info/sst_info.00000000000000000000"
+	key=${2:-'.sst_info/sst_info.00000000000000000000'}
+	gcs_path="gs://${backup_dest_gcp}${key}"
 
 	while gsutil ls "$gcs_path" >/dev/null 2>&1; do
 		if [ $retry -ge 15 ]; then
diff --git a/e2e-tests/restore-to-encrypted-cluster/run b/e2e-tests/restore-to-encrypted-cluster/run
index 32d0db88c6..f0a4407f4d 100755
--- a/e2e-tests/restore-to-encrypted-cluster/run
+++ b/e2e-tests/restore-to-encrypted-cluster/run
@@ -15,17 +15,25 @@ main() {
 	keyring_plugin_must_not_be_in_use "$cluster"
 	table_must_not_be_encrypted "$cluster" "myApp"
 
-	run_backup "$cluster" "on-demand-backup-pvc"
+	# todo: add support for pvc
+	if ! is_feature_gate_enabled "BackupXtrabackup"; then
+		run_backup "$cluster" "on-demand-backup-pvc"
+	fi
+
 	if [ -z "$SKIP_REMOTE_BACKUPS" ]; then
 		run_backup "$cluster" "on-demand-backup-aws-s3"
 	fi
 
 	vault1="vault-service-1-${RANDOM}"
 	start_vault $vault1
 
-	run_recovery_check "$cluster" "on-demand-backup-pvc"
-	check_pvc_md5 "on-demand-backup-pvc"
-	keyring_plugin_must_be_in_use "$cluster"
-	table_must_not_be_encrypted "$cluster" "myApp"
+
+	# todo: add support for pvc
+	if ! is_feature_gate_enabled "BackupXtrabackup"; then
+		run_recovery_check "$cluster" "on-demand-backup-pvc"
+		check_pvc_md5 "on-demand-backup-pvc"
+		
keyring_plugin_must_be_in_use "$cluster" + table_must_not_be_encrypted "$cluster" "myApp" + fi if [ -z "$SKIP_REMOTE_BACKUPS" ]; then run_recovery_check "$cluster" "on-demand-backup-aws-s3" From 9f8d1abddda247f010ef05463bde937bba488485 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 13:38:03 +0530 Subject: [PATCH 59/77] try fix testname Signed-off-by: Mayank Shah --- e2e-tests/functions | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/e2e-tests/functions b/e2e-tests/functions index c311fbabaf..91075013bf 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -29,6 +29,15 @@ if oc get projects 2>/dev/null; then OPENSHIFT=$(oc version -o json | jq -r '.openshiftVersion' | grep -oE '^[0-9]+\.[0-9]+') fi +is_feature_gate_enabled() { + local feature_gate=$1 + if [[ "$PXCO_FEATURE_GATES" == *"$feature_gate=true"* ]]; then + return 0 + else + return 1 + fi +} + add_docker_reg() { local var=$1 @@ -60,6 +69,9 @@ sed=$(which gsed || which sed) date=$(which gdate || which date) test_name=$(basename $test_dir) +if is_feature_gate_enabled "BackupXtrabackup"; then + test_name="${test_name}-xtrabackup" +fi namespace="${test_name}-${RANDOM}" replica_namespace="${test_name}-replica-${RANDOM}" conf_dir=$(realpath $test_dir/../conf || :) @@ -2093,12 +2105,3 @@ get_pvc_name_for_backup() { echo "$pvc_name" } - -is_feature_gate_enabled() { - local feature_gate=$1 - if [[ "$PXCO_FEATURE_GATES" == *"$feature_gate=true"* ]]; then - return 0 - else - return 1 - fi -} \ No newline at end of file From d90861443865cb4ebccc8d1dfad044b63f3309bc Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 14:08:54 +0530 Subject: [PATCH 60/77] fix storage tests Signed-off-by: Mayank Shah --- .../compare/statefulset_hostpath-proxysql-k127-oc.yml | 8 ++++---- .../compare/statefulset_hostpath-proxysql-k127.yml | 8 ++++---- .../storage/compare/statefulset_hostpath-proxysql-oc.yml | 8 ++++---- 
.../storage/compare/statefulset_hostpath-proxysql.yml | 8 ++++---- .../storage/compare/statefulset_hostpath-pxc-k127-oc.yml | 4 ++-- .../storage/compare/statefulset_hostpath-pxc-k127.yml | 4 ++-- e2e-tests/storage/compare/statefulset_hostpath-pxc-oc.yml | 4 ++-- e2e-tests/storage/compare/statefulset_hostpath-pxc.yml | 4 ++-- e2e-tests/storage/conf/hostpath.yml | 5 +++++ 9 files changed, 29 insertions(+), 24 deletions(-) diff --git a/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127-oc.yml b/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127-oc.yml index 7f108ff571..d6810e79e1 100644 --- a/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127-oc.yml +++ b/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127-oc.yml @@ -181,8 +181,8 @@ spec: name: pxc-init resources: limits: - cpu: 50m - memory: 50M + cpu: 100m + memory: 100Mi securityContext: privileged: false terminationMessagePath: /dev/termination-log @@ -196,8 +196,8 @@ spec: name: proxysql-init resources: limits: - cpu: 50m - memory: 50M + cpu: 100m + memory: 100Mi terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: diff --git a/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127.yml b/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127.yml index 06bdc6db99..991a4453a2 100644 --- a/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127.yml +++ b/e2e-tests/storage/compare/statefulset_hostpath-proxysql-k127.yml @@ -199,8 +199,8 @@ spec: name: pxc-init resources: limits: - cpu: 50m - memory: 50M + cpu: 100m + memory: 100Mi securityContext: privileged: false terminationMessagePath: /dev/termination-log @@ -214,8 +214,8 @@ spec: name: proxysql-init resources: limits: - cpu: 50m - memory: 50M + cpu: 100m + memory: 100Mi terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: diff --git a/e2e-tests/storage/compare/statefulset_hostpath-proxysql-oc.yml 
b/e2e-tests/storage/compare/statefulset_hostpath-proxysql-oc.yml index ba76585322..df020af340 100644 --- a/e2e-tests/storage/compare/statefulset_hostpath-proxysql-oc.yml +++ b/e2e-tests/storage/compare/statefulset_hostpath-proxysql-oc.yml @@ -170,8 +170,8 @@ spec: name: pxc-init resources: limits: - cpu: 50m - memory: 50M + cpu: 100m + memory: 100Mi securityContext: privileged: false terminationMessagePath: /dev/termination-log @@ -185,8 +185,8 @@ spec: name: proxysql-init resources: limits: - cpu: 50m - memory: 50M + cpu: 100m + memory: 100Mi terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: diff --git a/e2e-tests/storage/compare/statefulset_hostpath-proxysql.yml b/e2e-tests/storage/compare/statefulset_hostpath-proxysql.yml index 47cd3da662..2d08631f7d 100644 --- a/e2e-tests/storage/compare/statefulset_hostpath-proxysql.yml +++ b/e2e-tests/storage/compare/statefulset_hostpath-proxysql.yml @@ -170,8 +170,8 @@ spec: name: pxc-init resources: limits: - cpu: 50m - memory: 50M + cpu: 100m + memory: 100Mi securityContext: privileged: false terminationMessagePath: /dev/termination-log @@ -185,8 +185,8 @@ spec: name: proxysql-init resources: limits: - cpu: 50m - memory: 50M + cpu: 100m + memory: 100Mi terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: diff --git a/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127-oc.yml b/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127-oc.yml index 055265f08e..ae931dca55 100644 --- a/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127-oc.yml +++ b/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127-oc.yml @@ -155,8 +155,8 @@ spec: name: pxc-init resources: limits: - cpu: 50m - memory: 50M + cpu: 100m + memory: 100Mi terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: diff --git a/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127.yml b/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127.yml 
index d02f22dd0a..6e0ade2420 100644 --- a/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127.yml +++ b/e2e-tests/storage/compare/statefulset_hostpath-pxc-k127.yml @@ -157,8 +157,8 @@ spec: name: pxc-init resources: limits: - cpu: 50m - memory: 50M + cpu: 100m + memory: 100Mi securityContext: privileged: false terminationMessagePath: /dev/termination-log diff --git a/e2e-tests/storage/compare/statefulset_hostpath-pxc-oc.yml b/e2e-tests/storage/compare/statefulset_hostpath-pxc-oc.yml index 34bb601034..aaf37ab26a 100644 --- a/e2e-tests/storage/compare/statefulset_hostpath-pxc-oc.yml +++ b/e2e-tests/storage/compare/statefulset_hostpath-pxc-oc.yml @@ -152,8 +152,8 @@ spec: name: pxc-init resources: limits: - cpu: 50m - memory: 50M + cpu: 100m + memory: 100Mi terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: diff --git a/e2e-tests/storage/compare/statefulset_hostpath-pxc.yml b/e2e-tests/storage/compare/statefulset_hostpath-pxc.yml index 78999c69ed..984861c53c 100644 --- a/e2e-tests/storage/compare/statefulset_hostpath-pxc.yml +++ b/e2e-tests/storage/compare/statefulset_hostpath-pxc.yml @@ -154,8 +154,8 @@ spec: name: pxc-init resources: limits: - cpu: 50m - memory: 50M + cpu: 100m + memory: 100Mi securityContext: privileged: false terminationMessagePath: /dev/termination-log diff --git a/e2e-tests/storage/conf/hostpath.yml b/e2e-tests/storage/conf/hostpath.yml index 4e638d9e65..55c45c37c6 100644 --- a/e2e-tests/storage/conf/hostpath.yml +++ b/e2e-tests/storage/conf/hostpath.yml @@ -3,6 +3,11 @@ kind: PerconaXtraDBCluster metadata: name: hostpath spec: + initContainer: + resources: + limits: + cpu: 100m + memory: 100Mi secretsName: my-cluster-secrets sslSecretName: some-name-ssl pxc: From f57ecc1748badc2d3fff41b42cb13afdb93585c9 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 14:35:45 +0530 Subject: [PATCH 61/77] go mod tidy Signed-off-by: Mayank Shah --- go.sum | 6 ++---- 1 file changed, 2 insertions(+), 4 
deletions(-) diff --git a/go.sum b/go.sum index 8bedc22e72..49fcd1d5fe 100644 --- a/go.sum +++ b/go.sum @@ -145,8 +145,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= +github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= @@ -155,8 +155,6 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= -github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= -github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= From 1a25b90ebd595798c2cf4ad283324a51aa866585 Mon 
Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 15:03:20 +0530 Subject: [PATCH 62/77] add request sanitation Signed-off-by: Mayank Shah --- cmd/xtrabackup/run-backup/main.go | 36 ++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/cmd/xtrabackup/run-backup/main.go b/cmd/xtrabackup/run-backup/main.go index 10c762b6db..c039299050 100644 --- a/cmd/xtrabackup/run-backup/main.go +++ b/cmd/xtrabackup/run-backup/main.go @@ -116,11 +116,11 @@ func getRequestObject() *xbscapi.CreateBackupRequest { log.Fatalf("Invalid storage type: %s", storageType) } - reqJson, err := json.Marshal(req) + reqSanitized, err := sanitizeRequest(req) if err != nil { - log.Fatalf("Failed to marshal request: %v", err) + log.Fatalf("Failed to sanitize request: %v", err) } - log.Printf("Request=%s", string(reqJson)) + log.Printf("Request=%s", reqSanitized) return req } @@ -144,3 +144,33 @@ func setAzureConfig(req *xbscapi.CreateBackupRequest) { AccessKey: os.Getenv("AZURE_ACCESS_KEY"), } } + +func sanitizeRequest(req *xbscapi.CreateBackupRequest) (string, error) { + // Create a deep copy to avoid modifying the original request + reqBytes, err := json.Marshal(req) + if err != nil { + return "", err + } + + var reqCopy xbscapi.CreateBackupRequest + if err := json.Unmarshal(reqBytes, &reqCopy); err != nil { + return "", err + } + + // Sanitize the copy + if reqCopy.BackupConfig != nil { + if reqCopy.BackupConfig.S3 != nil { + reqCopy.BackupConfig.S3.SecretKey = "********" + reqCopy.BackupConfig.S3.AccessKey = "********" + } + if reqCopy.BackupConfig.Azure != nil { + reqCopy.BackupConfig.Azure.AccessKey = "********" + } + } + + js, err := json.Marshal(&reqCopy) + if err != nil { + return "", err + } + return string(js), nil +} From 43cd482a238b4b3468d57e806a0a14f6560d9e7e Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 15:27:48 +0530 Subject: [PATCH 63/77] update e2e tests Signed-off-by: Mayank Shah --- .../compare/operator.log | 0 
.../compare/select-1.sql | 1 + .../compare/select-2.sql | 2 + .../conf/demand-backup-cloud.yml | 116 +++++++++ .../conf/on-demand-backup-aws-s3-iam.yml | 8 + .../conf/on-demand-backup-aws-s3.yml | 10 + .../conf/on-demand-backup-azure-blob.yml | 9 + .../conf/on-demand-backup-gcp-cs.yml | 9 + .../conf/restore-from-source-backup.json | 10 + .../conf/restore-on-demand-backup-aws-s3.yaml | 12 + .../restore-on-demand-backup-azure-blob.yaml | 7 + .../conf/restore-on-demand-backup-gcp-cs.yaml | 7 + e2e-tests/demand-backup-cloud-pxb/run | 237 ++++++++++++++++++ e2e-tests/demand-backup-cloud-xtrabackup/run | 6 - ...p-azure-demand-backup-cloud-xtrabackup.yml | 112 --------- ...ckup-s3-demand-backup-cloud-xtrabackup.yml | 117 --------- ..._xb-on-demand-backup-aws-s3-xtrabackup.yml | 132 ---------- ...on-demand-backup-azure-blob-xtrabackup.yml | 129 ---------- e2e-tests/demand-backup-cloud/run | 95 ++++--- e2e-tests/demand-backup-cloud/utils/pxb.sh | 50 ---- e2e-tests/demand-backup-cloud/utils/sst.sh | 51 ---- .../compare/operator.log | 0 .../compare/select-1.sql | 1 + .../compare/select-2.sql | 2 + .../conf/client.yml | 26 ++ .../conf/on-demand-backup-aws-s3.yml | 7 + .../conf/on-demand-backup-pvc.yml | 7 + .../conf/restore-on-demand-backup-aws-s3.yaml | 7 + .../conf/restore-on-demand-backup-pvc.yaml | 7 + .../demand-backup-encrypted-with-tls-pxb/run | 63 +++++ .../run | 6 - .../demand-backup-encrypted-with-tls/run | 62 +++-- e2e-tests/functions | 3 - .../compare/secret_pitr-mysql-init.yml | 7 + e2e-tests/pitr-pxb/compare/select-1.sql | 1 + e2e-tests/pitr-pxb/compare/select-2.sql | 3 + e2e-tests/pitr-pxb/compare/select-3.sql | 3 + e2e-tests/pitr-pxb/compare/select-4.sql | 6 + e2e-tests/pitr-pxb/conf/cert.yml | 9 + e2e-tests/pitr-pxb/conf/issuer.yml | 6 + e2e-tests/pitr-pxb/conf/on-pitr-minio.yml | 7 + e2e-tests/pitr-pxb/conf/pitr.yml | 83 ++++++ .../conf/restore-on-pitr-minio-gtid.yaml | 13 + .../conf/restore-on-pitr-minio-time.yaml | 13 + 
.../pitr-pxb/conf/restore-on-pitr-minio.yaml | 24 ++ e2e-tests/pitr-pxb/run | 177 +++++++++++++ e2e-tests/pitr-xtrabackup/run | 6 - .../compare/operator.log | 0 .../compare/select-1.sql | 1 + .../compare/select-2.sql | 2 + .../conf/client.yml | 26 ++ .../conf/on-demand-backup-aws-s3.yml | 7 + .../conf/on-demand-backup-pvc.yml | 7 + .../conf/restore-on-demand-backup-aws-s3.yaml | 7 + .../conf/restore-on-demand-backup-pvc.yaml | 7 + .../restore-to-encrypted-cluster-pxb/run | 38 +++ .../run | 6 - e2e-tests/restore-to-encrypted-cluster/run | 18 +- e2e-tests/run-pr.csv | 8 +- 59 files changed, 1095 insertions(+), 701 deletions(-) create mode 100644 e2e-tests/demand-backup-cloud-pxb/compare/operator.log create mode 100644 e2e-tests/demand-backup-cloud-pxb/compare/select-1.sql create mode 100644 e2e-tests/demand-backup-cloud-pxb/compare/select-2.sql create mode 100644 e2e-tests/demand-backup-cloud-pxb/conf/demand-backup-cloud.yml create mode 100644 e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-aws-s3-iam.yml create mode 100644 e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-aws-s3.yml create mode 100644 e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-azure-blob.yml create mode 100644 e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-gcp-cs.yml create mode 100644 e2e-tests/demand-backup-cloud-pxb/conf/restore-from-source-backup.json create mode 100644 e2e-tests/demand-backup-cloud-pxb/conf/restore-on-demand-backup-aws-s3.yaml create mode 100644 e2e-tests/demand-backup-cloud-pxb/conf/restore-on-demand-backup-azure-blob.yaml create mode 100644 e2e-tests/demand-backup-cloud-pxb/conf/restore-on-demand-backup-gcp-cs.yaml create mode 100755 e2e-tests/demand-backup-cloud-pxb/run delete mode 100755 e2e-tests/demand-backup-cloud-xtrabackup/run delete mode 100644 e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-azure-demand-backup-cloud-xtrabackup.yml delete mode 100644 
e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-s3-demand-backup-cloud-xtrabackup.yml delete mode 100644 e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-aws-s3-xtrabackup.yml delete mode 100644 e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-azure-blob-xtrabackup.yml delete mode 100644 e2e-tests/demand-backup-cloud/utils/pxb.sh delete mode 100644 e2e-tests/demand-backup-cloud/utils/sst.sh create mode 100644 e2e-tests/demand-backup-encrypted-with-tls-pxb/compare/operator.log create mode 100644 e2e-tests/demand-backup-encrypted-with-tls-pxb/compare/select-1.sql create mode 100644 e2e-tests/demand-backup-encrypted-with-tls-pxb/compare/select-2.sql create mode 100644 e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/client.yml create mode 100644 e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/on-demand-backup-aws-s3.yml create mode 100644 e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/on-demand-backup-pvc.yml create mode 100644 e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/restore-on-demand-backup-aws-s3.yaml create mode 100644 e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/restore-on-demand-backup-pvc.yaml create mode 100755 e2e-tests/demand-backup-encrypted-with-tls-pxb/run delete mode 100755 e2e-tests/demand-backup-encrypted-with-tls-xtrabackup/run create mode 100644 e2e-tests/pitr-pxb/compare/secret_pitr-mysql-init.yml create mode 100644 e2e-tests/pitr-pxb/compare/select-1.sql create mode 100644 e2e-tests/pitr-pxb/compare/select-2.sql create mode 100644 e2e-tests/pitr-pxb/compare/select-3.sql create mode 100644 e2e-tests/pitr-pxb/compare/select-4.sql create mode 100644 e2e-tests/pitr-pxb/conf/cert.yml create mode 100644 e2e-tests/pitr-pxb/conf/issuer.yml create mode 100755 e2e-tests/pitr-pxb/conf/on-pitr-minio.yml create mode 100755 e2e-tests/pitr-pxb/conf/pitr.yml create mode 100755 e2e-tests/pitr-pxb/conf/restore-on-pitr-minio-gtid.yaml create mode 100755 
e2e-tests/pitr-pxb/conf/restore-on-pitr-minio-time.yaml create mode 100755 e2e-tests/pitr-pxb/conf/restore-on-pitr-minio.yaml create mode 100755 e2e-tests/pitr-pxb/run delete mode 100755 e2e-tests/pitr-xtrabackup/run create mode 100644 e2e-tests/restore-to-encrypted-cluster-pxb/compare/operator.log create mode 100644 e2e-tests/restore-to-encrypted-cluster-pxb/compare/select-1.sql create mode 100644 e2e-tests/restore-to-encrypted-cluster-pxb/compare/select-2.sql create mode 100644 e2e-tests/restore-to-encrypted-cluster-pxb/conf/client.yml create mode 100644 e2e-tests/restore-to-encrypted-cluster-pxb/conf/on-demand-backup-aws-s3.yml create mode 100644 e2e-tests/restore-to-encrypted-cluster-pxb/conf/on-demand-backup-pvc.yml create mode 100644 e2e-tests/restore-to-encrypted-cluster-pxb/conf/restore-on-demand-backup-aws-s3.yaml create mode 100644 e2e-tests/restore-to-encrypted-cluster-pxb/conf/restore-on-demand-backup-pvc.yaml create mode 100755 e2e-tests/restore-to-encrypted-cluster-pxb/run delete mode 100755 e2e-tests/restore-to-encrypted-cluster-xtrabackup/run diff --git a/e2e-tests/demand-backup-cloud-pxb/compare/operator.log b/e2e-tests/demand-backup-cloud-pxb/compare/operator.log new file mode 100644 index 0000000000..e69de29bb2 diff --git a/e2e-tests/demand-backup-cloud-pxb/compare/select-1.sql b/e2e-tests/demand-backup-cloud-pxb/compare/select-1.sql new file mode 100644 index 0000000000..8e738f4cf2 --- /dev/null +++ b/e2e-tests/demand-backup-cloud-pxb/compare/select-1.sql @@ -0,0 +1 @@ +100500 diff --git a/e2e-tests/demand-backup-cloud-pxb/compare/select-2.sql b/e2e-tests/demand-backup-cloud-pxb/compare/select-2.sql new file mode 100644 index 0000000000..88cf282b62 --- /dev/null +++ b/e2e-tests/demand-backup-cloud-pxb/compare/select-2.sql @@ -0,0 +1,2 @@ +100500 +100501 diff --git a/e2e-tests/demand-backup-cloud-pxb/conf/demand-backup-cloud.yml b/e2e-tests/demand-backup-cloud-pxb/conf/demand-backup-cloud.yml new file mode 100644 index 0000000000..9e7ebf7d1a --- 
/dev/null +++ b/e2e-tests/demand-backup-cloud-pxb/conf/demand-backup-cloud.yml @@ -0,0 +1,116 @@ +apiVersion: pxc.percona.com/v1-6-0 +kind: PerconaXtraDBCluster +metadata: + name: demand-backup-cloud + finalizers: + - percona.com/delete-pxc-pods-in-order + # annotations: + # percona.com/issue-vault-token: "true" +spec: + secretsName: my-cluster-secrets + vaultSecretName: some-name-vault + pause: false + pxc: + size: 3 + image: -pxc + configuration: | + [mysqld] + wsrep_log_conflicts + log_error_verbosity=3 + wsrep_debug=1 + [sst] + xbstream-opts=--decompress + [xtrabackup] + compress=lz4 + resources: + requests: + memory: 0.1G + cpu: 100m + limits: + memory: "2G" + cpu: "1" + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 2Gi + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + proxysql: + enabled: true + size: 2 + image: -proxysql + resources: + requests: + memory: 0.1G + cpu: 100m + limits: + memory: 1G + cpu: 700m + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 2Gi + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + pmm: + enabled: false + image: perconalab/pmm-client:1.17.1 + serverHost: monitoring-service + serverUser: pmm + backup: + activeDeadlineSeconds: 3600 + image: -backup + storages: + pvc: + type: filesystem + volume: + persistentVolumeClaim: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi + aws-s3: + type: s3 + s3: + region: us-east-1 + bucket: operator-testing + credentialsSecret: aws-s3-secret + containerOptions: + args: + xbstream: + - '--parallel=2' + xbcloud: + - '--parallel=2' + aws-s3-iam: + type: s3 + s3: + region: us-east-1 + bucket: operator-testing + containerOptions: + args: + xbstream: + - '--parallel=2' + xbcloud: + - '--parallel=2' + minio: + type: s3 + s3: + credentialsSecret: minio-secret + region: us-east-1 + bucket: operator-testing + endpointUrl: http://minio-service.#namespace:9000/ + gcp-cs: + type: s3 + s3: + 
credentialsSecret: gcp-cs-secret + region: us-east-1 + bucket: operator-testing + endpointUrl: https://storage.googleapis.com + azure-blob: + type: azure + azure: + credentialsSecret: azure-secret + container: operator-testing + storageClass: Cool diff --git a/e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-aws-s3-iam.yml b/e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-aws-s3-iam.yml new file mode 100644 index 0000000000..9d48bea983 --- /dev/null +++ b/e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-aws-s3-iam.yml @@ -0,0 +1,8 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterBackup +metadata: + name: on-demand-backup-aws-s3-iam +spec: + pxcCluster: demand-backup-cloud + storageName: aws-s3-iam + activeDeadlineSeconds: 7200 diff --git a/e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-aws-s3.yml b/e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-aws-s3.yml new file mode 100644 index 0000000000..b640340d20 --- /dev/null +++ b/e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-aws-s3.yml @@ -0,0 +1,10 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterBackup +metadata: + name: on-demand-backup-aws-s3 + finalizers: + - percona.com/delete-backup +spec: + pxcCluster: demand-backup-cloud + storageName: aws-s3 + activeDeadlineSeconds: 7200 diff --git a/e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-azure-blob.yml b/e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-azure-blob.yml new file mode 100644 index 0000000000..f6563139a7 --- /dev/null +++ b/e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-azure-blob.yml @@ -0,0 +1,9 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterBackup +metadata: + name: on-demand-backup-azure-blob + finalizers: + - percona.com/delete-backup +spec: + pxcCluster: demand-backup-cloud + storageName: azure-blob diff --git a/e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-gcp-cs.yml 
b/e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-gcp-cs.yml new file mode 100644 index 0000000000..d31b4317b0 --- /dev/null +++ b/e2e-tests/demand-backup-cloud-pxb/conf/on-demand-backup-gcp-cs.yml @@ -0,0 +1,9 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterBackup +metadata: + name: on-demand-backup-gcp-cs + finalizers: + - percona.com/delete-backup +spec: + pxcCluster: demand-backup-cloud + storageName: gcp-cs diff --git a/e2e-tests/demand-backup-cloud-pxb/conf/restore-from-source-backup.json b/e2e-tests/demand-backup-cloud-pxb/conf/restore-from-source-backup.json new file mode 100644 index 0000000000..7a2e4bd77c --- /dev/null +++ b/e2e-tests/demand-backup-cloud-pxb/conf/restore-from-source-backup.json @@ -0,0 +1,10 @@ +{ + "apiVersion": "pxc.percona.com/v1", + "kind": "PerconaXtraDBClusterRestore", + "metadata": { + "name": "on-demand-backup-aws-s3-2" + }, + "spec": { + "pxcCluster": "demand-backup-cloud" + } +} diff --git a/e2e-tests/demand-backup-cloud-pxb/conf/restore-on-demand-backup-aws-s3.yaml b/e2e-tests/demand-backup-cloud-pxb/conf/restore-on-demand-backup-aws-s3.yaml new file mode 100644 index 0000000000..599b0518a2 --- /dev/null +++ b/e2e-tests/demand-backup-cloud-pxb/conf/restore-on-demand-backup-aws-s3.yaml @@ -0,0 +1,12 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterRestore +metadata: + name: on-demand-backup-aws-s3 +spec: + pxcCluster: demand-backup-cloud + backupName: on-demand-backup-aws-s3 + containerOptions: + args: + xtrabackup: + - "--innodb-undo-directory=./undo" + - "--defaults-file=backup-my.cnf" diff --git a/e2e-tests/demand-backup-cloud-pxb/conf/restore-on-demand-backup-azure-blob.yaml b/e2e-tests/demand-backup-cloud-pxb/conf/restore-on-demand-backup-azure-blob.yaml new file mode 100644 index 0000000000..07dd65b627 --- /dev/null +++ b/e2e-tests/demand-backup-cloud-pxb/conf/restore-on-demand-backup-azure-blob.yaml @@ -0,0 +1,7 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterRestore 
+metadata: + name: on-demand-backup-azure-blob +spec: + pxcCluster: demand-backup-cloud + backupName: on-demand-backup-azure-blob diff --git a/e2e-tests/demand-backup-cloud-pxb/conf/restore-on-demand-backup-gcp-cs.yaml b/e2e-tests/demand-backup-cloud-pxb/conf/restore-on-demand-backup-gcp-cs.yaml new file mode 100644 index 0000000000..f65f28edbe --- /dev/null +++ b/e2e-tests/demand-backup-cloud-pxb/conf/restore-on-demand-backup-gcp-cs.yaml @@ -0,0 +1,7 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterRestore +metadata: + name: on-demand-backup-gcp-cs +spec: + pxcCluster: demand-backup-cloud + backupName: on-demand-backup-gcp-cs diff --git a/e2e-tests/demand-backup-cloud-pxb/run b/e2e-tests/demand-backup-cloud-pxb/run new file mode 100755 index 0000000000..7dfc3fcea1 --- /dev/null +++ b/e2e-tests/demand-backup-cloud-pxb/run @@ -0,0 +1,237 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath $(dirname $0)) +. ${test_dir}/../functions + +set_debug + +get_container_options() { + backup_name=$1 + cluster_name=$2 + + backup_options=$(kubectl_bin get pxc-backup "$backup_name" -o jsonpath='{.spec.containerOptions}') + if [[ -n $backup_options ]]; then + echo "$backup_options" + return + fi + + storage_name=$(kubectl_bin get pxc-backup "$backup_name" -o jsonpath='{.spec.storageName}') + storage_options=$(kubectl_bin get pxc "$cluster_name" -o jsonpath="{.spec.backup.storages.$storage_name.containerOptions}") + if [[ -n $storage_options ]]; then + echo "$storage_options" + return + fi +} + +run_recovery_from_source() { + local storage_type=${1:-s3} + local backup_name=${2:-on-demand-backup-aws-s3} + local remove_bucket_or_container_name=${3:-""} + local remove_prefix_from_destination=${4:-""} + + remove_options="" + if [ -n "$remove_bucket_or_container_name" ]; then + remove_options=", .$storage_type.bucket, .$storage_type.container" + fi + + remove_prefix='test' + if [ -n "${remove_prefix_from_destination}" -a -z "${remove_bucket_or_container_name}" ]; then 
+ remove_prefix="$storage_type" + fi + + desc "$storage_type restore with backup source" + restore_name="on-demand-backup-$storage_type" + + restore_json=$(jq ".metadata.name=\"$restore_name\"" "$test_dir/conf/restore-from-source-backup.json") + + container_options=$(get_container_options "$backup_name" "$cluster") + if [[ -n $container_options ]]; then + restore_json=$(echo "$restore_json" | jq ".spec.containerOptions=$container_options") + fi + + backup_source_json=$(kubectl_bin get pxc-backup "$backup_name" -o json \ + | jq -c '.status | {'"$storage_type"', destination} | del(.'"$storage_type"'.endpointUrl, .'"$storage_type"'.storageClass '"$remove_options"')' \ + | $sed "s|$remove_prefix://||") + echo "$restore_json" \ + | jq ".spec.backupSource=$backup_source_json" \ + | kubectl_bin apply -f - + + sleep 30 + wait_for_running "$cluster-proxysql" 1 + wait_for_running "$cluster-pxc" 3 + sleep 30 + desc 'check data after backup from source' + compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-0.$cluster-pxc -uroot -proot_password" + compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-1.$cluster-pxc -uroot -proot_password" + compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-2.$cluster-pxc -uroot -proot_password" +} + +# If backup upload was started and failed for some reason the cloud storage should be cleaned up during second try +delete_backup_pod() { + local backup_name=$1 + + desc "Delete ${backup_name} pod during backup" + echo "Waiting for ${backup_name} pod to become Running" + sleep 1 + kubectl_bin wait --for=jsonpath='{.status.phase}'=Running pod --selector=percona.com/backup-job-name=xb-${backup_name} --timeout=120s + + backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') + + # sleep for 25 seconds so that an upload is started + # todo: improve this by monitoring the sidecar logs instead + sleep 25 + + echo 
"Deleting pod/${backup_pod} during backup" + kubectl logs -f ${backup_pod} | while IFS= read -r line; do + if [[ $line =~ 'Backup requested' ]]; then + kubectl delete pod --force ${backup_pod} + break + fi + done + +} + +check_cloud_storage_cleanup() { + local backup_name=$1 + + desc "Check storage cleanup of ${backup_name}" + if [[ $(kubectl_bin get events --field-selector involvedObject.kind=Job,involvedObject.name=xb-${backup_name} | grep -c "Created pod") == '1' ]]; then + echo "There should be 2+ pods started by job. First backup finished too quick" + exit 1 + fi + + local cluster_name=$(kubectl_bin get pxc-backup ${backup_name} -o jsonpath='{.spec.pxcCluster}') + if [[ -z $cluster_name ]]; then + echo "Cluster name is not set on backup ${backup_name}" + exit 1 + fi + + local pxc_pod="${cluster_name}-pxc-0" + if kubectl_bin logs ${pxc_pod} -c xtrabackup | grep 'Deleting Backup'; then + echo "Cleanup was performed." + else + echo "Something went wrong. Delete was not performed." + kubectl_bin logs ${pxc_pod} -c xtrabackup + exit 1 + fi +} + +run_backup_with_delete() { + local backup_name=$1 + + desc "make backup ${backup_name}" + kubectl_bin apply \ + -f $test_dir/conf/${backup_name}.yml + delete_backup_pod ${backup_name} + wait_backup "${backup_name}" + check_cloud_storage_cleanup "${backup_name}" +} + +main() { + if [ -n "$SKIP_REMOTE_BACKUPS" ]; then + echo "Skipping test because SKIP_REMOTE_BACKUPS variable is set!" 
+ exit 0 + else + if command -v aws >/dev/null 2>&1; then + echo "AWS CLI is installed" + else + echo "AWS CLI is not installed" + exit 1 + fi + if command -v gsutil >/dev/null 2>&1; then + echo "gutil is installed" + else + echo "gsutil command is not installed" + exit 1 + fi + if command -v az >/dev/null 2>&1; then + echo "Azure CLI is installed" + else + echo "Azure CLI is not installed" + exit 1 + fi + + create_infra $namespace + + cluster="demand-backup-cloud" + spinup_pxc "$cluster" "$test_dir/conf/$cluster.yml" + + backup_name_aws="on-demand-backup-aws-s3" + backup_name_gcp="on-demand-backup-gcp-cs" + backup_name_azure="on-demand-backup-azure-blob" + + desc "Run backup ${backup_name_aws} for $cluster cluster" + run_backup_with_delete "${backup_name_aws}" + + desc "Run recovery from s3 for $cluster cluster" + run_recovery_check "$cluster" "${backup_name_aws}" + + desc "Check correct order for custom options" + + desc "Run recovery from s3 source with default options" + run_recovery_from_source + + desc 'Run recovery from s3 source without s3:// prefix in destination' + run_recovery_from_source 's3' "${backup_name_aws}" '' 'remove_prefix_from_destination' + + desc 'Run recovery from s3 source without bucket option' + run_recovery_from_source 's3' "${backup_name_aws}" 'remove_bucket_name' + + desc "Run backup ${backup_name_gcp} for $cluster cluster" + run_backup_with_delete "${backup_name_gcp}" + + desc "Run recovery from s3 for $cluster cluster" + run_recovery_check "$cluster" "${backup_name_gcp}" + + desc "Run backup ${backup_name_azure} for $cluster cluster" + run_backup_with_delete "${backup_name_azure}" + + desc "Run recovery from azure for $cluster cluster" + run_recovery_check "$cluster" "${backup_name_azure}" + + desc "Run recovery from azure source with default options" + run_recovery_from_source 'azure' "${backup_name_azure}" + + desc 'Run recovery from azure source without azure:// prefix in destination' + run_recovery_from_source 'azure' 
"${backup_name_azure}" '' 'remove_prefix_from_destination' + + desc 'Run recovery from azure source without container option' + run_recovery_from_source 'azure' "${backup_name_azure}" 'remove_container_name' + + backup_dest_aws=$(kubectl_bin get pxc-backup "$backup_name_aws" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 6-) + backup_dest_gcp=$(kubectl_bin get pxc-backup "$backup_name_gcp" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 6-) + backup_dest_azure=$(kubectl_bin get pxc-backup "$backup_name_azure" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 9-) + + desc "Check backup existence" + setup_aws_credentials + check_backup_existence_aws "$backup_dest_aws" "/xtrabackup_binlog_info.00000000000000000000" + + setup_gcs_credentials + check_backup_existence_gcs "${backup_dest_gcp}" "/.xtrabackup_binlog_info.00000000000000000000" + + setup_azure_credentials + check_backup_existence_azure "${backup_dest_azure}" "/.xtrabackup_binlog_info.00000000000000000000" + + kubectl_bin delete pxc-backup --all + + desc "Check backup deletion" + check_backup_deletion_aws "$backup_dest_aws" "/.xtrabackup_binlog_info.00000000000000000000" + check_backup_deletion_gcs "${backup_dest_gcp}" "/.xtrabackup_binlog_info.00000000000000000000" + check_backup_deletion_azure "${backup_dest_azure}" "/.xtrabackup_binlog_info.00000000000000000000" + + if [ "$EKS" = 1 ]; then + backup_name_aws_iam="on-demand-backup-aws-s3-iam" + desc "Run backup ${backup_name_aws_iam} for $cluster cluster using IAM" + run_backup_with_delete "${backup_name_aws_iam}" + desc "Check backup existence for $backup_name_aws_iam" + backup_dest_aws_iam=$(kubectl_bin get pxc-backup "$backup_name_aws_iam" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 6-) + check_backup_existence_aws "${backup_dest_aws_iam}" ".xtrabackup_binlog_info.00000000000000000000" + fi + + destroy $namespace + desc "test passed" + fi +} +main diff --git 
a/e2e-tests/demand-backup-cloud-xtrabackup/run b/e2e-tests/demand-backup-cloud-xtrabackup/run deleted file mode 100755 index be001800d7..0000000000 --- a/e2e-tests/demand-backup-cloud-xtrabackup/run +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -set -o errexit - -test_dir=$(realpath $(dirname $0)) -PXCO_FEATURE_GATES="BackupXtrabackup=true" ${test_dir}/../demand-backup-cloud/run diff --git a/e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-azure-demand-backup-cloud-xtrabackup.yml b/e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-azure-demand-backup-cloud-xtrabackup.yml deleted file mode 100644 index a17de02514..0000000000 --- a/e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-azure-demand-backup-cloud-xtrabackup.yml +++ /dev/null @@ -1,112 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - generation: 1 - labels: - app.kubernetes.io/instance: demand-backup-cloud - app.kubernetes.io/managed-by: percona-xtradb-cluster-operator - app.kubernetes.io/name: percona-xtradb-cluster - app.kubernetes.io/part-of: percona-xtradb-cluster - percona.com/restore-job-name: restore-job-on-demand-backup-azure-demand-backup-cloud - name: restore-job-on-demand-backup-azure-demand-backup-cloud - ownerReferences: - - controller: true - kind: PerconaXtraDBClusterRestore - name: on-demand-backup-azure -spec: - backoffLimit: 4 - completionMode: NonIndexed - completions: 1 - manualSelector: false - parallelism: 1 - podReplacementPolicy: TerminatingOrFailed - selector: - matchLabels: {} - suspend: false - template: - metadata: - labels: - app.kubernetes.io/instance: demand-backup-cloud - app.kubernetes.io/managed-by: percona-xtradb-cluster-operator - app.kubernetes.io/name: percona-xtradb-cluster - app.kubernetes.io/part-of: percona-xtradb-cluster - percona.com/restore-job-name: restore-job-on-demand-backup-azure-demand-backup-cloud - spec: - containers: - - command: - - /opt/percona/backup/recovery-cloud.sh 
- env: - - name: PXC_SERVICE - value: demand-backup-cloud-pxc - - name: PXC_USER - value: xtrabackup - - name: PXC_PASS - valueFrom: - secretKeyRef: - key: xtrabackup - name: my-cluster-secrets - - name: VERIFY_TLS - value: "true" - - name: XTRABACKUP_ENABLED - value: "true" - - name: AZURE_STORAGE_ACCOUNT - valueFrom: - secretKeyRef: - key: AZURE_STORAGE_ACCOUNT_NAME - name: azure-secret - - name: AZURE_ACCESS_KEY - valueFrom: - secretKeyRef: - key: AZURE_STORAGE_ACCOUNT_KEY - name: azure-secret - - name: AZURE_ENDPOINT - - name: AZURE_STORAGE_CLASS - - name: XB_USE_MEMORY - value: 100MB - imagePullPolicy: Always - name: xtrabackup - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /datadir - name: datadir - - mountPath: /etc/mysql/vault-keyring-secret - name: vault-keyring-secret - - mountPath: /opt/percona - name: bin - dnsPolicy: ClusterFirst - initContainers: - - command: - - /backup-init-entrypoint.sh - imagePullPolicy: Always - name: backup-init - resources: - limits: - cpu: 50m - memory: 50M - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /opt/percona - name: bin - restartPolicy: Never - schedulerName: default-scheduler - securityContext: - fsGroup: 1001 - supplementalGroups: - - 1001 - serviceAccount: default - serviceAccountName: default - terminationGracePeriodSeconds: 30 - volumes: - - name: datadir - persistentVolumeClaim: - claimName: datadir-demand-backup-cloud-pxc-0 - - name: vault-keyring-secret - secret: - defaultMode: 420 - optional: true - secretName: some-name-vault - - emptyDir: {} - name: bin diff --git a/e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-s3-demand-backup-cloud-xtrabackup.yml b/e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-s3-demand-backup-cloud-xtrabackup.yml deleted file mode 100644 index f8490d705f..0000000000 --- 
a/e2e-tests/demand-backup-cloud/compare/job.batch_restore-job-on-demand-backup-s3-demand-backup-cloud-xtrabackup.yml +++ /dev/null @@ -1,117 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - generation: 1 - labels: - app.kubernetes.io/instance: demand-backup-cloud - app.kubernetes.io/managed-by: percona-xtradb-cluster-operator - app.kubernetes.io/name: percona-xtradb-cluster - app.kubernetes.io/part-of: percona-xtradb-cluster - percona.com/restore-job-name: restore-job-on-demand-backup-s3-demand-backup-cloud - name: restore-job-on-demand-backup-s3-demand-backup-cloud - ownerReferences: - - controller: true - kind: PerconaXtraDBClusterRestore - name: on-demand-backup-s3 -spec: - backoffLimit: 4 - completionMode: NonIndexed - completions: 1 - manualSelector: false - parallelism: 1 - podReplacementPolicy: TerminatingOrFailed - selector: - matchLabels: {} - suspend: false - template: - metadata: - labels: - app.kubernetes.io/instance: demand-backup-cloud - app.kubernetes.io/managed-by: percona-xtradb-cluster-operator - app.kubernetes.io/name: percona-xtradb-cluster - app.kubernetes.io/part-of: percona-xtradb-cluster - percona.com/restore-job-name: restore-job-on-demand-backup-s3-demand-backup-cloud - spec: - containers: - - command: - - /opt/percona/backup/recovery-cloud.sh - env: - - name: PXC_SERVICE - value: demand-backup-cloud-pxc - - name: PXC_USER - value: xtrabackup - - name: PXC_PASS - valueFrom: - secretKeyRef: - key: xtrabackup - name: my-cluster-secrets - - name: VERIFY_TLS - value: "true" - - name: XTRABACKUP_ENABLED - value: "true" - - name: ENDPOINT - - name: DEFAULT_REGION - value: us-east-1 - - name: ACCESS_KEY_ID - valueFrom: - secretKeyRef: - key: AWS_ACCESS_KEY_ID - name: aws-s3-secret - - name: SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - key: AWS_SECRET_ACCESS_KEY - name: aws-s3-secret - - name: XBCLOUD_EXTRA_ARGS - value: --parallel=2 - - name: XBSTREAM_EXTRA_ARGS - value: --parallel=2 - - name: XB_USE_MEMORY - value: 100MB - 
imagePullPolicy: Always - name: xtrabackup - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /datadir - name: datadir - - mountPath: /etc/mysql/vault-keyring-secret - name: vault-keyring-secret - - mountPath: /opt/percona - name: bin - dnsPolicy: ClusterFirst - initContainers: - - command: - - /backup-init-entrypoint.sh - imagePullPolicy: Always - name: backup-init - resources: - limits: - cpu: 50m - memory: 50M - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /opt/percona - name: bin - restartPolicy: Never - schedulerName: default-scheduler - securityContext: - fsGroup: 1001 - supplementalGroups: - - 1001 - serviceAccount: default - serviceAccountName: default - terminationGracePeriodSeconds: 30 - volumes: - - name: datadir - persistentVolumeClaim: - claimName: datadir-demand-backup-cloud-pxc-0 - - name: vault-keyring-secret - secret: - defaultMode: 420 - optional: true - secretName: some-name-vault - - emptyDir: {} - name: bin diff --git a/e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-aws-s3-xtrabackup.yml b/e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-aws-s3-xtrabackup.yml deleted file mode 100644 index 287be89d04..0000000000 --- a/e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-aws-s3-xtrabackup.yml +++ /dev/null @@ -1,132 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - generation: 1 - labels: - app.kubernetes.io/instance: demand-backup-cloud - app.kubernetes.io/managed-by: percona-xtradb-cluster-operator - app.kubernetes.io/name: percona-xtradb-cluster - app.kubernetes.io/part-of: percona-xtradb-cluster - percona.com/backup-job-name: xb-on-demand-backup-aws-s3 - percona.com/backup-name: on-demand-backup-aws-s3 - percona.com/backup-type: xtrabackup - percona.com/cluster: demand-backup-cloud - name: xb-on-demand-backup-aws-s3 - ownerReferences: - - 
controller: true - kind: PerconaXtraDBClusterBackup - name: on-demand-backup-aws-s3 -spec: - activeDeadlineSeconds: 7200 - backoffLimit: 6 - completionMode: NonIndexed - completions: 1 - manualSelector: true - parallelism: 1 - podReplacementPolicy: TerminatingOrFailed - selector: - matchLabels: - app.kubernetes.io/instance: demand-backup-cloud - app.kubernetes.io/managed-by: percona-xtradb-cluster-operator - app.kubernetes.io/name: percona-xtradb-cluster - app.kubernetes.io/part-of: percona-xtradb-cluster - percona.com/backup-job-name: xb-on-demand-backup-aws-s3 - percona.com/backup-name: on-demand-backup-aws-s3 - percona.com/backup-type: xtrabackup - percona.com/cluster: demand-backup-cloud - suspend: false - template: - metadata: - labels: - app.kubernetes.io/instance: demand-backup-cloud - app.kubernetes.io/managed-by: percona-xtradb-cluster-operator - app.kubernetes.io/name: percona-xtradb-cluster - app.kubernetes.io/part-of: percona-xtradb-cluster - percona.com/backup-job-name: xb-on-demand-backup-aws-s3 - percona.com/backup-name: on-demand-backup-aws-s3 - percona.com/backup-type: xtrabackup - percona.com/cluster: demand-backup-cloud - spec: - containers: - - command: - - /opt/percona/xtrabackup-run-backup - env: - - name: HOST - value: demand-backup-cloud-pxc-0.demand-backup-cloud-pxc.namespace.svc.cluster.local - - name: STORAGE_TYPE - value: s3 - - name: VERIFY_TLS - value: "true" - - name: BACKUP_NAME - value: on-demand-backup-aws-s3 - - name: ACCESS_KEY_ID - valueFrom: - secretKeyRef: - key: AWS_ACCESS_KEY_ID - name: aws-s3-secret - - name: SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - key: AWS_SECRET_ACCESS_KEY - name: aws-s3-secret - - name: DEFAULT_REGION - value: us-east-1 - - name: ENDPOINT - - name: S3_BUCKET - value: operator-testing - - name: BACKUP_DEST - value: demand-backup-cloud-2025-12-04-15:51:44-full - imagePullPolicy: Always - name: xtrabackup - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: 
File - volumeMounts: - - mountPath: /opt/percona - name: bin - - mountPath: /etc/mysql/ssl - name: ssl - - mountPath: /etc/mysql/ssl-internal - name: ssl-internal - - mountPath: /etc/mysql/vault-keyring-secret - name: vault-keyring-secret - dnsPolicy: ClusterFirst - initContainers: - - command: - - /backup-init-entrypoint.sh - imagePullPolicy: Always - name: backup-init - resources: - limits: - cpu: 50m - memory: 50M - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /opt/percona - name: bin - restartPolicy: Never - schedulerName: default-scheduler - securityContext: - fsGroup: 1001 - supplementalGroups: - - 1001 - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: bin - - name: ssl - secret: - defaultMode: 420 - optional: true - secretName: demand-backup-cloud-ssl - - name: ssl-internal - secret: - defaultMode: 420 - optional: true - secretName: demand-backup-cloud-ssl-internal - - name: vault-keyring-secret - secret: - defaultMode: 420 - optional: true - secretName: some-name-vault diff --git a/e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-azure-blob-xtrabackup.yml b/e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-azure-blob-xtrabackup.yml deleted file mode 100644 index a738169e12..0000000000 --- a/e2e-tests/demand-backup-cloud/compare/job.batch_xb-on-demand-backup-azure-blob-xtrabackup.yml +++ /dev/null @@ -1,129 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - generation: 1 - labels: - app.kubernetes.io/instance: demand-backup-cloud - app.kubernetes.io/managed-by: percona-xtradb-cluster-operator - app.kubernetes.io/name: percona-xtradb-cluster - app.kubernetes.io/part-of: percona-xtradb-cluster - percona.com/backup-job-name: xb-on-demand-backup-azure-blob - percona.com/backup-name: on-demand-backup-azure-blob - percona.com/backup-type: xtrabackup - percona.com/cluster: demand-backup-cloud - name: xb-on-demand-backup-azure-blob - 
ownerReferences: - - controller: true - kind: PerconaXtraDBClusterBackup - name: on-demand-backup-azure-blob -spec: - backoffLimit: 6 - completionMode: NonIndexed - completions: 1 - manualSelector: true - parallelism: 1 - podReplacementPolicy: TerminatingOrFailed - selector: - matchLabels: - app.kubernetes.io/instance: demand-backup-cloud - app.kubernetes.io/managed-by: percona-xtradb-cluster-operator - app.kubernetes.io/name: percona-xtradb-cluster - app.kubernetes.io/part-of: percona-xtradb-cluster - percona.com/backup-job-name: xb-on-demand-backup-azure-blob - percona.com/backup-name: on-demand-backup-azure-blob - percona.com/backup-type: xtrabackup - percona.com/cluster: demand-backup-cloud - suspend: false - template: - metadata: - labels: - app.kubernetes.io/instance: demand-backup-cloud - app.kubernetes.io/managed-by: percona-xtradb-cluster-operator - app.kubernetes.io/name: percona-xtradb-cluster - app.kubernetes.io/part-of: percona-xtradb-cluster - percona.com/backup-job-name: xb-on-demand-backup-azure-blob - percona.com/backup-name: on-demand-backup-azure-blob - percona.com/backup-type: xtrabackup - percona.com/cluster: demand-backup-cloud - spec: - containers: - - command: - - /opt/percona/xtrabackup-run-backup - env: - - name: HOST - value: demand-backup-cloud-pxc-0.demand-backup-cloud-pxc.namespace.svc.cluster.local - - name: STORAGE_TYPE - value: azure - - name: VERIFY_TLS - value: "true" - - name: BACKUP_NAME - value: on-demand-backup-azure-blob - - name: AZURE_STORAGE_ACCOUNT - valueFrom: - secretKeyRef: - key: AZURE_STORAGE_ACCOUNT_NAME - name: azure-secret - - name: AZURE_ACCESS_KEY - valueFrom: - secretKeyRef: - key: AZURE_STORAGE_ACCOUNT_KEY - name: azure-secret - - name: AZURE_ENDPOINT - - name: AZURE_STORAGE_CLASS - value: Cool - - name: BACKUP_DEST - value: demand-backup-cloud-2025-12-04-16:25:39-full - imagePullPolicy: Always - name: xtrabackup - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - 
volumeMounts: - - mountPath: /opt/percona - name: bin - - mountPath: /etc/mysql/ssl - name: ssl - - mountPath: /etc/mysql/ssl-internal - name: ssl-internal - - mountPath: /etc/mysql/vault-keyring-secret - name: vault-keyring-secret - dnsPolicy: ClusterFirst - initContainers: - - command: - - /backup-init-entrypoint.sh - imagePullPolicy: Always - name: backup-init - resources: - limits: - cpu: 50m - memory: 50M - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /opt/percona - name: bin - restartPolicy: Never - schedulerName: default-scheduler - securityContext: - fsGroup: 1001 - supplementalGroups: - - 1001 - terminationGracePeriodSeconds: 30 - volumes: - - emptyDir: {} - name: bin - - name: ssl - secret: - defaultMode: 420 - optional: true - secretName: demand-backup-cloud-ssl - - name: ssl-internal - secret: - defaultMode: 420 - optional: true - secretName: demand-backup-cloud-ssl-internal - - name: vault-keyring-secret - secret: - defaultMode: 420 - optional: true - secretName: some-name-vault diff --git a/e2e-tests/demand-backup-cloud/run b/e2e-tests/demand-backup-cloud/run index e02e75b0b4..a1618288d7 100755 --- a/e2e-tests/demand-backup-cloud/run +++ b/e2e-tests/demand-backup-cloud/run @@ -5,12 +5,6 @@ set -o errexit test_dir=$(realpath $(dirname $0)) . ${test_dir}/../functions -if is_feature_gate_enabled "BackupXtrabackup"; then - . ${test_dir}/utils/pxb.sh -else - . 
${test_dir}/utils/sst.sh -fi - set_debug get_container_options() { @@ -74,11 +68,28 @@ run_recovery_from_source() { compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-2.$cluster-pxc -uroot -proot_password" } +# If backup upload was started and failed for some reason the cloud storage should be cleaned up during second try +delete_backup_pod() { + local backup_name=$1 + + desc "Delete ${backup_name} pod during SST" + echo "Waiting for ${backup_name} pod to become Running" + sleep 1 + kubectl_bin wait --for=jsonpath='{.status.phase}'=Running pod --selector=percona.com/backup-job-name=xb-${backup_name} --timeout=120s + + backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') + + echo "Deleting pod/${backup_pod} during SST upload" + kubectl logs -f ${backup_pod} | while IFS= read -r line; do + if [[ $line =~ \.ibd\. ]]; then + kubectl delete pod --force ${backup_pod} + break + fi + done + +} + check_optins_in_restore_pod() { - if is_feature_gate_enabled "BackupXtrabackup"; then - return 0 - fi - local restore_name=$1 local cluster_name=$2 @@ -97,6 +108,36 @@ check_optins_in_restore_pod() { fi } +check_cloud_storage_cleanup() { + local backup_name=$1 + + desc "Check storage cleanup of ${backup_name}" + if [[ $(kubectl_bin get events --field-selector involvedObject.kind=Job,involvedObject.name=xb-${backup_name} | grep -c "Created pod") == '1' ]]; then + echo "There should be 2+ pods started by job. 
First backup finished too quick" + exit 1 + fi + local backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') + if [[ $IMAGE_PXC =~ 5\.7 ]]; then + # There are 2 deletes during backup: $backup_dir_sst_info & $backup_dir + deletes_num=$(kubectl_bin logs ${backup_pod} | grep -c 'Delete completed.') + if [[ ${deletes_num} -ge '2' ]]; then + echo "Bucket cleanup was successful" + else + echo "Something went wrong. Delete was performed for $deletes_num. Expected: 2." + kubectl_bin logs ${backup_pod} + exit 1 + fi + else + if kubectl_bin logs ${backup_pod} | grep 'Object deleted successfully before attempt 1. Exiting.'; then + echo "Something went wrong. Delete was not performed." + kubectl_bin logs ${backup_pod} + exit 1 + else + echo "Clenup was performed." + fi + fi +} + run_backup_with_delete() { local backup_name=$1 @@ -156,15 +197,10 @@ main() { desc 'Run recovery from s3 source without s3:// prefix in destination' run_recovery_from_source 's3' "${backup_name_aws}" '' 'remove_prefix_from_destination' - postfix="" - if is_feature_gate_enabled "BackupXtrabackup"; then - postfix="-xtrabackup" - fi - desc 'Run recovery from s3 source without bucket option' run_recovery_from_source 's3' "${backup_name_aws}" 'remove_bucket_name' - compare_kubectl job.batch/xb-"${backup_name_aws}" "${postfix}" - compare_kubectl job.batch/restore-job-on-demand-backup-s3-demand-backup-cloud "${postfix}" + compare_kubectl job.batch/xb-"${backup_name_aws}" + compare_kubectl job.batch/restore-job-on-demand-backup-s3-demand-backup-cloud desc "Run backup ${backup_name_gcp} for $cluster cluster" run_backup_with_delete "${backup_name_gcp}" @@ -186,34 +222,29 @@ main() { desc 'Run recovery from azure source without container option' run_recovery_from_source 'azure' "${backup_name_azure}" 'remove_container_name' - compare_kubectl job.batch/xb-"${backup_name_azure}" "${postfix}" - compare_kubectl 
job.batch/restore-job-on-demand-backup-azure-demand-backup-cloud "${postfix}" + compare_kubectl job.batch/xb-"${backup_name_azure}" + compare_kubectl job.batch/restore-job-on-demand-backup-azure-demand-backup-cloud backup_dest_aws=$(kubectl_bin get pxc-backup "$backup_name_aws" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 6-) backup_dest_gcp=$(kubectl_bin get pxc-backup "$backup_name_gcp" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 6-) backup_dest_azure=$(kubectl_bin get pxc-backup "$backup_name_azure" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 9-) - check_backup_file_name='.sst_info/sst_info.00000000000000000000' - if is_feature_gate_enabled "BackupXtrabackup"; then - check_backup_file_name='/xtrabackup_binlog_info.00000000000000000000' - fi - - # desc "Check backup existence" + desc "Check backup existence" setup_aws_credentials - check_backup_existence_aws "$backup_dest_aws" "$check_backup_file_name" + check_backup_existence_aws "$backup_dest_aws" ".sst_info/sst_info.00000000000000000000" setup_gcs_credentials - check_backup_existence_gcs "${backup_dest_gcp}" "$check_backup_file_name" + check_backup_existence_gcs "${backup_dest_gcp}" setup_azure_credentials - check_backup_existence_azure "${backup_dest_azure}" "$check_backup_file_name" + check_backup_existence_azure "${backup_dest_azure}" ".sst_info/sst_info.00000000000000000000" kubectl_bin delete pxc-backup --all desc "Check backup deletion" - check_backup_deletion_aws "$backup_dest_aws" "$check_backup_file_name" - check_backup_deletion_gcs "${backup_dest_gcp}" "$check_backup_file_name" - check_backup_deletion_azure "${backup_dest_azure}" "$check_backup_file_name" + check_backup_deletion_aws "$backup_dest_aws" ".sst_info/sst_info.00000000000000000000" + check_backup_deletion_gcs "${backup_dest_gcp}" + check_backup_deletion_azure "${backup_dest_azure}" ".sst_info/sst_info.00000000000000000000" if [ "$EKS" = 1 ]; then 
backup_name_aws_iam="on-demand-backup-aws-s3-iam" @@ -221,7 +252,7 @@ main() { run_backup_with_delete "${backup_name_aws_iam}" desc "Check backup existence for $backup_name_aws_iam" backup_dest_aws_iam=$(kubectl_bin get pxc-backup "$backup_name_aws_iam" -o jsonpath='{.status.destination}' | sed -e 's/.json$//' | cut -c 6-) - check_backup_existence_aws "${backup_dest_aws_iam}" "$check_backup_file_name" + check_backup_existence_aws "${backup_dest_aws_iam}" ".sst_info/sst_info.00000000000000000000" fi destroy $namespace diff --git a/e2e-tests/demand-backup-cloud/utils/pxb.sh b/e2e-tests/demand-backup-cloud/utils/pxb.sh deleted file mode 100644 index 27c2e5e505..0000000000 --- a/e2e-tests/demand-backup-cloud/utils/pxb.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -delete_backup_pod() { - local backup_name=$1 - - desc "Delete ${backup_name} pod during backup" - echo "Waiting for ${backup_name} pod to become Running" - sleep 1 - kubectl_bin wait --for=jsonpath='{.status.phase}'=Running pod --selector=percona.com/backup-job-name=xb-${backup_name} --timeout=120s - - backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') - - # sleep for 25 seconds so that an upload is started - sleep 25 - - echo "Deleting pod/${backup_pod} during backup" - kubectl logs -f ${backup_pod} | while IFS= read -r line; do - if [[ $line =~ 'Backup requested' ]]; then - kubectl delete pod --force ${backup_pod} - break - fi - done - -} - -check_cloud_storage_cleanup() { - local backup_name=$1 - - desc "Check storage cleanup of ${backup_name}" - if [[ $(kubectl_bin get events --field-selector involvedObject.kind=Job,involvedObject.name=xb-${backup_name} | grep -c "Created pod") == '1' ]]; then - echo "There should be 2+ pods started by job. 
First backup finished too quick" - exit 1 - fi - - local cluster_name=$(kubectl_bin get pxc-backup ${backup_name} -o jsonpath='{.spec.pxcCluster}') - if [[ -z $cluster_name ]]; then - echo "Cluster name is not set on backup ${backup_name}" - exit 1 - fi - - local pxc_pod="${cluster_name}-pxc-0" - if kubectl_bin logs ${pxc_pod} -c xtrabackup | grep 'Deleting Backup'; then - echo "Cleanup was performed." - else - echo "Something went wrong. Delete was not performed." - kubectl_bin logs ${pxc_pod} -c xtrabackup - exit 1 - fi - -} \ No newline at end of file diff --git a/e2e-tests/demand-backup-cloud/utils/sst.sh b/e2e-tests/demand-backup-cloud/utils/sst.sh deleted file mode 100644 index 2c38f65d32..0000000000 --- a/e2e-tests/demand-backup-cloud/utils/sst.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -delete_backup_pod() { - local backup_name=$1 - - desc "Delete ${backup_name} pod during SST" - echo "Waiting for ${backup_name} pod to become Running" - sleep 1 - kubectl_bin wait --for=jsonpath='{.status.phase}'=Running pod --selector=percona.com/backup-job-name=xb-${backup_name} --timeout=120s - - backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') - - echo "Deleting pod/${backup_pod} during SST upload" - kubectl logs -f ${backup_pod} | while IFS= read -r line; do - if [[ $line =~ \.ibd\. ]]; then - kubectl delete pod --force ${backup_pod} - break - fi - done - -} - -check_cloud_storage_cleanup() { - local backup_name=$1 - - desc "Check storage cleanup of ${backup_name}" - if [[ $(kubectl_bin get events --field-selector involvedObject.kind=Job,involvedObject.name=xb-${backup_name} | grep -c "Created pod") == '1' ]]; then - echo "There should be 2+ pods started by job. 
First backup finished too quick" - exit 1 - fi - local backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') - if [[ $IMAGE_PXC =~ 5\.7 ]]; then - # There are 2 deletes during backup: $backup_dir_sst_info & $backup_dir - deletes_num=$(kubectl_bin logs ${backup_pod} | grep -c 'Delete completed.') - if [[ ${deletes_num} -ge '2' ]]; then - echo "Bucket cleanup was successful" - else - echo "Something went wrong. Delete was performed for $deletes_num. Expected: 2." - kubectl_bin logs ${backup_pod} - exit 1 - fi - else - if kubectl_bin logs ${backup_pod} | grep 'Object deleted successfully before attempt 1. Exiting.'; then - echo "Something went wrong. Delete was not performed." - kubectl_bin logs ${backup_pod} - exit 1 - else - echo "Clenup was performed." - fi - fi -} \ No newline at end of file diff --git a/e2e-tests/demand-backup-encrypted-with-tls-pxb/compare/operator.log b/e2e-tests/demand-backup-encrypted-with-tls-pxb/compare/operator.log new file mode 100644 index 0000000000..e69de29bb2 diff --git a/e2e-tests/demand-backup-encrypted-with-tls-pxb/compare/select-1.sql b/e2e-tests/demand-backup-encrypted-with-tls-pxb/compare/select-1.sql new file mode 100644 index 0000000000..8e738f4cf2 --- /dev/null +++ b/e2e-tests/demand-backup-encrypted-with-tls-pxb/compare/select-1.sql @@ -0,0 +1 @@ +100500 diff --git a/e2e-tests/demand-backup-encrypted-with-tls-pxb/compare/select-2.sql b/e2e-tests/demand-backup-encrypted-with-tls-pxb/compare/select-2.sql new file mode 100644 index 0000000000..88cf282b62 --- /dev/null +++ b/e2e-tests/demand-backup-encrypted-with-tls-pxb/compare/select-2.sql @@ -0,0 +1,2 @@ +100500 +100501 diff --git a/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/client.yml b/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/client.yml new file mode 100644 index 0000000000..32b1c22ac5 --- /dev/null +++ b/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/client.yml @@ -0,0 
+1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: backup-client +spec: + replicas: 1 + selector: + matchLabels: + name: backup-client + template: + metadata: + labels: + name: backup-client + spec: + containers: + - name: backup-client + command: ["bash", "-c", "sleep 100500"] + image: -backup + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /backup + name: backup + volumes: + - name: backup + persistentVolumeClaim: + claimName: -backup-pvc diff --git a/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/on-demand-backup-aws-s3.yml b/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/on-demand-backup-aws-s3.yml new file mode 100644 index 0000000000..d1f03a114c --- /dev/null +++ b/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/on-demand-backup-aws-s3.yml @@ -0,0 +1,7 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterBackup +metadata: + name: on-demand-backup-aws-s3 +spec: + pxcCluster: some-name + storageName: aws-s3 diff --git a/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/on-demand-backup-pvc.yml b/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/on-demand-backup-pvc.yml new file mode 100644 index 0000000000..5084635f63 --- /dev/null +++ b/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/on-demand-backup-pvc.yml @@ -0,0 +1,7 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterBackup +metadata: + name: on-demand-backup-pvc +spec: + pxcCluster: some-name + storageName: pvc diff --git a/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/restore-on-demand-backup-aws-s3.yaml b/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/restore-on-demand-backup-aws-s3.yaml new file mode 100644 index 0000000000..e88d562d67 --- /dev/null +++ b/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/restore-on-demand-backup-aws-s3.yaml @@ -0,0 +1,7 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterRestore +metadata: + name: on-demand-backup-aws-s3 +spec: + pxcCluster: some-name + backupName: 
on-demand-backup-aws-s3 diff --git a/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/restore-on-demand-backup-pvc.yaml b/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/restore-on-demand-backup-pvc.yaml new file mode 100644 index 0000000000..5301070f88 --- /dev/null +++ b/e2e-tests/demand-backup-encrypted-with-tls-pxb/conf/restore-on-demand-backup-pvc.yaml @@ -0,0 +1,7 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterRestore +metadata: + name: on-demand-backup-pvc +spec: + pxcCluster: some-name + backupName: on-demand-backup-pvc diff --git a/e2e-tests/demand-backup-encrypted-with-tls-pxb/run b/e2e-tests/demand-backup-encrypted-with-tls-pxb/run new file mode 100755 index 0000000000..ef88f8f8e9 --- /dev/null +++ b/e2e-tests/demand-backup-encrypted-with-tls-pxb/run @@ -0,0 +1,63 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath $(dirname $0)) +. ${test_dir}/../functions + +set_debug + +PXCO_FEATURE_GATES="BackupXtrabackup=true" + +function jq_filter() { + local vault_root=$1 + jq -r "[ .[] | .=\"'$vault_root/\"+.+\"'\" ] | join(\", \")" +} + +function get_secret_mount_point() { + if [[ $IMAGE_PXC =~ 8\.4 ]]; then + kubectl_bin get -f ${conf_dir}/vault-secret-84.yaml -o yaml \ + | yq '.data."keyring_vault.conf"' | base64 -d \ + | jq -r .secret_mount_point + else + kubectl_bin get -f "${conf_dir}/vault-secret.yaml" -o json \ + | grep -E -o "secret_mount_point = \w+" \ + | awk -F "=[ ]*" '{print $2}' + fi +} + +main() { + if [ -n "$SKIP_REMOTE_BACKUPS" ]; then + echo "Skipping test because SKIP_REMOTE_BACKUPS variable is set!" 
+ exit 0 + fi + + create_infra $namespace + + vault1="vault-service-1-${RANDOM}" + protocol="https" + start_vault $vault1 $protocol + token1=$(jq -r ".root_token" <"$tmp_dir/$vault1") + ip1="$protocol://$vault1.$vault1.svc.cluster.local" + + cluster="some-name" + spinup_pxc "$cluster" "$conf_dir/$cluster.yml" + keyring_plugin_must_be_in_use "$cluster" + table_must_be_encrypted "$cluster" "myApp" + + run_backup "$cluster" "on-demand-backup-aws-s3" + run_recovery_check "$cluster" "on-demand-backup-aws-s3" + kubectl_bin delete -f "$test_dir/conf/restore-on-demand-backup-aws-s3.yaml" + table_must_be_encrypted "$cluster" "myApp" + keyring_plugin_must_be_in_use "$cluster" + + for i in $vault1 $vault2; do + helm uninstall $i || : + kubectl_bin delete --grace-period=0 --force=true namespace $i & + done + + destroy $namespace + desc "test passed" +} + +main diff --git a/e2e-tests/demand-backup-encrypted-with-tls-xtrabackup/run b/e2e-tests/demand-backup-encrypted-with-tls-xtrabackup/run deleted file mode 100755 index 0a1284269e..0000000000 --- a/e2e-tests/demand-backup-encrypted-with-tls-xtrabackup/run +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -set -o errexit - -test_dir=$(realpath $(dirname $0)) -PXCO_FEATURE_GATES="BackupXtrabackup=true" ${test_dir}/../demand-backup-encrypted-with-tls/run diff --git a/e2e-tests/demand-backup-encrypted-with-tls/run b/e2e-tests/demand-backup-encrypted-with-tls/run index ad9d7a10c4..7e9785f22a 100755 --- a/e2e-tests/demand-backup-encrypted-with-tls/run +++ b/e2e-tests/demand-backup-encrypted-with-tls/run @@ -38,15 +38,12 @@ main() { keyring_plugin_must_be_in_use "$cluster" table_must_be_encrypted "$cluster" "myApp" - # todo: add support for pvc - if !is_feature_gate_enabled "BackupXtrabackup"; then - run_backup "$cluster" "on-demand-backup-pvc" - run_recovery_check "$cluster" "on-demand-backup-pvc" - kubectl_bin delete -f "$test_dir/conf/restore-on-demand-backup-pvc.yaml" - check_pvc_md5 "on-demand-backup-pvc" - table_must_be_encrypted 
"$cluster" "myApp" - keyring_plugin_must_be_in_use "$cluster" - fi + run_backup "$cluster" "on-demand-backup-pvc" + run_recovery_check "$cluster" "on-demand-backup-pvc" + kubectl_bin delete -f "$test_dir/conf/restore-on-demand-backup-pvc.yaml" + check_pvc_md5 "on-demand-backup-pvc" + table_must_be_encrypted "$cluster" "myApp" + keyring_plugin_must_be_in_use "$cluster" if [ -z "$SKIP_REMOTE_BACKUPS" ]; then run_backup "$cluster" "on-demand-backup-aws-s3" @@ -56,40 +53,37 @@ main() { keyring_plugin_must_be_in_use "$cluster" fi - # in xtrabackup mode, we do not store our own transition keys, so this test is not applicable - if !is_feature_gate_enabled "BackupXtrabackup"; then - mountpt=$(get_secret_mount_point) - transition_keys=$(kubectl_bin exec --namespace="$vault1" -it $vault1-0 -- sh -c " - VAULT_TOKEN=$token1 vault kv list -format=json $mountpt/backup/" \ - | jq_filter "$mountpt/backup/") - - vault2="vault-service-2-${RANDOM}" - start_vault $vault2 $protocol - token2=$(jq -r ".root_token" <"$tmp_dir/$vault2") - ip2="$protocol://$vault2.$vault2.svc.cluster.local" - - kubectl_bin run -i --tty vault-cp --image=perconalab/vault-cp:latest --restart=Never -- sh -c " - sed -i 's/token=cfg.old_token)/token=cfg.old_token, verify=False)/' /src/vault-cp.py \ - && sed -i 's/token=cfg.new_token)/token=cfg.new_token, verify=False)/' /src/vault-cp.py \ - && echo \" + mountpt=$(get_secret_mount_point) + transition_keys=$(kubectl_bin exec --namespace="$vault1" -it $vault1-0 -- sh -c " + VAULT_TOKEN=$token1 vault kv list -format=json $mountpt/backup/" \ + | jq_filter "$mountpt/backup/") + + vault2="vault-service-2-${RANDOM}" + start_vault $vault2 $protocol + token2=$(jq -r ".root_token" <"$tmp_dir/$vault2") + ip2="$protocol://$vault2.$vault2.svc.cluster.local" + + kubectl_bin run -i --tty vault-cp --image=perconalab/vault-cp:latest --restart=Never -- sh -c " + sed -i 's/token=cfg.old_token)/token=cfg.old_token, verify=False)/' /src/vault-cp.py \ + && sed -i 
's/token=cfg.new_token)/token=cfg.new_token, verify=False)/' /src/vault-cp.py \ + && echo \" old_url = '$ip1:8200' old_token = '$token1' new_url = '$ip2:8200' new_token = '$token2' secrets = [ $transition_keys ] \" > /src/config.py - python3 /src/vault-cp.py - " + python3 /src/vault-cp.py + " - run_recovery_check "$cluster" "on-demand-backup-pvc" + run_recovery_check "$cluster" "on-demand-backup-pvc" + table_must_be_encrypted "$cluster" "myApp" + keyring_plugin_must_be_in_use "$cluster" + + if [ -z "$SKIP_REMOTE_BACKUPS" ]; then + run_recovery_check "$cluster" "on-demand-backup-aws-s3" table_must_be_encrypted "$cluster" "myApp" keyring_plugin_must_be_in_use "$cluster" - - if [ -z "$SKIP_REMOTE_BACKUPS" ]; then - run_recovery_check "$cluster" "on-demand-backup-aws-s3" - table_must_be_encrypted "$cluster" "myApp" - keyring_plugin_must_be_in_use "$cluster" - fi fi for i in $vault1 $vault2; do diff --git a/e2e-tests/functions b/e2e-tests/functions index 4e6139eea5..77b59a2b5e 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -69,9 +69,6 @@ sed=$(which gsed || which sed) date=$(which gdate || which date) test_name=$(basename $test_dir) -if is_feature_gate_enabled "BackupXtrabackup"; then - test_name="${test_name}-xtrabackup" -fi namespace="${test_name}-${RANDOM}" replica_namespace="${test_name}-replica-${RANDOM}" conf_dir=$(realpath $test_dir/../conf || :) diff --git a/e2e-tests/pitr-pxb/compare/secret_pitr-mysql-init.yml b/e2e-tests/pitr-pxb/compare/secret_pitr-mysql-init.yml new file mode 100644 index 0000000000..c724b0d524 --- /dev/null +++ b/e2e-tests/pitr-pxb/compare/secret_pitr-mysql-init.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +data: + init.sql: U0VUIFNFU1NJT04gd3NyZXBfb249T0ZGOwpTRVQgU0VTU0lPTiBzcWxfbG9nX2Jpbj0wOwpBTFRFUiBVU0VSICd4dHJhYmFja3VwJ0AnJScgSURFTlRJRklFRCBCWSAncGFzczEnOwpBTFRFUiBVU0VSICd4dHJhYmFja3VwJ0AnJScgSURFTlRJRklFRCBCWSAncGFzczInOwpBTFRFUiBVU0VSICd4dHJhYmFja3VwJ0AnJScgSURFTlRJRklFRCBCWSAncGFzczMnOwo= +kind: Secret +metadata: + name: 
pitr-mysql-init +type: Opaque diff --git a/e2e-tests/pitr-pxb/compare/select-1.sql b/e2e-tests/pitr-pxb/compare/select-1.sql new file mode 100644 index 0000000000..8e738f4cf2 --- /dev/null +++ b/e2e-tests/pitr-pxb/compare/select-1.sql @@ -0,0 +1 @@ +100500 diff --git a/e2e-tests/pitr-pxb/compare/select-2.sql b/e2e-tests/pitr-pxb/compare/select-2.sql new file mode 100644 index 0000000000..769df1828a --- /dev/null +++ b/e2e-tests/pitr-pxb/compare/select-2.sql @@ -0,0 +1,3 @@ +100500 +100501 +100502 diff --git a/e2e-tests/pitr-pxb/compare/select-3.sql b/e2e-tests/pitr-pxb/compare/select-3.sql new file mode 100644 index 0000000000..769df1828a --- /dev/null +++ b/e2e-tests/pitr-pxb/compare/select-3.sql @@ -0,0 +1,3 @@ +100500 +100501 +100502 diff --git a/e2e-tests/pitr-pxb/compare/select-4.sql b/e2e-tests/pitr-pxb/compare/select-4.sql new file mode 100644 index 0000000000..50d9931263 --- /dev/null +++ b/e2e-tests/pitr-pxb/compare/select-4.sql @@ -0,0 +1,6 @@ +100500 +100501 +100502 +100503 +100504 +100505 diff --git a/e2e-tests/pitr-pxb/conf/cert.yml b/e2e-tests/pitr-pxb/conf/cert.yml new file mode 100644 index 0000000000..5faf14d524 --- /dev/null +++ b/e2e-tests/pitr-pxb/conf/cert.yml @@ -0,0 +1,9 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: tls-minio +spec: + commonName: minio-service + secretName: tls-minio + issuerRef: + name: selfsigning-issuer \ No newline at end of file diff --git a/e2e-tests/pitr-pxb/conf/issuer.yml b/e2e-tests/pitr-pxb/conf/issuer.yml new file mode 100644 index 0000000000..275093f73f --- /dev/null +++ b/e2e-tests/pitr-pxb/conf/issuer.yml @@ -0,0 +1,6 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigning-issuer +spec: + selfSigned: {} \ No newline at end of file diff --git a/e2e-tests/pitr-pxb/conf/on-pitr-minio.yml b/e2e-tests/pitr-pxb/conf/on-pitr-minio.yml new file mode 100755 index 0000000000..42e996a800 --- /dev/null +++ b/e2e-tests/pitr-pxb/conf/on-pitr-minio.yml @@ -0,0 +1,7 @@ 
+apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterBackup +metadata: + name: on-pitr-minio +spec: + pxcCluster: pitr + storageName: minio diff --git a/e2e-tests/pitr-pxb/conf/pitr.yml b/e2e-tests/pitr-pxb/conf/pitr.yml new file mode 100755 index 0000000000..4e65f52f36 --- /dev/null +++ b/e2e-tests/pitr-pxb/conf/pitr.yml @@ -0,0 +1,83 @@ +apiVersion: pxc.percona.com/v1-6-0 +kind: PerconaXtraDBCluster +metadata: + name: pitr + finalizers: + - percona.com/delete-pxc-pods-in-order + # annotations: + # percona.com/issue-vault-token: "true" +spec: + secretsName: my-cluster-secrets + vaultSecretName: some-name-vault + pause: false + pxc: + size: 3 + image: -pxc + configuration: | + [sst] + xbstream-opts=--decompress + [xtrabackup] + compress=lz4 + [mysqld] + require_secure_transport=ON + resources: + requests: + memory: 0.1G + cpu: 100m + limits: + memory: "2G" + cpu: "1" + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 2Gi + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + proxysql: + enabled: true + size: 2 + image: -proxysql + resources: + requests: + memory: 0.1G + cpu: 100m + limits: + memory: 1G + cpu: 700m + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 2Gi + affinity: + antiAffinityTopologyKey: "kubernetes.io/hostname" + pmm: + enabled: false + image: perconalab/pmm-client:1.17.1 + serverHost: monitoring-service + serverUser: pmm + backup: + image: -backup + imagePullPolicy: Always + pitr: + enabled: true + storageName: minio-binlogs + timeBetweenUploads: 55 + storages: + minio: + type: s3 + verifyTLS: false + s3: + credentialsSecret: minio-secret + region: us-east-1 + bucket: operator-testing + endpointUrl: https://minio-service.#namespace:9000/ + minio-binlogs: + type: s3 + verifyTLS: false + s3: + credentialsSecret: minio-secret + region: us-east-1 + bucket: operator-testing/binlogs + endpointUrl: https://minio-service.#namespace:9000/ diff --git 
a/e2e-tests/pitr-pxb/conf/restore-on-pitr-minio-gtid.yaml b/e2e-tests/pitr-pxb/conf/restore-on-pitr-minio-gtid.yaml new file mode 100755 index 0000000000..6160779906 --- /dev/null +++ b/e2e-tests/pitr-pxb/conf/restore-on-pitr-minio-gtid.yaml @@ -0,0 +1,13 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterRestore +metadata: + name: restore-on-pitr-minio-gtid +spec: + pxcCluster: pitr + backupName: on-pitr-minio + pitr: + type: transaction + gtid: + backupSource: + storageName: "minio-binlogs" + diff --git a/e2e-tests/pitr-pxb/conf/restore-on-pitr-minio-time.yaml b/e2e-tests/pitr-pxb/conf/restore-on-pitr-minio-time.yaml new file mode 100755 index 0000000000..6177a2f53b --- /dev/null +++ b/e2e-tests/pitr-pxb/conf/restore-on-pitr-minio-time.yaml @@ -0,0 +1,13 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterRestore +metadata: + name: restore-on-pitr-minio-time +spec: + pxcCluster: pitr + backupName: on-pitr-minio + pitr: + type: date + date: "" + backupSource: + storageName: "minio-binlogs" + diff --git a/e2e-tests/pitr-pxb/conf/restore-on-pitr-minio.yaml b/e2e-tests/pitr-pxb/conf/restore-on-pitr-minio.yaml new file mode 100755 index 0000000000..70dbcdbe95 --- /dev/null +++ b/e2e-tests/pitr-pxb/conf/restore-on-pitr-minio.yaml @@ -0,0 +1,24 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterRestore +metadata: + name: restore-on-pitr-minio +spec: + pxcCluster: pitr + backupSource: + verifyTLS: false + destination: + s3: + bucket: operator-testing + credentialsSecret: minio-secret + endpointUrl: https://minio-service.#namespace:9000/ + region: us-east-1 + pitr: + type: latest + backupSource: + verifyTLS: false + s3: + bucket: operator-testing/binlogs + credentialsSecret: minio-secret + endpointUrl: https://minio-service.#namespace:9000/ + region: us-east-1 + diff --git a/e2e-tests/pitr-pxb/run b/e2e-tests/pitr-pxb/run new file mode 100755 index 0000000000..02093d6a37 --- /dev/null +++ b/e2e-tests/pitr-pxb/run @@ -0,0 +1,177 @@ 
+#!/bin/bash + +set -o errexit + +test_dir=$(realpath $(dirname $0)) +. ${test_dir}/../functions + +set_debug + +GTID_PATTERN='[A-F0-9a-f]{8}-[A-F0-9a-f]{4}-[A-F0-9a-f]{4}-[A-F0-9a-f]{4}-[A-F0-9a-f]{12}:[0-9]+' + +if [[ $IMAGE_PXC =~ 5\.7 ]]; then + echo "Skipping PITR test because 5.7 doesn't support it!" + exit 0 +fi + +write_test_data() { + local cluster=$1 + local config=$2 + local size="${3:-3}" + local sleep="${4:-10}" + local secretsFile="${5:-$conf_dir/secrets.yml}" + local pxcClientFile="${6:-$conf_dir/client.yml}" + + local proxy=$(get_proxy "$cluster") + + desc 'write test data' + if [[ $IMAGE_PXC =~ 5\.7 ]] && [[ "$(is_keyring_plugin_in_use "$cluster")" ]]; then + encrypt='ENCRYPTION=\"Y\"' + fi + run_mysql \ + "CREATE DATABASE IF NOT EXISTS test; use test; CREATE TABLE IF NOT EXISTS test (id int PRIMARY KEY) $encrypt;" \ + "-h $proxy -uroot -proot_password" + run_mysql \ + 'INSERT test.test (id) VALUES (100500); INSERT test.test (id) VALUES (100501); INSERT test.test (id) VALUES (100502);' \ + "-h $proxy -uroot -proot_password" + sleep 30 + for i in $(seq 0 $((size - 1))); do + compare_mysql_cmd "select-3" "SELECT * from test.test;" "-h $cluster-pxc-$i.$cluster-pxc -uroot -proot_password" + done + + if [ "$(is_keyring_plugin_in_use "$cluster")" ]; then + table_must_be_encrypted "$cluster" "test" + fi +} + +write_data_for_pitr() { + local cluster=$1 + local proxy=$(get_proxy "$cluster") + + desc "write data for pitr" + run_mysql \ + 'INSERT test.test (id) VALUES (100503); INSERT test.test (id) VALUES (100504); INSERT test.test (id) VALUES (100505);' \ + "-h $proxy -uroot -proot_password" +} + +check_latest_restorable_time() { + local backup=$1 + + desc 'check latest restorable time' + latest_restorable_time=$(kubectl_bin get pxc-backup ${backup} -o jsonpath='{.status.latestRestorableTime}') + if [[ ${latest_restorable_time} =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}T ]]; then + echo "Latest restorable time is ${latest_restorable_time}" + else + echo "Latest 
restorable time is not set" + exit 1 + fi +} + +main() { + create_infra $namespace + deploy_cert_manager + kubectl_bin apply -f "$test_dir/conf/issuer.yml" + kubectl_bin apply -f "$test_dir/conf/cert.yml" + sleep 25 + # We are using minio with tls enabled to check if `verifyTLS: false` works fine + start_minio "tls-minio" + + cluster="pitr" + spinup_pxc "$cluster" "$test_dir/conf/$cluster.yml" + + run_backup "$cluster" "on-pitr-minio" + + # Temporarily skipping this check + # desc 'check for passwords leak' + # check_passwords_leak + + write_test_data "$cluster" + + # test changing xtrabackup password + desc "changing xtrabackup password multiple times" + patch_secret "my-cluster-secrets" "xtrabackup" "$(echo -n "pass1" | base64)" + wait_cluster_consistency ${cluster} 3 2 + patch_secret "my-cluster-secrets" "xtrabackup" "$(echo -n "pass2" | base64)" + wait_cluster_consistency ${cluster} 3 2 + patch_secret "my-cluster-secrets" "xtrabackup" "$(echo -n "pass3" | base64)" + wait_cluster_consistency ${cluster} 3 2 + + compare_kubectl secret/${cluster}-mysql-init + + desc 'show binlog events' + proxy=$(get_proxy "$cluster") + run_mysql "SHOW BINLOG EVENTS IN 'binlog.000005';" "-h ${proxy} -uroot -proot_password" + run_mysql "SHOW BINLOG EVENTS IN 'binlog.000006';" "-h ${proxy} -uroot -proot_password" + + time_now=$(run_mysql "SELECT now();" "-h ${proxy} -uroot -proot_password") + gtid=$(run_mysql "SELECT @@gtid_executed;" "-h ${proxy} -uroot -proot_password" | $sed 's/\([a-f0-9-]\{36\}\):[0-9]*-\([0-9]*\).*/\1:\2/') + + if [[ ! ${gtid} =~ ${GTID_PATTERN} ]]; then + printf "Some garbage --> %s <-- instead of legit GTID. Exiting" ${gtid} + exit 1 + fi + + write_data_for_pitr "$cluster" + sleep 120 # need to wait while collector catch new data + check_latest_restorable_time "on-pitr-minio" + + timeout=60 + binlogs_exist=0 + for i in $(seq 1 5); do + echo "Checking if binlogs exist in bucket (attempt $i)..." 
+ binlogs_exist=$( + kubectl_bin run -n "${NAMESPACE}" -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ + /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ + /usr/bin/aws --endpoint-url https://minio-service:9000 --no-verify-ssl s3 ls operator-testing/binlogs/ | grep -c "binlog" | cat + exit "${PIPESTATUS[0]}" + ) + if [ "$binlogs_exist" -gt 0 ]; then + echo "${binlogs_exist} binlogs found in bucket" + break + else + d=$((timeout * i)) + echo "No binlogs found in bucket. Sleeping for ${d} seconds..." + sleep ${d} + fi + done + + if [ "$binlogs_exist" -eq 0 ]; then + echo "Binlogs are not found in S3" + exit 1 + fi + + run_recovery_check_pitr "$cluster" "restore-on-pitr-minio-gtid" "on-pitr-minio" "select-2" "" "" "$gtid" + desc "done gtid type" + + # Temporarily skipping this check + # desc 'check for passwords leak' + # check_passwords_leak + + sleep 60 + if [[ $(kubectl get pxc-backup on-pitr-minio -o jsonpath='{.status.conditions}' | grep -c 'Binlog with GTID set') -eq 1 ]]; then + echo "Binlog gap detected" + exit 1 + fi + + run_recovery_check_pitr "$cluster" "restore-on-pitr-minio-time" "on-pitr-minio" "select-3" "$time_now" "" "" + desc "done date type" + sleep 60 + if [[ $(kubectl get pxc-backup on-pitr-minio -o jsonpath='{.status.conditions}' | grep -c 'Binlog with GTID set') -eq 1 ]]; then + echo "Binlog gap detected" + exit 1 + fi + + dest=$(sed 's,/,\\/,g' <<<$(kubectl get pxc-backup on-pitr-minio -o jsonpath='{.status.destination}')) + run_recovery_check_pitr "$cluster" "restore-on-pitr-minio" "on-pitr-minio" "select-4" "" "$dest" "" + desc "done latest type" + sleep 60 + if [[ $(kubectl get pxc-backup on-pitr-minio -o jsonpath='{.status.conditions}' | grep -c 'Binlog with GTID set') -eq 1 ]]; then + echo "Binlog gap detected" + exit 1 + fi + + destroy $namespace + desc "test passed" +} + +main diff --git a/e2e-tests/pitr-xtrabackup/run b/e2e-tests/pitr-xtrabackup/run 
deleted file mode 100755 index c3cf46c736..0000000000 --- a/e2e-tests/pitr-xtrabackup/run +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -set -o errexit - -test_dir=$(realpath $(dirname $0)) -PXCO_FEATURE_GATES="BackupXtrabackup=true" ${test_dir}/../pitr/run diff --git a/e2e-tests/restore-to-encrypted-cluster-pxb/compare/operator.log b/e2e-tests/restore-to-encrypted-cluster-pxb/compare/operator.log new file mode 100644 index 0000000000..e69de29bb2 diff --git a/e2e-tests/restore-to-encrypted-cluster-pxb/compare/select-1.sql b/e2e-tests/restore-to-encrypted-cluster-pxb/compare/select-1.sql new file mode 100644 index 0000000000..8e738f4cf2 --- /dev/null +++ b/e2e-tests/restore-to-encrypted-cluster-pxb/compare/select-1.sql @@ -0,0 +1 @@ +100500 diff --git a/e2e-tests/restore-to-encrypted-cluster-pxb/compare/select-2.sql b/e2e-tests/restore-to-encrypted-cluster-pxb/compare/select-2.sql new file mode 100644 index 0000000000..88cf282b62 --- /dev/null +++ b/e2e-tests/restore-to-encrypted-cluster-pxb/compare/select-2.sql @@ -0,0 +1,2 @@ +100500 +100501 diff --git a/e2e-tests/restore-to-encrypted-cluster-pxb/conf/client.yml b/e2e-tests/restore-to-encrypted-cluster-pxb/conf/client.yml new file mode 100644 index 0000000000..32b1c22ac5 --- /dev/null +++ b/e2e-tests/restore-to-encrypted-cluster-pxb/conf/client.yml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: backup-client +spec: + replicas: 1 + selector: + matchLabels: + name: backup-client + template: + metadata: + labels: + name: backup-client + spec: + containers: + - name: backup-client + command: ["bash", "-c", "sleep 100500"] + image: -backup + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /backup + name: backup + volumes: + - name: backup + persistentVolumeClaim: + claimName: -backup-pvc diff --git a/e2e-tests/restore-to-encrypted-cluster-pxb/conf/on-demand-backup-aws-s3.yml b/e2e-tests/restore-to-encrypted-cluster-pxb/conf/on-demand-backup-aws-s3.yml new file mode 100644 index 
0000000000..d1f03a114c --- /dev/null +++ b/e2e-tests/restore-to-encrypted-cluster-pxb/conf/on-demand-backup-aws-s3.yml @@ -0,0 +1,7 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterBackup +metadata: + name: on-demand-backup-aws-s3 +spec: + pxcCluster: some-name + storageName: aws-s3 diff --git a/e2e-tests/restore-to-encrypted-cluster-pxb/conf/on-demand-backup-pvc.yml b/e2e-tests/restore-to-encrypted-cluster-pxb/conf/on-demand-backup-pvc.yml new file mode 100644 index 0000000000..5084635f63 --- /dev/null +++ b/e2e-tests/restore-to-encrypted-cluster-pxb/conf/on-demand-backup-pvc.yml @@ -0,0 +1,7 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterBackup +metadata: + name: on-demand-backup-pvc +spec: + pxcCluster: some-name + storageName: pvc diff --git a/e2e-tests/restore-to-encrypted-cluster-pxb/conf/restore-on-demand-backup-aws-s3.yaml b/e2e-tests/restore-to-encrypted-cluster-pxb/conf/restore-on-demand-backup-aws-s3.yaml new file mode 100644 index 0000000000..e88d562d67 --- /dev/null +++ b/e2e-tests/restore-to-encrypted-cluster-pxb/conf/restore-on-demand-backup-aws-s3.yaml @@ -0,0 +1,7 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterRestore +metadata: + name: on-demand-backup-aws-s3 +spec: + pxcCluster: some-name + backupName: on-demand-backup-aws-s3 diff --git a/e2e-tests/restore-to-encrypted-cluster-pxb/conf/restore-on-demand-backup-pvc.yaml b/e2e-tests/restore-to-encrypted-cluster-pxb/conf/restore-on-demand-backup-pvc.yaml new file mode 100644 index 0000000000..5301070f88 --- /dev/null +++ b/e2e-tests/restore-to-encrypted-cluster-pxb/conf/restore-on-demand-backup-pvc.yaml @@ -0,0 +1,7 @@ +apiVersion: pxc.percona.com/v1 +kind: PerconaXtraDBClusterRestore +metadata: + name: on-demand-backup-pvc +spec: + pxcCluster: some-name + backupName: on-demand-backup-pvc diff --git a/e2e-tests/restore-to-encrypted-cluster-pxb/run b/e2e-tests/restore-to-encrypted-cluster-pxb/run new file mode 100755 index 0000000000..9bedd4bd17 --- 
/dev/null +++ b/e2e-tests/restore-to-encrypted-cluster-pxb/run @@ -0,0 +1,38 @@ +#!/bin/bash + +set -o errexit + +test_dir=$(realpath $(dirname $0)) +. ${test_dir}/../functions + +set_debug + +main() { + if [ -n "$SKIP_REMOTE_BACKUPS" ]; then + echo "Skipping test because SKIP_REMOTE_BACKUPS variable is set!" + exit 0 + fi + + create_infra $namespace + + cluster="some-name" + spinup_pxc "$cluster" "$conf_dir/$cluster.yml" + keyring_plugin_must_not_be_in_use "$cluster" + table_must_not_be_encrypted "$cluster" "myApp" + + run_backup "$cluster" "on-demand-backup-aws-s3" + + vault1="vault-service-1-${RANDOM}" + start_vault $vault1 + + run_recovery_check "$cluster" "on-demand-backup-aws-s3" + keyring_plugin_must_be_in_use "$cluster" + table_must_not_be_encrypted "$cluster" "myApp" + + helm uninstall $vault1 || : + kubectl_bin delete --grace-period=0 --force=true namespace $vault1 & + destroy $namespace + desc "test passed" +} + +main diff --git a/e2e-tests/restore-to-encrypted-cluster-xtrabackup/run b/e2e-tests/restore-to-encrypted-cluster-xtrabackup/run deleted file mode 100755 index 6586dad5ac..0000000000 --- a/e2e-tests/restore-to-encrypted-cluster-xtrabackup/run +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -set -o errexit - -test_dir=$(realpath $(dirname $0)) -PXCO_FEATURE_GATES="BackupXtrabackup=true" ${test_dir}/../restore-to-encrypted-cluster/run diff --git a/e2e-tests/restore-to-encrypted-cluster/run b/e2e-tests/restore-to-encrypted-cluster/run index f0a4407f4d..32d0db88c6 100755 --- a/e2e-tests/restore-to-encrypted-cluster/run +++ b/e2e-tests/restore-to-encrypted-cluster/run @@ -15,25 +15,17 @@ main() { keyring_plugin_must_not_be_in_use "$cluster" table_must_not_be_encrypted "$cluster" "myApp" - # todo: add support for pvc - if !is_feature_gate_enabled "BackupXtrabackup"; then - run_backup "$cluster" "on-demand-backup-pvc" - fi - + run_backup "$cluster" "on-demand-backup-pvc" if [ -z "$SKIP_REMOTE_BACKUPS" ]; then run_backup "$cluster" "on-demand-backup-aws-s3" 
fi vault1="vault-service-1-${RANDOM}" start_vault $vault1 - - # todo: add support for pvc - if !is_feature_gate_enabled "BackupXtrabackup"; then - run_recovery_check "$cluster" "on-demand-backup-pvc" - check_pvc_md5 "on-demand-backup-pvc" - keyring_plugin_must_be_in_use "$cluster" - table_must_not_be_encrypted "$cluster" "myApp" - fi + run_recovery_check "$cluster" "on-demand-backup-pvc" + check_pvc_md5 "on-demand-backup-pvc" + keyring_plugin_must_be_in_use "$cluster" + table_must_not_be_encrypted "$cluster" "myApp" if [ -z "$SKIP_REMOTE_BACKUPS" ]; then run_recovery_check "$cluster" "on-demand-backup-aws-s3" diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index 0c9d5f4cf4..32a3e56eca 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -4,10 +4,10 @@ backup-storage-tls,8.0 cross-site,8.0 custom-users,8.0 demand-backup-cloud,8.0 -demand-backup-cloud-xtrabackup,8.0 +demand-backup-cloud-pxb,8.0 demand-backup-encrypted-with-tls,8.0 demand-backup-encrypted-with-tls,8.4 -demand-backup-encrypted-with-tls-xtrabackup,8.0 +demand-backup-encrypted-with-tls-pxb,8.0 demand-backup,8.0 demand-backup-flow-control,8.0 demand-backup-parallel,8.0 @@ -23,7 +23,7 @@ monitoring-pmm3,8.0 one-pod,5.7 one-pod,8.0 pitr,8.0 -pitr-xtrabackup,8.0 +pitr-pxb,8.0 pitr-gap-errors,8.0 proxy-protocol,8.0 proxy-switch,8.0 @@ -34,7 +34,7 @@ pvc-resize,8.0 recreate,8.0 restore-to-encrypted-cluster,8.0 restore-to-encrypted-cluster,8.4 -restore-to-encrypted-cluster-xtrabackup,8.0 +restore-to-encrypted-cluster-pxb,8.0 scaling-proxysql,8.0 scaling,8.0 scheduled-backup,5.7 From c5a8cbec4dd056ff178cb6adf6769426c3123133 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 15:31:34 +0530 Subject: [PATCH 64/77] add missing feature flags to test Signed-off-by: Mayank Shah --- e2e-tests/demand-backup-cloud-pxb/run | 2 ++ e2e-tests/functions | 9 --------- e2e-tests/pitr-pxb/run | 2 ++ e2e-tests/restore-to-encrypted-cluster-pxb/run | 2 ++ 4 files changed, 6 insertions(+), 9 
deletions(-) diff --git a/e2e-tests/demand-backup-cloud-pxb/run b/e2e-tests/demand-backup-cloud-pxb/run index 7dfc3fcea1..e7cfb3b21d 100755 --- a/e2e-tests/demand-backup-cloud-pxb/run +++ b/e2e-tests/demand-backup-cloud-pxb/run @@ -7,6 +7,8 @@ test_dir=$(realpath $(dirname $0)) set_debug +PXCO_FEATURE_GATES="BackupXtrabackup=true" + get_container_options() { backup_name=$1 cluster_name=$2 diff --git a/e2e-tests/functions b/e2e-tests/functions index 77b59a2b5e..56919ef835 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -29,15 +29,6 @@ if oc get projects 2>/dev/null; then OPENSHIFT=$(oc version -o json | jq -r '.openshiftVersion' | grep -oE '^[0-9]+\.[0-9]+') fi -is_feature_gate_enabled() { - local feature_gate=$1 - if [[ "$PXCO_FEATURE_GATES" == *"$feature_gate=true"* ]]; then - return 0 - else - return 1 - fi -} - add_docker_reg() { local var=$1 diff --git a/e2e-tests/pitr-pxb/run b/e2e-tests/pitr-pxb/run index 02093d6a37..a2e80d25ec 100755 --- a/e2e-tests/pitr-pxb/run +++ b/e2e-tests/pitr-pxb/run @@ -7,6 +7,8 @@ test_dir=$(realpath $(dirname $0)) set_debug +PXCO_FEATURE_GATES="BackupXtrabackup=true" + GTID_PATTERN='[A-F0-9a-f]{8}-[A-F0-9a-f]{4}-[A-F0-9a-f]{4}-[A-F0-9a-f]{4}-[A-F0-9a-f]{12}:[0-9]+' if [[ $IMAGE_PXC =~ 5\.7 ]]; then diff --git a/e2e-tests/restore-to-encrypted-cluster-pxb/run b/e2e-tests/restore-to-encrypted-cluster-pxb/run index 9bedd4bd17..3275793540 100755 --- a/e2e-tests/restore-to-encrypted-cluster-pxb/run +++ b/e2e-tests/restore-to-encrypted-cluster-pxb/run @@ -7,6 +7,8 @@ test_dir=$(realpath $(dirname $0)) set_debug +PXCO_FEATURE_GATES="BackupXtrabackup=true" + main() { if [ -n "$SKIP_REMOTE_BACKUPS" ]; then echo "Skipping test because SKIP_REMOTE_BACKUPS variable is set!" 
From 2d6e6066b6362846166d25e9b8182d3376f6cd33 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 15:44:14 +0530 Subject: [PATCH 65/77] add 8.4 to run-pr Signed-off-by: Mayank Shah --- e2e-tests/run-pr.csv | 1 + 1 file changed, 1 insertion(+) diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index 32a3e56eca..e29b02e676 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -8,6 +8,7 @@ demand-backup-cloud-pxb,8.0 demand-backup-encrypted-with-tls,8.0 demand-backup-encrypted-with-tls,8.4 demand-backup-encrypted-with-tls-pxb,8.0 +demand-backup-encrypted-with-tls-pxb,8.4 demand-backup,8.0 demand-backup-flow-control,8.0 demand-backup-parallel,8.0 From c01503381c50bc44913bd22d3f22d694a7ec936d Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 15:53:05 +0530 Subject: [PATCH 66/77] move functions to pxc package Signed-off-by: Mayank Shah --- pkg/controller/pxc/replication.go | 3 +- pkg/controller/pxc/upgrade.go | 6 +- pkg/controller/pxcbackup/controller.go | 3 +- pkg/k8s/cluster.go | 136 ----------------------- pkg/pxc/pxc.go | 148 +++++++++++++++++++++++++ 5 files changed, 155 insertions(+), 141 deletions(-) diff --git a/pkg/controller/pxc/replication.go b/pkg/controller/pxc/replication.go index 78affdb2d3..0461e4584d 100644 --- a/pkg/controller/pxc/replication.go +++ b/pkg/controller/pxc/replication.go @@ -18,6 +18,7 @@ import ( api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" "github.com/percona/percona-xtradb-cluster-operator/pkg/k8s" "github.com/percona/percona-xtradb-cluster-operator/pkg/naming" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app/statefulset" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/queries" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/users" @@ -126,7 +127,7 @@ func (r *ReconcilePerconaXtraDBCluster) reconcileReplication(ctx context.Context } } - primary, err := 
k8s.GetPrimaryPod(ctx, r.client, cr) + primary, err := pxc.GetPrimaryPod(ctx, r.client, cr) if err != nil { return errors.Wrap(err, "get primary pxc pod") } diff --git a/pkg/controller/pxc/upgrade.go b/pkg/controller/pxc/upgrade.go index 28da349f07..a903a20603 100644 --- a/pkg/controller/pxc/upgrade.go +++ b/pkg/controller/pxc/upgrade.go @@ -245,7 +245,7 @@ func (r *ReconcilePerconaXtraDBCluster) smartUpdate(ctx context.Context, sfs api return nil } - primary, err := k8s.GetPrimaryPod(ctx, r.client, cr) + primary, err := pxc.GetPrimaryPod(ctx, r.client, cr) if err != nil { return errors.Wrap(err, "get primary pod") } @@ -351,7 +351,7 @@ func (r *ReconcilePerconaXtraDBCluster) waitHostgroups( } log := logf.FromContext(ctx) - database, err := k8s.GetProxyConnection(cr, r.client) + database, err := pxc.GetProxyConnection(cr, r.client) if err != nil { return errors.Wrap(err, "connect to proxy") } @@ -396,7 +396,7 @@ func (r *ReconcilePerconaXtraDBCluster) waitUntilOnline( return nil } - database, err := k8s.GetProxyConnection(cr, r.client) + database, err := pxc.GetProxyConnection(cr, r.client) if err != nil { return errors.Wrap(err, "failed to get proxySQL db") } diff --git a/pkg/controller/pxcbackup/controller.go b/pkg/controller/pxcbackup/controller.go index 6a50723c62..4ee0d794d6 100644 --- a/pkg/controller/pxcbackup/controller.go +++ b/pkg/controller/pxcbackup/controller.go @@ -32,6 +32,7 @@ import ( "github.com/percona/percona-xtradb-cluster-operator/pkg/features" "github.com/percona/percona-xtradb-cluster-operator/pkg/k8s" "github.com/percona/percona-xtradb-cluster-operator/pkg/naming" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app/binlogcollector" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/backup" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/backup/storage" @@ -326,7 +327,7 @@ func (r *ReconcilePerconaXtraDBClusterBackup) createBackupJob( 
xtrabackupEnabled := features.Enabled(ctx, features.BackupXtrabackup) getJobSpec := func() (batchv1.JobSpec, error) { if xtrabackupEnabled { - srcNode, err := k8s.GetPrimaryPodDNSName(ctx, r.client, cluster) + srcNode, err := pxc.GetPrimaryPodDNSName(ctx, r.client, cluster) if err != nil { return batchv1.JobSpec{}, errors.Wrap(err, "failed to get primary pod dns name") } diff --git a/pkg/k8s/cluster.go b/pkg/k8s/cluster.go index 8555967340..ee9b869d23 100644 --- a/pkg/k8s/cluster.go +++ b/pkg/k8s/cluster.go @@ -2,12 +2,9 @@ package k8s import ( "context" - "fmt" "strings" "time" - stdErrors "errors" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" @@ -16,8 +13,6 @@ import ( api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app" "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app/statefulset" - "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/queries" - "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/users" ) func PauseCluster(ctx context.Context, cl client.Client, cr *api.PerconaXtraDBCluster) (bool, error) { @@ -147,137 +142,6 @@ func PauseClusterWithWait(ctx context.Context, cl client.Client, cr *api.Percona return nil } -func GetPrimaryPodDNSName(ctx context.Context, cl client.Client, cr *api.PerconaXtraDBCluster) (string, error) { - primary, err := GetPrimaryPod(ctx, cl, cr) - if err != nil { - return "", errors.Wrap(err, "get primary pod") - } - pxcSet := statefulset.NewNode(cr) - podList := corev1.PodList{} - if err := cl.List(ctx, &podList, &client.ListOptions{ - Namespace: cr.Namespace, - LabelSelector: labels.SelectorFromSet(pxcSet.Labels()), - }); err != nil { - return "", errors.Wrap(err, "get pod list") - } - pxcSts := pxcSet.StatefulSet() - for _, pod := range podList.Items { - if pod.Status.PodIP == primary || pod.Name == primary { - primary = fmt.Sprintf("%s.%s.%s", pod.Name, pxcSts.GetName(), 
pxcSts.GetNamespace()) - break - } - } - return primary, nil -} - -var NoProxyDetectedError = errors.New("can't detect enabled proxy, please enable HAProxy or ProxySQL") - -// GetPrimaryPod returns the primary pod -func GetPrimaryPod( - ctx context.Context, - cl client.Client, - cr *api.PerconaXtraDBCluster) (string, error) { - conn, err := GetProxyConnection(cr, cl) - if err != nil { - if errors.Is(err, NoProxyDetectedError) && cr.Spec.PXC.Size == 1 { - firstReadyPod := func() (string, error) { - sts := statefulset.NewNode(cr) - - podList := new(corev1.PodList) - if err := cl.List(ctx, podList, &client.ListOptions{ - Namespace: cr.Namespace, - LabelSelector: labels.SelectorFromSet(sts.Labels()), - }); err != nil { - return "", errors.Wrap(err, "get pod list") - } - - readyPods := make([]corev1.Pod, 0) - for _, pod := range podList.Items { - if IsPodReady(pod) { - readyPods = append(readyPods, pod) - } - } - if len(readyPods) == 0 { - return "", errors.New("no ready pxc pods") - } - if len(readyPods) != int(cr.Spec.PXC.Size) { - return "", errors.New("waiting for pxc resize") - } - - return readyPods[0].Status.PodIP, nil - } - host, rerr := firstReadyPod() - if rerr == nil { - return host, nil - } - - err = stdErrors.Join(rerr, err) - } - return "", errors.Wrap(err, "failed to get proxy connection") - } - defer conn.Close() - - if cr.HAProxyEnabled() { - host, err := conn.Hostname() - if err != nil { - return "", err - } - - return host, nil - } - - return conn.PrimaryHost() -} - -// GetProxyConnection returns a new connection through the proxy (ProxySQL or HAProxy) -func GetProxyConnection(cr *api.PerconaXtraDBCluster, cl client.Client) (queries.Database, error) { - var database queries.Database - var user, host string - var port, proxySize int32 - - if cr.ProxySQLEnabled() { - user = users.ProxyAdmin - host = fmt.Sprintf("%s-proxysql-unready.%s", cr.ObjectMeta.Name, cr.Namespace) - proxySize = cr.Spec.ProxySQL.Size - port = 6032 - } else if cr.HAProxyEnabled() { 
- user = users.Monitor - host = fmt.Sprintf("%s-haproxy.%s", cr.Name, cr.Namespace) - proxySize = cr.Spec.HAProxy.Size - - hasKey, err := cr.ConfigHasKey("mysqld", "proxy_protocol_networks") - if err != nil { - return database, errors.Wrap(err, "check if config has proxy_protocol_networks key") - } - - port = 3306 - if hasKey && cr.CompareVersionWith("1.6.0") >= 0 { - port = 33062 - } - } else { - return database, NoProxyDetectedError - } - - secrets := cr.Spec.SecretsName - if cr.CompareVersionWith("1.6.0") >= 0 { - secrets = "internal-" + cr.Name - } - - for i := 0; ; i++ { - db, err := queries.New(cl, cr.Namespace, secrets, user, host, port, cr.Spec.PXC.ReadinessProbes.TimeoutSeconds) - if err != nil && i < int(proxySize) { - time.Sleep(time.Second) - } else if err != nil && i == int(proxySize) { - return database, err - } else { - database = db - break - } - } - - return database, nil -} - func waitForPodsShutdown(ctx context.Context, cl client.Client, ls map[string]string, namespace string, gracePeriodSec int64) error { for i := int64(0); i < waitLimitSec+gracePeriodSec; i++ { pods := corev1.PodList{} diff --git a/pkg/pxc/pxc.go b/pkg/pxc/pxc.go index 897e458d62..1b1e960d30 100644 --- a/pkg/pxc/pxc.go +++ b/pkg/pxc/pxc.go @@ -1,3 +1,151 @@ package pxc +import ( + "context" + stdErrors "errors" + "fmt" + "time" + + api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1" + "github.com/percona/percona-xtradb-cluster-operator/pkg/k8s" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app/statefulset" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/queries" + "github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/users" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" +) + const appName = "pxc" + +var NoProxyDetectedError = errors.New("can't detect enabled proxy, please enable HAProxy or ProxySQL") + +func GetPrimaryPodDNSName(ctx 
context.Context, cl client.Client, cr *api.PerconaXtraDBCluster) (string, error) { + primary, err := GetPrimaryPod(ctx, cl, cr) + if err != nil { + return "", errors.Wrap(err, "get primary pod") + } + pxcSet := statefulset.NewNode(cr) + podList := corev1.PodList{} + if err := cl.List(ctx, &podList, &client.ListOptions{ + Namespace: cr.Namespace, + LabelSelector: labels.SelectorFromSet(pxcSet.Labels()), + }); err != nil { + return "", errors.Wrap(err, "get pod list") + } + pxcSts := pxcSet.StatefulSet() + for _, pod := range podList.Items { + if pod.Status.PodIP == primary || pod.Name == primary { + primary = fmt.Sprintf("%s.%s.%s", pod.Name, pxcSts.GetName(), pxcSts.GetNamespace()) + break + } + } + return primary, nil +} + +// GetPrimaryPod returns the primary pod +func GetPrimaryPod( + ctx context.Context, + cl client.Client, + cr *api.PerconaXtraDBCluster) (string, error) { + conn, err := GetProxyConnection(cr, cl) + if err != nil { + if errors.Is(err, NoProxyDetectedError) && cr.Spec.PXC.Size == 1 { + firstReadyPod := func() (string, error) { + sts := statefulset.NewNode(cr) + + podList := new(corev1.PodList) + if err := cl.List(ctx, podList, &client.ListOptions{ + Namespace: cr.Namespace, + LabelSelector: labels.SelectorFromSet(sts.Labels()), + }); err != nil { + return "", errors.Wrap(err, "get pod list") + } + + readyPods := make([]corev1.Pod, 0) + for _, pod := range podList.Items { + if k8s.IsPodReady(pod) { + readyPods = append(readyPods, pod) + } + } + if len(readyPods) == 0 { + return "", errors.New("no ready pxc pods") + } + if len(readyPods) != int(cr.Spec.PXC.Size) { + return "", errors.New("waiting for pxc resize") + } + + return readyPods[0].Status.PodIP, nil + } + host, rerr := firstReadyPod() + if rerr == nil { + return host, nil + } + + err = stdErrors.Join(rerr, err) + } + return "", errors.Wrap(err, "failed to get proxy connection") + } + defer conn.Close() + + if cr.HAProxyEnabled() { + host, err := conn.Hostname() + if err != nil { + return 
"", err + } + + return host, nil + } + + return conn.PrimaryHost() +} + +// GetProxyConnection returns a new connection through the proxy (ProxySQL or HAProxy) +func GetProxyConnection(cr *api.PerconaXtraDBCluster, cl client.Client) (queries.Database, error) { + var database queries.Database + var user, host string + var port, proxySize int32 + + if cr.ProxySQLEnabled() { + user = users.ProxyAdmin + host = fmt.Sprintf("%s-proxysql-unready.%s", cr.ObjectMeta.Name, cr.Namespace) + proxySize = cr.Spec.ProxySQL.Size + port = 6032 + } else if cr.HAProxyEnabled() { + user = users.Monitor + host = fmt.Sprintf("%s-haproxy.%s", cr.Name, cr.Namespace) + proxySize = cr.Spec.HAProxy.Size + + hasKey, err := cr.ConfigHasKey("mysqld", "proxy_protocol_networks") + if err != nil { + return database, errors.Wrap(err, "check if config has proxy_protocol_networks key") + } + + port = 3306 + if hasKey && cr.CompareVersionWith("1.6.0") >= 0 { + port = 33062 + } + } else { + return database, NoProxyDetectedError + } + + secrets := cr.Spec.SecretsName + if cr.CompareVersionWith("1.6.0") >= 0 { + secrets = "internal-" + cr.Name + } + + for i := 0; ; i++ { + db, err := queries.New(cl, cr.Namespace, secrets, user, host, port, cr.Spec.PXC.ReadinessProbes.TimeoutSeconds) + if err != nil && i < int(proxySize) { + time.Sleep(time.Second) + } else if err != nil && i == int(proxySize) { + return database, err + } else { + database = db + break + } + } + + return database, nil +} From a6b9cd9682472de1b75a3d1fdd41de6b1f609ac6 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 15:55:16 +0530 Subject: [PATCH 67/77] rename to BackupSidecar Signed-off-by: Mayank Shah --- e2e-tests/demand-backup-cloud-pxb/run | 2 +- e2e-tests/demand-backup-encrypted-with-tls-pxb/run | 2 +- e2e-tests/pitr-pxb/run | 2 +- e2e-tests/restore-to-encrypted-cluster-pxb/run | 2 +- pkg/controller/pxcbackup/controller.go | 4 ++-- pkg/features/features.go | 6 +++--- pkg/features/features_test.go | 12 ++++++------ 
pkg/pxc/app/statefulset/node.go | 2 +- pkg/pxc/backup/restore.go | 2 +- 9 files changed, 17 insertions(+), 17 deletions(-) diff --git a/e2e-tests/demand-backup-cloud-pxb/run b/e2e-tests/demand-backup-cloud-pxb/run index e7cfb3b21d..ab0e3058f6 100755 --- a/e2e-tests/demand-backup-cloud-pxb/run +++ b/e2e-tests/demand-backup-cloud-pxb/run @@ -7,7 +7,7 @@ test_dir=$(realpath $(dirname $0)) set_debug -PXCO_FEATURE_GATES="BackupXtrabackup=true" +PXCO_FEATURE_GATES="BackupSidecar=true" get_container_options() { backup_name=$1 diff --git a/e2e-tests/demand-backup-encrypted-with-tls-pxb/run b/e2e-tests/demand-backup-encrypted-with-tls-pxb/run index ef88f8f8e9..8616b548e7 100755 --- a/e2e-tests/demand-backup-encrypted-with-tls-pxb/run +++ b/e2e-tests/demand-backup-encrypted-with-tls-pxb/run @@ -7,7 +7,7 @@ test_dir=$(realpath $(dirname $0)) set_debug -PXCO_FEATURE_GATES="BackupXtrabackup=true" +PXCO_FEATURE_GATES="BackupSidecar=true" function jq_filter() { local vault_root=$1 diff --git a/e2e-tests/pitr-pxb/run b/e2e-tests/pitr-pxb/run index a2e80d25ec..749a7c1166 100755 --- a/e2e-tests/pitr-pxb/run +++ b/e2e-tests/pitr-pxb/run @@ -7,7 +7,7 @@ test_dir=$(realpath $(dirname $0)) set_debug -PXCO_FEATURE_GATES="BackupXtrabackup=true" +PXCO_FEATURE_GATES="BackupSidecar=true" GTID_PATTERN='[A-F0-9a-f]{8}-[A-F0-9a-f]{4}-[A-F0-9a-f]{4}-[A-F0-9a-f]{4}-[A-F0-9a-f]{12}:[0-9]+' diff --git a/e2e-tests/restore-to-encrypted-cluster-pxb/run b/e2e-tests/restore-to-encrypted-cluster-pxb/run index 3275793540..f985feda51 100755 --- a/e2e-tests/restore-to-encrypted-cluster-pxb/run +++ b/e2e-tests/restore-to-encrypted-cluster-pxb/run @@ -7,7 +7,7 @@ test_dir=$(realpath $(dirname $0)) set_debug -PXCO_FEATURE_GATES="BackupXtrabackup=true" +PXCO_FEATURE_GATES="BackupSidecar=true" main() { if [ -n "$SKIP_REMOTE_BACKUPS" ]; then diff --git a/pkg/controller/pxcbackup/controller.go b/pkg/controller/pxcbackup/controller.go index 4ee0d794d6..e55a4ee6fa 100644 --- a/pkg/controller/pxcbackup/controller.go 
+++ b/pkg/controller/pxcbackup/controller.go @@ -174,7 +174,7 @@ func (r *ReconcilePerconaXtraDBClusterBackup) Reconcile(ctx context.Context, req } // TODO: implement support - if storage.Type == api.BackupStorageFilesystem && features.Enabled(ctx, features.BackupXtrabackup) { + if storage.Type == api.BackupStorageFilesystem && features.Enabled(ctx, features.BackupSidecar) { err := errors.New("pvc backup is not supported for xtrabackup mode") if err := r.setFailedStatus(ctx, cr, err); err != nil { @@ -324,7 +324,7 @@ func (r *ReconcilePerconaXtraDBClusterBackup) createBackupJob( return nil, errors.Wrap(err, "failed to get initImage") } - xtrabackupEnabled := features.Enabled(ctx, features.BackupXtrabackup) + xtrabackupEnabled := features.Enabled(ctx, features.BackupSidecar) getJobSpec := func() (batchv1.JobSpec, error) { if xtrabackupEnabled { srcNode, err := pxc.GetPrimaryPodDNSName(ctx, r.client, cluster) diff --git a/pkg/features/features.go b/pkg/features/features.go index a2c0b49755..5ead53341f 100644 --- a/pkg/features/features.go +++ b/pkg/features/features.go @@ -10,8 +10,8 @@ import ( ) const ( - // BackupXtrabackup is a feature flag for the BackupXtrabackup feature - BackupXtrabackup featuregate.Feature = "BackupXtrabackup" + // BackupSidecar is a feature flag for the BackupSidecar feature + BackupSidecar featuregate.Feature = "BackupSidecar" ) // NewGate returns a new FeatureGate. 
@@ -19,7 +19,7 @@ func NewGate() featuregate.MutableFeatureGate { gate := featuregate.NewFeatureGate() if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{ - BackupXtrabackup: {Default: false, PreRelease: featuregate.Alpha}, + BackupSidecar: {Default: false, PreRelease: featuregate.Alpha}, }); err != nil { panic(err) } diff --git a/pkg/features/features_test.go b/pkg/features/features_test.go index b57b8bb6d9..ba210e1c9e 100644 --- a/pkg/features/features_test.go +++ b/pkg/features/features_test.go @@ -11,7 +11,7 @@ func TestDefaults(t *testing.T) { t.Parallel() gate := NewGate() - assert.Assert(t, false == gate.Enabled(BackupXtrabackup)) + assert.Assert(t, false == gate.Enabled(BackupSidecar)) } func TestStringFormat(t *testing.T) { @@ -19,8 +19,8 @@ func TestStringFormat(t *testing.T) { gate := NewGate() assert.NilError(t, gate.Set("")) - assert.NilError(t, gate.Set("BackupXtrabackup=true")) - assert.Assert(t, true == gate.Enabled(BackupXtrabackup)) + assert.NilError(t, gate.Set("BackupSidecar=true")) + assert.Assert(t, true == gate.Enabled(BackupSidecar)) } @@ -31,7 +31,7 @@ func TestContext(t *testing.T) { assert.Equal(t, ShowAssigned(ctx), "") - assert.NilError(t, gate.Set("BackupXtrabackup=true")) - assert.Assert(t, Enabled(ctx, BackupXtrabackup)) - assert.Equal(t, ShowAssigned(ctx), "BackupXtrabackup=true") + assert.NilError(t, gate.Set("BackupSidecar=true")) + assert.Assert(t, Enabled(ctx, BackupSidecar)) + assert.Equal(t, ShowAssigned(ctx), "BackupSidecar=true") } diff --git a/pkg/pxc/app/statefulset/node.go b/pkg/pxc/app/statefulset/node.go index 46314bb052..d6e39ea873 100644 --- a/pkg/pxc/app/statefulset/node.go +++ b/pkg/pxc/app/statefulset/node.go @@ -438,7 +438,7 @@ func (c *Node) LogCollectorContainer(spec *api.LogCollectorSpec, logPsecrets str } func (c *Node) XtrabackupContainer(ctx context.Context, cr *api.PerconaXtraDBCluster) (*corev1.Container, error) { - if !features.Enabled(ctx, features.BackupXtrabackup) { + if 
!features.Enabled(ctx, features.BackupSidecar) { return nil, nil } container := &corev1.Container{ diff --git a/pkg/pxc/backup/restore.go b/pkg/pxc/backup/restore.go index 11976a4a65..d2329e27d0 100644 --- a/pkg/pxc/backup/restore.go +++ b/pkg/pxc/backup/restore.go @@ -496,7 +496,7 @@ func restoreJobEnvs( Value: strconv.FormatBool(verifyTLS), }) - if features.Enabled(ctx, features.BackupXtrabackup) { + if features.Enabled(ctx, features.BackupSidecar) { envs = append(envs, corev1.EnvVar{ Name: "XTRABACKUP_ENABLED", Value: "true", From fdc4485fa593b121851885a3a5c473bbe4c9209f Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 15:57:54 +0530 Subject: [PATCH 68/77] fix test Signed-off-by: Mayank Shah --- pkg/xtrabackup/job_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/xtrabackup/job_test.go b/pkg/xtrabackup/job_test.go index 0463088b8f..f50a1b3f66 100644 --- a/pkg/xtrabackup/job_test.go +++ b/pkg/xtrabackup/job_test.go @@ -208,7 +208,7 @@ func TestJobSpec(t *testing.T) { assert.Equal(t, app.BinVolumeMountPath, container.VolumeMounts[0].MountPath) // Assert Environment Variables - assert.Len(t, container.Env, 3) + assert.Len(t, container.Env, 4) envMap := make(map[string]string) for _, env := range container.Env { envMap[env.Name] = env.Value From d8c3bff27742e576d2e5955c87a6ac0f781c3f94 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 15:59:11 +0530 Subject: [PATCH 69/77] update statefulset Signed-off-by: Mayank Shah --- pkg/pxc/statefulset.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/pxc/statefulset.go b/pkg/pxc/statefulset.go index 1d239f47f7..bc5ff2c075 100644 --- a/pkg/pxc/statefulset.go +++ b/pkg/pxc/statefulset.go @@ -60,7 +60,7 @@ func StatefulSet( pod.Volumes = sfsVolume.Volumes } - if features.Enabled(ctx, features.BackupXtrabackup) { + if features.Enabled(ctx, features.BackupSidecar) { pod.Volumes = append(pod.Volumes, corev1.Volume{ Name: "backup-logs", VolumeSource:
corev1.VolumeSource{ From cb0892284d7b2d5f2f4ae3acd1e67de13e3c4cf2 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 16:14:55 +0530 Subject: [PATCH 70/77] Update e2e-tests/demand-backup-cloud-pxb/run Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- e2e-tests/demand-backup-cloud-pxb/run | 1 - 1 file changed, 1 deletion(-) diff --git a/e2e-tests/demand-backup-cloud-pxb/run b/e2e-tests/demand-backup-cloud-pxb/run index ab0e3058f6..c119ad4890 100755 --- a/e2e-tests/demand-backup-cloud-pxb/run +++ b/e2e-tests/demand-backup-cloud-pxb/run @@ -212,7 +212,6 @@ main() { setup_gcs_credentials check_backup_existence_gcs "${backup_dest_gcp}" "/.xtrabackup_binlog_info.00000000000000000000" - setup_azure_credentials check_backup_existence_azure "${backup_dest_azure}" "/.xtrabackup_binlog_info.00000000000000000000" From e59ffbaeec5cfff0634932c40d15b92d342eadde Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 16:15:07 +0530 Subject: [PATCH 71/77] Update build/backup/recovery-cloud.sh Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- build/backup/recovery-cloud.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/backup/recovery-cloud.sh b/build/backup/recovery-cloud.sh index 04dcf06b8d..bfffdd9ff0 100644 --- a/build/backup/recovery-cloud.sh +++ b/build/backup/recovery-cloud.sh @@ -120,7 +120,7 @@ if ! 
check_for_version "$XTRABACKUP_VERSION" '8.0.0'; then fi DEFAULTS_GROUP="--defaults-group=mysqld" -if [[ "${XTRABACKUP_ENABLED}" == "true" ]]; then +if [[ ${XTRABACKUP_ENABLED} == "true" ]]; then # these must not be set for pxb DEFAULTS_GROUP="" DEFAULTS_FILE="" From 6b6359206a6d8d07d36932c05ba679d9b5fa51cd Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 16:15:16 +0530 Subject: [PATCH 72/77] Update e2e-tests/demand-backup-cloud-pxb/run Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- e2e-tests/demand-backup-cloud-pxb/run | 1 - 1 file changed, 1 deletion(-) diff --git a/e2e-tests/demand-backup-cloud-pxb/run b/e2e-tests/demand-backup-cloud-pxb/run index c119ad4890..f05d45e252 100755 --- a/e2e-tests/demand-backup-cloud-pxb/run +++ b/e2e-tests/demand-backup-cloud-pxb/run @@ -209,7 +209,6 @@ main() { desc "Check backup existence" setup_aws_credentials check_backup_existence_aws "$backup_dest_aws" "/xtrabackup_binlog_info.00000000000000000000" - setup_gcs_credentials check_backup_existence_gcs "${backup_dest_gcp}" "/.xtrabackup_binlog_info.00000000000000000000" setup_azure_credentials From e74bf1d1f631f1b1f1c68fce26ec93fcfcd9666b Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 18:58:35 +0530 Subject: [PATCH 73/77] improve test Signed-off-by: Mayank Shah --- e2e-tests/demand-backup-cloud-pxb/run | 32 +++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/e2e-tests/demand-backup-cloud-pxb/run b/e2e-tests/demand-backup-cloud-pxb/run index f05d45e252..132d7ebddf 100755 --- a/e2e-tests/demand-backup-cloud-pxb/run +++ b/e2e-tests/demand-backup-cloud-pxb/run @@ -70,20 +70,44 @@ run_recovery_from_source() { compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-2.$cluster-pxc -uroot -proot_password" } +wait_for_upload_to_start() { + local pod_name=$1 + local max_attempts=100 + + attempts=0 + until kubectl_bin logs ${pod_name} -c xtrabackup | 
grep -q 'successfully uploaded chunk'; do + sleep 3 + let attempts+=1 + if [ $attempts -ge $max_attempts ]; then + echo "Upload did not start on time" + exit 1 + fi + done + + return 0 +} + # If backup upload was started and failed for some reason the cloud storage should be cleaned up during second try delete_backup_pod() { local backup_name=$1 - desc "Delete ${backup_name} pod during backup" + echo "Waiting for ${backup_name} pod to become Running" sleep 1 kubectl_bin wait --for=jsonpath='{.status.phase}'=Running pod --selector=percona.com/backup-job-name=xb-${backup_name} --timeout=120s backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') - # sleep for 25 seconds so that an upload is started - # todo: improve this by monitoring the sidecar logs instead - sleep 25 + local cluster_name=$(kubectl_bin get pxc-backup ${backup_name} -o jsonpath='{.spec.pxcCluster}') + if [[ -z $cluster_name ]]; then + echo "Cluster name is not set on backup ${backup_name}" + exit 1 + fi + + local pxc_pod="${cluster_name}-pxc-0" + + echo "Waiting for upload to start" + wait_for_upload_to_start ${pxc_pod} echo "Deleting pod/${backup_pod} during backup" kubectl logs -f ${backup_pod} | while IFS= read -r line; do From eec67ac706a3a058810899d6fdeb8cd139d90e32 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Fri, 5 Dec 2025 20:48:50 +0530 Subject: [PATCH 74/77] fix recovery-cloud.sh Signed-off-by: Mayank Shah --- build/backup/recovery-cloud.sh | 33 ++++++++++--------- .../demand-backup-encrypted-with-tls-pxb/run | 1 + 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/build/backup/recovery-cloud.sh b/build/backup/recovery-cloud.sh index bfffdd9ff0..c313d3a5f8 100644 --- a/build/backup/recovery-cloud.sh +++ b/build/backup/recovery-cloud.sh @@ -54,6 +54,23 @@ fi # shellcheck disable=SC2086 xbcloud get --parallel="$(grep -c processor /proc/cpuinfo)" ${XBCLOUD_ARGS} "$(destination)" | xbstream -x -C 
"${tmp}" --parallel="$(grep -c processor /proc/cpuinfo)" $XBSTREAM_EXTRA_ARGS +set +o xtrace +if [[ -f "${tmp}/sst_info" ]]; then + transition_key=$(vault_get "$tmp/sst_info") + if [[ -n $transition_key && $transition_key != null ]]; then + MYSQL_VERSION=$(parse_ini 'mysql-version' "$tmp/sst_info") + if ! check_for_version "$MYSQL_VERSION" '5.7.29' \ + && [[ $MYSQL_VERSION != '5.7.28-31-57.2' ]]; then + + # shellcheck disable=SC2016 + transition_key='$transition_key' + fi + + transition_option="--transition-key=$transition_key" + echo transition-key exists + fi +fi + PXB_VAULT_PREPARE_ARGS="" PXB_VAULT_MOVEBACK_ARGS="" VAULT_CONFIG_FILE=/etc/mysql/vault-keyring-secret/keyring_vault.conf @@ -73,23 +90,7 @@ if [[ -f ${VAULT_CONFIG_FILE} ]]; then fi fi -set +o xtrace - -if [[ -f "${tmp}/sst_info" ]]; then - transition_key=$(vault_get "$tmp/sst_info") - if [[ -n $transition_key && $transition_key != null ]]; then - MYSQL_VERSION=$(parse_ini 'mysql-version' "$tmp/sst_info") - if ! check_for_version "$MYSQL_VERSION" '5.7.29' \ - && [[ $MYSQL_VERSION != '5.7.28-31-57.2' ]]; then - # shellcheck disable=SC2016 - transition_key='$transition_key' - fi - - transition_option="--transition-key=$transition_key" - echo transition-key exists - fi -fi if [ -f "${tmp}/xtrabackup_keys" ]; then master_key_options="--generate-new-master-key" diff --git a/e2e-tests/demand-backup-encrypted-with-tls-pxb/run b/e2e-tests/demand-backup-encrypted-with-tls-pxb/run index 8616b548e7..50b153753a 100755 --- a/e2e-tests/demand-backup-encrypted-with-tls-pxb/run +++ b/e2e-tests/demand-backup-encrypted-with-tls-pxb/run @@ -27,6 +27,7 @@ function get_secret_mount_point() { } main() { + # todo: support PVC for pxb if [ -n "$SKIP_REMOTE_BACKUPS" ]; then echo "Skipping test because SKIP_REMOTE_BACKUPS variable is set!" 
exit 0 From ab150a0ee3d8f86197a8aed15ab9a5ffdc83b584 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Sat, 6 Dec 2025 01:03:10 +0530 Subject: [PATCH 75/77] fix flaky test Signed-off-by: Mayank Shah --- e2e-tests/demand-backup-cloud-pxb/run | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/e2e-tests/demand-backup-cloud-pxb/run b/e2e-tests/demand-backup-cloud-pxb/run index 132d7ebddf..2f6aa2933f 100755 --- a/e2e-tests/demand-backup-cloud-pxb/run +++ b/e2e-tests/demand-backup-cloud-pxb/run @@ -76,7 +76,7 @@ wait_for_upload_to_start() { attempts=0 until kubectl_bin logs ${pod_name} -c xtrabackup | grep -q 'successfully uploaded chunk'; do - sleep 3 + sleep 1 let attempts+=1 if [ $attempts -ge $max_attempts ]; then echo "Upload did not start on time" @@ -92,31 +92,21 @@ delete_backup_pod() { local backup_name=$1 desc "Delete ${backup_name} pod during backup" - echo "Waiting for ${backup_name} pod to become Running" - sleep 1 - kubectl_bin wait --for=jsonpath='{.status.phase}'=Running pod --selector=percona.com/backup-job-name=xb-${backup_name} --timeout=120s - - backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') - local cluster_name=$(kubectl_bin get pxc-backup ${backup_name} -o jsonpath='{.spec.pxcCluster}') if [[ -z $cluster_name ]]; then echo "Cluster name is not set on backup ${backup_name}" exit 1 fi + + backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') + echo "Waiting for pod/${backup_pod} to become Running" + kubectl_bin wait --for=jsonpath='{.status.phase}'=Running pod/${backup_pod} --timeout=120s local pxc_pod="${cluster_name}-pxc-0" echo "Waiting for upload to start" wait_for_upload_to_start ${pxc_pod} - - echo "Deleting pod/${backup_pod} during backup" - kubectl logs -f ${backup_pod} | while IFS= read -r line; do - if [[ $line =~ 'Backup requested' ]]; then - 
kubectl delete pod --force ${backup_pod} - break - fi - done - + kubectl delete pod --force ${backup_pod} } check_cloud_storage_cleanup() { From d47f880fb659f5b0c4ba4e26bf1539d92c7a7be7 Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Sat, 6 Dec 2025 01:31:02 +0530 Subject: [PATCH 76/77] add sleep Signed-off-by: Mayank Shah --- e2e-tests/demand-backup-cloud-pxb/run | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/e2e-tests/demand-backup-cloud-pxb/run b/e2e-tests/demand-backup-cloud-pxb/run index 2f6aa2933f..c7e2fc8a2a 100755 --- a/e2e-tests/demand-backup-cloud-pxb/run +++ b/e2e-tests/demand-backup-cloud-pxb/run @@ -97,7 +97,8 @@ delete_backup_pod() { echo "Cluster name is not set on backup ${backup_name}" exit 1 fi - + + sleep 2 backup_pod=$(kubectl_bin get pods --selector=percona.com/backup-job-name=xb-${backup_name} -o jsonpath='{.items[].metadata.name}') echo "Waiting for pod/${backup_pod} to become Running" kubectl_bin wait --for=jsonpath='{.status.phase}'=Running pod/${backup_pod} --timeout=120s From ff41c2b39f097d665bc68354256cda559766f9ae Mon Sep 17 00:00:00 2001 From: Mayank Shah Date: Sat, 6 Dec 2025 13:43:10 +0530 Subject: [PATCH 77/77] typos Signed-off-by: Mayank Shah --- e2e-tests/demand-backup-cloud-pxb/run | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/e2e-tests/demand-backup-cloud-pxb/run b/e2e-tests/demand-backup-cloud-pxb/run index c7e2fc8a2a..75060e66f3 100755 --- a/e2e-tests/demand-backup-cloud-pxb/run +++ b/e2e-tests/demand-backup-cloud-pxb/run @@ -225,16 +225,16 @@ main() { setup_aws_credentials check_backup_existence_aws "$backup_dest_aws" "/xtrabackup_binlog_info.00000000000000000000" setup_gcs_credentials - check_backup_existence_gcs "${backup_dest_gcp}" "/.xtrabackup_binlog_info.00000000000000000000" + check_backup_existence_gcs "${backup_dest_gcp}" "/xtrabackup_binlog_info.00000000000000000000" setup_azure_credentials - check_backup_existence_azure "${backup_dest_azure}" 
"/.xtrabackup_binlog_info.00000000000000000000" + check_backup_existence_azure "${backup_dest_azure}" "/xtrabackup_binlog_info.00000000000000000000" kubectl_bin delete pxc-backup --all desc "Check backup deletion" - check_backup_deletion_aws "$backup_dest_aws" "/.xtrabackup_binlog_info.00000000000000000000" - check_backup_deletion_gcs "${backup_dest_gcp}" "/.xtrabackup_binlog_info.00000000000000000000" - check_backup_deletion_azure "${backup_dest_azure}" "/.xtrabackup_binlog_info.00000000000000000000" + check_backup_deletion_aws "$backup_dest_aws" "/xtrabackup_binlog_info.00000000000000000000" + check_backup_deletion_gcs "${backup_dest_gcp}" "/xtrabackup_binlog_info.00000000000000000000" + check_backup_deletion_azure "${backup_dest_azure}" "/xtrabackup_binlog_info.00000000000000000000" if [ "$EKS" = 1 ]; then backup_name_aws_iam="on-demand-backup-aws-s3-iam"