From 5d4f2d1a11fb6a3e16da89478470f3e6261ec4a4 Mon Sep 17 00:00:00 2001 From: jitendra1411 Date: Thu, 21 Aug 2025 10:21:53 +0530 Subject: [PATCH] Operator for backup with plugin Signed-off-by: jitendra1411 --- cmd/mockserver/main.go | 58 ++++ cmd/operator/app/main.go | 20 ++ cmd/operator/app/thread_backup.go | 100 +++++++ ...backup.clickhouse-backup.altinity.com.yaml | 133 +++++++++ deploy/operator/parts/crd.yaml | 2 +- dev/run_code_generator.sh | 2 + docs/chb-examples/01-simple-1.yaml | 26 ++ go.mod | 8 +- go.sum | 8 +- .../api_group.go | 20 ++ .../v1/api_register.go | 30 +++ .../v1/api_version.go | 20 ++ .../v1/types.go | 85 ++++++ .../v1/zz_generated.deepcopy.go | 252 ++++++++++++++++++ pkg/chop/chop.go | 14 + pkg/chop/config_manager.go | 10 + pkg/controller/chb/controller.go | 101 +++++++ pkg/plugin/backup/backup.pb.go | 252 ++++++++++++++++++ pkg/plugin/backup/backup_grpc.pb.go | 127 +++++++++ pkg/plugin/proto/backup.proto | 40 +++ 20 files changed, 1302 insertions(+), 6 deletions(-) create mode 100644 cmd/mockserver/main.go create mode 100644 cmd/operator/app/thread_backup.go create mode 100644 deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousebackup.clickhouse-backup.altinity.com.yaml create mode 100644 docs/chb-examples/01-simple-1.yaml create mode 100644 pkg/apis/clickhouse-backup.altinity.com/api_group.go create mode 100644 pkg/apis/clickhouse-backup.altinity.com/v1/api_register.go create mode 100644 pkg/apis/clickhouse-backup.altinity.com/v1/api_version.go create mode 100644 pkg/apis/clickhouse-backup.altinity.com/v1/types.go create mode 100644 pkg/apis/clickhouse-backup.altinity.com/v1/zz_generated.deepcopy.go create mode 100644 pkg/controller/chb/controller.go create mode 100644 pkg/plugin/backup/backup.pb.go create mode 100644 pkg/plugin/backup/backup_grpc.pb.go create mode 100644 pkg/plugin/proto/backup.proto diff --git a/cmd/mockserver/main.go b/cmd/mockserver/main.go new file mode 100644 index 000000000..ede9ea047 --- /dev/null +++ b/cmd/mockserver/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "context" + "fmt" + "log" + "net" + "time" + + pb "github.com/altinity/clickhouse-operator/pkg/plugin/backup" + + "google.golang.org/grpc" +) + +// BackupServer implements pb.BackupServer +type BackupServer struct { + pb.UnimplementedBackupServer +} + +// Backup handles incoming BackupRequest calls +func (s *BackupServer) Backup(ctx context.Context, req *pb.BackupRequest) (*pb.BackupResult, error) { + // Log what we received + log.Printf("Received Backup request: chi=%d bytes, backup=%d bytes, params=%v", + len(req.ChiDefinition), len(req.BackupDefinition), req.Parameters) + + // Mock result + start := time.Now().Unix() + time.Sleep(2 * time.Second) // simulate work + stop := time.Now().Unix() + + result := &pb.BackupResult{ + BackupId: "mock-backup-123", + BackupName: "example-backup", + StartedAt: start, + StoppedAt: stop, + Metadata: map[string]string{ + "status": "success", + "note": "this is a mock backup", + }, + } + + return result, nil +} + +func main() { + lis, err := net.Listen("tcp", ":50051") + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + + grpcServer := grpc.NewServer() + pb.RegisterBackupServer(grpcServer, &BackupServer{}) + + fmt.Println("Mock Backup gRPC server running on :50051") + if err := grpcServer.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} diff --git a/cmd/operator/app/main.go b/cmd/operator/app/main.go index da6711278..b4c5ce02b 100644 --- a/cmd/operator/app/main.go +++ 
b/cmd/operator/app/main.go @@ -77,6 +77,7 @@ func Run() { launchClickHouse(ctx, &wg) launchClickHouseReconcilerMetricsExporter(ctx, &wg) launchKeeper(ctx, &wg) + launchBackup(ctx, &wg) // Wait for completion <-ctx.Done() @@ -120,6 +121,25 @@ func launchKeeper(ctx context.Context, wg *sync.WaitGroup) { }() } +func launchBackup(ctx context.Context, wg *sync.WaitGroup) { + backupErr := initBackup(ctx) + wg.Add(1) + go func() { + defer wg.Done() + if backupErr == nil { + log.Info("Starting backup") + backupErr = runBackup(ctx) + if backupErr == nil { + log.Info("Starting backup OK") + } else { + log.Warning("Starting backup FAILED with err: %v", backupErr) + } + } else { + log.Warning("Starting backup skipped due to failed initialization with err: %v", backupErr) + } + }() +} + // setupSignalsNotification sets up OS signals func setupSignalsNotification(cancel context.CancelFunc) { stopChan := make(chan os.Signal, 2) diff --git a/cmd/operator/app/thread_backup.go b/cmd/operator/app/thread_backup.go new file mode 100644 index 000000000..39b6c1aa5 --- /dev/null +++ b/cmd/operator/app/thread_backup.go @@ -0,0 +1,100 @@ +package app + +import ( + "context" + + api "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-backup.altinity.com/v1" + "github.com/altinity/clickhouse-operator/pkg/chop" + controller "github.com/altinity/clickhouse-operator/pkg/controller/chb" + apiMachineryRuntime "k8s.io/apimachinery/pkg/runtime" + clientGoScheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + ctrlRuntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +func initBackup(ctx context.Context) error { + var err error + + ctrl.SetLogger(zap.New(zap.UseDevMode(true))) + + logger = ctrl.Log.WithName("backup-runner") + + scheme = apiMachineryRuntime.NewScheme() + if err = clientGoScheme.AddToScheme(scheme); err != nil { + logger.Error(err, "init backup - unable to clientGoScheme.AddToScheme") + return err + } + if err = api.AddToScheme(scheme); err != nil { + logger.Error(err, "init backup - unable to api.AddToScheme") + return err + } + + manager, err = ctrlRuntime.NewManager(ctrlRuntime.GetConfigOrDie(), ctrlRuntime.Options{ + Scheme: scheme, + Cache: cache.Options{ + Namespaces: []string{chop.Config().GetInformerNamespace()}, + }, + MetricsBindAddress: "0", + HealthProbeBindAddress: "0", + }) + if err != nil { + logger.Error(err, "init backup - unable to ctrlRuntime.NewManager") + return err + } + + err = ctrlRuntime. + NewControllerManagedBy(manager). + For(&api.ClickHouseBackup{}, builder.WithPredicates(backupPredicate())). 
+ Complete( + &controller.Controller{ + Client: manager.GetClient(), + Scheme: manager.GetScheme(), + }, + ) + if err != nil { + logger.Error(err, "init backup - unable to ctrlRuntime.NewControllerManagedBy") + return err + } + + // Initialization successful + return nil +} + +func runBackup(ctx context.Context) error { + if err := manager.Start(ctx); err != nil { + logger.Error(err, "run backup - unable to manager.Start") + return err + } + // Run successful + return nil +} + +func backupPredicate() predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + chb, ok := e.Object.(*api.ClickHouseBackup) + if !ok { + return false + } + if chb.Status.State == "Completed" { + return false + } + + return true + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return false + }, + GenericFunc: func(e event.GenericEvent) bool { + return true + }, + } +} diff --git a/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousebackup.clickhouse-backup.altinity.com.yaml b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousebackup.clickhouse-backup.altinity.com.yaml new file mode 100644 index 000000000..7ef0ad2f1 --- /dev/null +++ b/deploy/helm/clickhouse-operator/crds/CustomResourceDefinition-clickhousebackup.clickhouse-backup.altinity.com.yaml @@ -0,0 +1,133 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: clickhousebackups.clickhouse-backup.altinity.com +spec: + group: clickhouse-backup.altinity.com + names: + kind: ClickHouseBackup + listKind: ClickHouseBackupList + plural: clickhousebackups + shortNames: + - chb + singular: clickhousebackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: Status + type: string + - jsonPath: .status.message + name: Message + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + ClickhouseInstallation: + description: ClickhouseInstallation represents the CHI (ClickHouseInstallation) + that a backup is tied to + properties: + cluster: + description: ClickhouseCluster represents a single cluster inside + a CHI + properties: + name: + type: string + type: object + name: + type: string + type: object + backup: + properties: + dbTable: + properties: + blackList: + items: + type: string + type: array + whiteList: + items: + type: string + type: array + type: object + s3: + properties: + destinationPath: + type: string + endpointURL: + type: string + s3Credentials: + properties: + accessKeyId: + properties: + key: + type: string + name: + type: string + type: object + secretAccessKey: + properties: + key: + type: string + name: + type: string + type: object + type: object + type: object + type: object + method: + type: string + pluginConfiguration: + properties: + name: + type: string + type: object + type: object + status: + properties: + backupId: + type: string + backupName: + type: string + message: + type: string + startedAt: + format: date-time + type: string + state: + type: string + stoppedAt: + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/operator/parts/crd.yaml b/deploy/operator/parts/crd.yaml index a8bde5a91..58d3e9ba1 100644 --- a/deploy/operator/parts/crd.yaml +++ b/deploy/operator/parts/crd.yaml @@ -6341,4 +6341,4 @@ spec: describe behavior of generated Service More info: https://kubernetes.io/docs/concepts/services-networking/service/ # nullable: true - x-kubernetes-preserve-unknown-fields: true + x-kubernetes-preserve-unknown-fields: true \ No newline at end of file diff --git a/dev/run_code_generator.sh b/dev/run_code_generator.sh index 81d86be3e..82939d185 100755 --- a/dev/run_code_generator.sh +++ b/dev/run_code_generator.sh @@ -58,6 +58,8 @@ bash "${CODE_GENERATOR_DIR}/generate-groups.sh" \ -o "${GENERATOR_ROOT}" \ --go-header-file "${SRC_ROOT}/hack/boilerplate.go.txt" + #TODO need to add generator for clickhuose-backup.altinity.com:v1 + echo "Copy generated sources into: ${PKG_ROOT}" cp -r "${GENERATOR_ROOT}/${REPO}/pkg/"* "${PKG_ROOT}" diff --git a/docs/chb-examples/01-simple-1.yaml b/docs/chb-examples/01-simple-1.yaml new file mode 100644 index 000000000..0e14e1a57 --- /dev/null +++ b/docs/chb-examples/01-simple-1.yaml @@ -0,0 +1,26 @@ +apiVersion: "clickhouse-backup.altinity.com/v1" +kind: "ClickHouseBackup" +metadata: + name: backup-example +spec: + backup: + dbTable: + whiteList: ["db1.table1","db1.table2","db2.table1"] + blackList: ["db1.test", "db1.logs", "db2.temporary"] + s3: + destinationPath: s3://backups/ + endpointURL: http://minio:9000 + s3Credentials: + accessKeyId: + name: minio + key: ACCESS_KEY_ID + secretAccessKey: + name: minio + key: ACCESS_SECRET_KEY + method: plugin + pluginConfiguration: + name: clickhouse.backup.altinity.com + ClickhouseInstallation: + name: simple-01 + cluster: + name: chcluster1 \ No newline at end of file diff --git a/go.mod b/go.mod index c7e8221f1..3544f0a4e 100644 --- a/go.mod +++ b/go.mod @@ -23,8 +23,8 @@ require ( github.com/Masterminds/semver/v3 v3.2.0 github.com/go-logr/logr v1.4.1 github.com/go-zookeeper/zk v1.0.3 - github.com/golang/glog v1.0.0 - github.com/google/uuid v1.4.0 + github.com/golang/glog v1.2.0 + 
github.com/google/uuid v1.6.0 github.com/imdario/mergo v0.3.15 github.com/juliangruber/go-intersect v1.0.0 github.com/kubernetes-sigs/yaml v1.1.0 @@ -41,6 +41,8 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.24.0 golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e golang.org/x/sync v0.12.0 + google.golang.org/grpc v1.64.0 + google.golang.org/protobuf v1.33.0 gopkg.in/d4l3k/messagediff.v1 v1.2.1 gopkg.in/yaml.v3 v3.0.1 sigs.k8s.io/controller-runtime v0.15.1 @@ -97,7 +99,7 @@ require ( golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/component-base v0.29.14 // indirect diff --git a/go.sum b/go.sum index cfaad41cb..b2fe7ab7d 100644 --- a/go.sum +++ b/go.sum @@ -220,8 +220,8 @@ github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4Mgqvf github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gookit/color v1.4.2 h1:tXy44JFSFkKnELV6WaMo/lLfu/meqITX3iAV52do7lk= @@ -788,6 +788,8 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -802,6 +804,8 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/pkg/apis/clickhouse-backup.altinity.com/api_group.go b/pkg/apis/clickhouse-backup.altinity.com/api_group.go new file mode 100644 index 000000000..753308b70 --- /dev/null +++ b/pkg/apis/clickhouse-backup.altinity.com/api_group.go @@ -0,0 +1,20 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clickhouse_backup_altinity_com + +const ( + // APIGroupName is the group name of the ClickHouse Operator API. + APIGroupName = "clickhouse-backup.altinity.com" +) diff --git a/pkg/apis/clickhouse-backup.altinity.com/v1/api_register.go b/pkg/apis/clickhouse-backup.altinity.com/v1/api_register.go new file mode 100644 index 000000000..266bf3c93 --- /dev/null +++ b/pkg/apis/clickhouse-backup.altinity.com/v1/api_register.go @@ -0,0 +1,30 @@ +package v1 + +import ( + clickhouse_backup_altinity_com "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-backup.altinity.com" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{ + Group: clickhouse_backup_altinity_com.APIGroupName, + Version: APIVersion, + } + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{ + GroupVersion: SchemeGroupVersion, + } + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +func init() { + SchemeBuilder.Register( + &ClickHouseBackup{}, + &ClickHouseBackupList{}, + ) +} diff --git a/pkg/apis/clickhouse-backup.altinity.com/v1/api_version.go b/pkg/apis/clickhouse-backup.altinity.com/v1/api_version.go new file mode 100644 index 000000000..516c6a0e9 --- /dev/null +++ b/pkg/apis/clickhouse-backup.altinity.com/v1/api_version.go @@ -0,0 +1,20 @@ +// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // APIVersion is the version of the ClickHouse backup Operator API. 
+ APIVersion = "v1" +) diff --git a/pkg/apis/clickhouse-backup.altinity.com/v1/types.go b/pkg/apis/clickhouse-backup.altinity.com/v1/types.go new file mode 100644 index 000000000..a3114052d --- /dev/null +++ b/pkg/apis/clickhouse-backup.altinity.com/v1/types.go @@ -0,0 +1,85 @@ +package v1 + +import ( + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:resource:shortName=chb +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.state` +// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClickHouseBackup struct { + meta.TypeMeta `json:",inline" yaml:",inline"` + meta.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` + + Spec ClickhouseBackupSpec `json:"spec,omitempty" yaml:"spec,omitempty"` + Status Status `json:"status,omitempty" yaml:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClickHouseBackupList struct { + meta.TypeMeta `json:",inline" yaml:",inline"` + meta.ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` + Items []ClickHouseBackup `json:"items" yaml:"items"` +} + +// TODO we can add more matrix to the status +type Status struct { + State string `json:"state,omitempty" yaml:"state,omitempty"` + Message string `json:"message,omitempty" yaml:"message,omitempty"` + BackupId string `json:"backupId,omitempty" yaml:"backupId,omitempty"` + BackupName string `json:"backupName,omitempty" yaml:"backupName,omitempty"` + StartedAt meta.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"` + StoppedAt meta.Time `json:"stoppedAt,omitempty" yaml:"stoppedAt,omitempty"` +} + +type ClickhouseBackupSpec struct { + Backup BackupConfig `json:"backup,omitempty" yaml:"backup,omitempty"` + Method string `json:"method,omitempty" yaml:"method,omitempty"` + PluginConfiguration PluginConfiguration `json:"pluginConfiguration,omitempty" yaml:"pluginConfiguration,omitempty"` + ClickhouseInstallation ClickhouseInstallation `json:"ClickhouseInstallation,omitempty" yaml:"ClickhouseInstallation,omitempty"` +} + +type BackupConfig struct { + DBTable DBTableConfig `json:"dbTable,omitempty" yaml:"dbTable,omitempty"` + S3 S3Config `json:"s3,omitempty" yaml:"s3,omitempty"` +} + +type DBTableConfig struct { + WhiteList []string `json:"whiteList,omitempty" yaml:"whiteList,omitempty"` + BlackList []string `json:"blackList,omitempty" yaml:"blackList,omitempty"` +} + +type S3Config struct { + DestinationPath string `json:"destinationPath,omitempty" yaml:"destinationPath,omitempty"` + EndpointURL string `json:"endpointURL,omitempty" yaml:"endpointURL,omitempty"` + S3Credentials S3Credentials `json:"s3Credentials,omitempty" yaml:"s3Credentials,omitempty"` +} + +type S3Credentials struct { + AccessKeyID SecretKeyRef `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty"` + SecretAccessKey SecretKeyRef `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty"` +} + +type SecretKeyRef struct { + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Key string `json:"key,omitempty" yaml:"key,omitempty"` +} + +type PluginConfiguration struct { + Name string `json:"name,omitempty" yaml:"name,omitempty"` +} + +// ClickhouseInstallation represents the CHI (ClickHouseInstallation) that a backup is tied to +type 
ClickhouseInstallation struct { + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Cluster ClickhouseCluster `json:"cluster,omitempty" yaml:"cluster,omitempty"` +} + +// ClickhouseCluster represents a single cluster inside a CHI +type ClickhouseCluster struct { + Name string `json:"name,omitempty" yaml:"name,omitempty"` +} diff --git a/pkg/apis/clickhouse-backup.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse-backup.altinity.com/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..b290b6e25 --- /dev/null +++ b/pkg/apis/clickhouse-backup.altinity.com/v1/zz_generated.deepcopy.go @@ -0,0 +1,252 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupConfig) DeepCopyInto(out *BackupConfig) { + *out = *in + in.DBTable.DeepCopyInto(&out.DBTable) + out.S3 = in.S3 + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupConfig. +func (in *BackupConfig) DeepCopy() *BackupConfig { + if in == nil { + return nil + } + out := new(BackupConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickHouseBackup) DeepCopyInto(out *ClickHouseBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickHouseBackup. +func (in *ClickHouseBackup) DeepCopy() *ClickHouseBackup { + if in == nil { + return nil + } + out := new(ClickHouseBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClickHouseBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickHouseBackupList) DeepCopyInto(out *ClickHouseBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClickHouseBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickHouseBackupList. +func (in *ClickHouseBackupList) DeepCopy() *ClickHouseBackupList { + if in == nil { + return nil + } + out := new(ClickHouseBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClickHouseBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClickhouseBackupSpec) DeepCopyInto(out *ClickhouseBackupSpec) { + *out = *in + in.Backup.DeepCopyInto(&out.Backup) + out.PluginConfiguration = in.PluginConfiguration + out.ClickhouseInstallation = in.ClickhouseInstallation + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseBackupSpec. +func (in *ClickhouseBackupSpec) DeepCopy() *ClickhouseBackupSpec { + if in == nil { + return nil + } + out := new(ClickhouseBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseCluster) DeepCopyInto(out *ClickhouseCluster) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseCluster. +func (in *ClickhouseCluster) DeepCopy() *ClickhouseCluster { + if in == nil { + return nil + } + out := new(ClickhouseCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClickhouseInstallation) DeepCopyInto(out *ClickhouseInstallation) { + *out = *in + out.Cluster = in.Cluster + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClickhouseInstallation. +func (in *ClickhouseInstallation) DeepCopy() *ClickhouseInstallation { + if in == nil { + return nil + } + out := new(ClickhouseInstallation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBTableConfig) DeepCopyInto(out *DBTableConfig) { + *out = *in + if in.WhiteList != nil { + in, out := &in.WhiteList, &out.WhiteList + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.BlackList != nil { + in, out := &in.BlackList, &out.BlackList + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBTableConfig. +func (in *DBTableConfig) DeepCopy() *DBTableConfig { + if in == nil { + return nil + } + out := new(DBTableConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginConfiguration) DeepCopyInto(out *PluginConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfiguration. +func (in *PluginConfiguration) DeepCopy() *PluginConfiguration { + if in == nil { + return nil + } + out := new(PluginConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Config) DeepCopyInto(out *S3Config) { + *out = *in + out.S3Credentials = in.S3Credentials + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Config. +func (in *S3Config) DeepCopy() *S3Config { + if in == nil { + return nil + } + out := new(S3Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3Credentials) DeepCopyInto(out *S3Credentials) { + *out = *in + out.AccessKeyID = in.AccessKeyID + out.SecretAccessKey = in.SecretAccessKey + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Credentials. +func (in *S3Credentials) DeepCopy() *S3Credentials { + if in == nil { + return nil + } + out := new(S3Credentials) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeyRef) DeepCopyInto(out *SecretKeyRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeyRef. +func (in *SecretKeyRef) DeepCopy() *SecretKeyRef { + if in == nil { + return nil + } + out := new(SecretKeyRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Status) DeepCopyInto(out *Status) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + in.StoppedAt.DeepCopyInto(&out.StoppedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. +func (in *Status) DeepCopy() *Status { + if in == nil { + return nil + } + out := new(Status) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/chop/chop.go b/pkg/chop/chop.go index 7c2b6f849..09477a2d3 100644 --- a/pkg/chop/chop.go +++ b/pkg/chop/chop.go @@ -70,6 +70,20 @@ func (c *CHOp) Config() *api.OperatorConfig { return c.ConfigManager.Config() } +func (c *CHOp) KubeClient() *kube.Clientset { + if c == nil { + return nil + } + return c.ConfigManager.KubeClient() +} + +func (c *CHOp) ChopClient() *chopclientset.Clientset { + if c == nil { + return nil + } + return c.ConfigManager.ChopClient() +} + // GetRuntimeParam returns operator runtime parameter by name func (c *CHOp) GetRuntimeParam(name string) (string, bool) { if c == nil { diff --git a/pkg/chop/config_manager.go b/pkg/chop/config_manager.go index 9dd3dc807..d595931c4 100644 --- a/pkg/chop/config_manager.go +++ b/pkg/chop/config_manager.go @@ -121,6 +121,16 @@ func (cm *ConfigManager) Config() *api.OperatorConfig { return cm.config } +// KubeClient is an access wrapper +func (cm *ConfigManager) KubeClient() *kube.Clientset { + return cm.kubeClient +} + +// ChopClient is an access wrapper +func (cm *ConfigManager) ChopClient() *chopClientSet.Clientset { + return cm.chopClient +} + // getAllCRBasedConfigs reads all ClickHouseOperatorConfiguration objects in specified namespace func (cm *ConfigManager) getAllCRBasedConfigs(namespace string) { // We need to have chop kube client available in order to fetch ClickHouseOperatorConfiguration objects diff --git a/pkg/controller/chb/controller.go b/pkg/controller/chb/controller.go new file mode 100644 index 000000000..f9f61c9bb --- /dev/null +++ b/pkg/controller/chb/controller.go @@ -0,0 +1,101 @@ +package chb + +import ( + "context" + "encoding/json" + "fmt" + log "github.com/altinity/clickhouse-operator/pkg/announcer" + apiChb "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse-backup.altinity.com/v1" + "github.com/altinity/clickhouse-operator/pkg/chop" + "github.com/altinity/clickhouse-operator/pkg/interfaces" + pb "github.com/altinity/clickhouse-operator/pkg/plugin/backup" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + meta 
"k8s.io/apimachinery/pkg/apis/meta/v1" + apiMachinery "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "time" +) + +type Controller struct { + client.Client + Scheme *apiMachinery.Scheme + + namer interfaces.INameManager + kube interfaces.IKube +} + +func (c *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + fmt.Println("Reconcile called for request:", req) + log.Info("inside backup reconcile", "request", req) + + newChb := &apiChb.ClickHouseBackup{} + if err := c.Client.Get(ctx, req.NamespacedName, newChb); err != nil { + if apiErrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. + // For additional cleanup logic use finalizers. + // Return and don't requeue + return ctrl.Result{}, nil + } + // Return and requeue + return ctrl.Result{}, err + } + + //TODO need to rethink for already in progress backup + defer c.Status().Update(ctx, newChb) + if len(newChb.Status.State) > 0 { + newChb.Status.State = "Failed" + return ctrl.Result{}, nil + } + + backupChi, err := chop.Get().ChopClient().ClickhouseV1().ClickHouseInstallations(req.Namespace). + Get(ctx, newChb.Spec.ClickhouseInstallation.Name, meta.GetOptions{}) + if err != nil { + return ctrl.Result{}, err + } + + chiDefinition, err := json.Marshal(backupChi) + if err != nil { + return ctrl.Result{}, err + } + + backupDefinition, err := json.Marshal(newChb) + if err != nil { + return ctrl.Result{}, err + } + + // Example request + backupReq := &pb.BackupRequest{ + ChiDefinition: chiDefinition, + BackupDefinition: backupDefinition, + Parameters: map[string]string{}, + } + + //TODO write a plugin discovery controller + conn, err := grpc.NewClient("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + log.Error("did not connect: %v", err) + return ctrl.Result{}, err + } + defer conn.Close() + backupClient := pb.NewBackupClient(conn) + newChb.Status.State = "InProgress" + c.Status().Update(ctx, newChb) + resp, err := backupClient.Backup(ctx, backupReq) + fmt.Printf("Backup started: %s at %d\n", resp.BackupId, resp.StartedAt) + if err != nil { + newChb.Status.State = "Failed" + log.Error("could not trigger backup: %v", err) + return ctrl.Result{}, err + } + newChb.Status.State = "Completed" + newChb.Status.BackupId = resp.BackupId + newChb.Status.BackupName = resp.BackupName + newChb.Status.StartedAt = meta.Time{Time: time.Unix(resp.StartedAt, 0)} + newChb.Status.StoppedAt = meta.Time{Time: time.Unix(resp.StoppedAt, 0)} + + return ctrl.Result{}, nil +} diff --git a/pkg/plugin/backup/backup.pb.go b/pkg/plugin/backup/backup.pb.go new file mode 100644 index 000000000..5e79f2384 --- /dev/null +++ b/pkg/plugin/backup/backup.pb.go @@ -0,0 +1,252 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.7 +// protoc v5.29.3 +// source: pkg/plugin/proto/backup.proto + +package backup + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Request to initiate a backup +type BackupRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // REQUIRED. JSON serialization of the CHI being backed up + ChiDefinition []byte `protobuf:"bytes,1,opt,name=chi_definition,json=chiDefinition,proto3" json:"chi_definition,omitempty"` + // REQUIRED. JSON serialization of the Backup object + BackupDefinition []byte `protobuf:"bytes,2,opt,name=backup_definition,json=backupDefinition,proto3" json:"backup_definition,omitempty"` + // } Backup configuration parameters (from Backup or ScheduledBackup object) + Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupRequest) Reset() { + *x = BackupRequest{} + mi := &file_pkg_plugin_proto_backup_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupRequest) ProtoMessage() {} + +func (x *BackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_plugin_proto_backup_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupRequest.ProtoReflect.Descriptor instead. +func (*BackupRequest) Descriptor() ([]byte, []int) { + return file_pkg_plugin_proto_backup_proto_rawDescGZIP(), []int{0} +} + +func (x *BackupRequest) GetChiDefinition() []byte { + if x != nil { + return x.ChiDefinition + } + return nil +} + +func (x *BackupRequest) GetBackupDefinition() []byte { + if x != nil { + return x.BackupDefinition + } + return nil +} + +func (x *BackupRequest) GetParameters() map[string]string { + if x != nil { + return x.Parameters + } + return nil +} + +// Result returned after backup completes +type BackupResult struct { + state protoimpl.MessageState `protogen:"open.v1"` + // REQUIRED. Machine-readable ID of the backup + BackupId string `protobuf:"bytes,1,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"` + // OPTIONAL. Human-readable name of the backup + BackupName string `protobuf:"bytes,2,opt,name=backup_name,json=backupName,proto3" json:"backup_name,omitempty"` + // REQUIRED. Unix timestamp of the start time of the backup + StartedAt int64 `protobuf:"varint,3,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + // REQUIRED. Unix timestamp of the stop time of the backup + StoppedAt int64 `protobuf:"varint,4,opt,name=stopped_at,json=stoppedAt,proto3" json:"stopped_at,omitempty"` + // OPTIONAL. 
Plugin-specific metadata key/values + Metadata map[string]string `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupResult) Reset() { + *x = BackupResult{} + mi := &file_pkg_plugin_proto_backup_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackupResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupResult) ProtoMessage() {} + +func (x *BackupResult) ProtoReflect() protoreflect.Message { + mi := &file_pkg_plugin_proto_backup_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupResult.ProtoReflect.Descriptor instead. +func (*BackupResult) Descriptor() ([]byte, []int) { + return file_pkg_plugin_proto_backup_proto_rawDescGZIP(), []int{1} +} + +func (x *BackupResult) GetBackupId() string { + if x != nil { + return x.BackupId + } + return "" +} + +func (x *BackupResult) GetBackupName() string { + if x != nil { + return x.BackupName + } + return "" +} + +func (x *BackupResult) GetStartedAt() int64 { + if x != nil { + return x.StartedAt + } + return 0 +} + +func (x *BackupResult) GetStoppedAt() int64 { + if x != nil { + return x.StoppedAt + } + return 0 +} + +func (x *BackupResult) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +var File_pkg_plugin_proto_backup_proto protoreflect.FileDescriptor + +const file_pkg_plugin_proto_backup_proto_rawDesc = "" + + "\n" + + "\x1dpkg/plugin/proto/backup.proto\x12\x12altinity.backup.v1\"\xf5\x01\n" + + "\rBackupRequest\x12%\n" + + "\x0echi_definition\x18\x01 \x01(\fR\rchiDefinition\x12+\n" + + "\x11backup_definition\x18\x02 \x01(\fR\x10backupDefinition\x12Q\n" + + "\n" + + "parameters\x18\x03 \x03(\v21.altinity.backup.v1.BackupRequest.ParametersEntryR\n" + + "parameters\x1a=\n" + + "\x0fParametersEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x93\x02\n" + + "\fBackupResult\x12\x1b\n" + + "\tbackup_id\x18\x01 \x01(\tR\bbackupId\x12\x1f\n" + + "\vbackup_name\x18\x02 \x01(\tR\n" + + "backupName\x12\x1d\n" + + "\n" + + "started_at\x18\x03 \x01(\x03R\tstartedAt\x12\x1d\n" + + "\n" + + "stopped_at\x18\x04 \x01(\x03R\tstoppedAt\x12J\n" + + "\bmetadata\x18\x05 \x03(\v2..altinity.backup.v1.BackupResult.MetadataEntryR\bmetadata\x1a;\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x012W\n" + + "\x06Backup\x12M\n" + + "\x06Backup\x12!.altinity.backup.v1.BackupRequest\x1a .altinity.backup.v1.BackupResultB;Z9github.com/altinity/clickhouse-operator/pkg/plugin/backupb\x06proto3" + +var ( + file_pkg_plugin_proto_backup_proto_rawDescOnce sync.Once + file_pkg_plugin_proto_backup_proto_rawDescData []byte +) + +func file_pkg_plugin_proto_backup_proto_rawDescGZIP() []byte { + file_pkg_plugin_proto_backup_proto_rawDescOnce.Do(func() { + file_pkg_plugin_proto_backup_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_pkg_plugin_proto_backup_proto_rawDesc), len(file_pkg_plugin_proto_backup_proto_rawDesc))) + }) + return file_pkg_plugin_proto_backup_proto_rawDescData +} + +var 
file_pkg_plugin_proto_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_pkg_plugin_proto_backup_proto_goTypes = []any{ + (*BackupRequest)(nil), // 0: altinity.backup.v1.BackupRequest + (*BackupResult)(nil), // 1: altinity.backup.v1.BackupResult + nil, // 2: altinity.backup.v1.BackupRequest.ParametersEntry + nil, // 3: altinity.backup.v1.BackupResult.MetadataEntry +} +var file_pkg_plugin_proto_backup_proto_depIdxs = []int32{ + 2, // 0: altinity.backup.v1.BackupRequest.parameters:type_name -> altinity.backup.v1.BackupRequest.ParametersEntry + 3, // 1: altinity.backup.v1.BackupResult.metadata:type_name -> altinity.backup.v1.BackupResult.MetadataEntry + 0, // 2: altinity.backup.v1.Backup.Backup:input_type -> altinity.backup.v1.BackupRequest + 1, // 3: altinity.backup.v1.Backup.Backup:output_type -> altinity.backup.v1.BackupResult + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_pkg_plugin_proto_backup_proto_init() } +func file_pkg_plugin_proto_backup_proto_init() { + if File_pkg_plugin_proto_backup_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_pkg_plugin_proto_backup_proto_rawDesc), len(file_pkg_plugin_proto_backup_proto_rawDesc)), + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_pkg_plugin_proto_backup_proto_goTypes, + DependencyIndexes: file_pkg_plugin_proto_backup_proto_depIdxs, + MessageInfos: file_pkg_plugin_proto_backup_proto_msgTypes, + }.Build() + File_pkg_plugin_proto_backup_proto = out.File + file_pkg_plugin_proto_backup_proto_goTypes = nil + file_pkg_plugin_proto_backup_proto_depIdxs = nil +} diff --git a/pkg/plugin/backup/backup_grpc.pb.go b/pkg/plugin/backup/backup_grpc.pb.go new file mode 100644 index 000000000..1fb3ed78f --- /dev/null +++ b/pkg/plugin/backup/backup_grpc.pb.go @@ -0,0 +1,127 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.29.3 +// source: pkg/plugin/proto/backup.proto + +package backup + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Backup_Backup_FullMethodName = "/altinity.backup.v1.Backup/Backup" +) + +// BackupClient is the client API for Backup service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+// +// Service definition for managing backups +type BackupClient interface { + // Trigger a ClickHouse backup + Backup(ctx context.Context, in *BackupRequest, opts ...grpc.CallOption) (*BackupResult, error) +} + +type backupClient struct { + cc grpc.ClientConnInterface +} + +func NewBackupClient(cc grpc.ClientConnInterface) BackupClient { + return &backupClient{cc} +} + +func (c *backupClient) Backup(ctx context.Context, in *BackupRequest, opts ...grpc.CallOption) (*BackupResult, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(BackupResult) + err := c.cc.Invoke(ctx, Backup_Backup_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BackupServer is the server API for Backup service. +// All implementations must embed UnimplementedBackupServer +// for forward compatibility. +// +// Service definition for managing backups +type BackupServer interface { + // Trigger a ClickHouse backup + Backup(context.Context, *BackupRequest) (*BackupResult, error) + mustEmbedUnimplementedBackupServer() +} + +// UnimplementedBackupServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedBackupServer struct{} + +func (UnimplementedBackupServer) Backup(context.Context, *BackupRequest) (*BackupResult, error) { + return nil, status.Errorf(codes.Unimplemented, "method Backup not implemented") +} +func (UnimplementedBackupServer) mustEmbedUnimplementedBackupServer() {} +func (UnimplementedBackupServer) testEmbeddedByValue() {} + +// UnsafeBackupServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to BackupServer will +// result in compilation errors. +type UnsafeBackupServer interface { + mustEmbedUnimplementedBackupServer() +} + +func RegisterBackupServer(s grpc.ServiceRegistrar, srv BackupServer) { + // If the following call pancis, it indicates UnimplementedBackupServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Backup_ServiceDesc, srv) +} + +func _Backup_Backup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BackupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServer).Backup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Backup_Backup_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServer).Backup(ctx, req.(*BackupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Backup_ServiceDesc is the grpc.ServiceDesc for Backup service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Backup_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "altinity.backup.v1.Backup", + HandlerType: (*BackupServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Backup", + Handler: _Backup_Backup_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/plugin/proto/backup.proto", +} diff --git a/pkg/plugin/proto/backup.proto b/pkg/plugin/proto/backup.proto new file mode 100644 index 000000000..956b84537 --- /dev/null +++ b/pkg/plugin/proto/backup.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; + +package altinity.backup.v1; +option go_package = "github.com/altinity/clickhouse-operator/pkg/plugin/backup"; + +// Service definition for managing backups +service Backup { + // Trigger a ClickHouse backup + rpc Backup (BackupRequest) returns (BackupResult); +} + +// Request to initiate a backup +message BackupRequest { + // REQUIRED. JSON serialization of the CHI being backed up + bytes chi_definition = 1; + + // REQUIRED. JSON serialization of the Backup object + bytes backup_definition = 2; + + // Backup configuration parameters (from Backup or ScheduledBackup object) + map<string, string> parameters = 3; +} + +// Result returned after backup completes +message BackupResult { + // REQUIRED. Machine-readable ID of the backup + string backup_id = 1; + + // OPTIONAL. Human-readable name of the backup + string backup_name = 2; + + // REQUIRED. Unix timestamp of the start time of the backup + int64 started_at = 3; + + // REQUIRED. Unix timestamp of the stop time of the backup + int64 stopped_at = 4; + + // OPTIONAL. Plugin-specific metadata key/values + map<string, string> metadata = 5; +}
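A minimal smoke-test sketch (not part of the patch) for exercising the Backup RPC end to end against the mock plugin from cmd/mockserver/main.go. It assumes the mock server is already listening on localhost:50051 and uses only the generated pb API introduced above; the empty JSON payloads and the "invokedBy" parameter are placeholders standing in for the serialized CHI and ClickHouseBackup objects that pkg/controller/chb/controller.go would normally marshal.

package main

import (
	"context"
	"log"
	"time"

	pb "github.com/altinity/clickhouse-operator/pkg/plugin/backup"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumes the mock plugin server from cmd/mockserver/main.go is running on :50051.
	conn, err := grpc.NewClient("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Empty JSON documents stand in for the serialized CHI and ClickHouseBackup objects.
	resp, err := pb.NewBackupClient(conn).Backup(ctx, &pb.BackupRequest{
		ChiDefinition:    []byte(`{}`),
		BackupDefinition: []byte(`{}`),
		Parameters:       map[string]string{"invokedBy": "smoke-test"},
	})
	if err != nil {
		log.Fatalf("Backup RPC failed: %v", err)
	}
	log.Printf("backup %s (%s) ran from %d to %d, metadata=%v",
		resp.BackupId, resp.BackupName, resp.StartedAt, resp.StoppedAt, resp.Metadata)
}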