// Copyright 2025 NVIDIA CORPORATION
// SPDX-License-Identifier: Apache-2.0

package app

import (
	"flag"

	"github.com/NVIDIA/KAI-scheduler/pkg/apis/scheduling/v2alpha2"
	"github.com/NVIDIA/KAI-scheduler/pkg/podgroupcontroller/controllers"

	"go.uber.org/zap/zapcore"
	v1 "k8s.io/api/core/v1"
	schedulingv1 "k8s.io/api/scheduling/v1"
	"k8s.io/apimachinery/pkg/fields"
	"sigs.k8s.io/controller-runtime/pkg/client"

	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
	// to ensure that exec-entrypoint and run can make use of them.
	_ "k8s.io/client-go/plugin/pkg/client/auth"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	"sigs.k8s.io/controller-runtime/pkg/cache"
	// +kubebuilder:scaffold:imports
)

const (
	// schedulerNameField is the pod field-selector key used below to cache
	// only pods assigned to this scheduler.
	schedulerNameField = "spec.schedulerName"
)

var (
	scheme   = runtime.NewScheme()
	setupLog = ctrl.Log.WithName("setup")
)

func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(v2alpha2.AddToScheme(scheme))

	// +kubebuilder:scaffold:scheme
}

// Run builds and starts the controller manager for PodGroup reconciliation.
// It blocks until the process receives a termination signal or the manager
// returns an error.
func Run() error {
	options := InitOptions()
	opts := zap.Options{
		Development: true,
		TimeEncoder: zapcore.ISO8601TimeEncoder,
		// controller-runtime's zapr adapter maps logr verbosity levels to
		// negative zap levels, so the verbosity flag is negated here.
		Level: zapcore.Level(-1 * options.LogLevel),
	}
	opts.BindFlags(flag.CommandLine)
	flag.Parse()
	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))

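	// Raise client-go's conservative default client-side rate limits so the
	// controller can keep up with API traffic on larger clusters.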
	clientConfig := ctrl.GetConfigOrDie()
	clientConfig.QPS = float32(options.Qps)
	clientConfig.Burst = options.Burst

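	// Scope the manager's cache: only pods assigned to this scheduler are
	// listed and watched, keeping cache size and API server load proportional
	// to the workloads this controller actually manages.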
	schedulerSelector := fields.Set{schedulerNameField: options.SchedulerName}.AsSelector()
	cacheOptions := cache.Options{}
	cacheOptions.ByObject = map[client.Object]cache.ByObject{
		&v1.Pod{}:                     {Field: schedulerSelector},
		&v1.Node{}:                    {}, // TODO: filter by strict/non-strict runai nodes
		&schedulingv1.PriorityClass{}: {},
		&v2alpha2.PodGroup{}:          {},
	}

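	// When leader election is enabled, LeaderElectionID names the election
	// lock shared by all replicas of this controller; only the lock holder
	// runs the reconcile loops.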
	mgr, err := ctrl.NewManager(clientConfig, ctrl.Options{
		Scheme:                 scheme,
		Cache:                  cacheOptions,
		HealthProbeBindAddress: options.ProbeAddr,
		LeaderElection:         options.EnableLeaderElection,
		LeaderElectionID:       "3f770c00.run.ai",
		// LeaderElectionReleaseOnCancel defines whether the leader should step
		// down voluntarily when the Manager ends. This requires the binary to
		// exit immediately when the Manager stops; otherwise this setting is
		// unsafe. Enabling it significantly speeds up voluntary leader
		// transitions, as the new leader doesn't have to wait the
		// LeaseDuration first.
		//
		// In the default scaffold, the program ends immediately after the
		// manager stops, so it would be fine to enable this option. However,
		// if you intend to perform any operations, such as cleanup, after the
		// manager stops, then enabling it might be unsafe.
		// LeaderElectionReleaseOnCancel: true,
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		return err
	}

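	// Register the PodGroup reconciler with the manager;
	// MaxConcurrentReconciles bounds how many PodGroups are reconciled in
	// parallel.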
	configs := controllers.Configs{
		MaxConcurrentReconciles: options.MaxConcurrentReconciles,
	}
	if err = (&controllers.PodGroupReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr, configs); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "PodGroup")
		return err
	}
	// +kubebuilder:scaffold:builder

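	// Expose liveness and readiness endpoints on the health probe address.
	// healthz.Ping always succeeds, so these only report that the probe
	// server itself is serving.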
	if err = mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up health check")
		return err
	}
	if err = mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up ready check")
		return err
	}

	setupLog.Info("starting manager")
	if err = mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		setupLog.Error(err, "problem running manager")
		return err
	}

	return nil
}