diff --git a/.github/workflows/build-eviction-controller.yml b/.github/workflows/build-eviction-controller.yml
index 864dc58..450adb0 100644
--- a/.github/workflows/build-eviction-controller.yml
+++ b/.github/workflows/build-eviction-controller.yml
@@ -52,6 +52,20 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Sanitize branch name for Docker tag
+ id: sanitize
+ run: |
+ BRANCH_NAME="$GITHUB_REF_NAME"
+ # Replace any character that's not alphanumeric, dot, underscore, or hyphen with a hyphen
+ SANITIZED_BRANCH=$(echo "$BRANCH_NAME" | sed 's/[^a-zA-Z0-9._-]/-/g' | sed 's/--*/-/g' | sed -e 's/^-*//' -e 's/-*$//')
+ # Ensure it's not empty and doesn't start with a dot or hyphen
+ if [[ -z "$SANITIZED_BRANCH" || "$SANITIZED_BRANCH" =~ ^[.-] ]]; then
+ SANITIZED_BRANCH="branch-$SANITIZED_BRANCH"
+ fi
+ echo "branch_name=$SANITIZED_BRANCH" >> "$GITHUB_OUTPUT"
+ echo "Original branch: $BRANCH_NAME"
+ echo "Sanitized branch: $SANITIZED_BRANCH"
+
- name: Extract metadata
id: meta
uses: docker/metadata-action@v5
@@ -67,8 +81,11 @@ jobs:
type=semver,pattern={{major}}
# Latest tag for main branch
type=raw,value=latest,enable={{is_default_branch}}
- # SHA-based tag
- type=sha,prefix={{branch}}-
+ # SHA-based tag (sanitized branch name)
+ type=sha,prefix=${{ steps.sanitize.outputs.branch_name }}-
+ flavor: |
+ suffix=
+ latest=auto
- name: Build and push Docker image
uses: docker/build-push-action@v5
diff --git a/eviction-controller/Dockerfile b/eviction-controller/Dockerfile
index 43bceaa..abe59e7 100644
--- a/eviction-controller/Dockerfile
+++ b/eviction-controller/Dockerfile
@@ -10,7 +10,12 @@ RUN apk --no-cache add \
postgresql-client
# Install kubectl
-RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$(uname -m)/kubectl" && \
+RUN echo "Build timestamp: $(date)" && \
+ ARCH=$(uname -m) && \
+ if [ "$ARCH" = "aarch64" ]; then ARCH="arm64"; fi && \
+ if [ "$ARCH" = "x86_64" ]; then ARCH="amd64"; fi && \
+ echo "Downloading kubectl for architecture: $ARCH" && \
+ curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$ARCH/kubectl" && \
install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl && \
rm kubectl
diff --git a/skaffold.yaml b/skaffold.yaml
index 46e996b..849cf2b 100644
--- a/skaffold.yaml
+++ b/skaffold.yaml
@@ -26,12 +26,6 @@ deploy:
valuesFiles:
- zoo-project-dru/values_minikube.yaml
- hooks:
- after:
- - host:
- command: ["sh", "-c", "./wait-for-it.sh"]
- os: [darwin, linux]
-
profiles:
- name: hostpath
@@ -48,7 +42,7 @@ profiles:
value:
name: kyverno
remoteChart: kyverno/kyverno
- version: "3.1.4"
+ version: "3.5.2"
namespace: kyverno-system
createNamespace: true
values:
@@ -70,14 +64,23 @@ profiles:
path: /deploy/helm/releases/1/setValues
value:
keda.enabled: "true"
+ keda.skipScaledObject: "false" # Override to enable ScaledObject creation
keda.triggers.postgresql.enabled: "true"
+ keda.triggers.postgresql.useConfigMap: "false" # Use simple inline query
+ keda.triggers.postgresql.query: "SELECT COUNT(*) FROM workers WHERE status = 1"
keda.triggers.rabbitmq.enabled: "true"
keda.kyverno.enabled: "false"
- keda.evictionController.enabled: "true"
- keda.evictionController.image.tag: ""
keda.kyverno.policies.zoofpmProtection.enabled: "true"
+ keda.evictionController.enabled: "true"
+ keda.evictionController.image.pullPolicy: "Never"
zoofpm.autoscaling.enabled: "false"
+ - op: add
+ path: /deploy/helm/releases/1/setValueTemplates
+ value:
+ keda.evictionController.image.repository: "{{.IMAGE_REPO_ghcr_io_zoo_project_zoofpm_eviction_controller}}"
+ keda.evictionController.image.tag: "{{.IMAGE_TAG_ghcr_io_zoo_project_zoofpm_eviction_controller}}"
+
- name: webui
patches:
- op: add
@@ -106,7 +109,7 @@ profiles:
port: 9000
localPort: 9000
- resourceType: service
- resourceName: s3-service
+ resourceName: s3-service-console
namespace: zoo
port: 9001
localPort: 9001
@@ -124,17 +127,17 @@ profiles:
port: 2746
localPort: 2746
# Prometheus
- - resourceType: service
- resourceName: zoo-project-dru-kube-prome-prometheus
- namespace: zoo
- port: 9090
- localPort: 9090
- # Grafana
- - resourceType: service
- resourceName: zoo-project-dru-grafana
- namespace: zoo
- port: 80
- localPort: 3000
+ # - resourceType: service
+ # resourceName: zoo-project-dru-kube-prome-prometheus
+ # namespace: zoo
+ # port: 9090
+ # localPort: 9090
+ # # Grafana
+ # - resourceType: service
+ # resourceName: zoo-project-dru-grafana
+ # namespace: zoo
+ # port: 80
+ # localPort: 3000
- resourceType: service
resourceName: zoo-project-dru-service
namespace: zoo
@@ -151,7 +154,7 @@ profiles:
port: 9000
localPort: 9000
- resourceType: service
- resourceName: s3-service
+ resourceName: s3-service-console
namespace: zoo
port: 9001
localPort: 9001
@@ -181,7 +184,7 @@ portForward:
port: 9000
localPort: 9000
- resourceType: service
- resourceName: s3-service
+ resourceName: s3-service-console
namespace: zoo
port: 9001
localPort: 9001
\ No newline at end of file
diff --git a/zoo-project-dru/Chart.yaml b/zoo-project-dru/Chart.yaml
index dba9e23..0f124ca 100644
--- a/zoo-project-dru/Chart.yaml
+++ b/zoo-project-dru/Chart.yaml
@@ -23,7 +23,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.7.5
+version: 0.8.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -40,24 +40,9 @@ keywords:
- stac
dependencies:
- - name: postgresql
- version: "16.6.0"
- repository: https://charts.bitnami.com/bitnami
- condition: postgresql.enabled
-
- - name: rabbitmq
- version: "15.4.1"
- repository: https://charts.bitnami.com/bitnami
- condition: broker.enabled
-
- - name: redis
- version: "20.11.4"
- repository: https://charts.bitnami.com/bitnami
- condition: redis.enabled
-
- name: minio
- version: "16.0.0"
- repository: https://charts.bitnami.com/bitnami
+ version: "5.4.0"
+ repository: https://charts.min.io/
condition: minio.enabled
- name: keda
@@ -66,7 +51,7 @@ dependencies:
condition: keda.enabled
- name: kyverno
- version: "3.1.4"
+ version: "3.5.2"
repository: https://kyverno.github.io/kyverno/
condition: keda.kyverno.enabled
@@ -87,24 +72,29 @@ dependencies:
annotations:
artifacthub.io/changes: |
- - kind: changed
- description: "Update to the latest ZOO-Project version with eoap-cwlwrap and cwl2ogc support"
+ - kind: removed
+ description: "Remove bitnami/rabbitmq dependency and integrate the official rabbitmq Docker image"
links:
- - name: GitHub Commit
- url: https://github.com/ZOO-Project/ZOO-Project/commit/39b6fc74b6592b4584be5a0b2740dde11a1ffafc
- - kind: changed
- description: "Update the cwlwrapper-assets templates for stagein and stageout"
- - kind: added
- description: "Add stagein-file.yaml to the ConfigMap"
- - kind: changed
- description: "Update the postgresql scripts to add support for cwl2ogc schema definition"
- - kind: added
- description: "Add processes_profiles section to support profile links in the response headers"
+ - name: RabbitMQ Docker Hub
+ url: https://hub.docker.com/_/rabbitmq
+ - kind: removed
+ description: "Remove bitnami/postgresql dependency and integrate the official PostgreSQL Docker image"
+ links:
+ - name: PostgreSQL Docker Hub
+ url: https://hub.docker.com/_/postgres
+ - kind: removed
+ description: "Remove bitnami/redis dependency and integrate the official Redis Docker image"
+ links:
+ - name: Redis Docker Hub
+ url: https://hub.docker.com/_/redis
- kind: changed
- description: "Update the webui to use the latest nuxt-client image (0.0.3)"
+ description: "Migrate from bitnami/minio to the official one from https://charts.min.io/"
links:
- - name: DockerHub
- url: https://hub.docker.com/r/zooproject/nuxt-client/tags?name=0.0.3
+ - name: Artifact Hub
+ url: https://artifacthub.io/packages/helm/minio-official/minio
+ - kind: fixed
+ description: "Update documentation to reflect new dependencies and official Docker images integration"
+
artifacthub.io/links: |
- name: source
diff --git a/zoo-project-dru/README.md b/zoo-project-dru/README.md
index 5827734..6370959 100644
--- a/zoo-project-dru/README.md
+++ b/zoo-project-dru/README.md
@@ -15,6 +15,8 @@ This chart bootstraps a [ZOO-Project](http://zoo-project.org) deployment on a cl
* Kubernetes 1.19+
* Helm 3.2.0+
* PV provisioner support in the underlying infrastructure
+ * **Optional**: KEDA 2.14+ for autoscaling capabilities
+ * **Optional**: Kyverno 3.5+ for advanced pod protection
## Installing the Chart
@@ -22,7 +24,7 @@ To install the chart with the release name `my-zoo-project-dru`:
````bash
helm repo add zoo-project https://zoo-project.github.io/charts/
-helm install my-zoo-project-dru zoo-project/zoo-project-dru --version 0.7.5
+helm install my-zoo-project-dru zoo-project/zoo-project-dru --version 0.8.0
````
## Parameters
@@ -48,7 +50,7 @@ There are two persistent storage: `procServices` and `tmp`. The ZOO-Project uses
### Global parameters
-See the reference [PostgreSQL chart documentation](https://artifacthub.io/packages/helm/bitnami/postgresql#global-parameters) for other options.
+See the configuration parameters below for the official PostgreSQL Docker image integration.
| Name | Description | Value |
|:-------------------------------------------|:---------------------------------------------------------|:-------------------------|
@@ -88,55 +90,98 @@ global.postgresql.auth.existingSecret: postgresql-secret
If an environment variable for PostgreSQL is available from the ZOO-Kernel or ZOO-FPM pods, it means that the database setting will use these variables rather than the one defined in the `main.cfg` available from the configmap.
-### Dependencies
+### Dependency Management
+
+#### MinIO
+
+See the reference [MinIO chart documentation](https://artifacthub.io/packages/helm/minio-official/minio) for more information.
+
+| Name | Description | Value |
+|:---------------------------------------------------------|:-----------------------------------|:----------------------------------------------------------------------|
+| minio.enabled | Enable MinIO for storage | false |
+| minio.mode | MinIO deployment mode | "standalone" |
+| minio.replicas | Number of MinIO replicas | 1 |
+| minio.rootUser | MinIO root username | "minio-admin" |
+| minio.rootPassword | MinIO root password | "minio-secret-password" |
+| minio.fullnameOverride | MinIO service name override | "s3-service" |
+| minio.buckets | Default buckets to create | [{"name": "eoepca", "policy": "none"}, {"name": "results", "policy": "none"}] |
+| minio.persistence.enabled | Enable MinIO persistence | true |
+| minio.persistence.size | MinIO storage size | "10Gi" |
+| minio.persistence.storageClass | Storage class for MinIO | "" |
+| minio.service.type | MinIO service type | "ClusterIP" |
+| minio.service.port | MinIO service port | 9000 |
+| minio.consoleService.port | MinIO console port | 9001 |
#### PostgreSQL
-See the reference [PostgreSQL chart documentation](https://artifacthub.io/packages/helm/bitnami/postgresql) for more parameters.
+This chart deploys PostgreSQL using the official [PostgreSQL Docker image](https://hub.docker.com/_/postgres).
+
| Name | Description | Value |
|:-------------------------------------------|:----------------------------------------------------------------|:-------------------------|
-| postgresql.defineEnvironmentVariables | Set it to true to get PostgreSQL environment variables defined | false |
-| postgresql.enabled | Is database used to store process execution status | true |
-| postgresql.primary.initdb.scriptsConfigMap | The init script config map | true |
-
-When `postgresql.defineEnvironmentVariables` is set to true, the environment variables for PostgreSQL (`PGHOST`,`PGPORT`,`PGUSER`,`PGPASSWORD`,`PGDATABASE`) will be defined for the ZOO-Kernel and the ZOO-FPM pods.
+| postgresql.enabled | Enable PostgreSQL deployment | true |
+| postgresql.name | Name of the PostgreSQL deployment | postgresql-db |
+| postgresql.serviceName | Name of the PostgreSQL service | postgresql-db-service |
+| postgresql.image.repository | PostgreSQL Docker image repository | postgres |
+| postgresql.image.tag | PostgreSQL Docker image tag | 16-alpine |
+| postgresql.image.pullPolicy | Image pull policy | IfNotPresent |
+| postgresql.resources.limits.cpu | CPU limit for PostgreSQL | 1000m |
+| postgresql.resources.limits.memory | Memory limit for PostgreSQL | 1Gi |
+| postgresql.resources.requests.cpu | CPU request for PostgreSQL | 250m |
+| postgresql.resources.requests.memory | Memory request for PostgreSQL | 256Mi |
+| postgresql.persistence.enabled | Enable persistent storage | true |
+| postgresql.persistence.size | Size of the persistent volume | 8Gi |
+| postgresql.persistence.accessMode | Access mode for the persistent volume | ReadWriteOnce |
+| postgresql.persistence.storageClass | Storage class for the persistent volume | "" |
+| postgresql.auth.createSecret | Automatically create a secret for PostgreSQL credentials | false |
+| postgresql.primary.initdb.scriptsConfigMap | ConfigMap containing initialization scripts | postgresql-primary-init-scripts |
+
+When `postgresql.enabled` is set to true, the environment variables for PostgreSQL (`PGHOST`,`PGPORT`,`PGUSER`,`PGPASSWORD`,`PGDATABASE`) will be defined for the ZOO-Kernel and the ZOO-FPM pods.
If an environment variable for PostgreSQL is available from the ZOO-Kernel or ZOO-FPM pods, it means that the database setting will use these variables rather than the one defined in the `main.cfg` available from the configmap.
#### RabbitMQ
-See the reference [RabbitMQ chart documentation](https://artifacthub.io/packages/helm/bitnami/rabbitmq) for more parameters.
+This chart now integrates RabbitMQ using the [official Docker image](https://hub.docker.com/_/rabbitmq).
+
| Name | Description | Value |
|:-------------------------------------------|:---------------------------------------------------------|:---------------------------------------------|
-| rabbitmq.auth.username | User that will be used to connect to RabbitMQ | RABBITMQ_USERNAME |
-| rabbitmq.auth.password | Password for the user | CHANGEME |
-| rabbitmq.loadDefinition.enabled | Enable loading a RabbitMQ definitions file to configure RabbitMQ | true |
-| rabbitmq.loadDefinition.existingSecret | Existing secret with the load definitions file | load-definition |
-| rabbitmq.extraConfiguration | Configuration file content: extra configuration to be appended to RabbitMQ configuration | load_definitions = /app/load_definition.json |
-
-#### MinIO
-
-See the reference [MinIO chart documentation](https://artifacthub.io/packages/helm/bitnami/minio) for more parameters.
-
-| Name | Description | Value |
-|:--------------|:-----------------------------------------------------|:------|
-| minio.enabled | Is MinIO used for storage in place of AWS | false |
-| minio.defaultBuckets | Comma, semi-colon or space separated list of buckets to create at initialization (only in standalone mode) | "processingresults" |
-| minio.fullnameOverride | String to fully override the MinIO's common.names.fullname template | "s3-service" |
-
+| rabbitmq.enabled | Enable integrated RabbitMQ deployment | true |
+| rabbitmq.image.repository | RabbitMQ image repository | rabbitmq |
+| rabbitmq.image.tag | RabbitMQ image tag | 4.1.4-alpine |
+| rabbitmq.auth.username | RabbitMQ default user | zoo |
+| rabbitmq.auth.password | RabbitMQ default password | CHANGEME |
+| rabbitmq.config | Override RabbitMQ configuration (if empty, uses files/rabbitmq/rabbitmq.conf) | "" |
+| rabbitmq.autoSetup.enabled | Enable automatic RabbitMQ configuration via HTTP API | true |
+| rabbitmq.autoSetup.ttlSecondsAfterFinished | Cleanup setup job after completion (seconds) | 30 |
+| rabbitmq.definitions | RabbitMQ definitions for queues, exchanges, bindings | Automatically templated |
#### Redis
-See the reference [Redis chart documentation](https://artifacthub.io/packages/helm/bitnami/redis) for more parameters.
-
-| Name | Description | Value |
-|:--------------|:-----------------------------------------------------|:------|
-| redis.enabled | Is Redis used by the current deployment | false |
-| redis.replica.replicaCount | Number of Redis replica | 1 |
-| redis.auth.enabled | Number of Redis replica | false |
+This chart deploys Redis using the official [Redis Docker image](https://hub.docker.com/_/redis).
+| Name | Description | Value |
+|:-------------------------------------------|:---------------------------------------------------------|:-------------------------|
+| redis.enabled | Enable Redis deployment | true |
+| redis.name | Name of the Redis deployment | redis-db |
+| redis.serviceName | Name of the Redis service | redis-db-service |
+| redis.port | Redis port | 6379 |
+| redis.image.repository | Redis Docker image repository | redis |
+| redis.image.tag | Redis Docker image tag | 7-alpine |
+| redis.image.pullPolicy | Image pull policy | IfNotPresent |
+| redis.auth.enabled | Enable Redis authentication | false |
+| redis.auth.password | Redis password (if auth enabled) | "" |
+| redis.resources.limits.cpu | CPU limit for Redis | 500m |
+| redis.resources.limits.memory | Memory limit for Redis | 512Mi |
+| redis.resources.requests.cpu | CPU request for Redis | 100m |
+| redis.resources.requests.memory | Memory request for Redis | 128Mi |
+| redis.persistence.enabled | Enable persistent storage | true |
+| redis.persistence.size | Size of the persistent volume | 4Gi |
+| redis.persistence.accessMode | Access mode for the persistent volume | ReadWriteOnce |
+| redis.persistence.storageClass | Storage class for the persistent volume | "" |
+
+For high-availability requirements, consider external Redis cluster solutions.
### CookieCutter
@@ -156,13 +201,11 @@ See the reference [Redis chart documentation](https://artifacthub.io/packages/he
| zookernel.extraMountPoints | In case you add files in one or more `files/
` subdirectories and want to access them from the ZOO-Kernel | [] |
| zoofpm.extraMountPoints | In case you add files in one or more `files/` subdirectories and want to access them from the ZOO-FPM | [] |
-#### KEDA
+### KEDA
-KEDA (Kubernetes Event-driven Autoscaler) is used to provide event-driven autoscaling based on PostgreSQL and RabbitMQ metrics.
+KEDA (Kubernetes Event-driven Autoscaler) provides intelligent event-driven autoscaling with worker protection and scale-to-zero capabilities.
-##### KEDA Autoscaling
-
-KEDA provides event-driven autoscaling with intelligent worker protection and scale-to-zero capabilities.
+#### KEDA Autoscaling Configuration
| Name | Description | Value |
|:-------------------------------------------|:---------------------------------------------------------|:-------------------------|
@@ -171,16 +214,10 @@ KEDA provides event-driven autoscaling with intelligent worker protection and sc
| keda.maxReplicas | Maximum number of replicas | 10 |
| keda.pollingInterval | Interval for checking metrics (seconds) | 10 |
| keda.cooldownPeriod | Cooldown period after scaling (seconds) | 60 |
-| keda.triggers.postgresql.enabled | Enable PostgreSQL worker count trigger | true |
-| keda.triggers.postgresql.targetQueryValue | Target value for PostgreSQL query | 0.5 |
-| keda.triggers.postgresql.activationTargetQueryValue | Activation threshold for PostgreSQL query | 0.1 |
-| keda.triggers.rabbitmq.enabled | Enable RabbitMQ queue length trigger | true |
-| keda.triggers.rabbitmq.queueName | RabbitMQ queue name to monitor | zoo_service_queue |
-| keda.triggers.rabbitmq.value | Target queue length for scaling | 1 |
-#### PostgreSQL Trigger Configuration
+#### PostgreSQL Trigger
-The PostgreSQL trigger monitors both active services and workers using an intelligent query:
+Monitors active processing jobs and workers for intelligent scaling:
| Name | Description | Value |
|:--------------------------------------------|:---------------------------------------------------------------|:-------------------------|
@@ -192,21 +229,19 @@ The PostgreSQL trigger monitors both active services and workers using an intell
| keda.triggers.postgresql.dbName | Database name (uses global.postgresql.auth.database if empty) | "" |
| keda.triggers.postgresql.userName | Username (uses global.postgresql.auth.username if empty) | "" |
| keda.triggers.postgresql.sslmode | SSL mode for connection | "disable" |
-| keda.triggers.postgresql.query | Custom PostgreSQL query for scaling metrics | See below |
-
-#### RabbitMQ Trigger Configuration
+#### RabbitMQ Trigger
-The RabbitMQ trigger monitors queue length for immediate scaling:
+Monitors queue length for immediate scaling response:
-| Name | Description | Value |
-|:--------------------------------------------|:---------------------------------------------------------------|:-------------------------|
-| keda.triggers.rabbitmq.enabled | Enable RabbitMQ trigger for scaling | true |
-| keda.triggers.rabbitmq.protocol | RabbitMQ protocol | "amqp" |
-| keda.triggers.rabbitmq.queueName | Queue name to monitor | "zoo_service_queue" |
-| keda.triggers.rabbitmq.mode | Scaling mode | "QueueLength" |
-| keda.triggers.rabbitmq.value | Target messages per replica | "1" |
-| keda.triggers.rabbitmq.host | RabbitMQ host (auto-generated if empty) | "" |
+| Name | Description | Value |
+|:-------------------------------------------|:---------------------------------------------------------|:-------------------------|
+| keda.triggers.rabbitmq.enabled | Enable RabbitMQ queue length trigger | true |
+| keda.triggers.rabbitmq.queueName | RabbitMQ queue name to monitor | zoo_service_queue |
+| keda.triggers.rabbitmq.value | Target queue length for scaling | 1 |
+| keda.triggers.rabbitmq.host | RabbitMQ host (auto-generated if empty) | "" |
+| keda.triggers.rabbitmq.username | RabbitMQ username for trigger auth | zoo |
+| keda.triggers.rabbitmq.password | RabbitMQ password for trigger auth | CHANGEME |
#### KEDA Authentication
@@ -228,7 +263,7 @@ global:
When no existing secret is configured, KEDA uses values from `global.postgresql.auth.*` and creates a dedicated secret.
-##### KEDA Eviction Controller
+#### KEDA Eviction Controller
The eviction controller provides intelligent worker protection and pod annotation management.
@@ -243,7 +278,7 @@ The eviction controller provides intelligent worker protection and pod annotatio
| keda.evictionController.resources.limits.cpu | CPU resource limits | 200m |
| keda.evictionController.resources.limits.memory | Memory resource limits | 128Mi |
-##### Kyverno Integration
+#### Kyverno Integration
Kyverno provides admission-level protection for pods with active workers.
@@ -258,6 +293,28 @@ Kyverno provides admission-level protection for pods with active workers.
**Note:** Kyverno must be installed separately in the cluster. The chart only creates policy definitions.
+#### Quick Start with KEDA
+
+For rapid deployment with KEDA autoscaling capabilities:
+
+```bash
+# Deploy with Skaffold (includes KEDA operator installation)
+skaffold dev -p keda
+
+# Or manual Helm deployment (requires KEDA operator pre-installed)
+helm install zoo-project-dru ./zoo-project-dru \
+ --values ./zoo-project-dru/values_minikube.yaml \
+ --set keda.enabled=true \
+ --set keda.skipScaledObject=false \
+ --namespace zoo --create-namespace
+```
+
+**Important Configuration Notes**:
+- KEDA operator must be installed cluster-wide before enabling `keda.enabled=true`
+- Set `keda.skipScaledObject=false` to enable automatic ScaledObject creation
+- PostgreSQL triggers require worker tracking tables (automatically created)
+- Scale-to-zero works when `keda.minReplicas=0` (default)
+
#### Scaling Logic and Protection
The autoscaler implements a hybrid approach:
@@ -288,7 +345,6 @@ kubectl get events -n --sort-by='.lastTimestamp'
# Check worker protection status
kubectl get pods -n -o jsonpath='{range .items[*]}{.metadata.name}{": safe-to-evict="}{.metadata.annotations.cluster-autoscaler\.kubernetes\.io/safe-to-evict}{", workers="}{.metadata.annotations.zoo-project\.org/has-active-workers}{"\n"}{end}'
```
-
### Identity and Access Management
@@ -505,7 +561,7 @@ workflow:
- name: my-secret
````
-#### WES support
+### WES support
ZOO-Project-DRU can execute CWL workflows through the toil Workflow Execution Service (WES).
See [reference documentation](https://zoo-project.github.io/zoo-wes-runner/) for more informations.
@@ -517,7 +573,7 @@ See [reference documentation](https://zoo-project.github.io/zoo-wes-runner/) for
| workflow.inputs.WES_PASSWORD | The password to authenticate to access the WES | `"$$2y$$12$$ci.4U63YX83CwkyUrjqxAucnmi2xXOIlEF6T/KdP9824f1Rf1iyNG"` |
-#### Argo Workflows Support
+### Argo Workflows Support
ZOO-Project-DRU can execute CWL workflows through the official Argo Workflows chart (v3.7.1).
See [reference documentation](https://artifacthub.io/packages/helm/argo/argo-workflows) for more information.
@@ -589,7 +645,7 @@ See [reference documentation](https://artifacthub.io/packages/helm/argo/argo-eve
| argo-workflows.images.tag | Argo Workflows version | "v3.7.1" |
| argo-workflows.images.pullPolicy | Image pull policy | "IfNotPresent" |
-### Artifact Repository Configuration
+#### Artifact Repository Configuration
The chart automatically configures artifact storage using S3-compatible MinIO:
@@ -604,7 +660,7 @@ The chart automatically configures artifact storage using S3-compatible MinIO:
| argo-workflows.artifactRepository.s3.secretKeySecret.name | Secret name for S3 secret key | "s3-service" |
| argo-workflows.artifactRepository.s3.secretKeySecret.key | Secret key for S3 secret key | "root-password" |
-### Workflow Controller Configuration
+#### Workflow Controller Configuration
| Name | Description | Value |
|:---------------------------------------------------------|:-----------------------------------|:----------------------------------------------------------------------|
@@ -616,7 +672,7 @@ The chart automatically configures artifact storage using S3-compatible MinIO:
| argo-workflows.controller.clusterWorkflowTemplates.enabled | Enable cluster-wide workflow templates | false |
| argo-workflows.controller.workflowDefaults.spec.serviceAccountName | Default service account for workflows | "argo-workflow" |
-### Server Configuration
+#### Server Configuration
| Name | Description | Value |
|:---------------------------------------------------------|:-----------------------------------|:----------------------------------------------------------------------|
@@ -629,7 +685,7 @@ The chart automatically configures artifact storage using S3-compatible MinIO:
| argo-workflows.server.extraArgs | Additional server arguments | ["--namespaced"] |
| argo-workflows.server.clusterWorkflowTemplates.enabled | Enable cluster templates in server | false |
-### RBAC and Security
+#### RBAC and Security
| Name | Description | Value |
|:---------------------------------------------------------|:-----------------------------------|:----------------------------------------------------------------------|
@@ -638,22 +694,8 @@ The chart automatically configures artifact storage using S3-compatible MinIO:
| argo-workflows.crds.install | Install CRDs (disabled if already present) | false |
| argo-workflows.crds.keep | Keep CRDs on uninstall | true |
-### MinIO Integration
-The chart includes integrated MinIO for artifact storage:
-
-| Name | Description | Value |
-|:---------------------------------------------------------|:-----------------------------------|:----------------------------------------------------------------------|
-| minio.enabled | Enable MinIO for artifact storage | true |
-| minio.auth.rootUser | MinIO root username | "minio-admin" |
-| minio.auth.rootPassword | MinIO root password | "minio-secret-password" |
-| minio.persistence.enabled | Enable MinIO persistence | true |
-| minio.persistence.storageClass | Storage class for MinIO | "standard" |
-| minio.persistence.size | MinIO storage size | "2Gi" |
-| minio.defaultBuckets | Default buckets to create | "eoepca results" |
-| minio.fullnameOverride | MinIO service name override | "s3-service" |
-
-### Access Configuration
+#### Access Configuration
**Access the Argo Workflows UI**:
```bash
@@ -668,7 +710,7 @@ kubectl port-forward -n zoo svc/s3-service 9001:9001
# Credentials: minio-admin / minio-secret-password
```
-### Deployment Examples
+#### Deployment Examples
**Basic deployment with official Argo Workflows**:
```bash
@@ -694,11 +736,11 @@ argo-workflows:
bucket: "production-artifacts"
```
-## Argo Events Integration
+### Argo Events Integration
The chart includes optional Argo Events integration for real-time workflow monitoring and event-driven automation. This provides reactive capabilities that automatically respond to workflow state changes.
-### Overview
+#### Overview
Argo Events complements Argo Workflows by providing:
- **Real-time workflow monitoring**: Automatically capture workflow state changes (Running, Succeeded, Failed)
@@ -706,7 +748,7 @@ Argo Events complements Argo Workflows by providing:
- **Metrics integration**: Real-time updates to Prometheus metrics and Grafana dashboards
- **Webhook notifications**: Send notifications to external systems when workflows complete
-### Core Configuration
+#### Core Configuration
| Name | Description | Value |
|:---------------------------------------------------------|:-----------------------------------|:----------------------------------------------------------------------|
@@ -715,7 +757,7 @@ Argo Events complements Argo Workflows by providing:
| argo.events.webhook.port | Port for the webhook service | 8080 |
| argo.events.webhook.path | Path for webhook endpoint | "/webhook" |
-### Argo Events Chart Configuration
+#### Argo Events Chart Configuration
The chart uses the official Argo Events Helm chart (v2.4.8) to provide event-driven capabilities:
@@ -730,7 +772,7 @@ The chart uses the official Argo Events Helm chart (v2.4.8) to provide event-dri
| argo-events.controller.resources.limits.cpu | Controller CPU limits | "500m" |
| argo-events.controller.resources.limits.memory | Controller memory limits | "256Mi" |
-### EventSource Configuration
+#### EventSource Configuration
EventSources define what events to listen for. The chart automatically creates an EventSource for workflow monitoring:
@@ -756,7 +798,7 @@ workflow:
- Workflow completion (status: Succeeded)
- Workflow failure (status: Failed, Error)
-### Sensor Configuration
+#### Sensor Configuration
Sensors define what actions to take when events are received. The chart includes a webhook sensor for notifications:
@@ -767,7 +809,7 @@ Sensors define what actions to take when events are received. The chart includes
| argo.events.sensor.webhook.method | HTTP method for webhook calls | "POST" |
| argo.events.sensor.webhook.headers | Additional HTTP headers | {"Content-Type": "application/json"} |
-### EventBus Configuration
+#### EventBus Configuration
The EventBus handles event routing between EventSources and Sensors:
@@ -776,7 +818,7 @@ The EventBus handles event routing between EventSources and Sensors:
| argo-events.eventBusConfig.jetstream.versions | JetStream versions for EventBus | ["latest"] |
| argo-events.global.image.tag | Argo Events image tag | "v1.9.1" |
-### Monitoring Integration
+#### Monitoring Integration
When Argo Events is enabled with monitoring, additional metrics and dashboards are available:
@@ -787,7 +829,7 @@ When Argo Events is enabled with monitoring, additional metrics and dashboards a
| argo-events.controller.serviceMonitor.enabled | Enable ServiceMonitor for Prometheus discovery | true |
| argo-events.controller.serviceMonitor.additionalLabels | Additional labels for ServiceMonitor selection | {"release": "zoo-project-dru"} |
-### Real-time Dashboard Updates
+#### Real-time Dashboard Updates
With Argo Events enabled, Grafana dashboards receive real-time updates:
@@ -795,7 +837,7 @@ With Argo Events enabled, Grafana dashboards receive real-time updates:
2. **Event statistics**: Track event processing rates and success/failure ratios
3. **Latency monitoring**: Monitor time between workflow completion and notification delivery
-### Webhook Integration
+#### Webhook Integration
The chart includes a webhook service that receives event notifications:
@@ -828,7 +870,7 @@ spec:
}
```
-### RBAC Configuration
+#### RBAC Configuration
Argo Events requires specific permissions to monitor workflows:
@@ -839,7 +881,7 @@ Argo Events requires specific permissions to monitor workflows:
| configmaps | get, list, create, patch | EventBus configuration |
| secrets | get, list | EventBus authentication |
-### Troubleshooting
+#### Troubleshooting
**Check EventSource status**:
```bash
@@ -882,19 +924,19 @@ curl -X POST http://localhost:8080/webhook \
-d '{"test": "event", "workflowName": "test-workflow"}'
```
-### Security Considerations
+#### Security Considerations
- **Network policies**: Consider restricting EventBus network access
- **Authentication**: Use secrets for external webhook authentication
- **RBAC**: Apply principle of least privilege for service accounts
- **TLS**: Enable TLS for external webhook endpoints
-## Monitoring
+### Monitoring
The chart includes comprehensive monitoring capabilities using the Prometheus stack (Prometheus, Grafana, Alertmanager, and node-exporter) with real-time Argo Workflows integration.
See [reference documentation](https://artifacthub.io/packages/helm/prometheus-community/kube-prometheus-stack) for more information.
-### Core Monitoring Configuration
+#### Core Monitoring Configuration
| Name | Description | Value |
|:---------------------------------------------------------|:-----------------------------------|:----------------------------------------------------------------------|
@@ -910,6 +952,122 @@ See [reference documentation](https://artifacthub.io/packages/helm/prometheus-co
| monitoring.kube-prometheus-stack.grafana.persistence.size | Grafana storage size | "5Gi" |
| monitoring.kube-prometheus-stack.alertmanager.enabled | Enable Alertmanager for notifications | true |
+#### Helm Secret Size Limitations and Monitoring Deployment Strategies
+
+⚠️ **Important**: The complete monitoring stack (kube-prometheus-stack) adds ~670KB to the Helm secret, which can cause deployments to exceed Kubernetes' 1MB secret size limit, resulting in installation failures.
+
+##### Problem Description
+
+When deploying with full monitoring enabled (especially with Argo profiles), you may encounter this error:
+```
+Error: INSTALLATION FAILED: create: failed to create: Secret "sh.helm.release.v1.zoo-project-dru.v1" is invalid: data: Too long: must have at most 1048576 bytes
+```
+
+This happens because the monitoring stack includes:
+- **Grafana Dashboards**: ~400KB of dashboard configurations
+- **Prometheus Rules**: ~200KB of alerting and recording rules
+- **CRDs and Templates**: ~70KB of additional Kubernetes resources
+
+##### Recommended Solutions
+
+**Solution 1: Separate Monitoring Deployment (Recommended)**
+
+Deploy monitoring as a separate Helm release to avoid secret size issues:
+
+```bash
+# 1. Deploy ZOO-Project without integrated monitoring
+skaffold dev -p argo # Uses optimized values_argo.yaml with monitoring.enabled=false
+
+# 2. Deploy monitoring stack separately
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm install monitoring prometheus-community/kube-prometheus-stack \
+ --namespace monitoring --create-namespace \
+ --set grafana.adminPassword=admin \
+ --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false
+
+# 3. Access services
+kubectl port-forward -n zoo svc/zoo-project-dru-service 8080:80 # ZOO-Project
+kubectl port-forward -n zoo svc/zoo-project-dru-argo-workflows-server 2746:2746 # Argo UI
+kubectl port-forward -n monitoring svc/monitoring-grafana 3000:80 # Grafana
+```
+
+**Solution 2: Minimal Integrated Monitoring**
+
+Use a lightweight monitoring configuration that stays under the 1MB limit:
+
+```bash
+# Create optimized values file
+cat > values_argo_with_monitoring.yaml << EOF
+# Include all content from values_argo.yaml, then override:
+monitoring:
+ enabled: true
+ kube-prometheus-stack:
+ # Minimal Prometheus configuration
+ prometheus:
+ enabled: true
+ prometheusSpec:
+ retention: "7d"
+ storageSpec: null # Disable persistent storage
+ # Lightweight Grafana without default dashboards
+ grafana:
+ enabled: true
+ adminPassword: admin
+ defaultDashboardsEnabled: false # Saves ~400KB
+ sidecar:
+ dashboards:
+ enabled: false
+ # Disable heavy components
+ alertmanager:
+ enabled: false # Saves ~50KB
+ kube-state-metrics:
+ enabled: false # Saves ~100KB
+ prometheus-node-exporter:
+ enabled: false # Saves ~30KB
+ defaultRules:
+ create: false # Saves ~200KB
+EOF
+
+# Deploy with minimal monitoring
+helm install zoo-project-dru ./zoo-project-dru \
+ --values values_argo_with_monitoring.yaml \
+ --namespace zoo --create-namespace
+```
+
+**Solution 3: Post-Deployment Dashboard Addition**
+
+Add Grafana dashboards after the initial deployment:
+
+```bash
+# 1. Deploy with minimal monitoring (Solution 2)
+# 2. Add custom dashboards via ConfigMaps
+kubectl create configmap argo-workflows-dashboard \
+ --from-file=dashboard.json=files/argo-workflows/grafana-dashboard.json \
+ --namespace zoo
+kubectl label configmap argo-workflows-dashboard grafana_dashboard=1
+
+# Grafana will automatically detect and load the dashboard
+```
+
+##### Size Optimization Summary
+
+| Configuration | Helm Secret Size | Monitoring Level | Recommended Use |
+|:-------------|:-----------------|:-----------------|:----------------|
+| Full monitoring | ~970KB+ | Complete | Separate deployment |
+| Minimal monitoring | ~300KB | Basic metrics only | Integrated deployment |
+| No monitoring | ~240KB | None | Development only |
+| Separate deployment | ~240KB + separate | Complete | **Production** |
+
+##### Troubleshooting Size Issues
+
+Check your deployment size before installing:
+```bash
+# Check template size
+helm template zoo-project-dru ./zoo-project-dru \
+ --values ./zoo-project-dru/values_argo.yaml | wc -c
+
+# If > 900,000 bytes, consider using separate monitoring deployment
+```
+
### Prometheus Node Exporter Configuration
The node-exporter component is configured for compatibility with Docker Desktop and other development environments:
@@ -1019,13 +1177,13 @@ customConfig.main.mySection: |-
All these sections will be added to the `sections_list` from the `servicesNamespace` section.
-## Advanced Usage
+### Advanced Usage
-### Working with KEDA and Worker Protection
+#### Working with KEDA and Worker Protection
When KEDA is enabled, the system provides intelligent protection for running jobs:
-#### Monitoring Worker Status
+##### Monitoring Worker Status
Check the status of pods and their associated workers:
@@ -1040,7 +1198,7 @@ kubectl get scaledobjects -n zoo
kubectl logs -n zoo deployment/zoo-project-dru-eviction-controller --tail=50
```
-#### Understanding Protection Annotations
+##### Understanding Protection Annotations
The system uses several annotations to manage pod protection:
@@ -1049,7 +1207,7 @@ The system uses several annotations to manage pod protection:
- `zoo-project.org/last-check`: Timestamp of last worker status check
- `zoo-project.org/emergency-delete`: Emergency override for forced deletion
-#### Emergency Pod Deletion
+##### Emergency Pod Deletion
If you need to force delete a protected pod:
@@ -1061,7 +1219,7 @@ kubectl annotate pod -n zoo zoo-project.org/emergency-delete=true
kubectl delete pod -n zoo
```
-#### Scaling Behavior
+##### Scaling Behavior
The system implements intelligent scaling:
@@ -1070,33 +1228,6 @@ The system implements intelligent scaling:
3. **Scale-down**: Only pods without active workers can be terminated
4. **Scale-to-zero**: When no workers are active, all pods can be terminated after grace period
-### Troubleshooting
-
-#### Common Issues
-
-**Pods not scaling to zero:**
-```bash
-# Check if pods have active workers
-kubectl describe pods -n zoo -l app.kubernetes.io/name=zoo-project-dru-zoofpm
-
-# Check ScaledObject configuration
-kubectl describe scaledobject -n zoo
-
-# Verify eviction controller is running
-kubectl get pods -n zoo -l app.kubernetes.io/component=eviction-controller
-```
-
-**Protection not working:**
-```bash
-# Check Kyverno policies (if enabled)
-kubectl get clusterpolicy
-
-# Test pod deletion (dry run)
-kubectl delete pod -n zoo --dry-run=server
-
-# Check eviction controller permissions
-kubectl auth can-i patch pods --as=system:serviceaccount:zoo:zoo-project-dru-eviction-controller -n zoo
-```
### Notification using Knative
@@ -1214,3 +1345,76 @@ Then, using the command below, you can get the pod name to access its log (using
````bash
kubectl get pods -n default
````
+
+## Troubleshooting
+
+### Common Migration Issues
+
+**Helm secret size limit exceeded**:
+```bash
+# Error: Secret "sh.helm.release.v1.zoo-project-dru.v1" is invalid:
+# data: Too long: must have at most 1048576 bytes
+
+# Common causes: Full monitoring stack (~670KB), Large dashboards, Complex configurations
+
+# Solution 1: Use optimized values with disabled optional components
+helm install zoo-project-dru ./zoo-project-dru \
+ --values ./zoo-project-dru/values_minikube.yaml \
+ --namespace zoo --create-namespace
+
+# Solution 2: Deploy monitoring separately (recommended for full features)
+# See "Helm Secret Size Limitations and Monitoring Deployment Strategies" section above
+```
+
+**PostgreSQL connection issues after migration**:
+```bash
+# Check if PostgreSQL is running with new image
+kubectl get pods -n zoo -l app.kubernetes.io/name=zoo-project-dru-postgresql
+
+# Check initialization logs
+kubectl logs -n zoo deployment/zoo-project-dru-postgresql --tail=50
+
+# Verify database creation
+kubectl exec -it -n zoo deployment/zoo-project-dru-postgresql -- psql -U zoo -d zoo -c "\dt"
+```
+
+**RabbitMQ setup issues**:
+```bash
+# Check auto-setup job completion
+kubectl get jobs -n zoo -l app.kubernetes.io/component=rabbitmq-setup
+
+# Check management plugin status
+kubectl port-forward -n zoo svc/zoo-project-dru-rabbitmq 15672:15672
+# Access: http://localhost:15672 (zoo/CHANGEME)
+
+# Verify queue creation
+kubectl logs -n zoo -l app.kubernetes.io/component=rabbitmq-setup
+```
+
+### KEDA-Specific Issues
+
+#### Common KEDA Issues
+
+**Pods not scaling to zero:**
+```bash
+# Check if pods have active workers
+kubectl describe pods -n zoo -l app.kubernetes.io/name=zoo-project-dru-zoofpm
+
+# Check ScaledObject configuration
+kubectl describe scaledobject -n zoo
+
+# Verify eviction controller is running
+kubectl get pods -n zoo -l app.kubernetes.io/component=eviction-controller
+```
+
+**Protection not working:**
+```bash
+# Check Kyverno policies (if enabled)
+kubectl get clusterpolicy
+
+# Test pod deletion (dry run)
+kubectl delete pod -n zoo --dry-run=server
+
+# Check eviction controller permissions
+kubectl auth can-i patch pods --as=system:serviceaccount:zoo:zoo-project-dru-eviction-controller -n zoo
+```
diff --git a/zoo-project-dru/files/argo-events/controller-config.yaml b/zoo-project-dru/files/argo-events/controller-config.yaml
new file mode 100644
index 0000000..a3429b5
--- /dev/null
+++ b/zoo-project-dru/files/argo-events/controller-config.yaml
@@ -0,0 +1,48 @@
+eventBus:
+ nats:
+ versions:
+ - version: 0.22.1
+ natsStreamingImage: nats-streaming:0.22.1
+ metricsExporterImage: natsio/prometheus-nats-exporter:0.8.0
+ jetstream:
+ # Default JetStream settings, could be overridden by EventBus JetStream specs
+ settings: |
+ # https://docs.nats.io/running-a-nats-service/configuration#jetstream
+ # Only configure "max_memory_store" or "max_file_store", do not set "store_dir" as it has been hardcoded.
+ # e.g. 1G. -1 means no limit, up to 75% of available memory
+ max_memory_store: -1
+ # e.g. 20G. -1 means no limit, Up to 1TB if available
+ max_file_store: 1TB
+ streamConfig: |
+ # The default properties of the streams to be created in this JetStream service
+ maxMsgs: 50000
+ maxAge: 168h
+ maxBytes: -1
+ replicas: 3
+ duplicates: 300s
+ versions:
+ - version: latest
+ natsImage: nats:2.9.16
+ metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1
+ configReloaderImage: natsio/nats-server-config-reloader:0.7.0
+ startCommand: /nats-server
+ - version: 2.8.1
+ natsImage: nats:2.8.1
+ metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1
+ configReloaderImage: natsio/nats-server-config-reloader:0.7.0
+ startCommand: /nats-server
+ - version: 2.8.1-alpine
+ natsImage: nats:2.8.1-alpine
+ metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1
+ configReloaderImage: natsio/nats-server-config-reloader:0.7.0
+ startCommand: nats-server
+ - version: 2.8.2
+ natsImage: nats:2.8.2
+ metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1
+ configReloaderImage: natsio/nats-server-config-reloader:0.7.0
+ startCommand: /nats-server
+logging:
+ level: info
+metrics:
+ enabled: true
+ port: 7777
diff --git a/zoo-project-dru/files/bin/publish.py b/zoo-project-dru/files/bin/publish.py
index 19f5a96..35a14bb 100755
--- a/zoo-project-dru/files/bin/publish.py
+++ b/zoo-project-dru/files/bin/publish.py
@@ -53,7 +53,7 @@
if "ZOO_REDIS_HOST" in os.environ:
r = redis.Redis(host=os.environ["ZOO_REDIS_HOST"], port=6379, db=0)
else:
- r = redis.Redis(host='{{ .Release.Name }}-redis-master', port=6379, db=0)
+ r = redis.Redis(host='{{ .Values.redis.serviceName }}', port=6379, db=0)
print(params,file=sys.stderr)
r.publish(params["jobid"][0],data)
print(data,file=sys.stderr)
diff --git a/zoo-project-dru/files/bin/subscriber.py b/zoo-project-dru/files/bin/subscriber.py
index 0c70a7b..d324f65 100755
--- a/zoo-project-dru/files/bin/subscriber.py
+++ b/zoo-project-dru/files/bin/subscriber.py
@@ -18,7 +18,7 @@
if "ZOO_REDIS_HOST" in os.environ:
r = redis.Redis(host=os.environ["ZOO_REDIS_HOST"], port=6379, db=0)
else:
- r = redis.Redis(host='{{ .Release.Name }}-redis-master', port=6379, db=0)
+ r = redis.Redis(host='{{ .Values.redis.serviceName }}', port=6379, db=0)
def send(t):
# send string to web page
diff --git a/zoo-project-dru/files/keda/postgresql-scaler-query.sql b/zoo-project-dru/files/keda/postgresql-scaler-query.sql
new file mode 100644
index 0000000..2e653b2
--- /dev/null
+++ b/zoo-project-dru/files/keda/postgresql-scaler-query.sql
@@ -0,0 +1,62 @@
+-- KEDA PostgreSQL Scaler Query
+-- Hybrid scaling logic: active workers AND pending services
+-- STRICT query - Absolute protection of pods by IP
+-- GUARANTEE: No pod with its IP in workers.host can be deleted
+
+WITH active_worker_ips AS (
+ -- IPs of pods that have active workers - ABSOLUTE PROTECTION
+ SELECT DISTINCT host as protected_ip
+ FROM workers
+ WHERE status = 1 AND host IS NOT NULL
+),
+protected_count AS (
+ -- MINIMUM number of pods to keep (those with workers)
+ SELECT COUNT(*) as must_keep_minimum
+ FROM active_worker_ips
+),
+workload_needs AS (
+ -- Calculation based on total workload
+ SELECT
+ SUM(CASE WHEN status = 1 THEN 1 ELSE 0 END) as active_workers,
+ -- Pods needed for workload (async_worker=10 per pod)
+ -- Scale-to-zero if no active workers
+ CASE
+ WHEN SUM(CASE WHEN status = 1 THEN 1 ELSE 0 END) > 0
+ THEN GREATEST(CEIL(SUM(CASE WHEN status = 1 THEN 1 ELSE 0 END)::decimal / 10), 1)
+ ELSE 0 -- Scale-to-zero if no workers
+ END as pods_for_workload
+ FROM workers
+ WHERE host IS NOT NULL
+),
+service_needs AS (
+ -- Pods needed for running services
+ SELECT
+ COUNT(*) as running_services,
+ -- Count service-driven pods only when at least one active worker exists AND services are running
+ CASE
+ WHEN EXISTS(SELECT 1 FROM workers WHERE status = 1 AND host IS NOT NULL)
+ AND COUNT(*) > 0
+ THEN GREATEST(CEIL(COUNT(*)::decimal / 10), 1)
+ ELSE 0 -- Scale-to-zero if no active workers
+ END as pods_for_services
+ FROM services
+ WHERE end_time IS NULL AND fstate NOT IN ('Succeeded', 'Failed')
+)
+SELECT
+ -- Hybrid logic: active workers AND pending services
+ GREATEST(
+ -- Calculation based on active workers (1 pod per 10 workers)
+ CASE
+ WHEN (SELECT COUNT(*) FROM workers WHERE status = 1 AND host IS NOT NULL) > 0
+ THEN CEIL((SELECT COUNT(*)::decimal FROM workers WHERE status = 1 AND host IS NOT NULL) / 10)
+ ELSE 0
+ END,
+ -- Calculation based on active services (1 pod per 5 services)
+ CASE
+ WHEN (SELECT COUNT(*) FROM services WHERE end_time IS NULL AND fstate NOT IN ('Succeeded', 'Failed')) > 0
+ THEN CEIL((SELECT COUNT(*)::decimal FROM services WHERE end_time IS NULL AND fstate NOT IN ('Succeeded', 'Failed')) / 5)
+ ELSE 0
+ END,
+ -- Scale-to-zero if no activity
+ 0
+ )
\ No newline at end of file
diff --git a/zoo-project-dru/files/postgres/zoo_collectiondb.sql b/zoo-project-dru/files/postgres/zoo_collectiondb.sql
index 7914d37..f2c72cc 100644
--- a/zoo-project-dru/files/postgres/zoo_collectiondb.sql
+++ b/zoo-project-dru/files/postgres/zoo_collectiondb.sql
@@ -196,7 +196,7 @@ INSERT INTO CollectionDB.PrimitiveDataFormats (short_name,cwl_type,namespace,sch
INSERT INTO CollectionDB.PrimitiveDataFormats (short_name,cwl_type,namespace,schemaUrl) VALUES ('geojson-geometry','MultiPolygon','geojson.yaml','https://geojson.org/schema/Geometry.json');
INSERT INTO CollectionDB.PrimitiveDataFormats (short_name,cwl_type,namespace,schemaUrl) VALUES ('geojson-geometry','GeometryCollection','geojson.yaml','https://geojson.org/schema/GeometryCollection.json');
-- It is not clear if the following short_name should be bound to a cwl_type
-INSERT INTO CollectionDB.PrimitiveDataFormats (short_name,cwl_type,namespace) VALUES ('epsg-code',NULL);
+INSERT INTO CollectionDB.PrimitiveDataFormats (short_name,cwl_type,namespace) VALUES ('epsg-code',NULL,NULL);
INSERT INTO CollectionDB.PrimitiveDataFormats (short_name,cwl_type,namespace) VALUES ('wkt2-def',NULL,NULL);
INSERT INTO CollectionDB.PrimitiveDataFormats (short_name,cwl_type,namespace) VALUES ('cql2-text',NULL,NULL);
INSERT INTO CollectionDB.PrimitiveDataFormats (short_name,cwl_type,namespace) VALUES ('cql2-json',NULL,NULL);
diff --git a/zoo-project-dru/files/rabbitmq/definitions.json.tpl b/zoo-project-dru/files/rabbitmq/definitions.json.tpl
new file mode 100644
index 0000000..d9ff770
--- /dev/null
+++ b/zoo-project-dru/files/rabbitmq/definitions.json.tpl
@@ -0,0 +1,100 @@
+{
+ "rabbit_version": "3.8.9",
+ "rabbitmq_version": "3.8.9",
+ "product_name": "RabbitMQ",
+ "product_version": "3.8.9",
+ "users": [
+ {
+ "name": "{{ .Values.rabbitmq.auth.username }}",
+ "password": "{{ .Values.rabbitmq.auth.password }}",
+ "tags": "administrator"
+ }
+ ],
+ "vhosts": [
+ {
+ "name": "{{ .Values.rabbitmq.auth.vhost }}"
+ }
+ ],
+ "permissions": [
+ {
+ "user": "{{ .Values.rabbitmq.auth.username }}",
+ "vhost": "{{ .Values.rabbitmq.auth.vhost }}",
+ "configure": ".*",
+ "write": ".*",
+ "read": ".*"
+ }
+ ],
+ "topic_permissions": [],
+ "parameters": [],
+ "global_parameters": [
+ {
+ "name": "cluster_name",
+ "value": "rabbit@{{ include "zoo-project-dru.fullname" . }}-rabbitmq"
+ },
+ {
+ "name": "internal_cluster_id",
+ "value": "rabbitmq-cluster-id-{{ randAlphaNum 32 }}"
+ }
+ ],
+ "policies": [],
+ "queues": [
+ {
+ "name": "zoo_service_queue",
+ "vhost": "{{ .Values.rabbitmq.auth.vhost }}",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+ "x-queue-type": "classic"
+ }
+ },
+ {
+ "name": "unroutable_messages_queue",
+ "vhost": "{{ .Values.rabbitmq.auth.vhost }}",
+ "durable": true,
+ "auto_delete": false,
+ "arguments": {
+ "x-queue-type": "classic"
+ }
+ }
+ ],
+ "exchanges": [
+ {
+ "name": "main_exchange",
+ "vhost": "{{ .Values.rabbitmq.auth.vhost }}",
+ "type": "direct",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {
+ "alternate-exchange": "unroutable_exchange"
+ }
+ },
+ {
+ "name": "unroutable_exchange",
+ "vhost": "{{ .Values.rabbitmq.auth.vhost }}",
+ "type": "fanout",
+ "durable": true,
+ "auto_delete": false,
+ "internal": false,
+ "arguments": {}
+ }
+ ],
+ "bindings": [
+ {
+ "source": "main_exchange",
+ "vhost": "{{ .Values.rabbitmq.auth.vhost }}",
+ "destination": "zoo_service_queue",
+ "destination_type": "queue",
+ "routing_key": "zoo",
+ "arguments": {}
+ },
+ {
+ "source": "unroutable_exchange",
+ "vhost": "{{ .Values.rabbitmq.auth.vhost }}",
+ "destination": "unroutable_messages_queue",
+ "destination_type": "queue",
+ "routing_key": "",
+ "arguments": {}
+ }
+ ]
+}
diff --git a/zoo-project-dru/files/rabbitmq/rabbitmq.conf b/zoo-project-dru/files/rabbitmq/rabbitmq.conf
new file mode 100644
index 0000000..7d9ae92
--- /dev/null
+++ b/zoo-project-dru/files/rabbitmq/rabbitmq.conf
@@ -0,0 +1,34 @@
+# RabbitMQ Configuration for ZOO-Project-DRU
+# This file contains the default RabbitMQ configuration for the ZOO-Project deployment
+#
+# To override this configuration, you can set the 'rabbitmq.config' value in values.yaml
+# If 'rabbitmq.config' is set, it will take precedence over this file
+
+## Management plugin - enables the web UI and HTTP API
+management.tcp.port = 15672
+
+## Set a conservative disk free limit for development environments
+# In production, consider setting a more appropriate value like 2GB or 5GB
+disk_free_limit.absolute = 1GB
+
+## Enable console logging for better debugging
+log.console = true
+log.console.level = info
+
+## Default user configuration for ZOO-Project
+# These credentials must match the values in values.yaml under rabbitmq.auth
+default_user = zoo
+default_pass = CHANGEME
+default_vhost = /
+default_user_tags.administrator = true
+
+## Default permissions for the zoo user
+default_permissions.configure = .*
+default_permissions.read = .*
+default_permissions.write = .*
+
+## Additional configuration for production (commented out for development)
+# heartbeat = 60
+# frame_max = 131072
+# channel_max = 2047
+# collect_statistics_interval = 5000
\ No newline at end of file
diff --git a/zoo-project-dru/files/zoo-project/default.conf b/zoo-project-dru/files/zoo-project/default.conf
index afd7d65..6566477 100644
--- a/zoo-project-dru/files/zoo-project/default.conf
+++ b/zoo-project-dru/files/zoo-project/default.conf
@@ -26,7 +26,7 @@
SetEnv MAPSERVER_CONFIG_FILE /mapserver/etc/mapserver.conf
SetEnv ap_trust_cgilike_cl 1
PassEnv ZOO_REDIS_HOST
- {{- if or (.Values.postgresql.defineEnvironmentVariables) (.Values.global.postgresql.auth.existingSecret) }}
+ {{- if or (.Values.postgresql.enabled) (.Values.global.postgresql.auth.existingSecret) }}
# Configuration for the PostgreSQL database connection
PassEnv PGUSER
PassEnv PGPASSWORD
diff --git a/zoo-project-dru/files/zoo-project/htaccess b/zoo-project-dru/files/zoo-project/htaccess
index f2ec79f..967fa8b 100644
--- a/zoo-project-dru/files/zoo-project/htaccess
+++ b/zoo-project-dru/files/zoo-project/htaccess
@@ -1,6 +1,6 @@
{{- $hosturl := include "zoo-project-dru.localhosturl" . -}}
RewriteEngine On
-{{- if or (.Values.global.postgresql.auth.existingSecret) (.Values.postgresql.defineEnvironmentVariables) }}
+{{- if or (.Values.global.postgresql.auth.existingSecret) (.Values.postgresql.enabled) }}
PassEnv PGUSER
PassEnv PGPASSWORD
PassEnv PGHOST
diff --git a/zoo-project-dru/files/zoo-project/main.cfg b/zoo-project-dru/files/zoo-project/main.cfg
index 681c166..e1d1fd0 100644
--- a/zoo-project-dru/files/zoo-project/main.cfg
+++ b/zoo-project-dru/files/zoo-project/main.cfg
@@ -59,7 +59,7 @@ dbname=zoo
port=5432
user=zoo
password=zoo
-host={{ .Release.Name }}-postgresql
+host={{ include "zoo-project-dru.postgresql.servicename" . }}
type=PG
schema=public
@@ -68,7 +68,7 @@ dbname=zoo
port=5432
user=zoo
password=zoo
-host={{ .Release.Name }}-postgresql
+host={{ include "zoo-project-dru.postgresql.servicename" . }}
type=PG
schema=public
@@ -135,8 +135,8 @@ Access-Control-Allow-Credentials=true
[rabbitmq]
host={{ .Release.Name }}-rabbitmq
port=5672
-user=guest
-passwd=guest
+user={{ .Values.rabbitmq.auth.username }}
+passwd={{ .Values.rabbitmq.auth.password }}
exchange=main_exchange
routingkey=zoo
queue=zoo_service_queue
diff --git a/zoo-project-dru/templates/NOTES.txt b/zoo-project-dru/templates/NOTES.txt
index 7b04c95..46307cc 100644
--- a/zoo-project-dru/templates/NOTES.txt
+++ b/zoo-project-dru/templates/NOTES.txt
@@ -56,12 +56,12 @@
📦 MinIO Object Storage:
kubectl port-forward -n {{ .Release.Namespace }} svc/s3-service 9001:9001
Open: http://localhost:9001
- Username: {{ .Values.minio.auth.rootUser | default "minio-admin" }}
+ Username: {{ .Values.minio.rootUser | default "minioadmin" }}
Password: (check secret s3-service)
# Access MinIO credentials
- kubectl get secret s3-service -n {{ .Release.Namespace }} -o jsonpath='{.data.root-user}' | base64 --decode
- kubectl get secret s3-service -n {{ .Release.Namespace }} -o jsonpath='{.data.root-password}' | base64 --decode
+ kubectl get secret s3-service -n {{ .Release.Namespace }} -o jsonpath='{.data.rootUser}' | base64 --decode
+ kubectl get secret s3-service -n {{ .Release.Namespace }} -o jsonpath='{.data.rootPassword}' | base64 --decode
{{- end }}
diff --git a/zoo-project-dru/templates/_argo-token-init.tpl b/zoo-project-dru/templates/_argo-token-init.tpl
index 21a6c87..4c21a57 100644
--- a/zoo-project-dru/templates/_argo-token-init.tpl
+++ b/zoo-project-dru/templates/_argo-token-init.tpl
@@ -4,7 +4,7 @@
initContainers:
- name: argo-token-retriever
- image: bitnami/kubectl:latest
+ image: alpine/k8s:1.31.13
command:
- /bin/bash
- -c
diff --git a/zoo-project-dru/templates/_helpers.tpl b/zoo-project-dru/templates/_helpers.tpl
index ea7c376..66979bf 100644
--- a/zoo-project-dru/templates/_helpers.tpl
+++ b/zoo-project-dru/templates/_helpers.tpl
@@ -1,4 +1,4 @@
-x\{{/*
+{{/*
Expand the name of the chart.
*/}}
{{- define "zoo-project-dru.name" -}}
@@ -6,7 +6,20 @@ Expand the name of the chart.
{{- end }}
{{/*
-Create a default fully qualified app name.
+Redis service name
+*/}}
+{{- define "zoo-project-dru.redis.servicename" -}}
+{{- include "zoo-project-dru.fullname" . }}-redis-service
+{{- end }}
+
+{{/*
+RabbitMQ Service name
+*/}}
+{{- define "zoo-project-dru.rabbitmq.serviceName" -}}
+{{- include "zoo-project-dru.fullname" . }}-rabbitmq
+{{- end }}
+
+{{/* Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
@@ -121,12 +134,67 @@ Argo Workflows MinIO endpoint helper
Argo Workflows MinIO access key helper
*/}}
{{- define "zoo-project-dru.argo.minio.accessKey" -}}
-{{- .Values.minio.auth.rootUser | default "minio-admin" }}
+{{- .Values.minio.rootUser | default "minio-admin" }}
{{- end }}
{{/*
Argo Workflows MinIO secret key helper
*/}}
{{- define "zoo-project-dru.argo.minio.secretKey" -}}
-{{- .Values.minio.auth.rootPassword | default "minio-secret-password" }}
+{{- .Values.minio.rootPassword | default "minio-secret-password" }}
+{{- end }}
+
+{{/*
+RabbitMQ readiness init container
+This template creates an init container that waits for RabbitMQ to be ready
+with management API and definitions loaded.
+*/}}
+{{- define "zoo-project-dru.rabbitmq.initContainer" -}}
+- name: init-wait-for-dependencies-{{ .componentName }}
+ image: curlimages/curl:latest
+ imagePullPolicy: IfNotPresent
+ command: [ "/bin/sh" ]
+ args:
+ - -c
+ - |
+ set -e
+ echo "Waiting for RabbitMQ to be ready with management API and definitions loaded..."
+
+ while true; do
+ # Check if RabbitMQ management API is accessible
+ if curl -f -u {{ .Values.rabbitmq.auth.username }}:{{ .Values.rabbitmq.auth.password }} \
+ http://{{ include "zoo-project-dru.rabbitmq.serviceName" . }}:15672/api/overview >/dev/null 2>&1; then
+
+ # Check if both zoo_service_queue and unroutable_messages_queue exist
+ if curl -f -u {{ .Values.rabbitmq.auth.username }}:{{ .Values.rabbitmq.auth.password }} \
+ http://{{ include "zoo-project-dru.rabbitmq.serviceName" . }}:15672/api/queues/%2F/zoo_service_queue >/dev/null 2>&1 && \
+ curl -f -u {{ .Values.rabbitmq.auth.username }}:{{ .Values.rabbitmq.auth.password }} \
+ http://{{ include "zoo-project-dru.rabbitmq.serviceName" . }}:15672/api/queues/%2F/unroutable_messages_queue >/dev/null 2>&1; then
+ echo "RabbitMQ is fully ready!"
+ break
+ else
+ echo "RabbitMQ is up but zoo_service_queue not created yet..."
+ fi
+ else
+ echo "Waiting for RabbitMQ management API..."
+ fi
+ sleep 5
+ done
+ env:
+ - name: ZOO_RABBITMQ_HOST
+ value: {{ include "zoo-project-dru.rabbitmq.serviceName" . }}
+{{- end }}
+
+{{/*
+PostgreSQL service name
+*/}}
+{{- define "zoo-project-dru.postgresql.servicename" -}}
+{{- include "zoo-project-dru.fullname" . }}-postgresql-service
+{{- end }}
+
+{{/*
+KEDA PostgreSQL query ConfigMap name
+*/}}
+{{- define "zoo-project-dru.keda.postgresql.configmap" -}}
+{{- include "zoo-project-dru.fullname" . }}-keda-postgresql-query
{{- end }}
diff --git a/zoo-project-dru/templates/argo-events-config.yaml b/zoo-project-dru/templates/argo-events-config.yaml
index 4266e42..de71f4c 100644
--- a/zoo-project-dru/templates/argo-events-config.yaml
+++ b/zoo-project-dru/templates/argo-events-config.yaml
@@ -8,52 +8,5 @@ metadata:
app.kubernetes.io/component: argo-events
data:
controller-config.yaml: |
- eventBus:
- nats:
- versions:
- - version: 0.22.1
- natsStreamingImage: nats-streaming:0.22.1
- metricsExporterImage: natsio/prometheus-nats-exporter:0.8.0
- jetstream:
- # Default JetStream settings, could be overridden by EventBus JetStream specs
- settings: |
- # https://docs.nats.io/running-a-nats-service/configuration#jetstream
- # Only configure "max_memory_store" or "max_file_store", do not set "store_dir" as it has been hardcoded.
- # e.g. 1G. -1 means no limit, up to 75% of available memory
- max_memory_store: -1
- # e.g. 20G. -1 means no limit, Up to 1TB if available
- max_file_store: 1TB
- streamConfig: |
- # The default properties of the streams to be created in this JetStream service
- maxMsgs: 50000
- maxAge: 168h
- maxBytes: -1
- replicas: 3
- duplicates: 300s
- versions:
- - version: latest
- natsImage: nats:2.9.16
- metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1
- configReloaderImage: natsio/nats-server-config-reloader:0.7.0
- startCommand: /nats-server
- - version: 2.8.1
- natsImage: nats:2.8.1
- metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1
- configReloaderImage: natsio/nats-server-config-reloader:0.7.0
- startCommand: /nats-server
- - version: 2.8.1-alpine
- natsImage: nats:2.8.1-alpine
- metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1
- configReloaderImage: natsio/nats-server-config-reloader:0.7.0
- startCommand: nats-server
- - version: 2.8.2
- natsImage: nats:2.8.2
- metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1
- configReloaderImage: natsio/nats-server-config-reloader:0.7.0
- startCommand: /nats-server
- logging:
- level: info
- metrics:
- enabled: true
- port: 7777
+{{ .Files.Get "files/argo-events/controller-config.yaml" | indent 4 }}
{{ end }}
diff --git a/zoo-project-dru/templates/argo-post-install.yaml b/zoo-project-dru/templates/argo-post-install.yaml
index df2dedf..f92b5be 100644
--- a/zoo-project-dru/templates/argo-post-install.yaml
+++ b/zoo-project-dru/templates/argo-post-install.yaml
@@ -23,7 +23,7 @@ spec:
serviceAccountName: {{ .Values.argo.serviceAccount.name }}
containers:
- name: post-install
- image: bitnami/kubectl:1.28
+ image: alpine/k8s:1.31.13
command:
- /bin/bash
- -c
diff --git a/zoo-project-dru/templates/cm-cwlwrapper-assets.yaml b/zoo-project-dru/templates/cm-cwlwrapper-assets.yaml
index dd990db..13a1596 100644
--- a/zoo-project-dru/templates/cm-cwlwrapper-assets.yaml
+++ b/zoo-project-dru/templates/cm-cwlwrapper-assets.yaml
@@ -1,3 +1,4 @@
+{{ if not .Values.argo.enabled}}
apiVersion: v1
kind: ConfigMap
metadata:
@@ -12,3 +13,4 @@ data:
{{ $filename }}: |-
{{- $fileOverride | default ($files.Get $path) | nindent 4 }}
{{- end }}
+{{- end }}
diff --git a/zoo-project-dru/templates/cm-openapi-config.yaml b/zoo-project-dru/templates/cm-openapi-config.yaml
index b80b404..f140ea3 100644
--- a/zoo-project-dru/templates/cm-openapi-config.yaml
+++ b/zoo-project-dru/templates/cm-openapi-config.yaml
@@ -1,6 +1,8 @@
+{{- if .Values.zoo.openapi.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-openapi-config
data:
{{- (.Files.Glob "files/openapi-config/*").AsConfig | nindent 2 }}
+{{- end }}
diff --git a/zoo-project-dru/templates/dp-websocketd.yaml b/zoo-project-dru/templates/dp-websocketd.yaml
index 1ae95d6..c6fc598 100644
--- a/zoo-project-dru/templates/dp-websocketd.yaml
+++ b/zoo-project-dru/templates/dp-websocketd.yaml
@@ -38,10 +38,10 @@ spec:
image: docker.io/wshihadeh/wait_for:latest
imagePullPolicy: IfNotPresent
command: [ "/docker-entrypoint.sh" ]
- args: [ "wait_for", "redis:{{ .Release.Name }}-redis-master"]
+ args: [ "wait_for", "redis:{{ include "zoo-project-dru.redis.servicename" . }}"]
env:
- - name: ZOO_REDI_HOST
- value: {{ .Release.Name }}-redis-master
+ - name: ZOO_REDIS_HOST
+ value: {{ include "zoo-project-dru.redis.servicename" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
@@ -52,7 +52,7 @@ spec:
imagePullPolicy: {{ .Values.websocketd.pullPolicy }}
env:
- name: ZOO_REDIS_HOST
- value: {{ .Release.Name }}-redis-master
+ value: {{ include "zoo-project-dru.redis.servicename" . }}
ports:
- name: ws
containerPort: 8888
diff --git a/zoo-project-dru/templates/dp-zoofpm.yaml b/zoo-project-dru/templates/dp-zoofpm.yaml
index 3406757..e977790 100644
--- a/zoo-project-dru/templates/dp-zoofpm.yaml
+++ b/zoo-project-dru/templates/dp-zoofpm.yaml
@@ -36,7 +36,9 @@ spec:
checksum/cm-cwlwrapper-assets: {{ include (print .Template.BasePath "/cm-cwlwrapper-assets.yaml") . | sha256sum }}
checksum/cm-zoo-deploy-service-config: {{ include (print .Template.BasePath "/cm-zoo-deploy-service-config.yaml") . | sha256sum }}
checksum/cm-zoo-project-config: {{ include (print .Template.BasePath "/cm-zoo-project-config.yaml") . | sha256sum }}
+ {{- if .Values.zoo.openapi.enabled }}
checksum/cm-openapi-config: {{ include (print .Template.BasePath "/cm-openapi-config.yaml") . | sha256sum }}
+ {{- end }}
labels:
app.kubernetes.io/name: {{ include "zoo-project-dru.name" . }}-zoofpm
app.kubernetes.io/instance: {{ .Release.Name }}-zoofpm
@@ -50,14 +52,7 @@ spec:
{{- end }}
serviceAccountName: {{ .Release.Name }}-processing-manager
initContainers:
- - name: init-wait-for-dependencies-zoofpm
- image: docker.io/wshihadeh/wait_for:latest
- imagePullPolicy: IfNotPresent
- command: [ "/docker-entrypoint.sh" ]
- args: [ "wait_for", "rabbitmq:{{ .Release.Name }}-rabbitmq"]
- env:
- - name: ZOO_RABBITMQ_HOST
- value: {{ .Release.Name }}-rabbitmq
+ {{- include "zoo-project-dru.rabbitmq.initContainer" (dict "Values" .Values "Release" .Release "Chart" .Chart "componentName" "zoofpm") | nindent 8 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
@@ -73,7 +68,7 @@ spec:
- name: ORIGIN_NAMESPACE
value: {{ .Release.Namespace }}
- name: ZOO_RABBITMQ_HOST
- value: {{ .Release.Name }}-rabbitmq
+ value: {{ include "zoo-project-dru.rabbitmq.serviceName" . }}
- name: STORAGE_CLASS
value: {{ .Values.workflow.storageClass }}
- name: DEFAULT_VOLUME_SIZE
@@ -180,16 +175,20 @@ spec:
key: database
optional: true
{{- else }}
- {{- if .Values.postgresql.defineEnvironmentVariables }}
+ {{- if .Values.postgresql.enabled }}
- name: PGHOST
- value: "{{ .Release.Name}}-postgresql-hl"
+ value: "{{ include "zoo-project-dru.postgresql.servicename" . }}"
- name: PGDATABASE
value: {{ .Values.global.postgresql.auth.database | quote}}
- name: PGPASSWORD
+ {{- if .Values.postgresql.useSecret }}
valueFrom:
secretKeyRef:
- name: {{ .Release.Name }}-postgresql
+ name: {{ .Values.postgresql.secretName | default (printf "%s-postgresql-secret" (include "zoo-project-dru.fullname" .)) }}
key: password
+ {{- else }}
+ value: {{ .Values.global.postgresql.auth.password | quote}}
+ {{- end }}
- name: PGUSER
value: {{ .Values.global.postgresql.auth.username | quote}}
- name: PGPORT
@@ -231,6 +230,7 @@ spec:
mountPath: {{ .Values.persistence.tmpPath }}/cookiecutter_config.yaml
subPath: cookiecutter_config.yaml
+ {{- if not .Values.argo.enabled }}
- name: cwlwrapper-config
mountPath: /assets/main.yaml
subPath: main.yaml
@@ -250,6 +250,7 @@ spec:
- name: cwlwrapper-config
mountPath: /assets/stagein-file.yaml
subPath: stagein-file.yaml
+ {{- end }}
- name: ades-processing-services
mountPath: {{ .Values.persistence.servicesNamespacePath }}
@@ -268,9 +269,11 @@ spec:
- name: zoo-kernel-config
configMap:
name: {{ .Release.Name }}-zoo-kernel-config
+ {{- if .Values.zoo.openapi.enabled }}
- name: openapi-config
configMap:
name: {{ .Release.Name }}-openapi-config
+ {{- end }}
- name: zoo-deploy-service-config
configMap:
name: {{ .Release.Name }}-zoo-deploy-service-config
diff --git a/zoo-project-dru/templates/dp-zookernel.yaml b/zoo-project-dru/templates/dp-zookernel.yaml
index 59b0414..8931573 100644
--- a/zoo-project-dru/templates/dp-zookernel.yaml
+++ b/zoo-project-dru/templates/dp-zookernel.yaml
@@ -20,7 +20,9 @@ spec:
checksum/cm-cwlwrapper-assets: {{ include (print .Template.BasePath "/cm-cwlwrapper-assets.yaml") . | sha256sum }}
checksum/cm-zoo-deploy-service-config: {{ include (print .Template.BasePath "/cm-zoo-deploy-service-config.yaml") . | sha256sum }}
checksum/cm-zoo-project-config: {{ include (print .Template.BasePath "/cm-zoo-project-config.yaml") . | sha256sum }}
+ {{- if .Values.zoo.openapi.enabled }}
checksum/cm-openapi-config: {{ include (print .Template.BasePath "/cm-openapi-config.yaml") . | sha256sum }}
+ {{- end }}
labels:
{{- include "zoo-project-dru.selectorLabels" . | nindent 8 }}
spec:
@@ -29,14 +31,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
initContainers:
- - name: init-wait-for-dependencies-zookernel
- image: docker.io/wshihadeh/wait_for:latest
- imagePullPolicy: IfNotPresent
- command: [ "/docker-entrypoint.sh" ]
- args: [ "wait_for", "rabbitmq:{{ .Release.Name }}-rabbitmq" ]
- env:
- - name: ZOO_RABBITMQ_HOST
- value: {{ .Release.Name }}-rabbitmq
+ {{- include "zoo-project-dru.rabbitmq.initContainer" (dict "Values" .Values "Release" .Release "Chart" .Chart "componentName" "zookernel") | nindent 8 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
@@ -47,7 +42,7 @@ spec:
args: [ "-D", "FOREGROUND" ]
env:
- name: ZOO_REDIS_HOST
- value: {{ .Release.Name }}-redis-master
+ value: {{ include "zoo-project-dru.redis.servicename" . }}
{{- if (.Values.global.postgresql.auth.existingSecret) }}
- name: PGPASSWORD
@@ -81,16 +76,20 @@ spec:
key: database
optional: true
{{- else }}
- {{- if .Values.postgresql.defineEnvironmentVariables }}
+ {{- if .Values.postgresql.enabled }}
- name: PGHOST
- value: "{{ .Release.Name}}-postgresql-hl"
+ value: "{{ include "zoo-project-dru.postgresql.servicename" . }}"
- name: PGDATABASE
value: {{ .Values.global.postgresql.auth.database | quote}}
- name: PGPASSWORD
+ {{- if .Values.postgresql.useSecret }}
valueFrom:
secretKeyRef:
- name: {{ .Release.Name }}-postgresql
+ name: {{ .Values.postgresql.secretName | default (printf "%s-postgresql-secret" (include "zoo-project-dru.fullname" .)) }}
key: password
+ {{- else }}
+ value: {{ .Values.global.postgresql.auth.password | quote}}
+ {{- end }}
- name: PGUSER
value: {{ .Values.global.postgresql.auth.username | quote}}
- name: PGPORT
@@ -158,6 +157,7 @@ spec:
- name: zoo-example-config
mountPath: /var/www/html/examples/water-bodies/job-order1.json
subPath: job-order1.json
+ {{- if .Values.zoo.openapi.enabled }}
- name: openapi-config
mountPath: /var/www/header.md
subPath: header.md
@@ -182,6 +182,7 @@ spec:
- name: openapi-config
mountPath: /var/www/job-results.md
subPath: job-results.md
+ {{- end }}
- name: zoo-deploy-service-config
mountPath: {{ .Values.persistence.tmpPath }}/cookiecutter_config.yaml
subPath: cookiecutter_config.yaml
@@ -206,9 +207,11 @@ spec:
- name: zoo-example-config
configMap:
name: {{ .Release.Name }}-zoo-example-config
+ {{- if .Values.zoo.openapi.enabled }}
- name: openapi-config
configMap:
name: {{ .Release.Name }}-openapi-config
+ {{- end }}
- name: zoo-deploy-service-config
configMap:
name: {{ .Release.Name }}-zoo-deploy-service-config
diff --git a/zoo-project-dru/templates/eviction-controller-deployment.yaml b/zoo-project-dru/templates/eviction-controller-deployment.yaml
index 78d9154..3f3ade5 100644
--- a/zoo-project-dru/templates/eviction-controller-deployment.yaml
+++ b/zoo-project-dru/templates/eviction-controller-deployment.yaml
@@ -34,22 +34,74 @@ spec:
imagePullPolicy: {{ .Values.keda.evictionController.image.pullPolicy | default "IfNotPresent" }}
env:
- name: NAMESPACE
- value: {{ .Release.Namespace }}
+ value: "{{ .Release.Namespace }}"
- name: SCALEDOBJECT_NAME
- value: {{ include "zoo-project-dru.fullname" . }}-{{ .Values.keda.scaleTargetRef.deployment }}-scaler
+ value: "{{ include "zoo-project-dru.fullname" . }}-{{ .Values.keda.scaleTargetRef.deployment }}-scaler"
+
+ {{- if (.Values.global.postgresql.auth.existingSecret) }}
+ - name: PG_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.global.postgresql.auth.existingSecret }}
+ key: password
+ optional: true
+ - name: PG_USER
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.global.postgresql.auth.existingSecret }}
+ key: username
+ optional: true
- name: PG_HOST
- value: {{ .Release.Name }}-postgresql
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.global.postgresql.auth.existingSecret }}
+ key: host
+ optional: true
- name: PG_PORT
- value: "5432"
- - name: PG_USER
- value: {{ .Values.global.postgresql.auth.username | default "zoo" }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.global.postgresql.auth.existingSecret }}
+ key: port
+ optional: true
+ - name: PG_DATABASE
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.global.postgresql.auth.existingSecret }}
+ key: database
+ optional: true
+ {{- else }}
+ {{- if .Values.postgresql.enabled }}
+ - name: PG_HOST
+ value: "{{ include "zoo-project-dru.postgresql.servicename" . }}"
+ - name: PG_DATABASE
+ value: {{ .Values.global.postgresql.auth.database | quote}}
- name: PG_PASSWORD
+ {{- if .Values.postgresql.useSecret }}
valueFrom:
secretKeyRef:
- name: {{ .Values.global.postgresql.auth.existingSecret | default (print .Release.Name "-postgresql") }}
+ name: {{ .Values.postgresql.secretName | default (printf "%s-postgresql-secret" (include "zoo-project-dru.fullname" .)) }}
key: password
+ {{- else }}
+ value: {{ .Values.global.postgresql.auth.password | quote}}
+ {{- end }}
+ - name: PG_USER
+ value: {{ .Values.global.postgresql.auth.username | quote}}
+ - name: PG_PORT
+ value: {{ .Values.global.postgresql.service.ports.postgresql | quote}}
+ {{- else }}
+ # External PostgreSQL (not managed by chart)
+ - name: PG_HOST
+ value: "{{ include "zoo-project-dru.fullname" . }}-postgresql-service"
- name: PG_DATABASE
- value: {{ .Values.global.postgresql.auth.database | default "zoo" }}
+ value: {{ .Values.global.postgresql.auth.database | quote}}
+ - name: PG_PASSWORD
+ value: {{ .Values.global.postgresql.auth.password | quote}}
+ - name: PG_USER
+ value: {{ .Values.global.postgresql.auth.username | quote}}
+ - name: PG_PORT
+ value: "5432"
+ {{- end }}
+ {{- end }}
ports:
- containerPort: 8080
name: metrics
diff --git a/zoo-project-dru/templates/keda-postgresql-query-configmap.yaml b/zoo-project-dru/templates/keda-postgresql-query-configmap.yaml
new file mode 100644
index 0000000..15a491f
--- /dev/null
+++ b/zoo-project-dru/templates/keda-postgresql-query-configmap.yaml
@@ -0,0 +1,14 @@
+{{- if and .Values.keda.enabled .Values.keda.triggers.postgresql.enabled .Values.keda.triggers.postgresql.useConfigMap }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "zoo-project-dru.keda.postgresql.configmap" . }}
+ labels:
+ {{- include "zoo-project-dru.labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-weight": "1" # Create ConfigMap before KEDA resources
+data:
+ query.sql: |
+{{ .Files.Get "files/keda/postgresql-scaler-query.sql" | indent 4 }}
+{{- end }}
diff --git a/zoo-project-dru/templates/keda-scaledobject.yaml b/zoo-project-dru/templates/keda-scaledobject.yaml
index 7c76212..a99854c 100644
--- a/zoo-project-dru/templates/keda-scaledobject.yaml
+++ b/zoo-project-dru/templates/keda-scaledobject.yaml
@@ -1,4 +1,4 @@
-{{- if and .Values.keda.enabled .Values.keda.scaleTargetRef }}
+{{- if and .Values.keda.enabled .Values.keda.scaleTargetRef (not .Values.keda.skipScaledObject) }}
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
@@ -7,7 +7,7 @@ metadata:
{{- include "zoo-project-dru.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install,post-upgrade
- "helm.sh/hook-weight": "10" # Execute after RabbitMQ and PostgreSQL are ready
+ "helm.sh/hook-weight": "10" # Execute after KEDA CRDs are installed (weight 5)
spec:
scaleTargetRef:
apiVersion: apps/v1
@@ -49,7 +49,12 @@ spec:
- type: postgresql
name: pg_workers_and_jobs
metadata:
+ {{- if .Values.keda.triggers.postgresql.useConfigMap }}
+ queryFromConfigMap: {{ include "zoo-project-dru.keda.postgresql.configmap" . }}
+ queryFromConfigMapKey: query.sql
+ {{- else }}
query: {{ .Values.keda.triggers.postgresql.query | quote }}
+ {{- end }}
targetQueryValue: {{ .Values.keda.triggers.postgresql.targetQueryValue | quote }}
{{- if .Values.keda.triggers.postgresql.activationTargetQueryValue }}
activationTargetQueryValue: {{ .Values.keda.triggers.postgresql.activationTargetQueryValue | quote }}
@@ -62,7 +67,7 @@ spec:
# Use values from configuration since no existing secret is defined
{{- $pgHost := .Values.keda.triggers.postgresql.host }}
{{- if not $pgHost }}
- {{- $pgHost = printf "%s-postgresql.%s.svc.cluster.local" (include "zoo-project-dru.fullname" .) .Release.Namespace }}
+ {{- $pgHost = printf "%s.%s.svc.cluster.local" (include "zoo-project-dru.postgresql.servicename" .) .Release.Namespace }}
{{- end }}
host: {{ $pgHost | quote }}
{{- $pgPort := .Values.keda.triggers.postgresql.port }}
diff --git a/zoo-project-dru/templates/keda-secret.yaml b/zoo-project-dru/templates/keda-secret.yaml
index 7e6ca0a..0f5674d 100644
--- a/zoo-project-dru/templates/keda-secret.yaml
+++ b/zoo-project-dru/templates/keda-secret.yaml
@@ -7,7 +7,9 @@ metadata:
{{- include "zoo-project-dru.labels" . | nindent 4 }}
data:
# RabbitMQ connection details
- {{- $rabbitmqHost := printf "amqp://%s-rabbitmq.%s.svc.cluster.local:5672" (include "zoo-project-dru.fullname" .) .Release.Namespace }}
+ {{- $vhost := .Values.rabbitmq.auth.vhost }}
+ {{- if eq $vhost "/" }}{{- $vhost = "%2F" }}{{- end }}
+ {{- $rabbitmqHost := printf "amqp://%s:%s@%s-rabbitmq.%s.svc.cluster.local:5672/%s" .Values.rabbitmq.auth.username .Values.rabbitmq.auth.password (include "zoo-project-dru.fullname" .) .Release.Namespace $vhost }}
host: {{ $rabbitmqHost | b64enc | quote }}
username: {{ .Values.rabbitmq.auth.username | b64enc | quote }}
password: {{ .Values.rabbitmq.auth.password | b64enc | quote }}
@@ -26,8 +28,8 @@ data:
{{- else }}
pg_port: {{ "5432" | b64enc | quote }}
{{- end }}
- # PostgreSQL host (constructed from release name and namespace)
- {{- $pgHost := printf "%s-postgresql.%s.svc.cluster.local" (include "zoo-project-dru.fullname" .) .Release.Namespace }}
+ # PostgreSQL host (constructed from service name and namespace)
+ {{- $pgHost := printf "%s.%s.svc.cluster.local" (include "zoo-project-dru.postgresql.servicename" .) .Release.Namespace }}
pg_host: {{ $pgHost | b64enc | quote }}
{{- end }}
{{- end }}
diff --git a/zoo-project-dru/templates/keda-trigger-auth.yaml b/zoo-project-dru/templates/keda-trigger-auth.yaml
index abccdb0..0c72759 100644
--- a/zoo-project-dru/templates/keda-trigger-auth.yaml
+++ b/zoo-project-dru/templates/keda-trigger-auth.yaml
@@ -9,7 +9,7 @@ metadata:
{{- include "zoo-project-dru.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install,post-upgrade
- "helm.sh/hook-weight": "0"
+ "helm.sh/hook-weight": "3" # Execute after KEDA is ready
spec:
secretTargetRef:
- parameter: host
@@ -33,6 +33,6 @@ metadata:
{{- include "zoo-project-dru.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install,post-upgrade
- "helm.sh/hook-weight": "0"
+ "helm.sh/hook-weight": "3" # Execute after KEDA is ready
spec:
secretTargetRef:
diff --git a/zoo-project-dru/templates/keda-wait-crds-job.yaml b/zoo-project-dru/templates/keda-wait-crds-job.yaml
new file mode 100644
index 0000000..91475b7
--- /dev/null
+++ b/zoo-project-dru/templates/keda-wait-crds-job.yaml
@@ -0,0 +1,32 @@
+{{- if and .Values.keda.enabled .Values.keda.waitForCRDs }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ include "zoo-project-dru.fullname" . }}-wait-keda-crds
+ labels:
+ {{- include "zoo-project-dru.labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": post-install,post-upgrade
+ "helm.sh/hook-weight": "8" # Execute before ScaledObject (weight 10)
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+spec:
+ template:
+ spec:
+ serviceAccountName: {{ include "zoo-project-dru.serviceAccountName" . }}
+ restartPolicy: Never
+ containers:
+ - name: wait-keda-crds
+ image: alpine/k8s:1.31.13
+ command:
+ - /bin/bash
+ - -c
+ - |
+ echo "Waiting for KEDA CRDs to be available..."
+ timeout 300 bash -c '
+ until kubectl get crd scaledobjects.keda.sh >/dev/null 2>&1; do
+ echo "Waiting for ScaledObject CRD..."
+ sleep 5
+ done
+ '
+ echo "KEDA CRDs are ready!"
+{{- end }}
diff --git a/zoo-project-dru/templates/node-exporter-patch-job.yaml b/zoo-project-dru/templates/node-exporter-patch-job.yaml
index 20ecfe7..1a35965 100644
--- a/zoo-project-dru/templates/node-exporter-patch-job.yaml
+++ b/zoo-project-dru/templates/node-exporter-patch-job.yaml
@@ -19,7 +19,7 @@ spec:
restartPolicy: OnFailure
containers:
- name: patch-node-exporter
- image: bitnami/kubectl:latest
+ image: alpine/k8s:1.31.13
command:
- /bin/bash
- -c
diff --git a/zoo-project-dru/templates/postgresql-secret.yaml b/zoo-project-dru/templates/postgresql-secret.yaml
new file mode 100644
index 0000000..d9439f1
--- /dev/null
+++ b/zoo-project-dru/templates/postgresql-secret.yaml
@@ -0,0 +1,19 @@
+{{- if and .Values.postgresql.enabled .Values.postgresql.useSecret (not .Values.postgresql.secretName) }}
+apiVersion: v1
+kind: Secret
+metadata:
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ include "zoo-project-dru.fullname" . }}-postgresql
+ chart: {{ include "zoo-project-dru.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ name: {{ include "zoo-project-dru.fullname" . }}-postgresql-secret
+type: Opaque
+data:
+ password: {{ .Values.global.postgresql.auth.password | b64enc | quote }}
+ username: {{ .Values.global.postgresql.auth.username | b64enc | quote }}
+ database: {{ .Values.global.postgresql.auth.database | b64enc | quote }}
+ host: {{ include "zoo-project-dru.postgresql.servicename" . | b64enc | quote }}
+ port: {{ .Values.global.postgresql.service.ports.postgresql | toString | b64enc | quote }}
+{{- end }}
diff --git a/zoo-project-dru/templates/postgresql-service.yaml b/zoo-project-dru/templates/postgresql-service.yaml
new file mode 100644
index 0000000..3e19ce3
--- /dev/null
+++ b/zoo-project-dru/templates/postgresql-service.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.postgresql.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ include "zoo-project-dru.fullname" . }}-postgresql
+ chart: {{ include "zoo-project-dru.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ name: {{ include "zoo-project-dru.fullname" . }}-postgresql-service
+spec:
+ ports:
+ - name: postgresql
+ port: {{ .Values.global.postgresql.service.ports.postgresql }}
+ protocol: TCP
+ targetPort: postgresql
+ selector:
+ app: {{ include "zoo-project-dru.fullname" . }}-postgresql
+ release: {{ .Release.Name }}
+ type: ClusterIP
+{{- end }}
diff --git a/zoo-project-dru/templates/postgresql-statefulset.yaml b/zoo-project-dru/templates/postgresql-statefulset.yaml
new file mode 100644
index 0000000..0871dbc
--- /dev/null
+++ b/zoo-project-dru/templates/postgresql-statefulset.yaml
@@ -0,0 +1,101 @@
+{{- if .Values.postgresql.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ include "zoo-project-dru.fullname" . }}-postgresql
+ chart: {{ include "zoo-project-dru.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ name: {{ include "zoo-project-dru.fullname" . }}-postgresql
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ include "zoo-project-dru.fullname" . }}-postgresql
+ release: {{ .Release.Name }}
+ serviceName: {{ include "zoo-project-dru.fullname" . }}-postgresql-service
+ template:
+ metadata:
+ labels:
+ app: {{ include "zoo-project-dru.fullname" . }}-postgresql
+ release: {{ .Release.Name }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: postgresql
+ image: {{ .Values.postgresql.image.repository }}:{{ .Values.postgresql.image.tag }}
+ imagePullPolicy: {{ .Values.postgresql.image.pullPolicy }}
+ env:
+ - name: PGDATA
+ value: {{ .Values.postgresql.persistence.mountPath }}/pgdata
+ {{- if .Values.global.postgresql.auth.existingSecret }}
+ - name: POSTGRES_DB
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.global.postgresql.auth.existingSecret }}
+ key: database
+ optional: true
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.global.postgresql.auth.existingSecret }}
+ key: password
+ optional: false
+ - name: POSTGRES_USER
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.global.postgresql.auth.existingSecret }}
+ key: username
+ optional: true
+ {{- else }}
+ - name: POSTGRES_DB
+ value: {{ .Values.global.postgresql.auth.database | quote }}
+ - name: POSTGRES_PASSWORD
+ {{- if .Values.postgresql.useSecret }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.postgresql.secretName | default (printf "%s-postgresql-secret" (include "zoo-project-dru.fullname" .)) }}
+ key: password
+ {{- else }}
+ value: {{ .Values.global.postgresql.auth.password | quote }}
+ {{- end }}
+ - name: POSTGRES_USER
+ value: {{ .Values.global.postgresql.auth.username | quote }}
+ {{- end }}
+ ports:
+ - containerPort: {{ .Values.global.postgresql.service.ports.postgresql }}
+ name: postgresql
+ resources:
+ {{- toYaml .Values.postgresql.resources | nindent 10 }}
+ volumeMounts:
+ - name: {{ .Values.postgresql.persistence.volumeName }}
+ mountPath: {{ .Values.postgresql.persistence.mountPath }}
+ {{- if .Values.postgresql.primary.initdb.scriptsConfigMap }}
+ - name: init-scripts
+ mountPath: /docker-entrypoint-initdb.d
+ {{- end }}
+ restartPolicy: Always
+ volumes:
+ {{- if .Values.postgresql.primary.initdb.scriptsConfigMap }}
+ - name: init-scripts
+ configMap:
+ name: {{ .Values.postgresql.primary.initdb.scriptsConfigMap }}
+ {{- end }}
+ volumeClaimTemplates:
+ - metadata:
+ name: {{ .Values.postgresql.persistence.volumeName }}
+ spec:
+ {{- if .Values.postgresql.persistence.storageClass }}
+ storageClassName: {{ .Values.postgresql.persistence.storageClass }}
+ {{- end }}
+ accessModes:
+ - {{ .Values.postgresql.persistence.accessMode }}
+ resources:
+ requests:
+ storage: {{ .Values.postgresql.persistence.size }}
+{{- end }}
diff --git a/zoo-project-dru/templates/prometheus-config-patch.yaml b/zoo-project-dru/templates/prometheus-config-patch.yaml
index 1458e12..9529d5d 100644
--- a/zoo-project-dru/templates/prometheus-config-patch.yaml
+++ b/zoo-project-dru/templates/prometheus-config-patch.yaml
@@ -19,7 +19,7 @@ spec:
restartPolicy: OnFailure
containers:
- name: patch-prometheus-config
- image: bitnami/kubectl:latest
+ image: alpine/k8s:1.31.13
command:
- /bin/bash
- -c
diff --git a/zoo-project-dru/templates/rabbitmq-configmap.yaml b/zoo-project-dru/templates/rabbitmq-configmap.yaml
new file mode 100644
index 0000000..94f433c
--- /dev/null
+++ b/zoo-project-dru/templates/rabbitmq-configmap.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.rabbitmq.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "zoo-project-dru.fullname" . }}-rabbitmq-config
+ labels:
+ {{- include "zoo-project-dru.labels" . | nindent 4 }}
+ app.kubernetes.io/component: rabbitmq
+data:
+ rabbitmq.conf: |
+{{- if .Values.rabbitmq.config }}
+{{ .Values.rabbitmq.config | indent 4 }}
+{{- else }}
+{{ tpl (.Files.Get "files/rabbitmq/rabbitmq.conf") . | indent 4 }}
+{{- end }}
+{{- end }}
diff --git a/zoo-project-dru/templates/rabbitmq-definition-secret.yaml b/zoo-project-dru/templates/rabbitmq-definition-secret.yaml
index 9e21f2f..6d8a739 100644
--- a/zoo-project-dru/templates/rabbitmq-definition-secret.yaml
+++ b/zoo-project-dru/templates/rabbitmq-definition-secret.yaml
@@ -1,7 +1,11 @@
+{{- if and .Values.rabbitmq.enabled .Values.rabbitmq.autoSetup.enabled }}
apiVersion: v1
kind: Secret
metadata:
- name: load-definition
+ name: {{ include "zoo-project-dru.fullname" . }}-rabbitmq-definition
+ labels:
+ {{- include "zoo-project-dru.labels" . | nindent 4 }}
type: Opaque
stringData:
- load_definition.json: {{ tpl (.Files.Get .Values.zoo.rabbitmq.definitions) . | quote }}
\ No newline at end of file
+ load_definition.json: {{ tpl (.Files.Get .Values.zoo.rabbitmq.definitions) . | quote }}
+{{- end }}
diff --git a/zoo-project-dru/templates/rabbitmq-plugins-configmap.yaml b/zoo-project-dru/templates/rabbitmq-plugins-configmap.yaml
new file mode 100644
index 0000000..5bc08e1
--- /dev/null
+++ b/zoo-project-dru/templates/rabbitmq-plugins-configmap.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.rabbitmq.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "zoo-project-dru.fullname" . }}-rabbitmq-plugins
+ labels:
+ {{- include "zoo-project-dru.labels" . | nindent 4 }}
+ app.kubernetes.io/component: rabbitmq
+data:
+ enabled_plugins: |
+ [rabbitmq_management,rabbitmq_prometheus,rabbitmq_web_dispatch,rabbitmq_management_agent].
+{{- end }}
diff --git a/zoo-project-dru/templates/rabbitmq-service.yaml b/zoo-project-dru/templates/rabbitmq-service.yaml
new file mode 100644
index 0000000..c699924
--- /dev/null
+++ b/zoo-project-dru/templates/rabbitmq-service.yaml
@@ -0,0 +1,52 @@
+{{- if .Values.rabbitmq.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "zoo-project-dru.fullname" . }}-rabbitmq
+ labels:
+ {{- include "zoo-project-dru.labels" . | nindent 4 }}
+ app.kubernetes.io/component: rabbitmq
+spec:
+ type: ClusterIP
+ ports:
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ protocol: TCP
+ - name: management
+ port: 15672
+ targetPort: management
+ protocol: TCP
+ selector:
+ {{- include "zoo-project-dru.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: rabbitmq
+{{- if .Values.rabbitmq.headless }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "zoo-project-dru.fullname" . }}-rabbitmq-headless
+ labels:
+ {{- include "zoo-project-dru.labels" . | nindent 4 }}
+ app.kubernetes.io/component: rabbitmq
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ protocol: TCP
+ - name: management
+ port: 15672
+ targetPort: management
+ protocol: TCP
+ - name: epmd
+ port: 4369
+ targetPort: epmd
+ protocol: TCP
+ selector:
+ {{- include "zoo-project-dru.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: rabbitmq
+{{- end }}
+{{- end }}
diff --git a/zoo-project-dru/templates/rabbitmq-setup-job.yaml b/zoo-project-dru/templates/rabbitmq-setup-job.yaml
new file mode 100644
index 0000000..62739f1
--- /dev/null
+++ b/zoo-project-dru/templates/rabbitmq-setup-job.yaml
@@ -0,0 +1,53 @@
+{{- if and .Values.rabbitmq.enabled .Values.rabbitmq.autoSetup.enabled }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ include "zoo-project-dru.fullname" . }}-rabbitmq-setup
+ labels:
+ {{- include "zoo-project-dru.labels" . | nindent 4 }}
+ app.kubernetes.io/component: rabbitmq-setup
+spec:
+ ttlSecondsAfterFinished: 0 # Delete job immediately after completion
+ template:
+ metadata:
+ labels:
+ {{- include "zoo-project-dru.selectorLabels" . | nindent 8 }}
+ app.kubernetes.io/component: rabbitmq-setup
+ spec:
+ restartPolicy: Never
+ containers:
+ - name: rabbitmq-setup
+ image: curlimages/curl:latest
+ command:
+ - /bin/sh
+ - -c
+ - |
+ set -e
+ echo "Waiting for RabbitMQ management API to be ready..."
+
+ # Wait for RabbitMQ management interface
+ until curl -f -u {{ .Values.rabbitmq.auth.username }}:{{ .Values.rabbitmq.auth.password }} \
+ http://{{ include "zoo-project-dru.rabbitmq.serviceName" . }}:15672/api/overview >/dev/null 2>&1; do
+ echo "Waiting for RabbitMQ management API..."
+ sleep 10
+ done
+
+ echo "RabbitMQ management API is ready. Importing definitions..."
+
+ # Import definitions via HTTP API
+ curl -u {{ .Values.rabbitmq.auth.username }}:{{ .Values.rabbitmq.auth.password }} \
+ -H "Content-Type: application/json" \
+ -X POST \
+ -d @/etc/rabbitmq/definitions.json \
+ http://{{ include "zoo-project-dru.rabbitmq.serviceName" . }}:15672/api/definitions
+
+ echo "Definitions imported successfully!"
+ volumeMounts:
+ - name: definitions
+ mountPath: /etc/rabbitmq/definitions.json
+ subPath: load_definition.json
+ volumes:
+ - name: definitions
+ secret:
+ secretName: {{ include "zoo-project-dru.fullname" . }}-rabbitmq-definition
+{{- end }}
diff --git a/zoo-project-dru/templates/rabbitmq-statefulset.yaml b/zoo-project-dru/templates/rabbitmq-statefulset.yaml
new file mode 100644
index 0000000..675a11f
--- /dev/null
+++ b/zoo-project-dru/templates/rabbitmq-statefulset.yaml
@@ -0,0 +1,117 @@
+{{- if .Values.rabbitmq.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "zoo-project-dru.fullname" . }}-rabbitmq
+ labels:
+ {{- include "zoo-project-dru.labels" . | nindent 4 }}
+ app.kubernetes.io/component: rabbitmq
+spec:
+ serviceName: {{ include "zoo-project-dru.fullname" . }}-rabbitmq-headless
+ replicas: {{ .Values.rabbitmq.replicas }}
+ selector:
+ matchLabels:
+ {{- include "zoo-project-dru.selectorLabels" . | nindent 6 }}
+ app.kubernetes.io/component: rabbitmq
+ template:
+ metadata:
+ labels:
+ {{- include "zoo-project-dru.selectorLabels" . | nindent 8 }}
+ app.kubernetes.io/component: rabbitmq
+ spec:
+ containers:
+ - name: rabbitmq
+ image: "{{ .Values.rabbitmq.image.repository }}:{{ .Values.rabbitmq.image.tag }}"
+ imagePullPolicy: {{ .Values.rabbitmq.image.pullPolicy }}
+ ports:
+ - name: amqp
+ containerPort: 5672
+ protocol: TCP
+ - name: management
+ containerPort: 15672
+ protocol: TCP
+ - name: epmd
+ containerPort: 4369
+ protocol: TCP
+ env:
+ - name: RABBITMQ_DEFAULT_USER
+ value: {{ .Values.rabbitmq.auth.username | quote }}
+ - name: RABBITMQ_DEFAULT_PASS
+ value: {{ .Values.rabbitmq.auth.password | quote }}
+ - name: RABBITMQ_DEFAULT_VHOST
+ value: {{ .Values.rabbitmq.auth.vhost | quote }}
+ {{- if .Values.rabbitmq.clustering.enabled }}
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: RABBITMQ_USE_LONGNAME
+ value: "true"
+ - name: RABBITMQ_NODENAME
+ value: "rabbit@$(POD_NAME).{{ include "zoo-project-dru.fullname" . }}-rabbitmq-headless.{{ .Release.Namespace }}.svc.cluster.local"
+ {{- end }}
+ livenessProbe:
+ exec:
+ command:
+ - rabbitmq-diagnostics
+ - -q
+ - ping
+ initialDelaySeconds: 120
+ periodSeconds: 30
+ timeoutSeconds: 20
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - rabbitmq-diagnostics
+ - -q
+ - check_running
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 20
+ failureThreshold: 3
+ resources:
+ {{- toYaml .Values.rabbitmq.resources | nindent 10 }}
+ volumeMounts:
+ {{- if .Values.rabbitmq.persistence.enabled }}
+ - name: data
+ mountPath: /var/lib/rabbitmq/mnesia
+ {{- end }}
+ {{- if .Values.rabbitmq.config }}
+ - name: config
+ mountPath: /etc/rabbitmq/rabbitmq.conf
+ subPath: rabbitmq.conf
+ {{- end }}
+ - name: definitions
+ mountPath: /etc/rabbitmq/definitions.json
+ subPath: load_definition.json
+ - name: plugins
+ mountPath: /etc/rabbitmq/enabled_plugins
+ subPath: enabled_plugins
+ volumes:
+ {{- if .Values.rabbitmq.config }}
+ - name: config
+ configMap:
+ name: {{ include "zoo-project-dru.fullname" . }}-rabbitmq-config
+ {{- end }}
+ - name: plugins
+ configMap:
+ name: {{ include "zoo-project-dru.fullname" . }}-rabbitmq-plugins
+ - name: definitions
+ secret:
+ secretName: {{ include "zoo-project-dru.fullname" . }}-rabbitmq-definition
+ {{- if .Values.rabbitmq.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.rabbitmq.persistence.size }}
+ {{- if .Values.rabbitmq.persistence.storageClass }}
+ storageClassName: {{ .Values.rabbitmq.persistence.storageClass }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/zoo-project-dru/templates/redis-service.yaml b/zoo-project-dru/templates/redis-service.yaml
new file mode 100644
index 0000000..b853369
--- /dev/null
+++ b/zoo-project-dru/templates/redis-service.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.redis.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ include "zoo-project-dru.fullname" . }}-redis
+ chart: {{ include "zoo-project-dru.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ name: {{ include "zoo-project-dru.fullname" . }}-redis-service
+spec:
+ ports:
+ - name: redis
+ port: {{ .Values.redis.port }}
+ protocol: TCP
+ targetPort: redis
+ selector:
+ app: {{ include "zoo-project-dru.fullname" . }}-redis
+ release: {{ .Release.Name }}
+ type: ClusterIP
+{{- end }}
diff --git a/zoo-project-dru/templates/redis-statefulset.yaml b/zoo-project-dru/templates/redis-statefulset.yaml
new file mode 100644
index 0000000..3499513
--- /dev/null
+++ b/zoo-project-dru/templates/redis-statefulset.yaml
@@ -0,0 +1,69 @@
+{{- if .Values.redis.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ include "zoo-project-dru.fullname" . }}-redis
+ chart: {{ include "zoo-project-dru.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+ name: {{ include "zoo-project-dru.fullname" . }}-redis
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ include "zoo-project-dru.fullname" . }}-redis
+ release: {{ .Release.Name }}
+ serviceName: {{ include "zoo-project-dru.fullname" . }}-redis-service
+ template:
+ metadata:
+ labels:
+ app: {{ include "zoo-project-dru.fullname" . }}-redis
+ release: {{ .Release.Name }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: redis
+ image: {{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }}
+ imagePullPolicy: {{ .Values.redis.image.pullPolicy }}
+ {{- if .Values.redis.auth.enabled }}
+ env:
+ - name: REDIS_PASSWORD
+ value: {{ .Values.redis.auth.password | quote }}
+ {{- end }}
+ ports:
+ - containerPort: {{ .Values.redis.port }}
+ name: redis
+ resources:
+ {{- toYaml .Values.redis.resources | nindent 10 }}
+ {{- if .Values.redis.persistence.enabled }}
+ volumeMounts:
+ - name: {{ .Values.redis.persistence.volumeName }}
+ mountPath: /data
+ {{- end }}
+ {{- if .Values.redis.auth.enabled }}
+ command:
+ - redis-server
+ - --requirepass
+ - $(REDIS_PASSWORD)
+ {{- end }}
+ restartPolicy: Always
+ {{- if .Values.redis.persistence.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: {{ .Values.redis.persistence.volumeName }}
+ spec:
+ {{- if .Values.redis.persistence.storageClass }}
+ storageClassName: {{ .Values.redis.persistence.storageClass }}
+ {{- end }}
+ accessModes:
+ - {{ .Values.redis.persistence.accessMode }}
+ resources:
+ requests:
+ storage: {{ .Values.redis.persistence.size }}
+ {{- end }}
+{{- end }}
diff --git a/zoo-project-dru/values.yaml b/zoo-project-dru/values.yaml
index 7aab9b5..0369a8c 100644
--- a/zoo-project-dru/values.yaml
+++ b/zoo-project-dru/values.yaml
@@ -84,6 +84,8 @@ autoscaling:
keda:
# Enable KEDA as a subchart dependency
enabled: false
+ # Skip ScaledObject creation if KEDA CRDs are not ready
+ skipScaledObject: false
# KEDA subchart configuration
# When enabled, KEDA will be deployed as part of this chart
# See: https://github.com/kedacore/charts/tree/main/keda
@@ -116,7 +118,7 @@ keda:
namespaceOverride: "kyverno-system" # Dedicated namespace for Kyverno
policies:
zoofpmProtection:
- enabled: true # Policy to protect zoofpm pods
+ enabled: false # Policy to protect zoofpm pods - disabled since Kyverno is not installed
failurePolicy: "Enforce" # Strictly block deletions (Enforce/Audit)
background: false # Real-time validation only
protectZoofpm: true # Prevent deletion of zoofpm pods
@@ -133,66 +135,10 @@ keda:
triggers:
postgresql:
enabled: true
- # STRICT query - Absolute protection of pods by IP
- # GUARANTEE: No pod with its IP in workers.host can be deleted
- query: |
- WITH active_worker_ips AS (
- -- IPs of pods that have active workers - ABSOLUTE PROTECTION
- SELECT DISTINCT host as protected_ip
- FROM workers
- WHERE status = 1 AND host IS NOT NULL
- ),
- protected_count AS (
- -- MINIMUM number of pods to keep (those with workers)
- SELECT COUNT(*) as must_keep_minimum
- FROM active_worker_ips
- ),
- workload_needs AS (
- -- Calculation based on total workload
- SELECT
- SUM(CASE WHEN status = 1 THEN 1 ELSE 0 END) as active_workers,
- -- Pods needed for workload (async_worker=10 per pod)
- -- Scale-to-zero if no active workers
- CASE
- WHEN SUM(CASE WHEN status = 1 THEN 1 ELSE 0 END) > 0
- THEN GREATEST(CEIL(SUM(CASE WHEN status = 1 THEN 1 ELSE 0 END)::decimal / 10), 1)
- ELSE 0 -- Scale-to-zero if no workers
- END as pods_for_workload
- FROM workers
- WHERE host IS NOT NULL
- ),
- service_needs AS (
- -- Pods needed for running services
- SELECT
- COUNT(*) as running_services,
- -- Only if workers exist AND services
- CASE
- WHEN EXISTS(SELECT 1 FROM workers WHERE status = 1 AND host IS NOT NULL)
- AND COUNT(*) > 0
- THEN GREATEST(CEIL(COUNT(*)::decimal / 10), 1)
- ELSE 0 -- Scale-to-zero if no active workers
- END as pods_for_services
- FROM services
- WHERE end_time IS NULL AND fstate NOT IN ('Succeeded', 'Failed')
- )
- SELECT
- -- Hybrid logic: active workers AND pending services
- GREATEST(
- -- Calculation based on active workers (1 pod per 10 workers)
- CASE
- WHEN (SELECT COUNT(*) FROM workers WHERE status = 1 AND host IS NOT NULL) > 0
- THEN CEIL((SELECT COUNT(*)::decimal FROM workers WHERE status = 1 AND host IS NOT NULL) / 10)
- ELSE 0
- END,
- -- Calculation based on active services (1 pod per 5 services)
- CASE
- WHEN (SELECT COUNT(*) FROM services WHERE end_time IS NULL AND fstate NOT IN ('Succeeded', 'Failed')) > 0
- THEN CEIL((SELECT COUNT(*)::decimal FROM services WHERE end_time IS NULL AND fstate NOT IN ('Succeeded', 'Failed')) / 5)
- ELSE 0
- END,
- -- Scale-to-zero if no activity
- 0
- )
+ # Use ConfigMap for the PostgreSQL query (recommended for complex queries)
+ useConfigMap: true
+ # Alternative: inline query (set useConfigMap: false and uncomment the query below)
+ # query: "SELECT COUNT(*) FROM workers WHERE status = 1"
targetQueryValue: "1.0" # 1:1 scaling - no over-allocation
activationTargetQueryValue: "0.1" # Scale-up at the slightest activity
host: "" # Will be auto-generated from postgresql service
@@ -218,7 +164,7 @@ zoo:
promoteHead: true
detectEntrypoint: false
rabbitmq:
- definitions: "files/rabbitmq/definitions.json"
+ definitions: "files/rabbitmq/definitions.json.tpl"
zoofpm:
image:
@@ -280,8 +226,39 @@ workflow:
CwlRunnerEndpoint: "calrissian-runner"
postgresql:
- defineEnvironmentVariables: false
+ # Configuration for official PostgreSQL Docker image
enabled: true
+ name: postgresql-db
+ serviceName: postgresql-db-service
+
+ # Security configuration
+ useSecret: false # Set to true to store credentials in a Kubernetes Secret
+ secretName: "" # Optional: specify an existing secret name, otherwise auto-generated
+
+ image:
+ repository: postgres
+ tag: "16-alpine"
+ pullPolicy: IfNotPresent
+
+ # Resources configuration
+ resources:
+ limits:
+ cpu: 1000m
+ memory: 1Gi
+ requests:
+ cpu: 250m
+ memory: 256Mi
+
+ # Persistence configuration
+ persistence:
+ enabled: true
+ volumeName: postgresql-data
+ mountPath: /var/lib/postgresql/data
+ accessMode: ReadWriteOnce
+ size: 8Gi
+ storageClass: ""
+
+ # Init scripts configuration (keep existing ConfigMap)
primary:
initdb:
scriptsConfigMap: "postgresql-primary-init-scripts"
@@ -299,14 +276,45 @@ global:
# rabbitmq
rabbitmq:
+ enabled: true
+  # NOTE(review): removed a duplicate `autoSetup:` key that was added here —
+  # the same key is declared again a few lines below; duplicate mapping keys are invalid YAML (last one wins).
+ image:
+ repository: rabbitmq
+ tag: "4.1.4-alpine"
+ pullPolicy: IfNotPresent
+
+ replicas: 1
+
+ # Auto-setup: automatically enable management plugin and import definitions
+ autoSetup:
+ enabled: true
+
auth:
- username: RABBITMQ_USERNAME
- password: CHANGEME
- loadDefinition:
+ username: "zoo"
+ password: "CHANGEME"
+ vhost: "/"
+
+ clustering:
+ enabled: false
+
+ resources:
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ requests:
+ cpu: 250m
+ memory: 256Mi
+  # NOTE(review): the pre-existing `persistence:` block kept as unchanged context further below duplicates this key — delete that stale block, or `size`/`storageClass` set here may be discarded (duplicate keys: last one wins).
+ persistence:
enabled: true
- existingSecret: load-definition
- extraConfiguration: |
- load_definitions = /app/load_definition.json
+ size: 8Gi
+ storageClass: ""
+
+ # RabbitMQ configuration
+ # If config is set, it overrides the default file-based configuration
+ # If not set (or empty), the configuration will be loaded from files/rabbitmq/rabbitmq.conf
+ config: ""
persistence:
enabled: true
@@ -348,6 +356,62 @@ cookiecutter:
minio:
enabled: false
+ # Configuration for official MinIO chart (https://charts.min.io/)
+ mode: standalone # or 'distributed' for production
+ replicas: 1
+
+ # Authentication
+ rootUser: minio-admin
+ rootPassword: minio-secret-password
+
+ # Backward compatibility: keep s3-service name
+ fullnameOverride: "s3-service"
+
+ # Create default buckets after install
+ buckets:
+ - name: eoepca
+ policy: none
+ purge: false
+ - name: results
+ policy: none
+ purge: false
+
+ # Resources
+ resources:
+ requests:
+ memory: "1Gi"
+ cpu: "500m"
+ limits:
+ memory: "2Gi"
+ cpu: "1000m"
+
+ # Persistence
+ persistence:
+ enabled: true
+ size: 10Gi
+ storageClass: "" # Use default storage class
+
+ # Service configuration
+ service:
+ type: ClusterIP
+ port: 9000
+
+ # Console (web UI)
+ consoleService:
+ type: ClusterIP
+ port: 9001
+
+ # Environment variables
+ environment:
+ MINIO_BROWSER: "on"
+ MINIO_DOMAIN: ""
+
+ # Security context
+ securityContext:
+ enabled: true
+ runAsUser: 1000
+ runAsGroup: 1000
+ fsGroup: 1000
websocketd:
enabled: false
@@ -358,11 +422,38 @@ websocketd:
tag: 67449315857b54bbc970f02c7aa4fd10a94721f0
redis:
+ # Configuration for official Redis Docker image
enabled: false
- replica:
- replicaCount: 1
+ name: redis-db
+ serviceName: redis-db-service
+ port: 6379
+
+ image:
+ repository: redis
+ tag: "alpine3.22"
+ pullPolicy: IfNotPresent
+
+ # Authentication configuration
auth:
enabled: false
+ password: ""
+
+ # Resources configuration
+ resources:
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
+ # Persistence configuration
+ persistence:
+ enabled: true
+ volumeName: redis-data
+ accessMode: ReadWriteOnce
+ size: 4Gi
+ storageClass: ""
iam:
enabled: true
@@ -393,8 +484,6 @@ webui:
oidc:
issuer: https://auth.geolabs.fr/realms/zooproject
remoteUserClaim: email
- clientId: webapp
- clientSecret: onKWPIoKeRAThAkLjSINJZ6gd1qVzhXs
providerTokenEndpointAuth: client_secret_basic
authVerifyJwksUri: https://auth.geolabs.fr/realms/zooproject/protocol/openid-connect/certs
scope: "openid email"
@@ -406,6 +495,9 @@ argo:
# Options for automatic token management
autoTokenManagement: true # Enable automatic token retrieval
restartOnTokenUpdate: false # Restart pods after token update
+ # Service account configuration for Argo workflows
+ serviceAccount:
+ name: "argo-workflow"
# Ingress configuration for Argo Workflows UI
ingress:
enabled: false
@@ -460,10 +552,10 @@ argo-workflows:
secure: false
accessKeySecret:
name: s3-service
- key: root-user
+ key: rootUser
secretKeySecret:
name: s3-service
- key: root-password
+ key: rootPassword
stageInOut:
enabled: true
diff --git a/zoo-project-dru/values_apple.yaml b/zoo-project-dru/values_apple.yaml
index 9d5d24d..42afc76 100644
--- a/zoo-project-dru/values_apple.yaml
+++ b/zoo-project-dru/values_apple.yaml
@@ -14,17 +14,16 @@ minio:
# minio chart value parameters description can be found here:
# https://github.com/bitnami/charts/tree/main/bitnami/minio
enabled: true
- auth:
- rootUser: minio-admin
- rootPassword: minio-secret-password
- # to access the dashboard from browser run the following port-forward command:
- # kubectl port-forward svc/zoo-project-dru-minio 9001:9001 -n zp
persistence:
enabled: true
storageClass: hostpath
size: 2Gi
accessMode: ReadWriteOnce
defaultBuckets: "eoepca"
+ buckets:
+ - name: eoepca
+ policy: none
+ purge: false
fullnameOverride: "s3-service"
cookiecutter:
diff --git a/zoo-project-dru/values_argo.yaml b/zoo-project-dru/values_argo.yaml
index 0a5c33f..02e95c4 100644
--- a/zoo-project-dru/values_argo.yaml
+++ b/zoo-project-dru/values_argo.yaml
@@ -1,4 +1,19 @@
# ZOO-Project-DRU Helm Chart Configuration with Argo Workflows support
+
+
+# # Optimization: Use shorter names to reduce Helm secret size
+# nameOverride: "zoo"
+# fullnameOverride: "zoo"
+
+# Optimization: Disable optional components to reduce Helm secret size (avoid >1MB limit)
+# These optimizations reduce secret size by disabling large ConfigMaps
+zoo:
+ openapi:
+ enabled: false # Disable OpenAPI spec (saves ~29KB)
+ security:
+ enabled: false # Disable security service (saves ~11KB)
+ includeOptionalConfigs: false # Disable web assets (saves ~2KB)
+
argo:
enabled: true
installCRDs: true # Install CRDs automatically
@@ -45,8 +60,8 @@ argo:
insecure: true
# MinIO secret configuration
secretName: "s3-service"
- accessKeySecretKey: "root-user"
- secretKeySecretKey: "root-password"
+ accessKeySecretKey: "rootUser"
+ secretKeySecretKey: "rootPassword"
# Ingress configuration for Argo Workflows UI
ingress:
@@ -90,24 +105,30 @@ workflow:
additionalInputs:
s3_bucket: results
region_name: us-east-1
- aws_secret_access_key: minio-secret-password
- aws_access_key_id: minio-admin
+      # NOTE: credentials are hard-coded here and must match minio.rootUser / minio.rootPassword defined below
+ aws_secret_access_key: "minio-secret-password"
+ aws_access_key_id: "minio-admin"
endpoint_url: http://s3-service.zoo.svc.cluster.local:9000
minio:
# minio chart value parameters description can be found here:
# https://github.com/bitnami/charts/tree/main/bitnami/minio
enabled: true
- auth:
- rootUser: minio-admin
- rootPassword: minio-secret-password
- # to access the dashboard from browser run the following port-forward command:
- # kubectl port-forward svc/s3-service 9001:9001 -n zoo
+ # Authentication
+ rootUser: minio-admin
+ rootPassword: minio-secret-password
persistence:
enabled: true
size: 2Gi
accessMode: ReadWriteOnce
- defaultBuckets: "eoepca results"
+ defaultBuckets: "eoepca"
+ buckets:
+ - name: eoepca
+ policy: none
+ purge: false
+ - name: results
+ policy: none
+ purge: false
fullnameOverride: "s3-service"
cookiecutter:
@@ -168,10 +189,10 @@ argo-workflows:
insecure: true
accessKeySecret:
name: s3-service
- key: root-user
+ key: rootUser
secretKeySecret:
name: s3-service
- key: root-password
+ key: rootPassword
# Workflow Controller Configuration
controller:
@@ -277,9 +298,11 @@ argo-workflows:
install: false
keep: true
+# Note: Monitoring stack disabled to keep under 1MB Helm secret limit
+# To enable monitoring, deploy kube-prometheus-stack separately:
+# helm install monitoring prometheus-community/kube-prometheus-stack -n monitoring --create-namespace
monitoring:
- enabled: true
- disableProblematicTargets: true # Disable patch since node-exporter is correctly configured
+ enabled: false
# Disable patch since node-exporter is already correctly configured below
nodeExporterPatchEnabled: true
kube-prometheus-stack:
@@ -316,31 +339,55 @@ monitoring:
serviceMonitorSelector: {}
ruleSelector: {}
- # Grafana
+ # Grafana (heavily optimized to reduce secret size)
grafana:
enabled: true
- defaultDashboardsEnabled: true
+ defaultDashboardsEnabled: false # Disable all default dashboards
adminPassword: admin
+ sidecar:
+ dashboards:
+ enabled: false # Disable dashboard sidecar completely
- # Kube-state-metrics
+ # Disable all default dashboard ConfigMaps that cause size issues
+ defaultRules:
+ create: false # Disable default PrometheusRules (saves ~200KB)
+
+ # Disable other dashboard-related components
+ kubeApiServer:
+ enabled: false
+ kubelet:
+ enabled: false
+ kubeControllerManager:
+ enabled: false
+ coreDns:
+ enabled: false
+ kubeEtcd:
+ enabled: false
+ kubeScheduler:
+ enabled: false
+ kubeProxy:
+ enabled: false
+ kubeStateMetrics:
+ enabled: false
+ nodeExporter:
+ enabled: false
+
+ # Kube-state-metrics (disabled to reduce secret size)
kube-state-metrics:
- enabled: true
- podSecurityPolicy:
- enabled: false
+ enabled: false # Saves ~100KB in PrometheusRules
- # Alertmanager
+ # Alertmanager (disabled to reduce secret size)
alertmanager:
- enabled: true
- podSecurityPolicy:
- enabled: false
+ enabled: false # Saves ~50KB in configuration
# Disable PodSecurityPolicy globalement
podSecurityPolicy:
enabled: false
-# Argo Events Chart Configuration
+# Argo Events disabled to reduce Helm secret size
+# Deploy separately if needed: helm install argo-events argo/argo-events
argo-events:
- enabled: true
+ enabled: false
# Désactiver l'installation des CRDs (ils sont déjà installés)
crds:
diff --git a/zoo-project-dru/values_minikube.yaml b/zoo-project-dru/values_minikube.yaml
index 21fb5f1..55b8dbc 100644
--- a/zoo-project-dru/values_minikube.yaml
+++ b/zoo-project-dru/values_minikube.yaml
@@ -1,20 +1,25 @@
useKubeProxy: true
+# Optimization: Use shorter names to reduce Helm secret size
+# nameOverride: "zoo"
+# fullnameOverride: "zoo"
+
minio:
# minio chart value parameters description can be found here:
# https://github.com/bitnami/charts/tree/main/bitnami/minio
enabled: true
- auth:
- rootUser: minio-admin
- rootPassword: minio-secret-password
- # to access the dashboard from browser run the following port-forward command:
- # kubectl port-forward svc/zoo-project-dru-minio 9001:9001 -n zp
persistence:
enabled: true
storageClass: standard
size: 2Gi
accessMode: ReadWriteOnce
defaultBuckets: "eoepca"
+ # Create default buckets using official MinIO chart format
+ buckets:
+ - name: eoepca
+ policy: none
+ purge: false
+ # Keep backward compatibility with s3-service name
fullnameOverride: "s3-service"
cookiecutter:
@@ -26,6 +31,14 @@ iam:
openeoAuth:
enabled: false
+# Optimization: disable optional components to reduce Helm secret size
+zoo:
+ openapi:
+ enabled: false # Disable OpenAPI spec (saves ~29KB)
+ security:
+ enabled: false # Disable security service (saves ~11KB)
+ includeOptionalConfigs: false # Disable web assets (saves ~2KB)
+
webui:
enabled: false
@@ -35,3 +48,40 @@ websocketd:
redis:
enabled: true
+
+rabbitmq:
+ enabled: true
+ autoSetup:
+ enabled: true
+ persistence:
+ storageClass: standard
+ size: 1Gi
+ resources:
+ limits:
+ cpu: 200m
+ memory: 256Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
+# KEDA Configuration for Minikube
+# Note: KEDA is installed by Skaffold profile, not by this chart
+# keda:
+# enabled: false # Disable subchart, KEDA installed separately
+# skipScaledObject: true # Skip ScaledObject creation until KEDA is ready
+# operator:
+# replicaCount: 1
+# metricsServer:
+# replicaCount: 1
+# policies:
+# zoofpmProtection:
+# enabled: true
+#
+# # Use ConfigMap for PostgreSQL trigger (reduces Helm secret size)
+# triggers:
+# postgresql:
+# useConfigMap: true
+
+# # Enable eviction controller for minikube
+# evictionController:
+# enabled: true