diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml new file mode 100644 index 000000000..040860018 --- /dev/null +++ b/.github/workflows/pull-request.yml @@ -0,0 +1,267 @@ +# Create minikube test deployments on different kubernetes versions +name: Silta chart tests + +on: + # # Run for pull requests, but there's an additional draft filter later on + # pull_request: + # types: [opened, synchronize, reopened, ready_for_review] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + + # schedule: + # # Run compatability tests each Monday at 9 + # - cron: '0 9 * * 1' + +jobs: + minikube-test: + name: Minikube + runs-on: ubuntu-latest + strategy: + matrix: + # Available minikube kubernetes version list: + # "minikube config defaults kubernetes-version" + # and https://kubernetes.io/releases/patch-releases/ + kubernetes-version: ["1.28.3"] + # kubernetes-version: ["v1.22.17", "v1.23.17", "v1.24.17", "v1.25.16", "1.26.11", "1.27.8", "1.28.4", "latest"] + env: + CLUSTER_DOMAIN: minikube.local.wdr.io + K8S_PROJECT_REPO_DIR: k8s-project-repositories + if: github.event.pull_request.draft == false + steps: + - uses: actions/checkout@v4 + - name: Silta CLI setup + run: | + mkdir -p ~/.local/bin + + # Latest tagged release + latest_release_url=$(curl -s https://api.github.com/repos/wunderio/silta-cli/releases/latest | jq -r '.assets[] | .browser_download_url | select(endswith("linux-amd64.tar.gz"))') + curl -sL $latest_release_url | tar xz -C ~/.local/bin + + silta version + - name: Helm and repository setup + run: | + # Install Helm 3 + HELM_VERSION=v3.6.3 + curl -o /tmp/helm.tar.gz https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz \ + && tar -zxvf /tmp/helm.tar.gz -C /tmp \ + && mv /tmp/linux-amd64/helm ~/.local/bin/helm \ + && helm repo add jetstack https://charts.jetstack.io \ + && helm repo add instana https://agents.instana.io/helm \ + && helm repo add nfs-subdir-external-provisioner 
https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner \ + && helm repo add twun https://helm.twun.io \ + && helm repo add bitnami https://charts.bitnami.com/bitnami \ + && helm repo add wunderio https://storage.googleapis.com/charts.wdr.io \ + && helm repo add percona https://percona.github.io/percona-helm-charts/ \ + && helm repo add mysql-operator https://mysql.github.io/mysql-operator/ \ + && helm repo add elastic https://helm.elastic.co \ + && helm repo add codecentric https://codecentric.github.io/helm-charts \ + && helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx \ + && helm repo add nginx-stable https://helm.nginx.com/stable \ + && helm plugin install https://github.com/quintush/helm-unittest --version 0.2.4 \ + && helm repo update + + - name: Download and start minikube + run: | + CLUSTER_DOCKER_REGISTRY=registry.${CLUSTER_DOMAIN}:80 + + curl -Lo ~/.local/bin/minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && chmod +x ~/.local/bin/minikube + minikube version + minikube start \ + --kubernetes-version "${{ matrix.kubernetes-version }}" \ + --insecure-registry "${CLUSTER_DOCKER_REGISTRY}" \ + --cni auto \ + --wait all + # Could use "medyagh/setup-minikube" but it does not have a way to pass "--insecure-registry" flag + # https://github.com/medyagh/setup-minikube/pull/33 + # - name: Start minikube 1.21.14 + # with: + # # "stable" for the latest stable build, or "latest" for the latest development build + # kubernetes-version: v1.21.14 + # insecure-registry: "registry.minikube.local.wdr.io:80" + # uses: medyagh/setup-minikube@master + - name: MetalLB setup + run: | + MINIKUBE_IP=$(minikube ip) + + ############## + # MetalLB setup + # https://github.com/kubernetes/minikube/issues/10307#issuecomment-1024575716 + + METALLB_IP_START=${MINIKUBE_IP} + METALLB_IP_END=${MINIKUBE_IP} + + minikube addons enable metallb + sleep 10 + + # Patch MetalLB config with updated IP address range + kubectl apply 
-f - -n metallb-system << EOF + apiVersion: v1 + kind: ConfigMap + metadata: + name: config + namespace: metallb-system + data: + config: | + address-pools: + - name: default + protocol: layer2 + addresses: + - ${METALLB_IP_START}-${METALLB_IP_END} + EOF + + # # Patch MetalLB images to use the correct registry + # # Workaround for https://github.com/metallb/metallb/issues/1862 + # # Remove once this is tagged and released (> v1.29.0) + # # https://github.com/kubernetes/minikube/pull/16056 + # image="quay.io/metallb/controller:v0.9.6@sha256:6932cf255dd7f06f550c7f106b9a206be95f847ab8cb77aafac7acd27def0b00" + # kubectl scale -n metallb-system deployment/controller --replicas=0 + # kubectl patch deployment -n metallb-system controller --type=json -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "'${image}'"}]' + # kubectl scale -n metallb-system deployment/controller --replicas=1 + # image="quay.io/metallb/speaker:v0.9.6@sha256:7a400205b4986acd3d2ff32c29929682b8ff8d830837aff74f787c757176fa9f" + # kubectl patch daemonset -n metallb-system speaker --type=json -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "'${image}'"}]' + + sleep 5 + + NAMESPACE=metallb-system + APP=metallb + TIMEOUT=30s + + function metallb_logs() { + echo "Timed out waiting for ${COMPONENT} to become ready" + kubectl get events -n ${NAMESPACE} + kubectl logs --sort-by='.metadata.creationTimestamp' -l app=${APP} -l component=${COMPONENT} -n ${NAMESPACE} + exit 1 + } + + for COMPONENT in controller speaker + do + kubectl wait \ + --for condition=ready pod \ + -l app=${APP} -l component=${COMPONENT} \ + -n ${NAMESPACE} \ + --timeout=${TIMEOUT} || metallb_logs + done + + - name: silta-cluster chart setup and test + run: | + + MINIKUBE_IP=$(minikube ip) + + helm upgrade --install \ + cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --create-namespace \ + --version v1.8.0 \ + --set installCRDs=true \ + --set 
global.logLevel=1 \ + --wait + + helm upgrade --install silta-cluster wunderio/silta-cluster \ + --create-namespace \ + --namespace silta-cluster \ + --set clusterDomain=${CLUSTER_DOMAIN} \ + --values silta-cluster/minikube.yml \ + --wait + + # Cluster landing page test + curl --resolve ${CLUSTER_DOMAIN}:443:${MINIKUBE_IP} https://${CLUSTER_DOMAIN} -ILk --fail + curl --resolve ${CLUSTER_DOMAIN}:80:${MINIKUBE_IP} --resolve ${CLUSTER_DOMAIN}:443:${MINIKUBE_IP} http://${CLUSTER_DOMAIN} -IL --fail + + - name: Install mysql operator + run: | + # Install CRD for mysql-operator (forked version) + kubectl apply -f https://raw.githubusercontent.com/wunderio/mysql-operator/fork/deploy/deploy-crds.yaml + + # Install mysql-operator (forked version) + helm upgrade --install mysql-operator mysql-operator/mysql-operator \ + --namespace mysql-operator --create-namespace \ + --set image.registry="wunderio" \ + --set image.repository="" \ + --set image.name="mysql-community-operator" \ + --set image.tag="8.3.0-2.1.2-mod" \ + --wait + + - name: Build Drupal chart images, deploy and test + run: | + + function info { + echo "Error occurred, printing debug info" + kubectl get pods -A + kubectl get events -A + kubectl get innodbcluster -A + } + + trap "info" ERR + + MINIKUBE_IP=$(minikube ip) + CLUSTER_DOCKER_REGISTRY=registry.${CLUSTER_DOMAIN}:80 + + # Composer install + # PHP_COMPOSER_VERSION=2.1.12 + # php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');" && \ + # php composer-setup.php --version=${PHP_COMPOSER_VERSION} --install-dir=$HOME/.local/bin --filename=composer && \ + # php -r "unlink('composer-setup.php');" && \ + # composer --version + + composer install -n --prefer-dist --ignore-platform-reqs --optimize-autoloader + + # Tunnel to in-cluster docker registry. Required due to docker push inability to use selfsigned/insecure repositories that ain't local + # Find a free port. 
Credit: stefanobaghino / https://unix.stackexchange.com/posts/423052/revisions + DOCKER_REGISTRY_PORT=$(comm -23 <(seq 5000 6000 | sort) <(ss -Htan | awk '{print $4}' | cut -d':' -f2 | sort -u) | shuf | head -n 1) + BRIDGED_DOCKER_REGISTRY="localhost:${DOCKER_REGISTRY_PORT}" + kubectl -n silta-cluster port-forward service/silta-cluster-docker-registry $DOCKER_REGISTRY_PORT:80 2>&1 >/dev/null & + + # Build images + + NGINX_IMAGE=/drupal-project-k8s/test-drupal-nginx:latest + PHP_IMAGE=/drupal-project-k8s/test-drupal-php:latest + SHELL_IMAGE=/drupal-project-k8s/test-drupal-shell:latest + + docker build --tag ${BRIDGED_DOCKER_REGISTRY}${NGINX_IMAGE} -f "silta/nginx.Dockerfile" ./web + docker image push ${BRIDGED_DOCKER_REGISTRY}${NGINX_IMAGE} + + docker build --tag ${BRIDGED_DOCKER_REGISTRY}${PHP_IMAGE} -f "silta/php.Dockerfile" . + docker image push ${BRIDGED_DOCKER_REGISTRY}${PHP_IMAGE} + + docker build --tag ${BRIDGED_DOCKER_REGISTRY}${SHELL_IMAGE} -f "silta/shell.Dockerfile" . + docker image push ${BRIDGED_DOCKER_REGISTRY}${SHELL_IMAGE} + + # Dependency build for local chart + helm dependency build "./charts/drupal" + + # Chart unit tests + helm unittest ./charts/drupal --helm3 + + # Dry-run drupal chart with test values + helm install --dry-run --generate-name ./charts/drupal --values charts/drupal/test.values.yaml + + silta ci release deploy \ + --release-name test \ + --chart-name ./charts/drupal \ + --branchname test \ + --silta-environment-name test \ + --nginx-image-url ${CLUSTER_DOCKER_REGISTRY}${NGINX_IMAGE} \ + --php-image-url ${CLUSTER_DOCKER_REGISTRY}${PHP_IMAGE} \ + --shell-image-url ${CLUSTER_DOCKER_REGISTRY}${SHELL_IMAGE} \ + --cluster-domain "${CLUSTER_DOMAIN}" \ + --cluster-type minikube \ + --db-root-pass "rootpw" \ + --db-user-pass "dbpw" \ + --gitauth-username "test" \ + --gitauth-password "test" \ + --namespace drupal-project-k8s \ + --helm-flags "--set ssl.issuer=selfsigned" \ + --silta-config silta/silta.yml,silta/silta.minikube.yml \ + 
--deployment-timeout 15m + + kubectl exec -it deploy/test-shell -n drupal-project-k8s -- drush si -y + + # Web request test + curl http://test.drupal-project-k8s.${CLUSTER_DOMAIN} \ + --user silta:demo --location-trusted \ + --head --insecure --location \ + --resolve test.drupal-project-k8s.${CLUSTER_DOMAIN}:80:${MINIKUBE_IP} \ + --resolve test.drupal-project-k8s.${CLUSTER_DOMAIN}:443:${MINIKUBE_IP} \ + --retry 5 --retry-delay 5 \ + --fail diff --git a/.gitignore b/.gitignore index fe83847df..2f246108e 100644 --- a/.gitignore +++ b/.gitignore @@ -29,4 +29,5 @@ node_modules # Ignore helm chart dependencies. charts/*/charts /.editorconfig -/.gitattributes \ No newline at end of file +/.gitattributes +helm-output.log diff --git a/charts/drupal/templates/_helpers.tpl b/charts/drupal/templates/_helpers.tpl index c619a11db..73cb90714 100644 --- a/charts/drupal/templates/_helpers.tpl +++ b/charts/drupal/templates/_helpers.tpl @@ -137,6 +137,19 @@ imagePullSecrets: name: {{ .Release.Name }}-mariadb key: mariadb-password {{- end }} +{{- if .Values.mysql.enabled }} +- name: MYSQL_DB_USER + value: "root" +- name: MYSQL_DB_NAME + value: "drupal" +- name: MYSQL_DB_HOST + value: "{{ include "silta.mysql-cluster.name" . }}.{{ .Release.Namespace }}.svc.cluster.local" +- name: MYSQL_DB_PASS + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-mysql + key: rootPassword +{{- end }} {{- if index ( index .Values "pxc-db" ) "enabled" }} - name: PXC_DB_USER value: "root" @@ -163,6 +176,19 @@ imagePullSecrets: name: {{ .Release.Name }}-mariadb key: mariadb-password {{- end }} +{{- if and .Values.mysql.enabled ( eq .Values.db.primary "mysql" ) }} +- name: DB_USER + value: "root" +- name: DB_NAME + value: "drupal" +- name: DB_HOST + value: "{{ include "silta.mysql-cluster.name" . 
}}.{{ .Release.Namespace }}.svc.cluster.local" +- name: DB_PASS + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-mysql + key: rootPassword +{{- end }} {{- if and ( index ( index .Values "pxc-db" ) "enabled" ) ( eq .Values.db.primary "pxc-db" ) }} - name: DB_USER value: "root" @@ -681,3 +707,8 @@ autoscaling/v2beta1 {{ fail "Cannot use domain prefixes together with domain masking"}} {{- end -}} {{- end -}} + +{{- define "silta.mysql-cluster.name" }} +{{- $releaseNameHash := sha256sum .Release.Name | trunc 3 }} +{{- (gt (len .Release.Name) 21) | ternary ( print (.Release.Name | trunc 18) $releaseNameHash ) .Release.Name }}-mysql +{{- end }} \ No newline at end of file diff --git a/charts/drupal/templates/drupal-deployment.yaml b/charts/drupal/templates/drupal-deployment.yaml index e15508a46..00c708b34 100644 --- a/charts/drupal/templates/drupal-deployment.yaml +++ b/charts/drupal/templates/drupal-deployment.yaml @@ -202,6 +202,34 @@ spec: values: - mariadb topologyKey: topology.kubernetes.io/zone + # Preferably keep pods on the same node as the mysql database. + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: release + operator: In + values: + - "{{ .Release.Name }}" + - key: component + operator: In + values: + - mysqld + topologyKey: kubernetes.io/hostname + # Preferably keep pods in the same zone as the mysql database.
+ - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: release + operator: In + values: + - "{{ .Release.Name }}" + - key: component + operator: In + values: + - mysqld + topologyKey: topology.kubernetes.io/zone podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 10 diff --git a/charts/drupal/templates/mysql-innodb.yaml b/charts/drupal/templates/mysql-innodb.yaml new file mode 100644 index 000000000..a72210553 --- /dev/null +++ b/charts/drupal/templates/mysql-innodb.yaml @@ -0,0 +1,77 @@ +{{- if .Values.mysql.enabled }} +{{- if not ( .Capabilities.APIVersions.Has "mysql.oracle.com/v2" ) }} +{{- fail "API mysql.oracle.com/v2 unavailable, ask cluster administrator to enable Mysql InnoDBCluster support in Silta cluster first!" }} +{{- end }} +{{- if or (gt (.Values.mysql.replicas | toString | atoi) 9) (lt (.Values.mysql.replicas | toString | atoi) 1) }} +{{- fail "Invalid mysql.replicas value, must be between 1 and 9" }} +{{- end }} +{{- if or (gt (.Values.mysql.router.replicas | toString | atoi) 9) (lt (.Values.mysql.router.replicas | toString | atoi) 1) }} +{{- fail "Invalid mysql.router.replicas value, must be between 1 and 9" }} +{{- end }} +apiVersion: mysql.oracle.com/v2 +kind: InnoDBCluster +metadata: + name: {{ include "silta.mysql-cluster.name" . 
}} + labels: + release: {{ .Release.Name }} + app.kubernetes.io/instance: {{ .Release.Name }} + annotations: + # Trigger resource update to scale deployment back up when it was scaled down + last-update: {{ dateInZone "2006-01-02T15:04:05.999Z" (now) "UTC" }} + +spec: + # See: https://dev.mysql.com/doc/mysql-operator/en/mysql-operator-properties.html#mysql-operator-title-spec-innodbcluster + {{- if .Values.mysql.version }} + version: {{ .Values.mysql.version }} + {{- end }} + instances: {{ .Values.mysql.replicas }} + router: + instances: {{ .Values.mysql.router.replicas }} + dpLabels: + release: {{ .Release.Name }} + app.kubernetes.io/instance: {{ .Release.Name }} + podSpec: + containers: + - name: router + resources: + {{- .Values.mysql.router.resources | toYaml | nindent 10 }} + secretName: {{ .Release.Name }}-mysql + tlsUseSelfSigned: true + stsLabels: + release: {{ .Release.Name }} + app.kubernetes.io/instance: {{ .Release.Name }} + datadirVolumeClaimLabels: + release: {{ .Release.Name }} + app.kubernetes.io/instance: {{ .Release.Name }} + datadirVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 5Gi + podSpec: + nodeSelector: + {{- .Values.mysql.nodeSelector | toYaml | nindent 8 }} + tolerations: + {{- include "drupal.tolerations" .Values.mysql.nodeSelector | nindent 8 }} + containers: + - name: mysql + resources: + {{- .Values.mysql.resources | toYaml | nindent 8 }} + {{- if .Values.mysql.mycnf }} + mycnf: + {{ .Values.mysql.mycnf | toYaml | nindent 4 }} + {{- end }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-mysql + labels: + release: {{ .Release.Name }} + app.kubernetes.io/instance: {{ .Release.Name }} +type: Opaque +data: + rootUser: {{ print "root" | b64enc | quote }} + rootPassword: {{ .Values.mysql.secrets.root | b64enc | quote }} + rootHost: {{ print "%" | b64enc | quote }} +{{- end }} diff --git a/charts/drupal/values.schema.json b/charts/drupal/values.schema.json index 
2c1f8bfc7..ab40ef3de 100644 --- a/charts/drupal/values.schema.json +++ b/charts/drupal/values.schema.json @@ -483,6 +483,14 @@ "enabled": { "type": "boolean" } } }, + "mysql": { + "type": "object", + "properties": { + "enabled": { "type": "boolean" }, + "replicas": { "type": "integer" }, + "router": { "type": "object", "properties": { "replicas": { "type": "integer" } } } + } + }, "pxc-db": { "type": "object", "properties": { @@ -494,7 +502,7 @@ "properties": { "primary": { "type": "string", - "enum": [ "", "mariadb", "pxc-db" ] + "enum": [ "", "mariadb", "mysql", "pxc-db" ] } } }, diff --git a/charts/drupal/values.yaml b/charts/drupal/values.yaml index eb1b88793..fdead6908 100644 --- a/charts/drupal/values.yaml +++ b/charts/drupal/values.yaml @@ -749,6 +749,42 @@ db: # Available options: mariadb, pxc-db primary: mariadb +# Mysql InnoDBCluster +# see: https://github.com/mysql/mysql-operator +mysql: + enabled: false + # version: 8.3.0 + replicas: 1 + router: + replicas: 1 + resources: + requests: + cpu: 50m + memory: 16Mi + limits: + memory: 32Mi + secrets: + root: drupal + nodeSelector: {} + resources: + requests: + cpu: 100m + memory: 800Mi + limits: + cpu: 500m + memory: 1600Mi + # see: https://dev.mysql.com/doc/mysql-operator/en/mysql-operator-properties.html#mysql-operator-spec-innodbclusterspeclogs + # mycnf: | + # [mysqld] + # innodb_buffer_pool_size=200M + # innodb_log_file_size=2G + # Only provisioned initially, updates are not supported. Patch persistent volume claim if needed.
+ datadirVolumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 2Gi + # Percona XtraDB Cluster # see: https://github.com/percona/percona-helm-charts/blob/4ec113774f41a31167c2f9efd4c93f3d387e7f04/charts/pxc-db/values.yaml pxc-db: diff --git a/silta-cluster/minikube.yml b/silta-cluster/minikube.yml new file mode 100644 index 000000000..247e377ab --- /dev/null +++ b/silta-cluster/minikube.yml @@ -0,0 +1,60 @@ +traefik: + replicas: 1 + ssl: + enabled: true + service: + annotations: + metallb.universe.tf/allow-shared-ip: "shared" + # metallb shared ip works only with "Cluster" TP + externalTrafficPolicy: Cluster + +ssl: + enabled: true + email: admin@example.com + issuer: selfsigned + +csi-rclone: + enabled: true + params: + remote: "s3" + remotePath: "projectname" + + # Minio as S3 provider + s3-provider: "Minio" + s3-endpoint: "http://silta-cluster-minio:9000" + # Default credentials of minio chart https://github.com/minio/charts/blob/master/minio/values.yaml + s3-access-key-id: "YOURACCESSKEY" + s3-secret-access-key: "YOURSECRETKEY" + # nodePlugin: + # kubeletBasePath: "/var/snap/microk8s/common/var/lib/kubelet" + +minio: + enabled: true + resources: + requests: + memory: 512M + persistence: + size: 5Gi + +gitAuth: + enabled: true + port: 2222 + keyserver: + enabled: false + authorizedKeys: [] + annotations: + metallb.universe.tf/allow-shared-ip: "shared" + # metallb shared ip works only with "Cluster" TP + externalTrafficPolicy: Cluster + +sshKeyServer: + enabled: false + +# Deployment remover +deploymentRemover: + enabled: false + +docker-registry: + enabled: true + secrets: + htpasswd: false diff --git a/silta/php.Dockerfile b/silta/php.Dockerfile index 61e1d5fa3..7b5159b6f 100644 --- a/silta/php.Dockerfile +++ b/silta/php.Dockerfile @@ -1,6 +1,10 @@ # Dockerfile for the Drupal container. 
FROM wunderio/silta-php-fpm:8.2-fpm-v1 +# Fixes ERROR 1045 (28000): Plugin caching_sha2_password could not be loaded: Error loading shared library /usr/lib/mariadb/plugin/caching_sha2_password.so: No such file or directory +# Fixes ERROR 2059 (HY000): Authentication method dummy_fallback_auth is not supported +RUN apk add mariadb-connector-c + COPY --chown=www-data:www-data . /app USER www-data diff --git a/silta/shell.Dockerfile b/silta/shell.Dockerfile index 5c7dc7b6e..04223b6dc 100644 --- a/silta/shell.Dockerfile +++ b/silta/shell.Dockerfile @@ -1,4 +1,8 @@ # Dockerfile for the Drupal container. FROM wunderio/silta-php-shell:php8.2-v1 +# Fixes ERROR 1045 (28000): Plugin caching_sha2_password could not be loaded: Error loading shared library /usr/lib/mariadb/plugin/caching_sha2_password.so: No such file or directory +# Fixes ERROR 2059 (HY000): Authentication method dummy_fallback_auth is not supported +RUN apk add mariadb-connector-c + COPY --chown=www-data:www-data . /app diff --git a/silta/silta.minikube.yml b/silta/silta.minikube.yml new file mode 100644 index 000000000..e69de29bb diff --git a/silta/silta.yml b/silta/silta.yml index 8feafebe3..f1eb6e3ef 100644 --- a/silta/silta.yml +++ b/silta/silta.yml @@ -5,10 +5,10 @@ # for all possible options. varnish: - enabled: true + enabled: false elasticsearch: - enabled: true + enabled: false memcached: enabled: false @@ -55,3 +55,25 @@ php: # The ~ symbol will be replaced by a random digit from 0 to 9. # This will avoid running all cron jobs at the same time. schedule: '~ 0 31 2 *' + +db: + primary: mysql + +mysql: + enabled: true + replicas: 3 + router: + replicas: 2 + # nodeSelector: + # silta-role: drupal + # resources: + # requests: + # cpu: 200m + # memory: 256Mi + # limits: + # cpu: 400m + # memory: 2Gi + # TODO: test mycnf & logs + +mariadb: + enabled: false